From 3ae87d522265ef0359eb1410be5d5cface80179a Mon Sep 17 00:00:00 2001
From: Jose Jaime Valenciano
Date: Fri, 1 Jul 2022 10:59:44 -0600
Subject: [PATCH] Sync upstream telegraf (#15)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* fix: update go.opentelemetry.io/collector/pdata from v0.48.0 to v0.49.0 (#10984)
* docs: add missing slash in mongodb readme (#10994)
* chore: update pull request with master when running CI (#10993)
* chore: add readme linter (#10916)
* fix: correctly parse various numeric forms (#10923)
* fix: add mariadb_dialect to address the MariaDB differences in INNODB_METRICS (#10486)
* docs: correct influx parser type setting in README (#11004)
* fix: use correct auth token with consul_agent (#11001)
* docs: add openstack metadata external plugin (#10999)
* fix: use sprint to cast to strings in gnmi (#11010)
* chore: Fix readme linter errors for output plugins (#10951)
* fix: allow Makefile to work on Windows (#11015)
* fix(cmd): Also allow 0 outputs when using test-wait parameter (#11013)
* fix: add mutex to gnmi lookup map (#11008)
* fix: bump github.com/aws/aws-sdk-go-v2/config from 1.13.1 to 1.15.3 (#10998)
* Update changelog (cherry picked from commit 234a4489b716ca1212d1f7a935805793a225c147)
* fix: remove duplicate influxdb listener writes (#10976)
* fix: use external xpath parser for gnmi (#11024)
* chore: Adding influx's semantic commit and PR message checker, so we … (#11009)
* chore: Adding influx's semantic commit and PR message checker, so we can deprecate semantic-pull-requests
* feat: change commit history for semantic validation to 1 (last commit)
* chore: remove influxdata/validate-semantic-github-messages github workflow (#11036)
* feat: create and push nightly docker images to quay.io (#11000)
* fix: reduce log level in disk plugin (#10925)
* chore: increase timeout for darwin packaging (#11041)
* chore: enable linting of shell scripts (#11031)
* feat(outputs.http): Support configuration of `MaxIdleConns` and `MaxIdleConnsPerHost` (#10954) (a transport sketch follows this list)
* fix: datadog count metrics (#10979)
* Update changelog (cherry picked from commit 53863d2eb563b19742f89bccff08a09811c205da)
* style: align plugin renaming (#10868)
* chore(inputs/disk): add deprecation notice to legacy mountpoints setting (#10948)
* fix: have telegraf service wait for network up (#11042)
* feat: add influx semantic commits checker, checks only last commit.
  (#11037)
* fix: re-init azure monitor http client on context deadline error (#11030)
* fix: do not error when closing statsd network connection (#11043)
* fix: deprecate useless database config option (#11044)
* fix(inputs.couchbase): Don't assume metrics will all be of the same length (#11045)
* fix(inputs.couchbase): Don't assume metrics will all be of the same length
* fix: move to one line
* feat(exec, execd): add an option to pass a custom environment to their child process (#11049) (a sketch of the underlying mechanism follows this list)
* refactor: replace strings.Replace with strings.ReplaceAll (#11079)
* docs: correct copy-and-paste of udp to tcp (#11080)
* chore: update opentelemetry plugins (#11085)
* fix(outputs.Wavefront): If no "host" tag is provided, do not add "telegraf.host" tag (#11078)
Co-authored-by: ffaroo1
* fix: bump github.com/showwin/speedtest-go from 1.1.4 to 1.1.5 (#10722)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: MyaLongmire
* docs: note that listsnapshots causes zfs parse failures (#11091)
* feat(inputs.vsphere): Collect resource pools metrics and add resource pool tag in VM metrics (#10574)
* Collect Resource Pool metrics; Add rpname tag on VM metrics
* Update vSphere readme file
* Update vSphere readme file
* Correct typo in vSphere Readme
* Correct Markdown of metrics.md
* Fix metrics file
* Fix code in endpoint (filter); add some tests
* Update plugins/inputs/vsphere/endpoint.go
  That's true, I commit this suggestion
Co-authored-by: Sebastian Spaink <3441183+sspaink@users.noreply.github.com>
* Removed Context and Endpoint from getResourcePoolName func
Co-authored-by: Simon LAMBERT
Co-authored-by: Sebastian Spaink <3441183+sspaink@users.noreply.github.com>
* feat: add mount option filtering to disk plugin (#11039)
* Update changelog (cherry picked from commit c07868f5a69e89630b98a88ddfb7d66571949df3)
* fix: Output erroneous namespace and continue instead of error out (#11069)
* fix: check net.Listen() error in tests (#11093)
* docs: fix socket_writer output format link (#11101)
* feat: Artifactory Webhook Receiver (#10918)
* test: add test for mysql gatherGlobalVariables using sql-mock (#10987)
* chore(inputs/file): More clear error messages (#11104)
* fix: Update gopsutil from v3.22.3 to v3.22.4 to allow for HOST_PROC_MOUNTINFO.
  (#11107)
* fix: bump github.com/wavefronthq/wavefront-sdk-go from 0.9.10 to 0.9.11 (#10970)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* fix: log instance name in skip warnings (#10995)
* feat(parsers/logfmt): Add tag support (#11060)
* feat: allow other fluentd metrics apart from retry_count, buffer_queu… (#11056)
* fix(parsers/nagios): metrics will always return a supported status co… (#11062)
Co-authored-by: Morten Urban
* test: remove unused riemann from docker-compose (#11118)
* fix: elasticsearch output float handling test (#11120)
* test: remove unnecessary flag in sql input (#11115)
* test: use supported version of elasticsearch (#11111)
* chore(processors): migrate sample configs into separate files (#11125)
* chore(aggregators): migrate sample configs into separate files (#11130)
* chore(outputs): migrate sample configs into separate files (#11131)
* chore(inputs_m-z): migrate sample configs into separate files (#11133)
* chore(inputs_a-l): migrate sample configs into separate files (#11132)
* fix: use readers over closers in http input (#11083)
* feat: add slab metrics input plugin (#11075)
Co-authored-by: reimda
Co-authored-by: Joshua Powers
* feat: do not error if no nodes found for current config with xpath parser (#11102)
* feat: [inputs/burrow] fill more http transport parameters (#6948)
Co-authored-by: Sven Rebhan <36194019+srebhan@users.noreply.github.com>
* fix: avoid calling sadc with invalid 0 interval (#11140)
* fix(inputs.burrow): Move Dialer to variable and run `make fmt` (#11149)
* fix: doc interval setting for internet speed plugin (#11150)
* test: switch internet speed to enable file download (#11146)
* test: update mongodb output to use test containers (#11137)
* test: force elasticsearch to index batch data (#11153)
* docs: update review docs (#11147)
* fix: Improve slab testing without sudo. (#11151)
* feat: add external huebridge input plugin (#11159)
* test: add generic test-container code for re-use, migrate mysql (#11157)
* fix: bump github.com/aws/aws-sdk-go-v2/config from 1.15.3 to 1.15.7 (#11166)
* test: avoid data race in tcp-listener test (#11144)
* feat(intel_powerstat): add Max Turbo Frequency and introduce improvements (#11035)
* chore: add readme linter to CI (#11020)
* feat(inputs.cpu): Add tags with core id or physical id to cpus (#11141)
* fix: Remove any content type from prometheus accept header (#11082)
  In #6745, the prometheus accept header was changed to accept any media type. However, our prometheus plugin only accepts text. With the release of newer versions of prometheus, the OpenMetrics type is now available and could potentially be set up as the exclusive response type. As this new content type is not supported, Telegraf should not accept it. (A hedged sketch of the header change follows this list.)
  The original issue, #6523, was filed around getting a 406. The issue had comments from the rabbit-mq maintainers, who made changes to their code to be less rigid, resolving the issue. The change to telegraf was made afterwards anyway.
  fixes: #10248
* fix: search services file in /etc/services and fall back to /usr/etc/services (#11179)
* chore: Embed sample configurations into README for inputs (#11136)
* test: migrate crate to test-containers code (#11165)
* test: migrate nats to test-containers (#11170)
* test: migrate mqtt to test containers (#11172)
* test: migrate redis to test-containers (#11174)
* test: migrate memcached to test-containers (#11176)
* fix: Convert slab plugin to new sample.conf.
  (#11181)
* test: migrate aerospike to test-containers (#11177)
* feat: add field key option to set event partition key (#11076)
* test: migrate opcua to test-containers (#11171)
* test: migrate nsq to test containers (#11173)
* test: migrate openldap tests to test-containers (#11169)
* feat: Google API Auth (#11084)
* chore: embed sample configurations into README for outputs (#11182)
* chore: Embed sample configurations into README for processors (#11189)
* chore: Embed sample configurations into README for aggregators (#11190)
* test: remove rabbitmq container not used (#11175)
* test: migrate pgbouncer to test-containers (#11186)
* fix(inputs/snmp): Reconnect TCP agents if needed (#11163)
* fix: update golangci-lint from v1.45.2 to v1.46.2 (#11191)
* fix: redis plugin goroutine leak triggered by auto reload config mechanism (#11143)
* feat: Add constant 'algorithm' to the mock plugin (#11188)
* fix(plugins/amqp): move from `streadway/amqp` to `rabbitmq/amqp091-go` (#11192)
* test: migrate zookeeper to test-containers (#11185)
* fix: bump github.com/sensu/sensu-go/api/core/v2 from 2.13.0 to 2.14.0 (#11021)
* docs: remove recommendation for prometheus metric_version=2 (#11158)
* chore: update OpenTelemetry plugins (#11194)
* fix: bump go.opentelemetry.io/otel/metric from 0.28.0 to 0.30.0 (#11088)
* chore: correctly spell embed (#11200)
* test: actually skip flaky snmp test (#11199)
  This test had a short skip added to it four years ago and a comment that the test has random failures. While working on the integration tests, which run all tests, this test started showing up in the errors. While the two asserts at the end could possibly be updated, it is worth having someone look deeper into why this change is required. In order to get integration tests running, this skips the test always.
* test: refactor testcontainer port lookup (#11198)
* fix(inputs/snmp): switch new Reconnect method to be a value receiver (#11197)
* chore: update gopkg.in/yaml.v3 from v3.0.0 to v3.0.1 (#11213)
* chore: update github.com/opencontainers/runc from v1.0.2 to v1.1.2 (#11212)
* test: enable logging with testcontainers (#11211)
* fix: bump github.com/nats-io/nats-server/v2 from 2.7.4 to 2.8.4 (#11221)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sebastian Spaink
* fix: Update sample.conf for prometheus (#11217)
* fix: (outputs/sql) table existence cache (#10812)
* fix(inputs/modbus): #11105 fix requests starting with an omitted field (#11202)
* feat(inputs.nginx_plus_api): Gather slab metrics (#10448)
* fix: Upgrade xpath and fix code (#11241)
* chore: Bump riemann-go-client and remove replacement (#11236)
* docs: Initial update of integration test docs (#11210)
* test: migrate kafka to testcontainers (#11206)
* test: migrate mcrouter to test-containers (#11208)
* test: migrate postgres to testcontainers (#11209)
* chore: Bump apcupsd and remove the replacement as the issue is fixed upstream.
  (#11239)
* test: migrate elasticsearch to testcontainers (#11207)
* feat(inputs.sqlserver): Update query store and latch performance counters (#11216)
* fix: update moby/ipvs dependency from v1.0.1 to v1.0.2 (#11242)
* test: remove docker-compose.yml test file (#11243)
* docs: update test-container docs (#11244)
* fix: re-add event to splunk serializer (#11237)
* chore: upgrade windows circleci size (#11249)
* chore: rename circleci executor (#11247)
* chore: update go from v1.18.1 to v1.18.3 (#11248)
* test: harden running of testcontainer integration tests (#11245)
* test: add circle ci integration testing on PRs (#11246)
* test: remove restore integration test cache (#11255)
* feat(intel_powerstat): add uncore frequency metrics (#11254)
* docs: add missing word <3 (#11262)
* fix: update modernc.org/sqlite from v1.10.8 to v1.17.3 (#11260)
* chore: Fix readme linter errors for processor, aggregator, and parser plugins (#10960)
* fix: bump github.com/tidwall/gjson from 1.10.2 to 1.14.1 (#11264)
* Bump github.com/tidwall/gjson from v1.10.2 to v1.14.1
* Fix node references which are switched to parent relative now.
* fix: update github.com/containerd/containerd from v1.5.11 to v1.5.13 (#11266)
* chore: Fix readme linter errors for input plugins A-D (#10964)
* chore: Fix readme linter errors for input plugins E-L (#11214)
* fix: remove full access permissions (#11261)
* fix: add missing build constraints for sqlite (#11272)
* feat: Migrate xpath parser to new style (#11218)
* fix: Always build README-embedder for host-architecture (#11253)
* chore: Fix readme linter errors for input plugins M-Z (#11274)
* fix(inputs/directory_monitor): Add support for multiline file parsing (#11234)
* test: add install go for linux, use in integration tests (#11281)
* Update build version to 1.24.0
* Update changelog for v1.23.0 (cherry picked from commit 7317a819f0b6d04b0f5b18bfa8f463536384fb1c)
* fix: Don't rebase on master when building packages (#11291)
* fix: Remove all rebase logic from CI (#11293)
* fix: bump cloud.google.com/go/monitoring from 1.2.0 to 1.5.0 (#11295)
* feat(inputs.x509_cert): add smtp protocol (#11271)
Co-authored-by: dreiekk
* fix(parsers/xpath): Reduce debug messages when empty selection is allowed (#11302)
* feat: add default appType as config option to groundwork output (#11300)
* feat: Make the command "config" a subcommand (#11282)
* feat: make the command "config" a subcommand
* fix: backwards compatible support telegraf *filters* config
* fix: Prevent concurrent map writes to c.UnusedFields (#11311)
* docs: explain directly connecting to mongo node (#11314)
* chore: Remove prefix to use the default `chore(deps)` (#11315)
* test: update wait for statement for postgres (#11309)
* test: mark integration tests correctly (#11317)
* chore(deps): Bump github.com/aws/aws-sdk-go-v2/credentials from 1.12.2 to 1.12.5 (#11297)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore(deps): Bump google.golang.org/grpc from 1.46.2 to 1.47.0 (#11318)
* chore(deps): Bump k8s.io/client-go from 0.23.3 to 0.24.1 (#11223)
* chore(deps): Bump github.com/go-logfmt/logfmt from 0.5.0 to 0.5.1 (#11299)
* chore: make apt-get instructions consistent and add GPG fingerprint (#11326)
* test: add coveralls coverage to master (#11256)
* chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/dynamodb (#11328)
* chore(deps): Bump go.mongodb.org/mongo-driver from 1.9.0 to 1.9.1 (#11320)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore(deps): Bump github.com/gophercloud/gophercloud from 0.24.0 to 0.25.0 (#11321)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore(deps): Bump google.golang.org/api from 0.74.0 to 0.84.0 (#11338)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: MyaLongmire
* chore: fix typo (recieve -> receive) (#11341)
* chore(deps): Bump github.com/fatih/color from 1.10.0 to 1.13.0 (#11340)
* fix: filter out views in mongodb lookup (#11280)
* chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/timestreamwrite from 1.3.2 to 1.13.6 (#11322)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* docs: Update etc/telegraf.conf and etc/telegraf_windows.conf (#11344)
Co-authored-by: Tiger Bot <>
* feat(redis): add Redis 6 ACL auth support (#9333)
* feat(x509_cert): add proxy support (#9319)
* chore(deps): Bump github.com/Shopify/sarama from 1.32.0 to 1.34.1 (#11319)
* docs: Sync sample.conf for recent features (#11348)
* chore: move agent config into separate file (#11337)
* feat: Migrate json parser to new style (#11226)
* docs: Update etc/telegraf.conf and etc/telegraf_windows.conf (#11351)
Co-authored-by: Tiger Bot <>
Co-authored-by: Sebastian Spaink
* fix: don't require listeners to be present in overview (#9315)
* feat: Add CSV serializer (#11307)
* fix: Restore sample configurations broken during initial migration (#11276)
* feat: Migrate json_v2 parser to new style (#11343)
* docs: Update etc/telegraf.conf and etc/telegraf_windows.conf (#11365)
Co-authored-by: Tiger Bot <>
* chore(deps): Bump github.com/dynatrace-oss/dynatrace-metric-utils-go from 0.3.0 to 0.5.0 (#11342)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore(deps): Bump github.com/nats-io/nats.go from 1.15.0 to 1.16.0 (#11339)
* chore(deps): Bump cloud.google.com/go/pubsub from 1.18.0 to 1.22.2 (#11349)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* docs: swap bytes sent/recv descriptions in nfsclient (#11376)
* chore(deps): Bump go.opentelemetry.io/collector/pdata from 0.52.0 to 0.54.0 (#11369)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore(deps): Bump github.com/jackc/pgx/v4 from 4.15.0 to 4.16.1 (#11346)
* chore(deps): Bump cloud.google.com/go/bigquery from 1.8.0 to 1.33.0 (#11379)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore(deps): Bump github.com/Azure/azure-kusto-go from 0.6.0 to 0.7.0 (#11378)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* feat: migrate wavefront parser to new style (#11374)
* feat: allow collecting node-level metrics for Couchbase buckets (#9717)
* feat: Migrate collectd parser to new style (#11367)
* chore(deps): Bump cloud.google.com/go/pubsub from 1.22.2 to 1.23.0 (#11394)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/kinesis (#11380)
* chore: Remove 'github.com/satori/go.uuid' replacement (#11240)
* chore: Remove 'github.com/cisco-ie/nx-telemetry-proto' replacement (#11401)
* chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/ec2 from 1.1.0 to 1.46.0 (#11382)
* test: add nightly package testing to amd64/linux (#11377)
* fix: resolve jolokia2 panic on null response (#11397)
* fix: Sync back sample.confs for inputs.couchbase and outputs.groundwork.
  (#11413)
* docs: Update etc/telegraf.conf and etc/telegraf_windows.conf (#11414)
Co-authored-by: Tiger Bot <>
* chore(deps): Bump github.com/golang-jwt/jwt/v4 from 4.4.1 to 4.4.2 (#11395)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* chore(deps): Bump github.com/vmware/govmomi from 0.27.3 to 0.28.0 (#11396)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sven Rebhan <36194019+srebhan@users.noreply.github.com>
* chore: move printing of sample config file out of config.go (#11409)
* feat: migrate form_urlencoded parser to new style (#11381)
* feat(outputs/wavefront): make maximum http batch size configurable (#11201)
* docs: Update etc/telegraf.conf and etc/telegraf_windows.conf (#11419)
Co-authored-by: Tiger Bot <>
* feat: migrate value parser to new style (#11407)
* feat: Migrate graphite parser to new style (#11405)
* fix: Bring back old xpath section names (#11335)
* feat: Migrate logfmt parser to new style (#11366)
* test: exchange confused parameters (expected vs actual) (#11422)
* chore(deps): Bump github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.15.4 to 1.15.8 (#11415)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* feat: Migrate dropwizard parser to new style (#11371)
* feat: migrate grok to new parser style (#11408)
* chore(deps): Bump github.com/influxdata/influxdb-observability/otel2influx from 0.2.21 to 0.2.22 (#11416)
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
* test: run check-update on dnf/yum (#11430)
* feat: adding aws metric streams input plugin (#11233)
* docs: Update etc/telegraf.conf and etc/telegraf_windows.conf (#11431)
Co-authored-by: Tiger Bot <>
* Remove added modules
* Add again missing modules
* Fix README for compliance
* Reduce lines length
* Fix trailing spaces and indentation
* Fix code warnings
* Update go sum
* Update licenses for dependencies
* Fix race in test
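For the `outputs.http` connection-pool options above, here is a minimal sketch of how `MaxIdleConns` and `MaxIdleConnsPerHost` map onto Go's `net/http` transport; the helper name and values are illustrative, not the plugin's actual code.

```go
package main

import (
	"net/http"
	"time"
)

// newPooledClient is a hypothetical helper showing the two knobs the
// outputs.http options expose: both are plain http.Transport fields.
func newPooledClient(maxIdle, maxIdlePerHost int) *http.Client {
	return &http.Client{
		Timeout: 5 * time.Second,
		Transport: &http.Transport{
			MaxIdleConns:        maxIdle,        // idle connections kept across all hosts
			MaxIdleConnsPerHost: maxIdlePerHost, // per-host idle pool (net/http defaults to 2)
		},
	}
}

func main() {
	client := newPooledClient(100, 10)
	_ = client
}
```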
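For the `exec`/`execd` environment option above, a sketch of the underlying `os/exec` mechanism the feature builds on: a child process sees exactly the slice assigned to `cmd.Env`. The variable names here are made up for illustration.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("env")
	// Inherit the parent environment, then append custom entries;
	// if cmd.Env were left nil, the child would inherit implicitly.
	cmd.Env = append(os.Environ(), "PLUGIN_MODE=test", "RETRIES=3")
	out, err := cmd.Output()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Print(string(out))
}
```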
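For the prometheus accept-header fix above, a hedged sketch of the idea: advertise only the text exposition format so a server cannot negotiate an unsupported type. The URL and exact header value are illustrative, not the plugin's literal constants.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://example.com:9273/metrics", nil)
	if err != nil {
		panic(err)
	}
	// Before the fix the header also offered "*/*", which allowed a
	// server to answer with OpenMetrics; the parser only handles the
	// plain-text format, so only that type is advertised here.
	req.Header.Set("Accept", "text/plain;version=0.0.4")
	fmt.Println(req.Header.Get("Accept"))
}
```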
Co-authored-by: Jacob Marble
Co-authored-by: Adam Zwakenberg
Co-authored-by: Sebastian Spaink <3441183+sspaink@users.noreply.github.com>
Co-authored-by: reimda
Co-authored-by: Joshua Powers
Co-authored-by: Marc <980978+MarcHagen@users.noreply.github.com>
Co-authored-by: Alexander Kapshuna
Co-authored-by: Adam Rowan <92474478+bear359@users.noreply.github.com>
Co-authored-by: Dmitry Lavrukhin <34265546+lawdt@users.noreply.github.com>
Co-authored-by: bewing
Co-authored-by: Thomas Casteleyn
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Sebastian Spaink
Co-authored-by: Tyson Kamp
Co-authored-by: RaviKiran K
Co-authored-by: Jimmy Rimmer <74691101+jrimmer-housecallpro@users.noreply.github.com>
Co-authored-by: M.D
Co-authored-by: Eng Zer Jun
Co-authored-by: Peter (Stig) Edwards
Co-authored-by: Farukh Ali
Co-authored-by: ffaroo1
Co-authored-by: MyaLongmire
Co-authored-by: 6monlambert <32525162+6monlambert@users.noreply.github.com>
Co-authored-by: Simon LAMBERT
Co-authored-by: Nathan Ferch
Co-authored-by: Sergey Lanzman
Co-authored-by: lambdaq
Co-authored-by: sammcadams-8451 <44906845+sammcadams-8451@users.noreply.github.com>
Co-authored-by: Felix Edelmann
Co-authored-by: Sven Rebhan <36194019+srebhan@users.noreply.github.com>
Co-authored-by: Kuldeep Doneriya <101704094+dkuldeep22@users.noreply.github.com>
Co-authored-by: Sakerdotes
Co-authored-by: Morten Urban
Co-authored-by: Nobuhiro MIKI
Co-authored-by: Sokolov Yura
Co-authored-by: Holger
Co-authored-by: bkotlowski
Co-authored-by: Martin Molnar
Co-authored-by: Heiko Schlittermann
Co-authored-by: crflanigan <69858641+crflanigan@users.noreply.github.com>
Co-authored-by: zhiyuan-mojie <351843010@qq.com>
Co-authored-by: Ruoshan Huang
Co-authored-by: TimurDela <32736336+TimurDela@users.noreply.github.com>
Co-authored-by: glennlod <81913666+glennlod@users.noreply.github.com>
Co-authored-by: David Barbarin <68589619+dba-leshop@users.noreply.github.com>
Co-authored-by: Jan-Gerd Tenberge <97132060+ns-jtenberge@users.noreply.github.com>
Co-authored-by: skillor
Co-authored-by: dreiekk
Co-authored-by: Vladislav Senkevich
Co-authored-by: Jamie Strandboge
Co-authored-by: Julien Pivotto
Co-authored-by: telegraf-tiger[bot] <76974415+telegraf-tiger[bot]@users.noreply.github.com>
Co-authored-by: Alexander Krantz
Co-authored-by: papapiya <402561078@qq.com>
Co-authored-by: Luke Winikates <521457+LukeWinikates@users.noreply.github.com>
Co-authored-by: Cillian McCabe
---
 .circleci/config.yml | 829 +-
 .gitattributes | 1 +
 .github/ISSUE_TEMPLATE/BUG_REPORT.yml | 74 +
 .github/ISSUE_TEMPLATE/Bug_report.md | 46 -
 .github/PULL_REQUEST_TEMPLATE.md | 27 +-
 .github/dependabot.yml | 9 +
 .github/workflows/golangci-lint.yml | 36 +
 .github/workflows/linter.yml | 59 +
 .github/workflows/readme-linter.yml | 23 +
 .github/workflows/semantic.yml | 15 +
 .gitignore | 8 +
 .golangci.yml | 135 +
 .markdownlint.yml | 6 +
 CHANGELOG.md | 1644 +-
 CONTRIBUTING.md | 96 +-
 EXTERNAL_PLUGINS.md | 39 +-
 Makefile | 472 +-
 README.md | 488 +-
 SECURITY.md | 6 +
 accumulator.go | 2 +-
 agent/accumulator.go | 7 +-
 agent/agent.go | 188 +-
 agent/agent_posix.go | 1 +
 agent/agent_windows.go | 1 +
 agent/tick.go | 83 +-
 agent/tick_test.go | 228 +-
 appveyor.yml | 35 -
 assets/GopherAndTiger.png | Bin 0 -> 74224 bytes
 assets/TelegrafTiger.png | Bin 0 -> 7748 bytes
 assets/windows/icon.icns | Bin 0 -> 508472 bytes
 assets/windows/tiger.ico | Bin 0 -> 17598 bytes
 build_version.txt | 2 +-
 cmd/telegraf/README.md | 1 +
 cmd/telegraf/telegraf.go | 308 +-
 cmd/telegraf/telegraf_posix.go | 5 +-
 cmd/telegraf/telegraf_windows.go | 57 +-
 config/README.md | 1 +
 config/aws/credentials.go | 95 +-
 config/config.go | 2286 +-
 config/config_test.go | 690 +-
 config/deprecation.go | 330 +
 config/printer/agent.conf | 90 +
 config/printer/printer.go | 381 +
 config/testdata/addressbook.proto | 28 +
 config/testdata/azure_monitor.toml | 4 +
 config/testdata/parsers_new.toml | 60 +
 config/testdata/parsers_old.toml | 60 +
 config/testdata/single_plugin_env_vars.toml | 15 +
 config/testdata/special_types.key | 5 +
 config/testdata/special_types.pem | 11 +
 config/testdata/special_types.toml | 11 +-
 config/testdata/telegraf-agent.toml | 8 -
 config/testdata/wrong_cert_path.toml | 5 +
 config/types.go | 46 +-
 config/types_test.go | 59 +
 docker-compose.yml | 105 -
 docs/AGGREGATORS.md | 156 +-
 docs/AGGREGATORS_AND_PROCESSORS.md | 14 +-
 docs/COMMANDS_AND_FLAGS.md | 68 +
 docs/CONFIGURATION.md | 113 +-
 docs/DATA_FORMATS_INPUT.md | 9 +-
 docs/DATA_FORMATS_OUTPUT.md | 8 +-
 docs/DOCKER.md | 3 +
 docs/EXTERNAL_PLUGINS.md | 84 +-
 docs/FAQ.md | 42 +-
 docs/INPUTS.md | 52 +-
 docs/INTEGRATION_TESTS.md | 166 +
 docs/LICENSE_OF_DEPENDENCIES.md | 206 +-
 docs/NIGHTLIES.md | 32 +
 docs/OUTPUTS.md | 41 +-
 docs/PROCESSORS.md | 114 +-
 docs/PROFILING.md | 3 +-
 docs/README.md | 3 +
 docs/SQL_DRIVERS_INPUT.md | 51 +
 docs/SUPPORTED_PLATFORMS.md | 199 +
 docs/TEMPLATE_PATTERN.md | 24 +-
 docs/TLS.md | 47 +-
 docs/WINDOWS_SERVICE.md | 18 +-
 docs/developers/CODE_STYLE.md | 8 +
 docs/developers/DEPRECATION.md | 88 +
 docs/developers/LOGGING.md | 79 +
 docs/developers/METRIC_FORMAT_CHANGES.md | 49 +
 docs/developers/PACKAGING.md | 66 +
 docs/developers/PROFILING.md | 66 +
 docs/developers/README.md | 1 +
 docs/developers/REVIEWS.md | 174 +
 docs/developers/SAMPLE_CONFIG.md | 81 +
 docs/maintainers/CHANGELOG.md | 43 +
 docs/maintainers/LABELS.md | 69 +
 docs/maintainers/PULL_REQUESTS.md | 72 +
 docs/maintainers/RELEASES.md | 107 +
 etc/telegraf.conf | 4336 +-
 etc/telegraf_windows.conf | 10009 ++++-
 filter/filter.go | 24 +-
 go.mod | 523 +-
 go.sum | 3300 +-
 info.plist | 16 +
 internal/choice/choice.go | 4 +-
 internal/content_coding.go | 63 +-
 internal/content_coding_test.go | 19 +-
 internal/exec_unix.go | 3 +-
 internal/exec_windows.go | 3 +-
 internal/globpath/globpath.go | 47 +-
 internal/globpath/globpath_test.go | 105 +-
 internal/globpath/testdata/log[!.log | 0
 internal/goplugin/noplugin.go | 3 +-
 internal/goplugin/plugin.go | 1 +
 internal/http.go | 22 +-
 internal/internal.go | 146 +-
 internal/internal_test.go | 252 +-
 internal/process/process.go | 16 +-
 internal/process/process_posix.go | 1 +
 internal/process/process_test.go | 13 +-
 internal/process/process_windows.go | 1 +
 internal/rotate/file_writer.go | 11 +-
 internal/rotate/file_writer_test.go | 70 +-
 internal/snmp/config.go | 12 +-
 .../loadMibsFromPath/linkTarget/emptyFile | 0
 .../loadMibsFromPath/root/dirOne/dirTwo/empty | 0
 .../testdata/loadMibsFromPath/root/symlink | 1 +
 internal/snmp/testdata/mibs/testmib | 22 +
 internal/snmp/translate.go | 280 +
 internal/snmp/translate_test.go | 153 +
 internal/snmp/translator.go | 5 +
 internal/snmp/wrapper.go | 83 +-
 internal/templating/template.go | 2 -
 internal/type_conversions.go | 200 +
 internal/usage.go | 17 +-
 internal/usage_windows.go | 10 +-
 logger/event_logger.go | 34 +-
 logger/event_logger_test.go | 20 +-
 logger/logger.go | 85 +-
 logger/logger_test.go | 81 +-
 metric.go | 14 +-
 metric/metric.go | 41 +-
 metric/metric_test.go | 34 +-
 metric/series_grouper.go | 87 +-
 metric/series_grouper_test.go | 37 +
 metric/tracking.go | 3 +-
 metric/tracking_test.go | 20 +-
 models/buffer.go | 22 +-
 models/buffer_test.go | 5 +-
 models/filter.go | 26 +-
 models/filter_test.go | 24 +-
 models/log.go | 39 +-
 models/makemetric.go | 2 +-
 models/running_aggregator.go | 8 +-
 models/running_input.go | 1 +
 models/running_input_test.go | 48 +-
 models/running_output.go | 88 +-
 models/running_output_test.go | 53 +-
 models/running_parsers.go | 97 +
 models/running_processor.go | 34 +-
 parser.go | 39 +
 plugin.go | 41 +-
 plugins/aggregators/all/all.go | 4 +
 plugins/aggregators/basicstats/README.md | 50 +-
 plugins/aggregators/basicstats/basicstats.go | 136 +-
 .../aggregators/basicstats/basicstats_test.go | 119 +-
 plugins/aggregators/basicstats/sample.conf | 11 +
 plugins/aggregators/deprecations.go | 6 +
 plugins/aggregators/derivative/README.md | 217 +
 plugins/aggregators/derivative/derivative.go | 184 +
 .../aggregators/derivative/derivative_test.go | 416 +
 plugins/aggregators/derivative/sample.conf | 16 +
 plugins/aggregators/final/README.md | 14 +-
 plugins/aggregators/final/final.go | 29 +-
 plugins/aggregators/final/final_test.go | 24 +-
 plugins/aggregators/final/sample.conf | 10 +
 plugins/aggregators/histogram/README.md | 74 +-
 plugins/aggregators/histogram/histogram.go | 79 +-
 .../aggregators/histogram/histogram_test.go | 133 +-
 plugins/aggregators/histogram/sample.conf | 40 +
 plugins/aggregators/merge/README.md | 9 +-
 plugins/aggregators/merge/merge.go | 36 +-
 plugins/aggregators/merge/merge_test.go | 73 +-
 plugins/aggregators/merge/sample.conf | 5 +
 plugins/aggregators/minmax/README.md | 16 +-
 plugins/aggregators/minmax/minmax.go | 22 +-
 plugins/aggregators/minmax/minmax_test.go | 4 +-
 plugins/aggregators/minmax/sample.conf | 8 +
 plugins/aggregators/quantile/README.md | 136 +
 plugins/aggregators/quantile/algorithms.go | 108 +
 plugins/aggregators/quantile/quantile.go | 140 +
 plugins/aggregators/quantile/quantile_test.go | 635 +
 plugins/aggregators/quantile/sample.conf | 26 +
 plugins/aggregators/starlark/README.md | 119 +
 plugins/aggregators/starlark/sample.conf | 29 +
 plugins/aggregators/starlark/starlark.go | 115 +
 plugins/aggregators/starlark/starlark_test.go | 432 +
 .../aggregators/starlark/testdata/merge.star | 31 +
 .../starlark/testdata/min_max.star | 53 +
 plugins/aggregators/valuecounter/README.md | 23 +-
 plugins/aggregators/valuecounter/sample.conf | 10 +
 .../aggregators/valuecounter/valuecounter.go | 25 +-
 .../valuecounter/valuecounter_test.go | 4 +-
 plugins/common/auth/basic_auth.go | 23 +
 plugins/common/auth/basic_auth_test.go | 33 +
 plugins/common/cookie/cookie.go | 121 +
 plugins/common/cookie/cookie_test.go | 271 +
 plugins/common/encoding/decoder_reader.go | 2 +-
 plugins/common/encoding/decoder_test.go | 4 +-
 plugins/common/http/config.go | 70 +
 plugins/common/kafka/config.go | 95 +
 plugins/common/kafka/sasl.go | 1 -
 plugins/common/kafka/scram_client.go | 3 +-
 plugins/common/logrus/hook.go | 8 +-
 plugins/common/oauth/config.go | 32 +
 .../parallel/ordered.go | 0
 .../parallel/parallel.go | 0
 .../parallel/parallel_test.go | 12 +-
 .../parallel/unordered.go | 0
 plugins/common/proxy/connect.go | 140 +
 plugins/common/proxy/dialer.go | 37 +
 plugins/common/proxy/proxy.go | 57 +
 plugins/common/proxy/socks5.go | 22 +
 plugins/common/proxy/socks5_test.go | 70 +
 plugins/common/shim/README.md | 5 +-
 plugins/common/shim/config.go | 13 +-
 plugins/common/shim/config_test.go | 23 +-
 plugins/common/shim/example/cmd/main.go | 16 +-
 plugins/common/shim/goshim.go | 11 +-
 plugins/common/shim/goshim_test.go | 18 +-
 plugins/common/shim/input_test.go | 18 +-
 plugins/common/shim/logger.go | 11 +-
 plugins/common/shim/output_test.go | 2 +-
 plugins/common/shim/processor.go | 26 +-
 plugins/common/shim/processor_test.go | 47 +-
 plugins/common/shim/testdata/special.conf | 3 +-
 .../starlark/builtins.go | 82 +-
 .../starlark/field_dict.go | 110 +-
 plugins/common/starlark/logging.go | 47 +
 .../{processors => common}/starlark/metric.go | 21 +-
 plugins/common/starlark/starlark.go | 182 +
 .../starlark/tag_dict.go | 60 +-
 plugins/common/tls/config.go | 72 +-
 plugins/common/tls/config_test.go | 85 +-
 plugins/common/tls/utils.go | 6 +-
 plugins/inputs/activemq/README.md | 20 +-
 plugins/inputs/activemq/activemq.go | 83 +-
 plugins/inputs/activemq/activemq_test.go | 24 +-
 plugins/inputs/activemq/sample.conf | 26 +
 plugins/inputs/aerospike/README.md | 124 +-
 plugins/inputs/aerospike/aerospike.go | 255 +-
 plugins/inputs/aerospike/aerospike_test.go | 303 +-
 plugins/inputs/aerospike/sample.conf | 41 +
 plugins/inputs/aliyuncms/README.md | 158 +
 plugins/inputs/aliyuncms/aliyuncms.go | 506 +
 plugins/inputs/aliyuncms/aliyuncms_test.go | 517 +
 plugins/inputs/aliyuncms/discovery.go | 464 +
 plugins/inputs/aliyuncms/sample.conf | 100 +
 plugins/inputs/all/all.go | 31 +
 plugins/inputs/amd_rocm_smi/README.md | 74 +
 plugins/inputs/amd_rocm_smi/amd_rocm_smi.go | 286 +
 .../inputs/amd_rocm_smi/amd_rocm_smi_test.go | 90 +
 plugins/inputs/amd_rocm_smi/sample.conf | 7 +
 .../amd_rocm_smi/testdata/vega-10-XT.json | 77 +
 .../testdata/vega-20-WKS-GL-XE.json | 165 +
 plugins/inputs/amqp_consumer/README.md | 25 +-
 plugins/inputs/amqp_consumer/amqp_consumer.go | 167 +-
 plugins/inputs/amqp_consumer/sample.conf | 74 +
 plugins/inputs/apache/README.md | 30 +-
 plugins/inputs/apache/apache.go | 104 +-
 plugins/inputs/apache/apache_test.go | 3 +-
 plugins/inputs/apache/sample.conf | 20 +
 plugins/inputs/apcupsd/README.md | 15 +-
 plugins/inputs/apcupsd/apcupsd.go | 127 +-
 plugins/inputs/apcupsd/apcupsd_test.go | 65 +-
 plugins/inputs/apcupsd/sample.conf | 8 +
 plugins/inputs/aurora/README.md | 27 +-
 plugins/inputs/aurora/aurora.go | 59 +-
 plugins/inputs/aurora/aurora_test.go | 24 +-
 plugins/inputs/aurora/sample.conf | 24 +
 plugins/inputs/azure_storage_queue/README.md | 19 +-
 .../azure_storage_queue.go | 24 +-
 .../inputs/azure_storage_queue/sample.conf | 10 +
 plugins/inputs/bcache/README.md | 37 +-
 plugins/inputs/bcache/bcache.go | 46 +-
 plugins/inputs/bcache/bcache_test.go | 66 +-
 plugins/inputs/bcache/bcache_windows.go | 4 +
 plugins/inputs/bcache/sample.conf | 10 +
 plugins/inputs/beanstalkd/README.md | 21 +-
 plugins/inputs/beanstalkd/beanstalkd.go | 37 +-
 plugins/inputs/beanstalkd/beanstalkd_test.go | 44 +-
 plugins/inputs/beanstalkd/sample.conf | 8 +
 plugins/inputs/beat/README.md | 151 +
 plugins/inputs/beat/beat.go | 199 +
 plugins/inputs/beat/beat6_info.json | 7 +
 plugins/inputs/beat/beat6_stats.json | 137 +
 plugins/inputs/beat/beat_test.go | 203 +
 plugins/inputs/beat/sample.conf | 33 +
 plugins/inputs/bind/README.md | 77 +-
 plugins/inputs/bind/bind.go | 40 +-
 plugins/inputs/bind/bind_test.go | 33 +-
 plugins/inputs/bind/json_stats.go | 50 +-
 plugins/inputs/bind/sample.conf | 10 +
 plugins/inputs/bind/xml_stats_v2.go | 16 +-
 plugins/inputs/bind/xml_stats_v3.go | 46 +-
 plugins/inputs/bond/README.md | 73 +-
 plugins/inputs/bond/bond.go | 198 +-
 plugins/inputs/bond/bond_test.go | 94 +-
 plugins/inputs/bond/sample.conf | 18 +
 plugins/inputs/burrow/README.md | 61 +-
 plugins/inputs/burrow/burrow.go | 86 +-
 plugins/inputs/burrow/burrow_test.go | 32 +-
 plugins/inputs/burrow/sample.conf | 41 +
 plugins/inputs/cassandra/README.md | 89 +-
 plugins/inputs/cassandra/cassandra.go | 128 +-
 plugins/inputs/cassandra/cassandra_test.go | 52 +-
 plugins/inputs/cassandra/sample.conf | 15 +
 plugins/inputs/ceph/README.md | 81 +-
 plugins/inputs/ceph/ceph.go | 165 +-
 plugins/inputs/ceph/ceph_test.go | 121 +-
 plugins/inputs/ceph/sample.conf | 42 +
 plugins/inputs/cgroup/README.md | 42 +-
 plugins/inputs/cgroup/cgroup.go | 28 +-
 plugins/inputs/cgroup/cgroup_linux.go | 28 +-
 plugins/inputs/cgroup/cgroup_notlinux.go | 1 +
 plugins/inputs/cgroup/cgroup_test.go | 153 +
 plugins/inputs/cgroup/sample.conf | 14 +
 plugins/inputs/chrony/README.md | 128 +-
 plugins/inputs/chrony/chrony.go | 17 +-
 plugins/inputs/chrony/chrony_test.go | 11 +-
 plugins/inputs/chrony/sample.conf | 4 +
 plugins/inputs/cisco_telemetry_mdt/README.md | 86 +-
 .../cisco_telemetry_mdt.go | 304 +-
 .../cisco_telemetry_mdt_test.go | 456 +-
 .../cisco_telemetry_util.go | 876 +
 .../inputs/cisco_telemetry_mdt/sample.conf | 40 +
 plugins/inputs/clickhouse/README.md | 52 +-
 plugins/inputs/clickhouse/clickhouse.go | 172 +-
 plugins/inputs/clickhouse/clickhouse_test.go | 121 +-
 .../inputs/clickhouse/dev/docker-compose.yml | 3 +
 plugins/inputs/clickhouse/dev/init_schema.sql | 6 +
 plugins/inputs/clickhouse/dev/mysql_port.xml | 3 +
 .../inputs/clickhouse/dev/test_dictionary.xml | 43 +-
 plugins/inputs/clickhouse/sample.conf | 65 +
 plugins/inputs/cloud_pubsub/README.md | 17 +-
 .../{pubsub.go => cloud_pubsub.go} | 115 +-
 .../{pubsub_test.go => cloud_pubsub_test.go} | 53 +-
 plugins/inputs/cloud_pubsub/sample.conf | 74 +
 .../inputs/cloud_pubsub/subscription_stub.go | 2 +-
 plugins/inputs/cloud_pubsub_push/README.md | 22 +-
 .../{pubsub_push.go => cloud_pubsub_push.go} | 117 +-
 ...push_test.go => cloud_pubsub_push_test.go} | 23 +-
 plugins/inputs/cloud_pubsub_push/sample.conf | 49 +
 plugins/inputs/cloudwatch/README.md | 94 +-
 plugins/inputs/cloudwatch/cloudwatch.go | 482 +-
 plugins/inputs/cloudwatch/cloudwatch_test.go | 236 +-
 plugins/inputs/cloudwatch/sample.conf | 96 +
 .../cloudwatch_metric_streams/README.md | 142 +
 .../cloudwatch_metric_streams.go | 434 +
 .../cloudwatch_metric_streams_test.go | 397 +
 .../cloudwatch_metric_streams/sample.conf | 31 +
 .../testdata/records.gz | Bin 0 -> 38037 bytes
 plugins/inputs/conntrack/README.md | 49 +-
 plugins/inputs/conntrack/conntrack.go | 33 +-
 .../inputs/conntrack/conntrack_notlinux.go | 1 +
 plugins/inputs/conntrack/conntrack_test.go | 42 +-
 plugins/inputs/conntrack/sample.conf | 14 +
 plugins/inputs/consul/README.md | 49 +-
 plugins/inputs/consul/consul.go | 56 +-
 plugins/inputs/consul/sample.conf | 37 +
 plugins/inputs/consul_agent/README.md | 38 +
 plugins/inputs/consul_agent/consul_agent.go | 177 +
 .../inputs/consul_agent/consul_agent_test.go | 97 +
 plugins/inputs/consul_agent/consul_structs.go | 32 +
 plugins/inputs/consul_agent/sample.conf | 19 +
 .../testdata/response_key_metrics.json | 42 +
 plugins/inputs/couchbase/README.md | 260 +-
 plugins/inputs/couchbase/couchbase.go | 437 +-
 plugins/inputs/couchbase/couchbase_data.go | 228 +
 plugins/inputs/couchbase/couchbase_test.go | 144 +-
 plugins/inputs/couchbase/sample.conf | 31 +
 plugins/inputs/couchdb/README.md | 21 +-
 plugins/inputs/couchdb/couchdb.go | 40 +-
 plugins/inputs/couchdb/sample.conf | 9 +
 plugins/inputs/cpu/README.md | 21 +-
 plugins/inputs/cpu/cpu.go | 100 +-
 plugins/inputs/cpu/cpu_test.go | 136 +-
 plugins/inputs/cpu/sample.conf | 12 +
 plugins/inputs/csgo/README.md | 40 +
 plugins/inputs/csgo/csgo.go | 184 +
 plugins/inputs/csgo/csgo_test.go | 55 +
 plugins/inputs/csgo/sample.conf | 10 +
 plugins/inputs/dcos/README.md | 47 +-
 plugins/inputs/dcos/client.go | 53 +-
 plugins/inputs/dcos/client_test.go | 5 +-
 plugins/inputs/dcos/creds.go | 12 +-
 plugins/inputs/dcos/dcos.go | 89 +-
 plugins/inputs/dcos/dcos_test.go | 1 -
 plugins/inputs/dcos/sample.conf | 42 +
 plugins/inputs/deprecations.go | 56 +
 plugins/inputs/directory_monitor/README.md | 71 +
 .../directory_monitor/directory_monitor.go | 410 +
 .../directory_monitor_test.go | 459 +
 plugins/inputs/directory_monitor/sample.conf | 47 +
 plugins/inputs/disk/README.md | 42 +-
 plugins/inputs/disk/disk.go | 65 +-
 plugins/inputs/disk/disk_test.go | 369 +-
 plugins/inputs/disk/sample.conf | 13 +
 .../disk/testdata/issue_10297/1/mountinfo | 2 +
 .../inputs/disk/testdata/success/1/mountinfo | 2 +
 plugins/inputs/diskio/README.md | 57 +-
 plugins/inputs/diskio/diskio.go | 110 +-
 plugins/inputs/diskio/diskio_linux.go | 51 +-
 plugins/inputs/diskio/diskio_linux_test.go | 73 +-
 plugins/inputs/diskio/diskio_other.go | 3 +-
 plugins/inputs/diskio/diskio_test.go | 3 +-
 plugins/inputs/diskio/sample.conf | 27 +
 plugins/inputs/disque/README.md | 14 +-
 plugins/inputs/disque/disque.go | 73 +-
 plugins/inputs/disque/disque_test.go | 20 +-
 plugins/inputs/disque/sample.conf | 7 +
 plugins/inputs/dmcache/README.md | 56 +-
 plugins/inputs/dmcache/dmcache.go | 18 +-
 plugins/inputs/dmcache/dmcache_linux.go | 3 +-
 plugins/inputs/dmcache/dmcache_linux_test.go | 1 +
 plugins/inputs/dmcache/dmcache_notlinux.go | 1 +
 plugins/inputs/dmcache/sample.conf | 4 +
 plugins/inputs/dns_query/README.md | 17 +-
 plugins/inputs/dns_query/dns_query.go | 52 +-
 plugins/inputs/dns_query/dns_query_test.go | 87 +-
 plugins/inputs/dns_query/sample.conf | 20 +
 plugins/inputs/docker/README.md | 121 +-
 plugins/inputs/docker/client.go | 22 +-
 plugins/inputs/docker/docker.go | 324 +-
 plugins/inputs/docker/docker_test.go | 436 +-
 plugins/inputs/docker/docker_testdata.go | 6 +-
 plugins/inputs/docker/sample.conf | 72 +
 plugins/inputs/docker/stats_helpers.go | 25 +-
 plugins/inputs/docker_log/README.md | 23 +-
 plugins/inputs/docker_log/docker_log.go | 113 +-
 plugins/inputs/docker_log/docker_log_test.go | 8 +-
 plugins/inputs/docker_log/sample.conf | 38 +
 plugins/inputs/dovecot/README.md | 72 +-
 plugins/inputs/dovecot/dovecot.go | 80 +-
 plugins/inputs/dovecot/dovecot_test.go | 55 +-
 plugins/inputs/dovecot/sample.conf | 18 +
 plugins/inputs/dpdk/README.md | 251 +
 plugins/inputs/dpdk/dpdk.go | 233 +
 plugins/inputs/dpdk/dpdk_connector.go | 163 +
 plugins/inputs/dpdk/dpdk_connector_test.go | 183 +
 plugins/inputs/dpdk/dpdk_notlinux.go | 4 +
 plugins/inputs/dpdk/dpdk_test.go | 388 +
 plugins/inputs/dpdk/dpdk_utils.go | 117 +
 plugins/inputs/dpdk/dpdk_utils_test.go | 138 +
 plugins/inputs/dpdk/mocks/conn.go | 146 +
 plugins/inputs/dpdk/sample.conf | 32 +
 plugins/inputs/ecs/README.md | 35 +-
 plugins/inputs/ecs/client.go | 6 +-
 plugins/inputs/ecs/client_test.go | 33 +-
 plugins/inputs/ecs/ecs.go | 64 +-
 plugins/inputs/ecs/ecs_test.go | 8 +-
 plugins/inputs/ecs/sample.conf | 26 +
 plugins/inputs/ecs/stats.go | 1 -
 plugins/inputs/elasticsearch/README.md | 61 +-
 plugins/inputs/elasticsearch/elasticsearch.go | 294 +-
 .../elasticsearch/elasticsearch_test.go | 157 +-
 plugins/inputs/elasticsearch/sample.conf | 59 +
 plugins/inputs/elasticsearch/testdata_test.go | 2002 +
 plugins/inputs/elasticsearch_query/README.md | 185 +
 .../elasticsearch_query/aggregation_parser.go | 153 +
 .../elasticsearch_query/aggregation_query.go | 217 +
 .../elasticsearch_query.go | 250 +
 .../elasticsearch_query_test.go | 760 +
 .../inputs/elasticsearch_query/sample.conf | 71 +
 .../elasticsearch_query/testdata/nginx_logs | 500 +
 plugins/inputs/ethtool/README.md | 32 +-
 plugins/inputs/ethtool/ethtool.go | 34 +-
 plugins/inputs/ethtool/ethtool_linux.go | 67 +-
 plugins/inputs/ethtool/ethtool_notlinux.go | 1 +
 plugins/inputs/ethtool/ethtool_test.go | 179 +-
 plugins/inputs/ethtool/sample.conf | 16 +
 plugins/inputs/eventhub_consumer/README.md | 29 +-
 .../eventhub_consumer/eventhub_consumer.go | 137 +-
 plugins/inputs/eventhub_consumer/sample.conf | 84 +
 .../{EXAMPLE_README.md => example/README.md} | 45 +-
 plugins/inputs/example/example.go | 115 +
 plugins/inputs/example/example_test.go | 439 +
 plugins/inputs/example/sample.conf | 3 +
 plugins/inputs/exec/README.md | 37 +-
 plugins/inputs/exec/exec.go | 110 +-
 plugins/inputs/exec/exec_test.go | 149 +-
 plugins/inputs/exec/sample.conf | 26 +
 plugins/inputs/execd/README.md | 33 +-
 plugins/inputs/execd/execd.go | 47 +-
 plugins/inputs/execd/execd_posix.go | 15 +-
 plugins/inputs/execd/execd_test.go | 44 +-
 plugins/inputs/execd/execd_windows.go | 1 +
 plugins/inputs/execd/sample.conf | 30 +
 plugins/inputs/execd/shim/goshim.go | 21 +-
 plugins/inputs/execd/shim/goshim_posix.go | 8 +-
 plugins/inputs/execd/shim/goshim_windows.go | 1 +
 plugins/inputs/execd/shim/shim_posix_test.go | 10 +-
 plugins/inputs/execd/shim/shim_test.go | 19 +-
 plugins/inputs/fail2ban/README.md | 23 +-
 plugins/inputs/fail2ban/fail2ban.go | 46 +-
 plugins/inputs/fail2ban/fail2ban_test.go | 23 +-
 plugins/inputs/fail2ban/sample.conf | 4 +
 plugins/inputs/fibaro/README.md | 16 +-
 plugins/inputs/fibaro/fibaro.go | 44 +-
 plugins/inputs/fibaro/fibaro_test.go | 6 +-
 plugins/inputs/fibaro/sample.conf | 12 +
 plugins/inputs/file/README.md | 28 +-
 plugins/inputs/file/file.go | 70 +-
 plugins/inputs/file/file_test.go | 213 +-
 plugins/inputs/file/sample.conf | 27 +
 plugins/inputs/filecount/README.md | 15 +-
 plugins/inputs/filecount/filecount.go | 83 +-
 plugins/inputs/filecount/filecount_test.go | 54 +-
 .../inputs/filecount/filesystem_helpers.go | 1 -
 .../filecount/filesystem_helpers_test.go | 21 +-
 plugins/inputs/filecount/sample.conf | 32 +
 plugins/inputs/filestat/README.md | 24 +-
 plugins/inputs/filestat/filestat.go | 54 +-
 plugins/inputs/filestat/filestat_test.go | 115 +-
 plugins/inputs/filestat/sample.conf | 9 +
 plugins/inputs/fireboard/README.md | 21 +-
 plugins/inputs/fireboard/fireboard.go | 48 +-
 plugins/inputs/fireboard/fireboard_test.go | 3 +-
 plugins/inputs/fireboard/sample.conf | 10 +
 plugins/inputs/fluentd/README.md | 57 +-
 plugins/inputs/fluentd/fluentd.go | 147 +-
 plugins/inputs/fluentd/fluentd_test.go | 121 +-
 plugins/inputs/fluentd/sample.conf | 14 +
 plugins/inputs/github/README.md | 38 +-
 plugins/inputs/github/github.go | 139 +-
 plugins/inputs/github/sample.conf | 24 +
 plugins/inputs/gnmi/README.md | 37 +-
 plugins/inputs/gnmi/gnmi.go | 371 +-
 plugins/inputs/gnmi/gnmi_test.go | 286 +-
 plugins/inputs/gnmi/sample.conf | 68 +
 plugins/inputs/graylog/README.md | 31 +-
 plugins/inputs/graylog/graylog.go | 100 +-
 plugins/inputs/graylog/graylog_test.go | 20 +-
 plugins/inputs/graylog/sample.conf | 38 +
 plugins/inputs/haproxy/README.md | 57 +-
 plugins/inputs/haproxy/haproxy.go | 97 +-
 plugins/inputs/haproxy/haproxy_test.go | 50 +-
 plugins/inputs/haproxy/sample.conf | 30 +
 plugins/inputs/hddtemp/README.md | 12 +-
 .../inputs/hddtemp/go-hddtemp/hddtemp_test.go | 53 +-
 plugins/inputs/hddtemp/hddtemp.go | 14 +-
 plugins/inputs/hddtemp/hddtemp_test.go | 16 +-
 plugins/inputs/hddtemp/sample.conf | 11 +
 plugins/inputs/http/README.md | 46 +-
 plugins/inputs/http/http.go | 127 +-
 plugins/inputs/http/http_test.go | 318 +-
 plugins/inputs/http/sample.conf | 66 +
 plugins/inputs/http_listener_v2/README.md | 46 +-
 .../http_listener_v2/http_listener_v2.go | 286 +-
 .../http_listener_v2/http_listener_v2_test.go | 218 +-
 plugins/inputs/http_listener_v2/sample.conf | 50 +
 plugins/inputs/http_response/README.md | 26 +-
 plugins/inputs/http_response/http_response.go | 180 +-
 .../http_response/http_response_test.go | 200 +-
 plugins/inputs/http_response/sample.conf | 70 +
 plugins/inputs/httpjson/README.md | 53 +-
 plugins/inputs/httpjson/httpjson.go | 110 +-
 plugins/inputs/httpjson/httpjson_test.go | 97 +-
 plugins/inputs/httpjson/sample.conf | 46 +
 plugins/inputs/hugepages/README.md | 68 +
 plugins/inputs/hugepages/hugepages.go | 281 +
 .../inputs/hugepages/hugepages_notlinux.go | 4 +
 plugins/inputs/hugepages/hugepages_test.go | 228 +
 plugins/inputs/hugepages/sample.conf | 7 +
 .../testdata/invalid/1/anode3/dir_lock | 0
 .../free_hugepages/dir_lock | 0
 .../hugepages-1048576kB/nry_hugepages | 1 +
 .../1/node0/hugepages/hugepages-2048kB | 0
 .../hugepages-aaaa1048576kB/free_hugepages | 1 +
 .../hugepages1048576kB/free_hugepages | 1 +
 .../inputs/hugepages/testdata/invalid/1/node1 | 1 +
 .../testdata/invalid/1/node4b/dir_lock | 0
 .../hugepages-1048576kB/nr_hugepages | 1 +
 .../inputs/hugepages/testdata/invalid/meminfo | 8 +
 .../inputs/hugepages/testdata/valid/meminfo | 51 +
 .../hugepages-1048576kB/free_hugepages | 1 +
 .../hugepages-1048576kB/nr_hugepages | 1 +
 .../nr_hugepages_mempolicy | 1 +
 .../nr_overcommit_hugepages | 1 +
 .../hugepages-1048576kB/resv_hugepages | 1 +
 .../hugepages-1048576kB/surplus_hugepages | 1 +
 .../hugepages/hugepages-2048kB/free_hugepages | 1 +
 .../hugepages/hugepages-2048kB/nr_hugepages | 1 +
 .../hugepages-2048kB/nr_hugepages_mempolicy | 1 +
 .../hugepages-2048kB/nr_overcommit_hugepages | 1 +
 .../hugepages/hugepages-2048kB/resv_hugepages | 1 +
 .../hugepages-2048kB/surplus_hugepages | 1 +
 .../hugepages-1048576kB/free_hugepages | 1 +
 .../hugepages-1048576kB/nr_hugepages | 1 +
 .../hugepages-1048576kB/surplus_hugepages | 1 +
 .../hugepages/hugepages-2048kB/free_hugepages | 1 +
 .../hugepages/hugepages-2048kB/nr_hugepages | 1 +
 .../hugepages-2048kB/surplus_hugepages | 1 +
 .../hugepages-1048576kB/free_hugepages | 1 +
 .../hugepages-1048576kB/nr_hugepages | 1 +
 .../hugepages-1048576kB/surplus_hugepages | 1 +
 .../hugepages/hugepages-2048kB/free_hugepages | 1 +
 .../hugepages/hugepages-2048kB/nr_hugepages | 1 +
 .../hugepages-2048kB/surplus_hugepages | 1 +
 plugins/inputs/icinga2/README.md | 38 +-
 plugins/inputs/icinga2/icinga2.go | 68 +-
 plugins/inputs/icinga2/icinga2_test.go | 5 +-
 plugins/inputs/icinga2/sample.conf | 21 +
 plugins/inputs/infiniband/README.md | 13 +-
 plugins/inputs/infiniband/infiniband.go | 20 +-
 plugins/inputs/infiniband/infiniband_linux.go | 14 +-
 .../inputs/infiniband/infiniband_notlinux.go | 1 +
 plugins/inputs/infiniband/infiniband_test.go | 6 +-
 plugins/inputs/infiniband/sample.conf | 3 +
 plugins/inputs/influxdb/README.md | 281 +-
 plugins/inputs/influxdb/influxdb.go | 51 +-
 plugins/inputs/influxdb/influxdb_test.go | 18 +-
 plugins/inputs/influxdb/sample.conf | 25 +
 plugins/inputs/influxdb_listener/README.md | 19 +-
 .../influxdb_listener/influxdb_listener.go | 391 +-
 .../influxdb_listener_benchmark_test.go | 6 +-
 .../influxdb_listener_test.go | 757 +-
 plugins/inputs/influxdb_listener/sample.conf | 47 +
 plugins/inputs/influxdb_v2_listener/README.md | 25 +-
 .../influxdb_v2_listener.go | 125 +-
 .../influxdb_v2_listener_benchmark_test.go | 6 +-
 .../influxdb_v2_listener_test.go | 319 +-
 .../inputs/influxdb_v2_listener/sample.conf | 32 +
 plugins/inputs/intel_pmu/README.md | 236 +
 plugins/inputs/intel_pmu/activators.go | 205 +
 plugins/inputs/intel_pmu/activators_test.go | 432 +
 plugins/inputs/intel_pmu/config.go | 239 +
 plugins/inputs/intel_pmu/config_test.go | 230 +
 plugins/inputs/intel_pmu/intel_pmu.go | 434 +
 .../intel_pmu/intel_pmu_notamd64linux.go | 4 +
 plugins/inputs/intel_pmu/intel_pmu_test.go | 555 +
 plugins/inputs/intel_pmu/mocks.go | 407 +
 plugins/inputs/intel_pmu/reader.go | 249 +
 plugins/inputs/intel_pmu/reader_test.go | 522 +
 plugins/inputs/intel_pmu/resolver.go | 150 +
 plugins/inputs/intel_pmu/resolver_test.go | 376 +
 plugins/inputs/intel_pmu/sample.conf | 47 +
 plugins/inputs/intel_powerstat/README.md | 300 +
 plugins/inputs/intel_powerstat/dto.go | 37 +
 plugins/inputs/intel_powerstat/file.go | 173 +
 .../inputs/intel_powerstat/file_mock_test.go | 147 +
 .../inputs/intel_powerstat/intel_powerstat.go | 812 +
 .../intel_powerstat_notlinux.go | 4 +
 .../intel_powerstat/intel_powerstat_test.go | 644 +
 plugins/inputs/intel_powerstat/msr.go | 312 +
 .../inputs/intel_powerstat/msr_mock_test.go | 132 +
 plugins/inputs/intel_powerstat/msr_test.go | 189 +
 plugins/inputs/intel_powerstat/rapl.go | 266 +
 .../inputs/intel_powerstat/rapl_mock_test.go | 81 +
 plugins/inputs/intel_powerstat/rapl_test.go | 116 +
 plugins/inputs/intel_powerstat/sample.conf | 16 +
 .../inputs/intel_powerstat/unit_converter.go | 50 +
 plugins/inputs/intel_rdt/README.md | 148 +-
 plugins/inputs/intel_rdt/intel_rdt.go | 132 +-
 plugins/inputs/intel_rdt/intel_rdt_test.go | 18 +-
 plugins/inputs/intel_rdt/intel_rdt_windows.go | 1 +
 plugins/inputs/intel_rdt/processes.go | 1 +
 plugins/inputs/intel_rdt/publisher.go | 104 +-
 plugins/inputs/intel_rdt/publisher_test.go | 183 +-
 plugins/inputs/intel_rdt/sample.conf | 28 +
 plugins/inputs/internal/README.md | 71 +-
 plugins/inputs/internal/internal.go | 17 +-
 plugins/inputs/internal/internal_test.go | 12 +-
 plugins/inputs/internal/sample.conf | 4 +
 plugins/inputs/internet_speed/README.md | 37 +
 .../inputs/internet_speed/internet_speed.go | 94 +
 .../internet_speed/internet_speed_test.go | 44 +
 plugins/inputs/internet_speed/sample.conf | 12 +
 plugins/inputs/interrupts/README.md | 19 +-
 plugins/inputs/interrupts/interrupts.go | 64 +-
 plugins/inputs/interrupts/interrupts_test.go | 12 +-
 plugins/inputs/interrupts/sample.conf | 13 +
 plugins/inputs/ipmi_sensor/README.md | 65 +-
 plugins/inputs/ipmi_sensor/connection.go | 36 +-
 plugins/inputs/ipmi_sensor/connection_test.go | 57 +-
 .../ipmi_sensor/{ipmi.go => ipmi_sensor.go} | 160 +-
 .../{ipmi_test.go => ipmi_sensor_test.go} | 174 +-
 plugins/inputs/ipmi_sensor/sample.conf | 43 +
 plugins/inputs/ipset/README.md | 27 +-
 plugins/inputs/ipset/ipset.go | 58 +-
 plugins/inputs/ipset/ipset_test.go | 8 +-
 plugins/inputs/ipset/sample.conf | 12 +
 plugins/inputs/iptables/README.md | 87 +-
 plugins/inputs/iptables/iptables.go | 36 +-
 plugins/inputs/iptables/iptables_nocompile.go | 1 +
 plugins/inputs/iptables/iptables_test.go | 1 +
 plugins/inputs/iptables/sample.conf | 18 +
 plugins/inputs/ipvs/README.md | 20 +-
 plugins/inputs/ipvs/ipvs.go | 20 +-
 plugins/inputs/ipvs/ipvs_notlinux.go | 1 +
 plugins/inputs/ipvs/sample.conf | 3 +
 plugins/inputs/jenkins/README.md | 42 +-
 plugins/inputs/jenkins/client.go | 14 +-
 plugins/inputs/jenkins/jenkins.go | 119 +-
 plugins/inputs/jenkins/jenkins_test.go | 75 +-
 plugins/inputs/jenkins/sample.conf | 47 +
 plugins/inputs/jolokia/README.md | 14 +-
 plugins/inputs/jolokia/jolokia.go | 141 +-
 plugins/inputs/jolokia/jolokia_test.go | 59 +-
 plugins/inputs/jolokia/sample.conf | 54 +
 plugins/inputs/jolokia2/README.md | 87 +-
 plugins/inputs/jolokia2/client_test.go | 91 +-
 .../inputs/jolokia2/{ => common}/client.go | 49 +-
 .../inputs/jolokia2/{ => common}/gatherer.go | 25 +-
 .../jolokia2/{ => common}/gatherer_test.go | 8 +-
 .../inputs/jolokia2/{ => common}/metric.go | 2 +-
 .../jolokia2/{ => common}/point_builder.go | 36 +-
 .../jolokia2/examples/kafka-connect.conf | 90 +
 plugins/inputs/jolokia2/examples/kafka.conf | 60 +-
 plugins/inputs/jolokia2/jolokia.go | 21 -
 plugins/inputs/jolokia2/jolokia2.go | 24 +
 .../inputs/jolokia2/jolokia2_agent/README.md | 31 +
 .../jolokia2/jolokia2_agent/jolokia2_agent.go | 97 +
 .../jolokia2/jolokia2_agent/sample.conf | 23 +
 .../inputs/jolokia2/jolokia2_proxy/README.md | 39 +
 .../jolokia2/jolokia2_proxy/jolokia_proxy.go | 98 +
 .../jolokia2/jolokia2_proxy/sample.conf | 31 +
 .../{jolokia_test.go => jolokia2_test.go} | 88 +-
 plugins/inputs/jolokia2/jolokia_agent.go | 115 -
 plugins/inputs/jolokia2/jolokia_proxy.go | 123 -
 .../inputs/jti_openconfig_telemetry/README.md | 14 +-
 .../auth/authentication_service.pb.go | 314 +-
 .../auth/authentication_service.proto | 1 +
 .../auth/authentication_service_grpc.pb.go | 101 +
 .../jti_openconfig_telemetry/collection.go | 2 +-
 .../inputs/jti_openconfig_telemetry/gen.go | 11 +
 ...lemetry.go => jti_openconfig_telemetry.go} | 139 +-
 ...st.go => jti_openconfig_telemetry_test.go} | 56 +-
 .../jti_openconfig_telemetry/oc/oc.pb.go | 2328 +-
 .../jti_openconfig_telemetry/oc/oc.proto | 1 +
 .../jti_openconfig_telemetry/oc/oc_grpc.pb.go | 293 +
 .../jti_openconfig_telemetry/sample.conf | 49 +
 plugins/inputs/kafka_consumer/README.md | 31 +-
 .../inputs/kafka_consumer/kafka_consumer.go | 207 +-
 .../kafka_consumer/kafka_consumer_test.go | 139 +-
 plugins/inputs/kafka_consumer/sample.conf | 99 +
 .../inputs/kafka_consumer_legacy/README.md | 17 +-
 .../kafka_consumer_legacy.go | 58 +-
 .../kafka_consumer_legacy_integration_test.go | 20 +-
 .../kafka_consumer_legacy_test.go | 48 +-
 .../inputs/kafka_consumer_legacy/sample.conf | 26 +
 plugins/inputs/kapacitor/README.md | 332 +-
 plugins/inputs/kapacitor/kapacitor.go | 45 +-
 plugins/inputs/kapacitor/kapacitor_test.go | 12 +-
 plugins/inputs/kapacitor/sample.conf | 17 +
 plugins/inputs/kernel/README.md | 33 +-
 plugins/inputs/kernel/kernel.go | 53 +-
 plugins/inputs/kernel/kernel_notlinux.go | 1 +
 plugins/inputs/kernel/kernel_test.go | 70 +-
 plugins/inputs/kernel/sample.conf | 3 +
 plugins/inputs/kernel_vmstat/README.md | 210 +-
 plugins/inputs/kernel_vmstat/kernel_vmstat.go | 23 +-
 .../kernel_vmstat/kernel_vmstat_notlinux.go | 1 +
 .../kernel_vmstat/kernel_vmstat_test.go | 50 +-
 plugins/inputs/kernel_vmstat/sample.conf | 3 +
 plugins/inputs/kibana/README.md | 34 +-
 plugins/inputs/kibana/kibana.go | 61 +-
 plugins/inputs/kibana/kibana_test.go | 4 +-
 plugins/inputs/kibana/sample.conf | 18 +
 .../basic_kibana_telegraf.conf | 75 +
 .../test_environment/docker-compose.yml | 48 +
 .../kibana/test_environment/run_test_env.sh | 3 +
 plugins/inputs/kibana/testdata_test6_5.go | 1 +
 plugins/inputs/kinesis_consumer/README.md | 53 +-
 .../kinesis_consumer/kinesis_consumer.go | 202 +-
 .../kinesis_consumer/kinesis_consumer_test.go | 211 +
 plugins/inputs/kinesis_consumer/sample.conf | 66 +
 plugins/inputs/knx_listener/README.md | 65 +
 .../knx_listener/knx_dummy_interface.go | 28 +
 plugins/inputs/knx_listener/knx_listener.go | 182 +
 .../inputs/knx_listener/knx_listener_test.go | 188 +
 plugins/inputs/knx_listener/sample.conf | 22 +
 plugins/inputs/kube_inventory/README.md | 93 +-
 plugins/inputs/kube_inventory/client.go | 91 +-
 plugins/inputs/kube_inventory/client_test.go | 17 +-
 plugins/inputs/kube_inventory/daemonset.go | 39 +-
 .../inputs/kube_inventory/daemonset_test.go | 159 +-
 plugins/inputs/kube_inventory/deployment.go | 24 +-
 .../inputs/kube_inventory/deployment_test.go | 157 +-
 plugins/inputs/kube_inventory/endpoint.go | 64 +-
 .../inputs/kube_inventory/endpoint_test.go | 271 +-
 plugins/inputs/kube_inventory/ingress.go | 51 +-
 plugins/inputs/kube_inventory/ingress_test.go | 237 +-
 .../{kube_state.go => kube_inventory.go} | 98 +-
 plugins/inputs/kube_inventory/node.go | 27 +-
 plugins/inputs/kube_inventory/node_test.go | 175 +-
 .../inputs/kube_inventory/persistentvolume.go | 19 +-
 .../kube_inventory/persistentvolume_test.go | 88 +-
 .../kube_inventory/persistentvolumeclaim.go | 31 +-
 .../persistentvolumeclaim_test.go | 225 +-
 plugins/inputs/kube_inventory/pod.go | 72 +-
 plugins/inputs/kube_inventory/pod_test.go | 694 +-
 plugins/inputs/kube_inventory/sample.conf | 48 +
 plugins/inputs/kube_inventory/service.go | 45 +-
 plugins/inputs/kube_inventory/service_test.go | 169 +-
 plugins/inputs/kube_inventory/statefulset.go | 42 +-
 .../inputs/kube_inventory/statefulset_test.go | 261 +-
 plugins/inputs/kubernetes/README.md | 37 +-
 plugins/inputs/kubernetes/kubernetes.go | 109 +-
 plugins/inputs/kubernetes/kubernetes_pods.go | 2 +-
plugins/inputs/kubernetes/kubernetes_test.go | 12 +- plugins/inputs/kubernetes/sample.conf | 26 + plugins/inputs/lanz/README.md | 47 +- plugins/inputs/lanz/lanz.go | 42 +- plugins/inputs/lanz/lanz_test.go | 15 +- plugins/inputs/lanz/sample.conf | 7 + plugins/inputs/leofs/README.md | 147 +- plugins/inputs/leofs/leofs.go | 27 +- plugins/inputs/leofs/leofs_test.go | 65 +- plugins/inputs/leofs/sample.conf | 5 + plugins/inputs/linux_sysctl_fs/README.md | 14 +- .../inputs/linux_sysctl_fs/linux_sysctl_fs.go | 54 +- .../linux_sysctl_fs/linux_sysctl_fs_test.go | 17 +- plugins/inputs/linux_sysctl_fs/sample.conf | 3 + plugins/inputs/logparser/README.md | 28 +- plugins/inputs/logparser/logparser.go | 98 +- plugins/inputs/logparser/logparser_solaris.go | 1 + plugins/inputs/logparser/logparser_test.go | 100 +- plugins/inputs/logparser/sample.conf | 52 + plugins/inputs/logstash/README.md | 26 +- plugins/inputs/logstash/logstash.go | 213 +- plugins/inputs/logstash/logstash_test.go | 262 +- plugins/inputs/logstash/sample.conf | 31 + plugins/inputs/logstash/samples_logstash7.go | 140 + plugins/inputs/lustre2/README.md | 61 +- plugins/inputs/lustre2/lustre2.go | 150 +- plugins/inputs/lustre2/lustre2_test.go | 129 +- plugins/inputs/lustre2/lustre2_windows.go | 4 + plugins/inputs/lustre2/sample.conf | 16 + plugins/inputs/lvm/README.md | 79 + plugins/inputs/lvm/lvm.go | 290 + plugins/inputs/lvm/lvm_test.go | 211 + plugins/inputs/lvm/sample.conf | 4 + plugins/inputs/mailchimp/README.md | 16 +- plugins/inputs/mailchimp/chimp_api.go | 53 +- plugins/inputs/mailchimp/mailchimp.go | 42 +- plugins/inputs/mailchimp/mailchimp_test.go | 59 +- plugins/inputs/mailchimp/sample.conf | 12 + plugins/inputs/marklogic/README.md | 14 +- plugins/inputs/marklogic/marklogic.go | 52 +- plugins/inputs/marklogic/marklogic_test.go | 6 +- plugins/inputs/marklogic/sample.conf | 18 + plugins/inputs/mcrouter/README.md | 19 +- plugins/inputs/mcrouter/mcrouter.go | 58 +- plugins/inputs/mcrouter/mcrouter_test.go | 132 +- plugins/inputs/mcrouter/sample.conf | 8 + plugins/inputs/mdstat/README.md | 53 + plugins/inputs/mdstat/mdstat.go | 309 + plugins/inputs/mdstat/mdstat_notlinux.go | 4 + plugins/inputs/mdstat/mdstat_test.go | 191 + plugins/inputs/mdstat/sample.conf | 5 + plugins/inputs/mem/README.md | 12 +- plugins/inputs/mem/{memory.go => mem.go} | 36 +- .../mem/{memory_test.go => mem_test.go} | 16 +- plugins/inputs/mem/sample.conf | 3 + plugins/inputs/memcached/README.md | 43 +- plugins/inputs/memcached/memcached.go | 61 +- plugins/inputs/memcached/memcached_test.go | 167 +- plugins/inputs/memcached/sample.conf | 15 + plugins/inputs/mesos/README.md | 481 +- plugins/inputs/mesos/mesos.go | 185 +- plugins/inputs/mesos/mesos_test.go | 97 +- plugins/inputs/mesos/sample.conf | 42 + plugins/inputs/minecraft/README.md | 29 +- plugins/inputs/minecraft/client.go | 48 +- plugins/inputs/minecraft/client_test.go | 8 +- .../inputs/minecraft/internal/rcon/rcon.go | 118 +- plugins/inputs/minecraft/minecraft.go | 38 +- plugins/inputs/minecraft/sample.conf | 13 + plugins/inputs/mock/README.md | 71 + plugins/inputs/mock/mock.go | 146 + plugins/inputs/mock/mock_test.go | 106 + plugins/inputs/mock/sample.conf | 30 + plugins/inputs/mock_Plugin.go | 31 - plugins/inputs/modbus/README.md | 400 +- plugins/inputs/modbus/configuration.go | 62 + .../inputs/modbus/configuration_register.go | 254 + .../inputs/modbus/configuration_request.go | 306 + plugins/inputs/modbus/modbus.go | 884 +- plugins/inputs/modbus/modbus_test.go | 1428 +- plugins/inputs/modbus/request.go | 69 +
plugins/inputs/modbus/sample.conf | 196 + .../inputs/modbus/sample_general_begin.conf | 46 + plugins/inputs/modbus/sample_general_end.conf | 8 + plugins/inputs/modbus/sample_register.conf | 50 + plugins/inputs/modbus/sample_request.conf | 92 + plugins/inputs/modbus/type_conversions.go | 54 + plugins/inputs/modbus/type_conversions16.go | 138 + plugins/inputs/modbus/type_conversions32.go | 200 + plugins/inputs/modbus/type_conversions64.go | 182 + plugins/inputs/mongodb/README.md | 66 +- plugins/inputs/mongodb/mongodb.go | 221 +- plugins/inputs/mongodb/mongodb_data.go | 138 +- plugins/inputs/mongodb/mongodb_data_test.go | 140 +- plugins/inputs/mongodb/mongodb_server.go | 151 +- plugins/inputs/mongodb/mongodb_server_test.go | 57 +- plugins/inputs/mongodb/mongodb_test.go | 56 +- plugins/inputs/mongodb/mongostat.go | 159 +- plugins/inputs/mongodb/mongostat_test.go | 43 +- plugins/inputs/mongodb/sample.conf | 38 + plugins/inputs/monit/README.md | 26 +- plugins/inputs/monit/monit.go | 275 +- plugins/inputs/monit/monit_test.go | 83 +- plugins/inputs/monit/sample.conf | 18 + plugins/inputs/mqtt_consumer/README.md | 68 +- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 326 +- .../mqtt_consumer/mqtt_consumer_test.go | 209 +- plugins/inputs/mqtt_consumer/sample.conf | 79 + plugins/inputs/multifile/README.md | 40 +- plugins/inputs/multifile/multifile.go | 58 +- plugins/inputs/multifile/multifile_test.go | 16 +- plugins/inputs/multifile/sample.conf | 23 + plugins/inputs/mysql/README.md | 263 +- plugins/inputs/mysql/mysql.go | 903 +- plugins/inputs/mysql/mysql_test.go | 245 +- plugins/inputs/mysql/sample.conf | 100 + plugins/inputs/mysql/v1/mysql.go | 12 +- plugins/inputs/mysql/v2/convert.go | 57 +- plugins/inputs/mysql/v2/convert_test.go | 45 +- plugins/inputs/nats/README.md | 17 +- plugins/inputs/nats/nats.go | 42 +- plugins/inputs/nats/nats_freebsd.go | 1 + plugins/inputs/nats/nats_test.go | 27 +- plugins/inputs/nats/sample.conf | 7 + plugins/inputs/nats_consumer/README.md | 5 +- plugins/inputs/nats_consumer/nats_consumer.go | 72 +- plugins/inputs/nats_consumer/sample.conf | 48 + plugins/inputs/neptune_apex/README.md | 87 +- plugins/inputs/neptune_apex/neptune_apex.go | 42 +- .../inputs/neptune_apex/neptune_apex_test.go | 276 +- plugins/inputs/neptune_apex/sample.conf | 14 + .../inputs/net/{NET_README.md => README.md} | 45 +- plugins/inputs/net/net.go | 46 +- plugins/inputs/net/net_test.go | 7 +- plugins/inputs/net/sample.conf | 14 + plugins/inputs/net_response/README.md | 10 +- plugins/inputs/net_response/net_response.go | 145 +- .../inputs/net_response/net_response_test.go | 133 +- plugins/inputs/net_response/sample.conf | 25 + .../NETSTAT_README.md => netstat/README.md} | 17 +- plugins/inputs/{net => netstat}/netstat.go | 28 +- plugins/inputs/netstat/sample.conf | 3 + plugins/inputs/nfsclient/README.md | 196 + plugins/inputs/nfsclient/nfsclient.go | 482 + plugins/inputs/nfsclient/nfsclient_test.go | 207 + plugins/inputs/nfsclient/sample.conf | 27 + plugins/inputs/nfsclient/testdata/mountstats | 231 + plugins/inputs/nginx/README.md | 39 +- plugins/inputs/nginx/nginx.go | 45 +- plugins/inputs/nginx/nginx_test.go | 34 +- plugins/inputs/nginx/sample.conf | 14 + plugins/inputs/nginx_plus/README.md | 38 +- plugins/inputs/nginx_plus/nginx_plus.go | 52 +- plugins/inputs/nginx_plus/nginx_plus_test.go | 23 +- plugins/inputs/nginx_plus/sample.conf | 14 + plugins/inputs/nginx_plus_api/README.md | 59 +- .../inputs/nginx_plus_api/nginx_plus_api.go | 61 +- .../nginx_plus_api/nginx_plus_api_metrics.go | 138 +-
.../nginx_plus_api_metrics_test.go | 284 +- .../nginx_plus_api/nginx_plus_api_types.go | 23 +- plugins/inputs/nginx_plus_api/sample.conf | 16 + plugins/inputs/nginx_sts/README.md | 17 +- plugins/inputs/nginx_sts/nginx_sts.go | 39 +- plugins/inputs/nginx_sts/nginx_sts_test.go | 18 +- plugins/inputs/nginx_sts/sample.conf | 14 + plugins/inputs/nginx_upstream_check/README.md | 58 +- .../nginx_upstream_check.go | 81 +- .../nginx_upstream_check_test.go | 29 +- .../inputs/nginx_upstream_check/sample.conf | 28 + plugins/inputs/nginx_vts/README.md | 39 +- plugins/inputs/nginx_vts/nginx_vts.go | 39 +- plugins/inputs/nginx_vts/nginx_vts_test.go | 18 +- plugins/inputs/nginx_vts/sample.conf | 14 + plugins/inputs/nomad/README.md | 32 + plugins/inputs/nomad/nomad.go | 164 + plugins/inputs/nomad/nomad_metrics.go | 53 + plugins/inputs/nomad/nomad_test.go | 104 + plugins/inputs/nomad/sample.conf | 12 + .../nomad/testdata/response_key_metrics.json | 48 + plugins/inputs/nsd/README.md | 13 +- plugins/inputs/nsd/nsd.go | 85 +- plugins/inputs/nsd/nsd_test.go | 25 +- plugins/inputs/nsd/sample.conf | 17 + plugins/inputs/nsq/README.md | 11 +- plugins/inputs/nsq/nsq.go | 44 +- plugins/inputs/nsq/nsq_test.go | 6 +- plugins/inputs/nsq/sample.conf | 11 + plugins/inputs/nsq_consumer/README.md | 4 +- plugins/inputs/nsq_consumer/nsq_consumer.go | 85 +- .../inputs/nsq_consumer/nsq_consumer_test.go | 85 +- plugins/inputs/nsq_consumer/sample.conf | 29 + plugins/inputs/nstat/README.md | 648 +- plugins/inputs/nstat/nstat.go | 119 +- plugins/inputs/nstat/nstat_test.go | 13 +- plugins/inputs/nstat/sample.conf | 10 + plugins/inputs/ntpq/README.md | 36 +- plugins/inputs/ntpq/ntpq.go | 33 +- plugins/inputs/ntpq/ntpq_test.go | 65 +- plugins/inputs/ntpq/sample.conf | 4 + plugins/inputs/nvidia_smi/README.md | 60 +- plugins/inputs/nvidia_smi/nvidia_smi.go | 50 +- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 8 +- plugins/inputs/nvidia_smi/sample.conf | 9 + plugins/inputs/opcua/README.md | 124 +- plugins/inputs/opcua/opcua.go | 551 + plugins/inputs/opcua/opcua_client.go | 424 - plugins/inputs/opcua/opcua_client_test.go | 110 - plugins/inputs/opcua/opcua_test.go | 357 + plugins/inputs/opcua/opcua_util.go | 101 +- plugins/inputs/opcua/sample.conf | 87 + plugins/inputs/openldap/README.md | 109 +- plugins/inputs/openldap/openldap.go | 80 +- plugins/inputs/openldap/openldap_test.go | 284 +- plugins/inputs/openldap/sample.conf | 23 + plugins/inputs/openntpd/README.md | 16 +- plugins/inputs/openntpd/openntpd.go | 58 +- plugins/inputs/openntpd/openntpd_test.go | 72 +- plugins/inputs/openntpd/sample.conf | 10 + plugins/inputs/opensmtpd/README.md | 29 +- plugins/inputs/opensmtpd/opensmtpd.go | 58 +- plugins/inputs/opensmtpd/opensmtpd_test.go | 22 +- plugins/inputs/opensmtpd/sample.conf | 10 + plugins/inputs/openstack/README.md | 371 + plugins/inputs/openstack/openstack.go | 904 + plugins/inputs/openstack/sample.conf | 51 + plugins/inputs/opentelemetry/README.md | 104 + plugins/inputs/opentelemetry/grpc_services.go | 85 + plugins/inputs/opentelemetry/logger.go | 16 + plugins/inputs/opentelemetry/opentelemetry.go | 107 + .../opentelemetry/opentelemetry_test.go | 82 + plugins/inputs/opentelemetry/sample.conf | 24 + plugins/inputs/opentelemetry/writer.go | 32 + plugins/inputs/openweathermap/README.md | 20 +- .../inputs/openweathermap/openweathermap.go | 132 +- .../openweathermap/openweathermap_test.go | 110 +- plugins/inputs/openweathermap/sample.conf | 30 + plugins/inputs/passenger/README.md | 24 +- plugins/inputs/passenger/passenger.go | 165 +- 
plugins/inputs/passenger/passenger_test.go | 67 +- plugins/inputs/passenger/sample.conf | 11 + plugins/inputs/pf/README.md | 74 +- plugins/inputs/pf/pf.go | 23 +- plugins/inputs/pf/sample.conf | 7 + plugins/inputs/pgbouncer/README.md | 28 +- plugins/inputs/pgbouncer/pgbouncer.go | 52 +- plugins/inputs/pgbouncer/pgbouncer_test.go | 82 +- plugins/inputs/pgbouncer/sample.conf | 11 + plugins/inputs/phpfpm/README.md | 10 +- plugins/inputs/phpfpm/child.go | 64 +- plugins/inputs/phpfpm/fcgi.go | 49 +- plugins/inputs/phpfpm/fcgi_client.go | 27 +- plugins/inputs/phpfpm/fcgi_test.go | 39 +- plugins/inputs/phpfpm/phpfpm.go | 153 +- plugins/inputs/phpfpm/phpfpm_test.go | 113 +- plugins/inputs/phpfpm/sample.conf | 34 + plugins/inputs/ping/README.md | 85 +- plugins/inputs/ping/ping.go | 469 +- plugins/inputs/ping/ping_notwindows.go | 99 +- plugins/inputs/ping/ping_test.go | 305 +- plugins/inputs/ping/ping_windows.go | 1 + plugins/inputs/ping/ping_windows_test.go | 74 +- plugins/inputs/ping/sample.conf | 51 + plugins/inputs/postfix/README.md | 30 +- plugins/inputs/postfix/postfix.go | 53 +- plugins/inputs/postfix/postfix_test.go | 48 +- plugins/inputs/postfix/postfix_windows.go | 4 + plugins/inputs/postfix/sample.conf | 5 + plugins/inputs/postfix/stat_ctim.go | 1 + plugins/inputs/postfix/stat_ctimespec.go | 1 + plugins/inputs/postfix/stat_none.go | 1 + plugins/inputs/postgresql/README.md | 90 +- plugins/inputs/postgresql/postgresql.go | 95 +- plugins/inputs/postgresql/postgresql_test.go | 127 +- plugins/inputs/postgresql/sample.conf | 37 + plugins/inputs/postgresql/service.go | 66 +- .../inputs/postgresql_extensible/README.md | 75 +- .../postgresql_extensible.go | 238 +- .../postgresql_extensible_test.go | 106 +- .../inputs/postgresql_extensible/sample.conf | 54 + plugins/inputs/powerdns/README.md | 19 +- plugins/inputs/powerdns/powerdns.go | 34 +- plugins/inputs/powerdns/powerdns_test.go | 37 +- plugins/inputs/powerdns/sample.conf | 7 + plugins/inputs/powerdns_recursor/README.md | 26 +- .../powerdns_recursor/powerdns_recursor.go | 43 +- .../powerdns_recursor_test.go | 85 +- plugins/inputs/powerdns_recursor/sample.conf | 10 + plugins/inputs/processes/README.md | 19 +- plugins/inputs/processes/processes.go | 12 +- .../inputs/processes/processes_notwindows.go | 7 +- plugins/inputs/processes/processes_test.go | 17 +- plugins/inputs/processes/processes_windows.go | 1 + plugins/inputs/processes/sample.conf | 3 + plugins/inputs/procstat/README.md | 35 +- plugins/inputs/procstat/native_finder.go | 9 +- .../procstat/native_finder_notwindows.go | 1 + .../procstat/native_finder_windows_test.go | 18 +- plugins/inputs/procstat/pgrep.go | 6 +- plugins/inputs/procstat/process.go | 19 +- plugins/inputs/procstat/procstat.go | 316 +- plugins/inputs/procstat/procstat_test.go | 158 +- plugins/inputs/procstat/sample.conf | 45 + .../inputs/procstat/win_service_notwindows.go | 3 +- .../inputs/procstat/win_service_windows.go | 1 + plugins/inputs/prometheus/README.md | 247 +- plugins/inputs/prometheus/consul.go | 208 + plugins/inputs/prometheus/kubernetes.go | 408 +- plugins/inputs/prometheus/kubernetes_test.go | 215 +- plugins/inputs/prometheus/parser.go | 225 +- plugins/inputs/prometheus/parser_test.go | 145 +- plugins/inputs/prometheus/prometheus.go | 256 +- plugins/inputs/prometheus/prometheus_test.go | 158 +- plugins/inputs/prometheus/sample.conf | 90 + plugins/inputs/proxmox/README.md | 36 +- plugins/inputs/proxmox/proxmox.go | 189 +- plugins/inputs/proxmox/proxmox_test.go | 33 +- plugins/inputs/proxmox/sample.conf | 17 + 
plugins/inputs/proxmox/structs.go | 41 +- plugins/inputs/puppetagent/README.md | 109 +- .../inputs/puppetagent/last_run_summary.yaml | 39 +- plugins/inputs/puppetagent/puppetagent.go | 78 +- .../inputs/puppetagent/puppetagent_test.go | 70 +- plugins/inputs/puppetagent/sample.conf | 4 + plugins/inputs/rabbitmq/README.md | 33 +- plugins/inputs/rabbitmq/rabbitmq.go | 281 +- plugins/inputs/rabbitmq/rabbitmq_test.go | 777 +- plugins/inputs/rabbitmq/sample.conf | 56 + .../testdata/{ => set1}/exchanges.json | 0 .../testdata/{ => set1}/federation-links.json | 0 .../rabbitmq/testdata/{ => set1}/memory.json | 0 .../rabbitmq/testdata/{ => set1}/nodes.json | 0 .../testdata/{ => set1}/overview.json | 0 .../rabbitmq/testdata/{ => set1}/queues.json | 0 .../rabbitmq/testdata/set2/exchanges.json | 104 + .../testdata/set2/federation-links.json | 1 + .../inputs/rabbitmq/testdata/set2/memory.json | 31 + .../inputs/rabbitmq/testdata/set2/nodes.json | 417 + .../rabbitmq/testdata/set2/overview.json | 1 + .../inputs/rabbitmq/testdata/set2/queues.json | 356 + plugins/inputs/raindrops/README.md | 38 +- plugins/inputs/raindrops/raindrops.go | 73 +- plugins/inputs/raindrops/raindrops_test.go | 22 +- plugins/inputs/raindrops/sample.conf | 4 + plugins/inputs/ras/README.md | 29 +- plugins/inputs/ras/ras.go | 34 +- plugins/inputs/ras/ras_notlinux.go | 1 + plugins/inputs/ras/ras_test.go | 33 +- plugins/inputs/ras/sample.conf | 5 + plugins/inputs/ravendb/README.md | 221 + plugins/inputs/ravendb/ravendb.go | 393 + plugins/inputs/ravendb/ravendb_dto.go | 199 + plugins/inputs/ravendb/ravendb_test.go | 388 + plugins/inputs/ravendb/sample.conf | 36 + .../ravendb/testdata/collections_full.json | 19 + .../ravendb/testdata/collections_min.json | 19 + .../ravendb/testdata/databases_full.json | 49 + .../ravendb/testdata/databases_min.json | 49 + .../inputs/ravendb/testdata/indexes_full.json | 25 + .../inputs/ravendb/testdata/indexes_min.json | 25 + .../inputs/ravendb/testdata/server_full.json | 73 + .../inputs/ravendb/testdata/server_min.json | 72 + plugins/inputs/redfish/README.md | 54 +- plugins/inputs/redfish/redfish.go | 61 +- plugins/inputs/redfish/redfish_test.go | 230 +- plugins/inputs/redfish/sample.conf | 21 + plugins/inputs/redis/README.md | 204 +- plugins/inputs/redis/redis.go | 372 +- plugins/inputs/redis/redis_test.go | 353 +- plugins/inputs/redis/sample.conf | 35 + plugins/inputs/redis_sentinel/README.md | 209 + .../inputs/redis_sentinel/redis_sentinel.go | 439 + .../redis_sentinel/redis_sentinel_test.go | 325 + .../redis_sentinel/redis_sentinel_types.go | 113 + plugins/inputs/redis_sentinel/sample.conf | 19 + .../testdata/sentinel.info.response | 71 + plugins/inputs/rethinkdb/README.md | 12 +- plugins/inputs/rethinkdb/rethinkdb.go | 62 +- plugins/inputs/rethinkdb/rethinkdb_data.go | 4 +- .../inputs/rethinkdb/rethinkdb_data_test.go | 13 +- plugins/inputs/rethinkdb/rethinkdb_server.go | 83 +- .../inputs/rethinkdb/rethinkdb_server_test.go | 13 +- plugins/inputs/rethinkdb/rethinkdb_test.go | 7 +- plugins/inputs/rethinkdb/sample.conf | 16 + plugins/inputs/riak/README.md | 17 +- plugins/inputs/riak/riak.go | 36 +- plugins/inputs/riak/riak_test.go | 6 +- plugins/inputs/riak/sample.conf | 4 + plugins/inputs/riemann_listener/README.md | 43 + .../riemann_listener/riemann_listener.go | 361 + .../riemann_listener/riemann_listener_test.go | 53 + plugins/inputs/riemann_listener/sample.conf | 27 + plugins/inputs/salesforce/README.md | 47 +- plugins/inputs/salesforce/salesforce.go | 40 +- plugins/inputs/salesforce/salesforce_test.go | 4 +-
plugins/inputs/salesforce/sample.conf | 18 + plugins/inputs/sensors/README.md | 33 +- plugins/inputs/sensors/sample.conf | 8 + plugins/inputs/sensors/sensors.go | 59 +- plugins/inputs/sensors/sensors_notlinux.go | 1 + plugins/inputs/sensors/sensors_test.go | 24 +- plugins/inputs/sflow/README.md | 33 +- plugins/inputs/sflow/metricencoder.go | 5 +- plugins/inputs/sflow/packetdecoder_test.go | 3 +- plugins/inputs/sflow/sample.conf | 11 + plugins/inputs/sflow/sflow.go | 41 +- plugins/inputs/sflow/sflow_test.go | 6 +- plugins/inputs/sflow/types.go | 36 +- plugins/inputs/sflow/types_test.go | 43 + plugins/inputs/sflow_a10/README.md | 57 +- plugins/inputs/sflow_a10/metricencoder.go | 14 +- plugins/inputs/sflow_a10/packetdecoder.go | 21 +- .../inputs/sflow_a10/packetdecoder_test.go | 33 +- plugins/inputs/sflow_a10/sflow_a10.go | 60 +- plugins/inputs/sflow_a10/sflow_a10_test.go | 42 +- plugins/inputs/sflow_a10/test.go | 7 +- plugins/inputs/sflow_a10/types.go | 10 +- plugins/inputs/sflow_a10/types_test.go | 2 +- plugins/inputs/slab/README.md | 60 + plugins/inputs/slab/sample.conf | 4 + plugins/inputs/slab/slab.go | 128 + plugins/inputs/slab/slab_notlinux.go | 4 + plugins/inputs/slab/slab_test.go | 49 + plugins/inputs/slab/testdata/slabinfo | 23 + plugins/inputs/smart/README.md | 174 +- plugins/inputs/smart/sample.conf | 49 + plugins/inputs/smart/smart.go | 424 +- plugins/inputs/smart/smart_test.go | 694 +- plugins/inputs/snmp/README.md | 214 +- plugins/inputs/snmp/gosmi.go | 123 + plugins/inputs/snmp/gosmi_test.go | 943 + plugins/inputs/snmp/netsnmp.go | 256 + plugins/inputs/snmp/sample.conf | 75 + plugins/inputs/snmp/snmp.go | 586 +- plugins/inputs/snmp/snmp_mocks_generate.go | 2 + plugins/inputs/snmp/snmp_mocks_test.go | 13 +- plugins/inputs/snmp/snmp_test.go | 557 +- plugins/inputs/snmp/testdata/bridgeMib | 1467 + plugins/inputs/snmp/testdata/bridgeMibImports | 554 + plugins/inputs/snmp/testdata/foo | 30 + plugins/inputs/snmp/testdata/fooImports | 169 + plugins/inputs/snmp/testdata/ifPhysAddress | 84 + .../inputs/snmp/testdata/ifPhysAddressImports | 254 + plugins/inputs/snmp/testdata/server | 57 + plugins/inputs/snmp/testdata/serverImports | 174 + plugins/inputs/snmp/testdata/snmpd.conf | 17 - plugins/inputs/snmp/testdata/tableBuild | 57 + plugins/inputs/snmp/testdata/tableMib | 2613 ++ plugins/inputs/snmp/testdata/tableMibImports | 119 + plugins/inputs/snmp/testdata/tcpMib | 786 + plugins/inputs/snmp/testdata/tcpMibImports | 639 + plugins/inputs/snmp/testdata/test.mib | 58 - plugins/inputs/snmp_legacy/README.md | 238 +- plugins/inputs/snmp_legacy/sample.conf | 90 + plugins/inputs/snmp_legacy/snmp_legacy.go | 284 +- plugins/inputs/snmp_trap/README.md | 61 +- plugins/inputs/snmp_trap/gosmi.go | 21 + plugins/inputs/snmp_trap/netsnmp.go | 89 + plugins/inputs/snmp_trap/sample.conf | 35 + plugins/inputs/snmp_trap/snmp_trap.go | 185 +- plugins/inputs/snmp_trap/snmp_trap_test.go | 556 +- plugins/inputs/socket_listener/README.md | 45 +- plugins/inputs/socket_listener/sample.conf | 59 + .../inputs/socket_listener/socket_listener.go | 131 +- .../socket_listener/socket_listener_test.go | 113 +- plugins/inputs/socketstat/README.md | 59 + plugins/inputs/socketstat/sample.conf | 7 + plugins/inputs/socketstat/socketstat.go | 216 + plugins/inputs/socketstat/socketstat_test.go | 126 + .../inputs/socketstat/socketstat_windows.go | 4 + .../socketstat/testdata/tcp_no_sockets.txt | 1 + .../socketstat/testdata/tcp_traffic.txt | 7 + .../socketstat/testdata/udp_no_sockets.txt | 1 +
.../socketstat/testdata/udp_traffic.txt | 4 + plugins/inputs/solr/README.md | 19 +- plugins/inputs/solr/sample.conf | 11 + plugins/inputs/solr/solr.go | 66 +- plugins/inputs/solr/solr_test.go | 31 +- plugins/inputs/solr/testdata3_test.go | 3 +- plugins/inputs/solr/testdata7_test.go | 3 +- plugins/inputs/solr/testdata_test.go | 9 +- plugins/inputs/sql/README.md | 178 + plugins/inputs/sql/drivers.go | 9 + plugins/inputs/sql/drivers_sqlite.go | 14 + plugins/inputs/sql/drivers_sqlite_other.go | 4 + plugins/inputs/sql/sample.conf | 77 + plugins/inputs/sql/sql.go | 464 + plugins/inputs/sql/sql_test.go | 346 + .../sql/testdata/clickhouse/expected.sql | 15 + .../inputs/sql/testdata/mariadb/expected.sql | 36 + .../inputs/sql/testdata/postgres/expected.sql | 41 + plugins/inputs/sqlserver/README.md | 492 +- plugins/inputs/sqlserver/azuresqldbqueries.go | 696 + .../sqlserver/azuresqldbqueries_test.go | 450 + .../sqlserver/azuresqlmanagedqueries.go | 562 + .../sqlserver/azuresqlmanagedqueries_test.go | 379 + .../inputs/sqlserver/azuresqlpoolqueries.go | 477 + .../sqlserver/azuresqlpoolqueries_test.go | 313 + plugins/inputs/sqlserver/azuresqlqueries.go | 1180 - plugins/inputs/sqlserver/connectionstring.go | 100 + plugins/inputs/sqlserver/sample.conf | 139 + plugins/inputs/sqlserver/sqlqueriesV2.go | 77 +- plugins/inputs/sqlserver/sqlserver.go | 440 +- plugins/inputs/sqlserver/sqlserver_test.go | 331 +- plugins/inputs/sqlserver/sqlserverqueries.go | 417 +- plugins/inputs/stackdriver/README.md | 33 +- plugins/inputs/stackdriver/sample.conf | 78 + plugins/inputs/stackdriver/stackdriver.go | 233 +- .../inputs/stackdriver/stackdriver_test.go | 33 +- plugins/inputs/statsd/README.md | 127 +- plugins/inputs/statsd/datadog.go | 21 +- plugins/inputs/statsd/running_stats_test.go | 5 +- plugins/inputs/statsd/sample.conf | 80 + plugins/inputs/statsd/statsd.go | 395 +- plugins/inputs/statsd/statsd_test.go | 662 +- plugins/inputs/suricata/README.md | 47 +- plugins/inputs/suricata/sample.conf | 13 + plugins/inputs/suricata/suricata.go | 110 +- plugins/inputs/suricata/suricata_test.go | 216 +- plugins/inputs/suricata/suricata_testutil.go | 38 - plugins/inputs/suricata/testdata/test2.json | 21 + plugins/inputs/suricata/testdata/test3.json | 1 + plugins/inputs/swap/README.md | 13 +- plugins/inputs/swap/sample.conf | 3 + plugins/inputs/swap/swap.go | 16 +- plugins/inputs/swap/swap_test.go | 2 +- plugins/inputs/synproxy/README.md | 30 +- plugins/inputs/synproxy/sample.conf | 3 + plugins/inputs/synproxy/synproxy.go | 14 +- plugins/inputs/synproxy/synproxy_linux.go | 1 + plugins/inputs/synproxy/synproxy_notlinux.go | 1 + plugins/inputs/synproxy/synproxy_test.go | 22 +- plugins/inputs/syslog/README.md | 94 +- plugins/inputs/syslog/commons_test.go | 24 +- plugins/inputs/syslog/nontransparent_test.go | 57 +- plugins/inputs/syslog/octetcounting_test.go | 53 +- plugins/inputs/syslog/rfc3164_test.go | 123 + plugins/inputs/syslog/rfc5426_test.go | 41 +- plugins/inputs/syslog/sample.conf | 54 + plugins/inputs/syslog/syslog.go | 246 +- plugins/inputs/syslog/syslog_test.go | 25 +- plugins/inputs/sysstat/README.md | 190 +- plugins/inputs/sysstat/sample.conf | 52 + plugins/inputs/sysstat/sysstat.go | 152 +- .../inputs/sysstat/sysstat_interval_test.go | 42 +- plugins/inputs/sysstat/sysstat_notlinux.go | 1 + plugins/inputs/sysstat/sysstat_test.go | 74 +- plugins/inputs/system/README.md | 32 +- plugins/inputs/system/mock_PS.go | 20 +- plugins/inputs/system/ps.go | 122 +- plugins/inputs/system/sample.conf | 3 + 
plugins/inputs/system/system.go | 28 +- plugins/inputs/systemd_units/README.md | 48 +- plugins/inputs/systemd_units/sample.conf | 16 + ...ystemd_units_linux.go => systemd_units.go} | 86 +- .../systemd_units/systemd_units_notlinux.go | 1 + ...ts_linux_test.go => systemd_units_test.go} | 8 +- plugins/inputs/tail/README.md | 27 +- plugins/inputs/tail/multiline.go | 27 +- plugins/inputs/tail/multiline_test.go | 129 +- plugins/inputs/tail/sample.conf | 66 + plugins/inputs/tail/tail.go | 139 +- plugins/inputs/tail/tail_solaris.go | 1 + plugins/inputs/tail/tail_test.go | 361 +- plugins/inputs/tcp_listener/README.md | 13 +- plugins/inputs/tcp_listener/sample.conf | 4 + plugins/inputs/tcp_listener/tcp_listener.go | 62 +- .../inputs/tcp_listener/tcp_listener_test.go | 178 +- plugins/inputs/teamspeak/README.md | 51 +- plugins/inputs/teamspeak/sample.conf | 12 + plugins/inputs/teamspeak/teamspeak.go | 68 +- plugins/inputs/teamspeak/teamspeak_test.go | 34 +- plugins/inputs/temp/README.md | 17 +- plugins/inputs/temp/sample.conf | 3 + plugins/inputs/temp/temp.go | 14 +- plugins/inputs/temp/temp_test.go | 3 +- plugins/inputs/tengine/README.md | 10 +- plugins/inputs/tengine/sample.conf | 14 + plugins/inputs/tengine/tengine.go | 241 +- plugins/inputs/tengine/tengine_test.go | 21 +- plugins/inputs/tomcat/README.md | 20 +- plugins/inputs/tomcat/sample.conf | 18 + plugins/inputs/tomcat/tomcat.go | 49 +- plugins/inputs/tomcat/tomcat_test.go | 12 +- plugins/inputs/trig/README.md | 27 + plugins/inputs/trig/sample.conf | 4 + plugins/inputs/trig/trig.go | 19 +- plugins/inputs/trig/trig_test.go | 4 +- plugins/inputs/twemproxy/README.md | 15 + plugins/inputs/twemproxy/sample.conf | 6 + plugins/inputs/twemproxy/twemproxy.go | 32 +- plugins/inputs/twemproxy/twemproxy_test.go | 14 +- plugins/inputs/udp_listener/README.md | 12 +- plugins/inputs/udp_listener/sample.conf | 3 + plugins/inputs/udp_listener/udp_listener.go | 62 +- .../inputs/udp_listener/udp_listener_test.go | 386 +- plugins/inputs/unbound/README.md | 33 +- plugins/inputs/unbound/sample.conf | 24 + plugins/inputs/unbound/unbound.go | 102 +- plugins/inputs/unbound/unbound_test.go | 33 +- plugins/inputs/uwsgi/README.md | 47 +- plugins/inputs/uwsgi/sample.conf | 11 + plugins/inputs/uwsgi/uwsgi.go | 55 +- plugins/inputs/uwsgi/uwsgi_test.go | 13 +- plugins/inputs/varnish/README.md | 791 +- plugins/inputs/varnish/sample.conf | 38 + .../inputs/varnish/test_data/varnish4_4.json | 1478 + .../test_data/varnish6.2.1_reload.json | 2173 + .../inputs/varnish/test_data/varnish6.6.json | 2154 + .../varnish/test_data/varnish_types.json | 24 + .../varnish/test_data/varnish_v1_reload.txt | 474 + .../varnish/test_data/varnishadm-200.json | 10 + .../varnish/test_data/varnishadm-reload.json | 51 + plugins/inputs/varnish/varnish.go | 347 +- plugins/inputs/varnish/varnish_test.go | 247 +- plugins/inputs/varnish/varnish_windows.go | 1 + plugins/inputs/vault/README.md | 39 + plugins/inputs/vault/sample.conf | 19 + .../vault/testdata/response_key_metrics.json | 40 + plugins/inputs/vault/vault.go | 194 + plugins/inputs/vault/vault_metrics.go | 40 + plugins/inputs/vault/vault_test.go | 97 + plugins/inputs/vsphere/METRICS.md | 77 +- plugins/inputs/vsphere/README.md | 359 +- plugins/inputs/vsphere/client.go | 51 +- plugins/inputs/vsphere/endpoint.go | 210 +- plugins/inputs/vsphere/finder.go | 24 +- plugins/inputs/vsphere/sample.conf | 195 + plugins/inputs/vsphere/tscache.go | 15 +- plugins/inputs/vsphere/vsphere.go | 346 +- plugins/inputs/vsphere/vsphere_test.go | 84 +- 
plugins/inputs/webhooks/README.md | 52 +- plugins/inputs/webhooks/artifactory/README.md | 511 + .../artifactory/artifactory_webhook.go | 120 + .../artifactory_webhook_mock_json.go | 439 + .../artifactory/artifactory_webhook_models.go | 260 + .../artifactory/artifactory_webhook_test.go | 152 + plugins/inputs/webhooks/filestack/README.md | 2 + .../webhooks/filestack/filestack_webhooks.go | 16 +- .../filestack/filestack_webhooks_events.go | 4 +- plugins/inputs/webhooks/github/README.md | 92 +- .../inputs/webhooks/github/github_webhooks.go | 33 +- .../webhooks/github/github_webhooks_models.go | 106 +- .../webhooks/github/github_webhooks_test.go | 4 +- plugins/inputs/webhooks/mandrill/README.md | 2 + .../webhooks/mandrill/mandrill_webhooks.go | 18 +- .../mandrill/mandrill_webhooks_events.go | 4 +- plugins/inputs/webhooks/papertrail/README.md | 24 +- .../webhooks/papertrail/papertrail_test.go | 32 +- .../papertrail/papertrail_webhooks.go | 33 +- plugins/inputs/webhooks/particle/README.md | 19 +- .../webhooks/particle/particle_webhooks.go | 24 +- .../particle/particle_webhooks_test.go | 46 +- plugins/inputs/webhooks/rollbar/README.md | 16 +- .../webhooks/rollbar/rollbar_webhooks.go | 16 +- .../rollbar/rollbar_webhooks_events.go | 24 +- plugins/inputs/webhooks/sample.conf | 50 + plugins/inputs/webhooks/webhooks.go | 81 +- plugins/inputs/webhooks/webhooks_test.go | 7 + plugins/inputs/win_eventlog/README.md | 93 +- plugins/inputs/win_eventlog/event.go | 3 +- plugins/inputs/win_eventlog/sample.conf | 77 + .../inputs/win_eventlog/syscall_windows.go | 3 +- plugins/inputs/win_eventlog/util.go | 4 +- plugins/inputs/win_eventlog/util_test.go | 3 +- plugins/inputs/win_eventlog/win_eventlog.go | 96 +- .../win_eventlog/win_eventlog_notwindows.go | 1 + .../inputs/win_eventlog/win_eventlog_test.go | 3 +- .../inputs/win_eventlog/zsyscall_windows.go | 3 +- plugins/inputs/win_perf_counters/README.md | 325 +- plugins/inputs/win_perf_counters/kernel32.go | 1 + plugins/inputs/win_perf_counters/pdh.go | 145 +- plugins/inputs/win_perf_counters/pdh_386.go | 24 + plugins/inputs/win_perf_counters/pdh_amd64.go | 24 + .../win_perf_counters/performance_query.go | 49 +- plugins/inputs/win_perf_counters/sample.conf | 136 + .../win_perf_counters/win_perf_counters.go | 332 +- .../win_perf_counters_integration_test.go | 259 +- .../win_perf_counters_notwindows.go | 1 + .../win_perf_counters_test.go | 389 +- plugins/inputs/win_services/README.md | 41 +- plugins/inputs/win_services/sample.conf | 9 + plugins/inputs/win_services/win_services.go | 62 +- .../win_services_integration_test.go | 20 +- .../win_services/win_services_notwindows.go | 1 + .../inputs/win_services/win_services_test.go | 73 +- plugins/inputs/wireguard/README.md | 18 +- plugins/inputs/wireguard/sample.conf | 5 + plugins/inputs/wireguard/wireguard.go | 29 +- plugins/inputs/wireguard/wireguard_test.go | 9 +- plugins/inputs/wireless/README.md | 13 +- plugins/inputs/wireless/sample.conf | 5 + plugins/inputs/wireless/wireless.go | 21 +- plugins/inputs/wireless/wireless_linux.go | 21 +- plugins/inputs/wireless/wireless_notlinux.go | 1 + plugins/inputs/wireless/wireless_test.go | 15 +- plugins/inputs/x509_cert/README.md | 33 +- plugins/inputs/x509_cert/sample.conf | 27 + plugins/inputs/x509_cert/x509_cert.go | 271 +- plugins/inputs/x509_cert/x509_cert_test.go | 330 +- plugins/inputs/xtremio/README.md | 117 + plugins/inputs/xtremio/sample.conf | 18 + .../xtremio/testdata/sample_bbu_response.json | 20 + .../testdata/sample_get_bbu_response.json | 15 + 
plugins/inputs/xtremio/xtremio.go | 383 + plugins/inputs/xtremio/xtremio_test.go | 202 + plugins/inputs/xtremio/xtremio_types.go | 98 + plugins/inputs/zfs/README.md | 195 +- plugins/inputs/zfs/sample.conf | 21 + plugins/inputs/zfs/zfs.go | 46 +- plugins/inputs/zfs/zfs_freebsd.go | 63 +- plugins/inputs/zfs/zfs_freebsd_test.go | 55 + plugins/inputs/zfs/zfs_linux.go | 157 +- plugins/inputs/zfs/zfs_linux_test.go | 159 +- plugins/inputs/zfs/zfs_other.go | 1 + plugins/inputs/zipkin/README.md | 139 +- .../stress_test_write/stress_test_write.go | 30 +- .../cmd/thrift_serialize/thrift_serialize.go | 29 +- plugins/inputs/zipkin/codec/codec.go | 2 +- plugins/inputs/zipkin/codec/codec_test.go | 3 - plugins/inputs/zipkin/codec/jsonV1/jsonV1.go | 2 +- .../gen-go/zipkincore/GoUnusedProtection__.go | 5 + .../gen-go/zipkincore/zipkinCore-consts.go | 47 + .../thrift/gen-go/zipkincore/zipkinCore.go | 1556 + plugins/inputs/zipkin/codec/thrift/thrift.go | 14 +- .../inputs/zipkin/codec/thrift/thrift_test.go | 6 +- plugins/inputs/zipkin/handler.go | 4 +- plugins/inputs/zipkin/handler_test.go | 7 +- plugins/inputs/zipkin/sample.conf | 4 + plugins/inputs/zipkin/zipkin.go | 28 +- plugins/inputs/zipkin/zipkin_test.go | 9 +- plugins/inputs/zookeeper/README.md | 13 +- plugins/inputs/zookeeper/sample.conf | 19 + plugins/inputs/zookeeper/zookeeper.go | 77 +- plugins/inputs/zookeeper/zookeeper_test.go | 38 +- plugins/outputs/all/all.go | 13 + plugins/outputs/amon/README.md | 26 +- plugins/outputs/amon/amon.go | 69 +- plugins/outputs/amon/sample.conf | 10 + plugins/outputs/amqp/README.md | 25 +- plugins/outputs/amqp/amqp.go | 215 +- plugins/outputs/amqp/amqp_test.go | 56 +- plugins/outputs/amqp/client.go | 12 +- plugins/outputs/amqp/sample.conf | 95 + .../outputs/application_insights/README.md | 26 +- .../application_insights.go | 109 +- .../application_insights_test.go | 173 +- .../diagnostic_message_subscriber.go | 2 +- .../mocks/diagnostics_message_subscriber.go | 4 +- .../application_insights/mocks/transmitter.go | 4 +- .../outputs/application_insights/sample.conf | 21 + .../application_insights/transmitter.go | 10 +- plugins/outputs/azure_data_explorer/README.md | 263 + .../azure_data_explorer.go | 246 + .../azure_data_explorer_test.go | 234 + .../outputs/azure_data_explorer/sample.conf | 25 + plugins/outputs/azure_monitor/README.md | 120 +- .../outputs/azure_monitor/azure_monitor.go | 159 +- .../azure_monitor/azure_monitor_test.go | 36 +- plugins/outputs/azure_monitor/sample.conf | 29 + plugins/outputs/bigquery/README.md | 75 + plugins/outputs/bigquery/bigquery.go | 230 + plugins/outputs/bigquery/bigquery_test.go | 165 + plugins/outputs/bigquery/sample.conf | 16 + plugins/outputs/cloud_pubsub/README.md | 15 +- .../{pubsub.go => cloud_pubsub.go} | 89 +- .../{pubsub_test.go => cloud_pubsub_test.go} | 21 +- plugins/outputs/cloud_pubsub/sample.conf | 49 + plugins/outputs/cloud_pubsub/topic_gcp.go | 2 - plugins/outputs/cloud_pubsub/topic_stubbed.go | 4 +- plugins/outputs/cloudwatch/README.md | 106 +- plugins/outputs/cloudwatch/cloudwatch.go | 174 +- plugins/outputs/cloudwatch/cloudwatch_test.go | 81 +- plugins/outputs/cloudwatch/sample.conf | 42 + plugins/outputs/cloudwatch_logs/README.md | 95 + .../cloudwatch_logs/cloudwatch_logs.go | 373 + .../cloudwatch_logs/cloudwatch_logs_test.go | 548 + plugins/outputs/cloudwatch_logs/sample.conf | 58 + plugins/outputs/cratedb/README.md | 14 +- plugins/outputs/cratedb/cratedb.go | 101 +- plugins/outputs/cratedb/cratedb_test.go | 124 +- plugins/outputs/cratedb/sample.conf | 13 + 
plugins/outputs/datadog/README.md | 26 +- plugins/outputs/datadog/datadog.go | 126 +- plugins/outputs/datadog/datadog_test.go | 41 +- plugins/outputs/datadog/sample.conf | 18 + plugins/outputs/deprecations.go | 11 + plugins/outputs/discard/README.md | 4 +- plugins/outputs/discard/discard.go | 19 +- plugins/outputs/discard/sample.conf | 3 + plugins/outputs/dynatrace/README.md | 212 +- plugins/outputs/dynatrace/dynatrace.go | 356 +- plugins/outputs/dynatrace/dynatrace_test.go | 400 +- plugins/outputs/dynatrace/sample.conf | 36 + plugins/outputs/elasticsearch/README.md | 196 +- .../outputs/elasticsearch/elasticsearch.go | 267 +- .../elasticsearch/elasticsearch_test.go | 551 +- plugins/outputs/elasticsearch/sample.conf | 79 + plugins/outputs/event_hubs/README.md | 39 + plugins/outputs/event_hubs/event_hubs.go | 150 + plugins/outputs/event_hubs/event_hubs_test.go | 162 + plugins/outputs/event_hubs/sample.conf | 20 + plugins/outputs/exec/README.md | 17 +- plugins/outputs/exec/exec.go | 91 +- plugins/outputs/exec/exec_test.go | 15 +- plugins/outputs/exec/sample.conf | 19 + plugins/outputs/execd/README.md | 15 +- plugins/outputs/execd/execd.go | 27 +- plugins/outputs/execd/execd_test.go | 23 +- plugins/outputs/execd/sample.conf | 20 + plugins/outputs/file/README.md | 5 +- plugins/outputs/file/file.go | 65 +- plugins/outputs/file/file_test.go | 131 +- plugins/outputs/file/sample.conf | 27 + plugins/outputs/graphite/README.md | 30 +- plugins/outputs/graphite/graphite.go | 116 +- plugins/outputs/graphite/graphite_test.go | 151 +- plugins/outputs/graphite/sample.conf | 42 + plugins/outputs/graylog/README.md | 45 +- plugins/outputs/graylog/graylog.go | 348 +- plugins/outputs/graylog/graylog_test.go | 380 +- plugins/outputs/graylog/sample.conf | 24 + plugins/outputs/groundwork/README.md | 61 + plugins/outputs/groundwork/groundwork.go | 379 + plugins/outputs/groundwork/groundwork_test.go | 147 + plugins/outputs/groundwork/sample.conf | 26 + plugins/outputs/health/README.md | 10 +- plugins/outputs/health/compares.go | 3 +- plugins/outputs/health/health.go | 90 +- plugins/outputs/health/health_test.go | 18 +- plugins/outputs/health/sample.conf | 38 + plugins/outputs/http/README.md | 84 +- plugins/outputs/http/http.go | 262 +- plugins/outputs/http/http_test.go | 302 +- plugins/outputs/http/sample.conf | 98 + plugins/outputs/influxdb/README.md | 14 +- plugins/outputs/influxdb/http.go | 92 +- plugins/outputs/influxdb/http_test.go | 115 +- plugins/outputs/influxdb/influxdb.go | 139 +- plugins/outputs/influxdb/influxdb_test.go | 11 +- plugins/outputs/influxdb/sample.conf | 77 + plugins/outputs/influxdb/udp.go | 5 +- plugins/outputs/influxdb/udp_test.go | 19 +- plugins/outputs/influxdb_v2/README.md | 8 +- plugins/outputs/influxdb_v2/http.go | 142 +- .../outputs/influxdb_v2/http_internal_test.go | 57 +- plugins/outputs/influxdb_v2/http_test.go | 113 +- .../{influxdb.go => influxdb_v2.go} | 100 +- .../{influxdb_test.go => influxdb_v2_test.go} | 3 +- plugins/outputs/influxdb_v2/sample.conf | 51 + plugins/outputs/instrumental/README.md | 17 +- plugins/outputs/instrumental/instrumental.go | 82 +- .../outputs/instrumental/instrumental_test.go | 62 +- plugins/outputs/instrumental/sample.conf | 13 + plugins/outputs/kafka/README.md | 45 +- plugins/outputs/kafka/kafka.go | 258 +- plugins/outputs/kafka/kafka_test.go | 91 +- plugins/outputs/kafka/sample.conf | 151 + plugins/outputs/kinesis/README.md | 162 +- plugins/outputs/kinesis/kinesis.go | 177 +- plugins/outputs/kinesis/kinesis_test.go | 554 +- 
plugins/outputs/kinesis/sample.conf | 63 + plugins/outputs/librato/README.md | 39 +- plugins/outputs/librato/librato.go | 172 +- plugins/outputs/librato/librato_test.go | 31 +- plugins/outputs/librato/sample.conf | 16 + plugins/outputs/logzio/README.md | 32 +- plugins/outputs/logzio/logzio.go | 67 +- plugins/outputs/logzio/sample.conf | 15 + plugins/outputs/loki/README.md | 37 + plugins/outputs/loki/loki.go | 194 + plugins/outputs/loki/loki_test.go | 429 + plugins/outputs/loki/sample.conf | 25 + plugins/outputs/loki/stream.go | 70 + plugins/outputs/loki/stream_test.go | 157 + plugins/outputs/mongodb/README.md | 44 + plugins/outputs/mongodb/mongodb.go | 220 + plugins/outputs/mongodb/mongodb_test.go | 442 + plugins/outputs/mongodb/sample.conf | 34 + .../mongodb/testdata/auth_scram/setup.js | 3 + .../mongodb/testdata/auth_x509/setup.js | 5 + plugins/outputs/mqtt/README.md | 74 +- plugins/outputs/mqtt/mqtt.go | 95 +- plugins/outputs/mqtt/mqtt_test.go | 22 +- plugins/outputs/mqtt/sample.conf | 62 + plugins/outputs/nats/README.md | 9 +- plugins/outputs/nats/nats.go | 66 +- plugins/outputs/nats/nats_test.go | 21 +- plugins/outputs/nats/sample.conf | 33 + plugins/outputs/newrelic/README.md | 25 +- plugins/outputs/newrelic/newrelic.go | 87 +- plugins/outputs/newrelic/newrelic_test.go | 28 +- plugins/outputs/newrelic/sample.conf | 22 + plugins/outputs/nsq/README.md | 21 +- plugins/outputs/nsq/nsq.go | 44 +- plugins/outputs/nsq/nsq_test.go | 22 +- plugins/outputs/nsq/sample.conf | 12 + plugins/outputs/opentelemetry/README.md | 68 + plugins/outputs/opentelemetry/logger.go | 16 + .../outputs/opentelemetry/opentelemetry.go | 166 + .../opentelemetry/opentelemetry_test.go | 143 + plugins/outputs/opentelemetry/sample.conf | 33 + plugins/outputs/opentsdb/README.md | 87 +- plugins/outputs/opentsdb/opentsdb.go | 110 +- plugins/outputs/opentsdb/opentsdb_http.go | 9 +- plugins/outputs/opentsdb/opentsdb_test.go | 79 +- plugins/outputs/opentsdb/sample.conf | 26 + plugins/outputs/prometheus_client/README.md | 23 +- .../prometheus_client/prometheus_client.go | 122 +- .../prometheus_client_v1_test.go | 69 +- .../prometheus_client_v2_test.go | 37 +- plugins/outputs/prometheus_client/sample.conf | 42 + .../outputs/prometheus_client/v1/collector.go | 2 - .../outputs/prometheus_client/v2/collector.go | 3 +- plugins/outputs/riemann/README.md | 45 +- plugins/outputs/riemann/riemann.go | 102 +- plugins/outputs/riemann/riemann_test.go | 16 +- plugins/outputs/riemann/sample.conf | 32 + plugins/outputs/riemann_legacy/README.md | 19 + .../{riemann.go => riemann_legacy.go} | 61 +- .../riemann_legacy/riemann_legacy_test.go | 46 + .../outputs/riemann_legacy/riemann_test.go | 27 - plugins/outputs/riemann_legacy/sample.conf | 8 + plugins/outputs/sensu/README.md | 98 + plugins/outputs/sensu/sample.conf | 89 + plugins/outputs/sensu/sensu.go | 422 + plugins/outputs/sensu/sensu_test.go | 210 + plugins/outputs/signalfx/README.md | 27 + plugins/outputs/signalfx/sample.conf | 17 + plugins/outputs/signalfx/signalfx.go | 238 + plugins/outputs/signalfx/signalfx_test.go | 660 + plugins/outputs/socket_writer/README.md | 14 +- plugins/outputs/socket_writer/sample.conf | 37 + .../outputs/socket_writer/socket_writer.go | 68 +- .../socket_writer/socket_writer_test.go | 74 +- plugins/outputs/sql/README.md | 178 + plugins/outputs/sql/sample.conf | 53 + plugins/outputs/sql/sql.go | 281 + plugins/outputs/sql/sql_test.go | 412 + plugins/outputs/sql/sqlite.go | 14 + plugins/outputs/sql/sqlite_test.go | 132 + .../sql/testdata/clickhouse/expected.txt | 36 
+ .../sql/testdata/clickhouse/initdb/init.sql | 1 + .../outputs/sql/testdata/mariadb/expected.sql | 40 + .../sql/testdata/mariadb/initdb/script.sql | 4 + .../sql/testdata/postgres/expected.sql | 45 + .../sql/testdata/postgres/initdb/init.sql | 2 + plugins/outputs/stackdriver/README.md | 49 +- plugins/outputs/stackdriver/counter_cache.go | 96 + .../outputs/stackdriver/counter_cache_test.go | 166 + plugins/outputs/stackdriver/sample.conf | 16 + plugins/outputs/stackdriver/stackdriver.go | 168 +- .../outputs/stackdriver/stackdriver_test.go | 134 +- plugins/outputs/sumologic/README.md | 20 +- plugins/outputs/sumologic/sample.conf | 51 + plugins/outputs/sumologic/sumologic.go | 149 +- plugins/outputs/sumologic/sumologic_test.go | 70 +- plugins/outputs/syslog/README.md | 18 +- plugins/outputs/syslog/sample.conf | 76 + plugins/outputs/syslog/syslog.go | 123 +- plugins/outputs/syslog/syslog_mapper.go | 10 +- plugins/outputs/syslog/syslog_mapper_test.go | 34 +- plugins/outputs/syslog/syslog_test.go | 75 +- plugins/outputs/timestream/README.md | 71 +- plugins/outputs/timestream/sample.conf | 117 + plugins/outputs/timestream/timestream.go | 465 +- .../timestream/timestream_internal_test.go | 67 +- plugins/outputs/timestream/timestream_test.go | 545 +- plugins/outputs/warp10/README.md | 7 +- plugins/outputs/warp10/sample.conf | 26 + plugins/outputs/warp10/warp10.go | 93 +- plugins/outputs/warp10/warp10_test.go | 24 +- plugins/outputs/wavefront/README.md | 68 +- plugins/outputs/wavefront/sample.conf | 54 + plugins/outputs/wavefront/wavefront.go | 183 +- plugins/outputs/wavefront/wavefront_test.go | 30 +- plugins/outputs/websocket/README.md | 51 + plugins/outputs/websocket/sample.conf | 39 + plugins/outputs/websocket/websocket.go | 204 + plugins/outputs/websocket/websocket_test.go | 221 + .../outputs/yandex_cloud_monitoring/README.md | 14 +- .../yandex_cloud_monitoring/sample.conf | 10 + .../yandex_cloud_monitoring.go | 71 +- .../yandex_cloud_monitoring_test.go | 21 +- plugins/parsers/EXAMPLE_README.md | 9 +- plugins/parsers/all/all.go | 17 + plugins/parsers/collectd/README.md | 15 +- plugins/parsers/collectd/parser.go | 109 +- plugins/parsers/collectd/parser_test.go | 70 +- plugins/parsers/collectd/testdata/authfile | 1 + plugins/parsers/csv/README.md | 85 +- plugins/parsers/csv/parser.go | 338 +- plugins/parsers/csv/parser_test.go | 887 +- plugins/parsers/dropwizard/README.md | 170 +- plugins/parsers/dropwizard/parser.go | 156 +- plugins/parsers/dropwizard/parser_test.go | 298 +- plugins/parsers/form_urlencoded/README.md | 14 +- plugins/parsers/form_urlencoded/parser.go | 52 +- plugins/parsers/graphite/README.md | 8 +- plugins/parsers/graphite/config.go | 6 +- plugins/parsers/graphite/parser.go | 100 +- plugins/parsers/graphite/parser_test.go | 549 +- plugins/parsers/grok/README.md | 68 +- plugins/parsers/grok/influx_patterns.go | 2 +- plugins/parsers/grok/parser.go | 169 +- plugins/parsers/grok/parser_test.go | 352 +- plugins/parsers/influx/README.md | 15 +- plugins/parsers/influx/escape.go | 9 +- plugins/parsers/influx/handler.go | 6 +- .../parsers/influx/influx_upstream/README.md | 4 + .../parsers/influx/influx_upstream/parser.go | 299 + .../influx/influx_upstream/parser_test.go | 892 + plugins/parsers/influx/machine.go | 33196 ++-------------- plugins/parsers/influx/machine.go.rl | 20 +- plugins/parsers/influx/machine_test.go | 36 +- plugins/parsers/influx/parser.go | 48 +- plugins/parsers/influx/parser_test.go | 647 +- plugins/parsers/json/README.md | 62 +- plugins/parsers/json/json_flattener.go 
| 77 + plugins/parsers/json/parser.go | 249 +- plugins/parsers/json/parser_test.go | 668 +- plugins/parsers/json_v2/README.md | 271 + plugins/parsers/json_v2/parser.go | 680 + plugins/parsers/json_v2/parser_test.go | 117 + .../json_v2/testdata/10670/expected.out | 1 + .../parsers/json_v2/testdata/10670/input.json | 10 + .../json_v2/testdata/10670/telegraf.conf | 25 + .../testdata/array_of_objects/expected.out | 2 + .../testdata/array_of_objects/input.json | 14 + .../testdata/array_of_objects/telegraf.conf | 9 + .../testdata/complex_nesting/expected.out | 3 + .../testdata/complex_nesting/input.json | 31 + .../testdata/complex_nesting/telegraf.conf | 9 + .../testdata/fields_and_tags/expected.out | 2 + .../testdata/fields_and_tags/input.json | 46 + .../testdata/fields_and_tags/telegraf.conf | 14 + .../fields_and_tags_complex/expected.out | 5 + .../fields_and_tags_complex/input.json | 87 + .../fields_and_tags_complex/telegraf.conf | 10 + .../testdata/large_numbers/expected.out | 3 + .../json_v2/testdata/large_numbers/input.json | 17 + .../testdata/large_numbers/telegraf.conf | 22 + .../measurement_name_int/expected.out | 1 + .../testdata/measurement_name_int/input.json | 19 + .../measurement_name_int/telegraf.conf | 9 + .../mix_field_and_object/expected.out | 1 + .../testdata/mix_field_and_object/input.json | 44 + .../mix_field_and_object/telegraf.conf | 15 + .../multiple_arrays_in_object/expected.out | 8 + .../multiple_arrays_in_object/input.json | 24 + .../multiple_arrays_in_object/telegraf.conf | 11 + .../testdata/multiple_json_input/expected.out | 2 + .../testdata/multiple_json_input/input_1.json | 87 + .../testdata/multiple_json_input/input_2.json | 134 + .../multiple_json_input/telegraf.conf | 18 + .../testdata/multiple_timestamps/expected.out | 2 + .../testdata/multiple_timestamps/input.json | 12 + .../multiple_timestamps/telegraf.conf | 10 + .../nested_and_nonnested_tags/expected.out | 12 + .../nested_and_nonnested_tags/input.json | 174 + .../nested_and_nonnested_tags/telegraf.conf | 16 + .../nested_array_of_objects/expected.out | 2 + .../nested_array_of_objects/input.json | 36 + .../nested_array_of_objects/telegraf.conf | 15 + .../json_v2/testdata/nested_tags/expected.out | 2 + .../json_v2/testdata/nested_tags/input.json | 16 + .../testdata/nested_tags/telegraf.conf | 12 + .../testdata/nested_tags_complex/expected.out | 3 + .../testdata/nested_tags_complex/input.json | 35 + .../nested_tags_complex/telegraf.conf | 14 + .../json_v2/testdata/null/expected.out | 1 + .../parsers/json_v2/testdata/null/input.json | 40 + .../json_v2/testdata/null/telegraf.conf | 8 + .../json_v2/testdata/object/expected.out | 5 + .../json_v2/testdata/object/input.json | 87 + .../json_v2/testdata/object/telegraf.conf | 12 + .../testdata/object_timestamp/expected.out | 3 + .../testdata/object_timestamp/input.json | 19 + .../testdata/object_timestamp/telegraf.conf | 12 + .../json_v2/testdata/optional/expected.out | 0 .../json_v2/testdata/optional/input.json | 3 + .../json_v2/testdata/optional/telegraf.conf | 15 + .../testdata/optional_objects/expected.out | 3 + .../testdata/optional_objects/input_1.json | 1 + .../testdata/optional_objects/input_2.json | 1 + .../testdata/optional_objects/telegraf.conf | 21 + .../subfieldtag_in_object/expected.out | 1 + .../testdata/subfieldtag_in_object/input.json | 97 + .../subfieldtag_in_object/telegraf.conf | 17 + .../subfieldtag_in_object_2/expected.out | 4 + .../subfieldtag_in_object_2/input.json | 10 + .../subfieldtag_in_object_2/telegraf.conf | 16 + 
.../json_v2/testdata/timestamp/expected.out | 4 + .../json_v2/testdata/timestamp/input.json | 25 + .../json_v2/testdata/timestamp/telegraf.conf | 11 + .../testdata/timestamp_ns/expected.out | 2 + .../json_v2/testdata/timestamp_ns/input.json | 7 + .../testdata/timestamp_ns/telegraf.conf | 11 + .../testdata/timestamp_rfc3339/expected.out | 1 + .../testdata/timestamp_rfc3339/input.json | 4 + .../testdata/timestamp_rfc3339/telegraf.conf | 8 + .../json_v2/testdata/types/expected.out | 4 + .../parsers/json_v2/testdata/types/input.json | 22 + .../json_v2/testdata/types/telegraf.conf | 105 + .../json_v2/testdata/wrong_path/expected.out | 0 .../json_v2/testdata/wrong_path/input.json | 3 + .../json_v2/testdata/wrong_path/telegraf.conf | 46 + plugins/parsers/logfmt/README.md | 15 +- plugins/parsers/logfmt/parser.go | 63 +- plugins/parsers/logfmt/parser_test.go | 114 +- plugins/parsers/nagios/README.md | 4 +- plugins/parsers/nagios/parser.go | 103 +- plugins/parsers/nagios/parser_test.go | 127 +- plugins/parsers/prometheus/README.md | 22 + plugins/parsers/prometheus/common/helpers.go | 36 + plugins/parsers/prometheus/parser.go | 188 + plugins/parsers/prometheus/parser_test.go | 475 + .../parsers/prometheusremotewrite/README.md | 50 + .../parsers/prometheusremotewrite/parser.go | 84 + .../prometheusremotewrite/parser_test.go | 158 + plugins/parsers/registry.go | 280 +- plugins/parsers/registry_test.go | 85 + plugins/parsers/temporary/json_v2/types.go | 40 + plugins/parsers/temporary/xpath/types.go | 25 + plugins/parsers/value/README.md | 30 +- plugins/parsers/value/parser.go | 55 +- plugins/parsers/value/parser_test.go | 227 +- plugins/parsers/wavefront/README.md | 9 +- plugins/parsers/wavefront/element.go | 52 +- plugins/parsers/wavefront/parser.go | 71 +- plugins/parsers/wavefront/parser_test.go | 257 +- plugins/parsers/wavefront/scanner.go | 34 +- plugins/parsers/wavefront/token.go | 32 +- plugins/parsers/xpath/README.md | 571 + plugins/parsers/xpath/json_document.go | 65 + plugins/parsers/xpath/msgpack_document.go | 39 + plugins/parsers/xpath/parser.go | 602 + plugins/parsers/xpath/parser_test.go | 1406 + .../parsers/xpath/protocolbuffer_document.go | 156 + .../parsers/xpath/testcases/addressbook.conf | 28 + .../parsers/xpath/testcases/addressbook.dat | 17 + .../parsers/xpath/testcases/earthquakes.conf | 44 + .../xpath/testcases/earthquakes.quakeml | 20 + .../xpath/testcases/field_tag_batch.conf | 14 + .../xpath/testcases/field_tag_batch.json | 12 + .../parsers/xpath/testcases/multisensor.xml | 31 + .../testcases/multisensor_explicit_basic.conf | 17 + .../testcases/multisensor_explicit_batch.conf | 28 + .../multisensor_selection_batch.conf | 23 + .../xpath/testcases/openweathermap_5d.json | 127 + .../xpath/testcases/openweathermap_5d.xml | 38 + .../xpath/testcases/openweathermap_json.conf | 29 + .../xpath/testcases/openweathermap_xml.conf | 28 + .../xpath/testcases/protos/addressbook.proto | 28 + .../xpath/testcases/protos/person.proto | 13 + .../xpath/testcases/protos/phonenumber.proto | 15 + plugins/parsers/xpath/testcases/tracker.msg | 1 + .../xpath/testcases/tracker_msgpack.conf | 24 + plugins/parsers/xpath/xml_document.go | 65 + plugins/processors/all/all.go | 3 + plugins/processors/aws/ec2/README.md | 72 + plugins/processors/aws/ec2/ec2.go | 263 + plugins/processors/aws/ec2/ec2_test.go | 47 + plugins/processors/aws/ec2/sample.conf | 47 + plugins/processors/clone/README.md | 12 +- plugins/processors/clone/clone.go | 22 +- plugins/processors/clone/clone_test.go | 39 +- 
plugins/processors/clone/sample.conf | 10 +
plugins/processors/converter/README.md | 21 +-
plugins/processors/converter/converter.go | 104 +-
.../processors/converter/converter_test.go | 32 +
plugins/processors/converter/sample.conf | 28 +
plugins/processors/date/README.md | 13 +-
plugins/processors/date/date.go | 49 +-
plugins/processors/date/date_test.go | 37 +-
plugins/processors/date/sample.conf | 24 +
plugins/processors/dedup/README.md | 7 +-
plugins/processors/dedup/dedup.go | 55 +-
plugins/processors/dedup/dedup_test.go | 67 +-
plugins/processors/dedup/sample.conf | 4 +
plugins/processors/defaults/README.md | 30 +-
plugins/processors/defaults/defaults.go | 35 +-
plugins/processors/defaults/defaults_test.go | 5 +-
plugins/processors/defaults/sample.conf | 15 +
plugins/processors/deprecations.go | 6 +
plugins/processors/enum/README.md | 34 +-
plugins/processors/enum/enum.go | 128 +-
plugins/processors/enum/enum_test.go | 109 +-
plugins/processors/enum/sample.conf | 23 +
plugins/processors/execd/README.md | 120 +-
plugins/processors/execd/execd.go | 58 +-
plugins/processors/execd/execd_test.go | 92 +-
plugins/processors/execd/sample.conf | 15 +
plugins/processors/filepath/README.md | 427 +-
plugins/processors/filepath/filepath.go | 50 +-
plugins/processors/filepath/filepath_test.go | 1 +
plugins/processors/filepath/sample.conf | 30 +
plugins/processors/ifname/README.md | 7 +-
plugins/processors/ifname/ifname.go | 221 +-
plugins/processors/ifname/ifname_test.go | 28 +-
plugins/processors/ifname/sample.conf | 58 +
plugins/processors/ifname/ttl_cache.go | 16 +-
plugins/processors/noise/README.md | 88 +
plugins/processors/noise/noise.go | 137 +
plugins/processors/noise/noise_test.go | 378 +
plugins/processors/noise/sample.conf | 21 +
plugins/processors/override/README.md | 11 +-
plugins/processors/override/override.go | 22 +-
plugins/processors/override/override_test.go | 33 +-
plugins/processors/override/sample.conf | 10 +
plugins/processors/parser/README.md | 18 +-
plugins/processors/parser/parser.go | 56 +-
plugins/processors/parser/parser_test.go | 403 +-
plugins/processors/parser/sample.conf | 17 +
plugins/processors/pivot/README.md | 9 +-
plugins/processors/pivot/pivot.go | 21 +-
plugins/processors/pivot/sample.conf | 6 +
plugins/processors/port_name/README.md | 18 +-
plugins/processors/port_name/port_name.go | 79 +-
.../processors/port_name/port_name_test.go | 2 +-
plugins/processors/port_name/sample.conf | 18 +
plugins/processors/port_name/services_path.go | 3 +-
.../port_name/services_path_notwindows.go | 22 +-
plugins/processors/printer/README.md | 6 +-
plugins/processors/printer/printer.go | 15 +-
plugins/processors/printer/sample.conf | 2 +
plugins/processors/regex/README.md | 62 +-
plugins/processors/regex/regex.go | 253 +-
plugins/processors/regex/regex_test.go | 715 +-
plugins/processors/regex/sample.conf | 64 +
plugins/processors/rename/README.md | 9 +-
plugins/processors/rename/rename.go | 14 +-
plugins/processors/rename/rename_test.go | 15 +-
plugins/processors/rename/sample.conf | 18 +
plugins/processors/reverse_dns/README.md | 9 +-
plugins/processors/reverse_dns/rdnscache.go | 17 +-
.../processors/reverse_dns/rdnscache_test.go | 13 +-
plugins/processors/reverse_dns/reverse_dns.go | 111 +
...reversedns_test.go => reverse_dns_test.go} | 35 +-
plugins/processors/reverse_dns/reversedns.go | 156 -
plugins/processors/reverse_dns/sample.conf | 46 +
plugins/processors/s2geo/README.md | 14 +-
plugins/processors/s2geo/s2geo.go | 30 +-
plugins/processors/s2geo/s2geo_test.go | 2 +-
plugins/processors/s2geo/sample.conf | 12 +
plugins/processors/starlark/README.md | 149 +-
plugins/processors/starlark/sample.conf | 21 +
plugins/processors/starlark/starlark.go | 153 +-
plugins/processors/starlark/starlark_test.go | 634 +-
.../starlark/testdata/compare_metrics.star | 26 +
.../drop_fields_with_unexpected_type.star | 30 +
.../starlark/testdata/drop_string_fields.star | 14 +
.../processors/starlark/testdata/fail.star | 13 +
.../processors/starlark/testdata/iops.star | 55 +
.../starlark/testdata/json_nested.star | 46 +
.../processors/starlark/testdata/logging.star | 19 +
.../processors/starlark/testdata/math.star | 14 +
.../starlark/testdata/multiple_metrics.star | 26 +
.../testdata/multiple_metrics_with_json.star | 27 +
.../processors/starlark/testdata/pivot.star | 4 +-
.../rename_prometheus_remote_write.star | 16 +
.../starlark/testdata/schema_sizing.star | 96 +
.../starlark/testdata/sparkplug.star | 320 +
.../starlark/testdata/time_date.star | 19 +
.../starlark/testdata/time_duration.star | 17 +
.../starlark/testdata/time_set_timestamp.star | 15 +
.../starlark/testdata/time_timestamp.star | 22 +
.../testdata/time_timestamp_nanos.star | 22 +
.../starlark/testdata/value_filter.star | 6 +-
plugins/processors/streamingprocessor.go | 4 -
plugins/processors/strings/README.md | 78 +-
plugins/processors/strings/sample.conf | 59 +
plugins/processors/strings/strings.go | 89 +-
plugins/processors/strings/strings_test.go | 149 +-
plugins/processors/tag_limit/README.md | 7 +-
plugins/processors/tag_limit/sample.conf | 7 +
plugins/processors/tag_limit/tag_limit.go | 39 +-
.../processors/tag_limit/tag_limit_test.go | 19 +-
plugins/processors/template/README.md | 64 +-
plugins/processors/template/sample.conf | 9 +
plugins/processors/template/template.go | 22 +-
.../processors/template/template_metric.go | 13 +
plugins/processors/template/template_test.go | 51 +-
plugins/processors/topk/README.md | 53 +-
plugins/processors/topk/sample.conf | 55 +
plugins/processors/topk/test_sets.go | 25 +-
plugins/processors/topk/topk.go | 118 +-
plugins/processors/topk/topk_test.go | 94 +-
plugins/processors/unpivot/README.md | 14 +-
plugins/processors/unpivot/sample.conf | 6 +
plugins/processors/unpivot/unpivot.go | 27 +-
plugins/serializers/EXAMPLE_README.md | 11 +-
plugins/serializers/carbon2/README.md | 24 +-
plugins/serializers/carbon2/carbon2.go | 68 +-
plugins/serializers/carbon2/carbon2_test.go | 191 +-
plugins/serializers/csv/README.md | 55 +
plugins/serializers/csv/csv.go | 176 +
plugins/serializers/csv/csv_test.go | 181 +
plugins/serializers/csv/testcases/basic.conf | 8 +
plugins/serializers/csv/testcases/basic.csv | 2 +
plugins/serializers/csv/testcases/header.conf | 10 +
plugins/serializers/csv/testcases/header.csv | 3 +
.../csv/testcases/nanoseconds.conf | 10 +
.../serializers/csv/testcases/nanoseconds.csv | 2 +
plugins/serializers/csv/testcases/prefix.conf | 11 +
plugins/serializers/csv/testcases/prefix.csv | 3 +
.../serializers/csv/testcases/rfc3339.conf | 11 +
plugins/serializers/csv/testcases/rfc3339.csv | 3 +
.../serializers/csv/testcases/semicolon.conf | 11 +
.../serializers/csv/testcases/semicolon.csv | 3 +
plugins/serializers/graphite/README.md | 19 +-
plugins/serializers/graphite/graphite.go | 71 +-
plugins/serializers/graphite/graphite_test.go | 369 +-
plugins/serializers/influx/README.md | 5 +-
plugins/serializers/influx/escape.go | 9 +-
plugins/serializers/influx/influx.go | 26 +-
plugins/serializers/influx/influx_test.go | 659 +-
plugins/serializers/influx/reader.go | 2 +-
plugins/serializers/influx/reader_test.go | 273 +-
plugins/serializers/json/README.md | 13 +-
plugins/serializers/json/json.go | 27 +-
plugins/serializers/json/json_test.go | 116 +-
plugins/serializers/msgpack/README.md | 43 +
plugins/serializers/msgpack/metric.go | 104 +
plugins/serializers/msgpack/metric_gen.go | 417 +
.../serializers/msgpack/metric_gen_test.go | 236 +
plugins/serializers/msgpack/metric_test.go | 147 +
plugins/serializers/msgpack/msgpack.go | 43 +
plugins/serializers/msgpack/msgpack_test.go | 132 +
plugins/serializers/nowmetric/README.md | 13 +-
plugins/serializers/nowmetric/nowmetric.go | 19 +-
.../serializers/nowmetric/nowmetric_test.go | 95 +-
plugins/serializers/prometheus/README.md | 19 +-
plugins/serializers/prometheus/collection.go | 27 +-
.../serializers/prometheus/collection_test.go | 419 +-
plugins/serializers/prometheus/convert.go | 9 +-
.../prometheusremotewrite/README.md | 44 +
.../prometheusremotewrite.go | 343 +
.../prometheusremotewrite_test.go | 700 +
plugins/serializers/registry.go | 100 +-
plugins/serializers/splunkmetric/README.md | 10 +-
.../serializers/splunkmetric/splunkmetric.go | 31 +-
.../splunkmetric/splunkmetric_test.go | 207 +-
plugins/serializers/wavefront/README.md | 14 +-
plugins/serializers/wavefront/wavefront.go | 34 +-
.../serializers/wavefront/wavefront_test.go | 56 +-
scripts/alpine.docker | 18 -
scripts/buster.docker | 15 -
scripts/check-deps.sh | 6 +-
scripts/check-dynamic-glibc-versions.sh | 6 +-
scripts/check-file-changes.sh | 12 +
scripts/ci-1.15.docker | 23 -
scripts/{ci-1.14.docker => ci.docker} | 2 +-
scripts/docker-entrypoint.sh | 8 -
scripts/generate_config.sh | 27 +
scripts/generate_versioninfo/main.go | 46 +
scripts/install_gotestsum.sh | 46 +
scripts/installgo_linux.sh | 35 +
scripts/installgo_mac.sh | 46 +
scripts/installgo_windows.sh | 25 +
scripts/local_circleci.sh | 6 +
scripts/mac-signing.sh | 102 +
scripts/release.sh | 179 -
scripts/rpm/post-remove.sh | 7 +
scripts/stretch.docker | 15 -
scripts/telegraf.service | 4 +-
scripts/telegraf_entry_mac | 13 +
scripts/windows-signing.ps1 | 29 +
selfstat/selfstat.go | 12 +-
selfstat/stat.go | 1 -
selfstat/timingStat.go | 1 -
testutil/accumulator.go | 29 +-
testutil/capturelog.go | 60 +
testutil/container.go | 143 +
testutil/container_test.go | 73 +
testutil/file.go | 84 +
testutil/metric.go | 22 +-
testutil/metric_test.go | 4 +-
testutil/pki/cacert.pem | 26 +-
testutil/pki/cakey.pem | 40 +-
testutil/pki/client.pem | 45 +
testutil/pki/clientcert.pem | 27 +-
testutil/pki/clientenc.pem | 48 +
testutil/pki/clientenckey.pem | 30 +
testutil/pki/clientkey.pem | 38 +-
testutil/pki/server.pem | 45 +
testutil/pki/servercert.pem | 27 +-
testutil/pki/serverkey.pem | 38 +-
testutil/pki/tls-certs.sh | 29 +-
testutil/socket.go | 32 +
testutil/testutil.go | 9 +-
testutil/testutil_test.go | 2 -
testutil/tls.go | 36 +-
tools/package_lxd_test/README.md | 24 +
tools/package_lxd_test/container.go | 278 +
tools/package_lxd_test/lxd.go | 195 +
tools/package_lxd_test/main.go | 117 +
tools/readme_config_includer/generator.go | 170 +
tools/readme_linter/README.md | 29 +
tools/readme_linter/assert.go | 146 +
tools/readme_linter/main.go | 122 +
tools/readme_linter/plugin.go | 33 +
tools/readme_linter/rules.go | 210 +
tools/readme_linter/set.go | 32 +
2206 files changed, 185411 insertions(+), 79502 deletions(-)
create mode 100644 .github/ISSUE_TEMPLATE/BUG_REPORT.yml delete mode 100644 .github/ISSUE_TEMPLATE/Bug_report.md create mode 100644 .github/dependabot.yml create mode 100644
.github/workflows/golangci-lint.yml create mode 100644 .github/workflows/linter.yml create mode 100644 .github/workflows/readme-linter.yml create mode 100644 .github/workflows/semantic.yml create mode 100644 .golangci.yml create mode 100644 .markdownlint.yml create mode 100644 SECURITY.md delete mode 100644 appveyor.yml create mode 100644 assets/GopherAndTiger.png create mode 100644 assets/TelegrafTiger.png create mode 100644 assets/windows/icon.icns create mode 100644 assets/windows/tiger.ico create mode 120000 cmd/telegraf/README.md create mode 120000 config/README.md create mode 100644 config/deprecation.go create mode 100644 config/printer/agent.conf create mode 100644 config/printer/printer.go create mode 100644 config/testdata/addressbook.proto create mode 100644 config/testdata/azure_monitor.toml create mode 100644 config/testdata/parsers_new.toml create mode 100644 config/testdata/parsers_old.toml create mode 100644 config/testdata/special_types.key create mode 100644 config/testdata/special_types.pem create mode 100644 config/testdata/wrong_cert_path.toml delete mode 100644 docker-compose.yml create mode 100644 docs/COMMANDS_AND_FLAGS.md create mode 100644 docs/DOCKER.md create mode 100644 docs/INTEGRATION_TESTS.md create mode 100644 docs/NIGHTLIES.md create mode 100644 docs/SQL_DRIVERS_INPUT.md create mode 100644 docs/SUPPORTED_PLATFORMS.md create mode 100644 docs/developers/CODE_STYLE.md create mode 100644 docs/developers/DEPRECATION.md create mode 100644 docs/developers/LOGGING.md create mode 100644 docs/developers/METRIC_FORMAT_CHANGES.md create mode 100644 docs/developers/PACKAGING.md create mode 100644 docs/developers/PROFILING.md create mode 120000 docs/developers/README.md create mode 100644 docs/developers/REVIEWS.md create mode 100644 docs/developers/SAMPLE_CONFIG.md create mode 100644 docs/maintainers/CHANGELOG.md create mode 100644 docs/maintainers/LABELS.md create mode 100644 docs/maintainers/PULL_REQUESTS.md create mode 100644 docs/maintainers/RELEASES.md create mode 100644 info.plist create mode 100644 internal/globpath/testdata/log[!.log create mode 100644 internal/snmp/testdata/loadMibsFromPath/linkTarget/emptyFile create mode 100644 internal/snmp/testdata/loadMibsFromPath/root/dirOne/dirTwo/empty create mode 120000 internal/snmp/testdata/loadMibsFromPath/root/symlink create mode 100644 internal/snmp/testdata/mibs/testmib create mode 100644 internal/snmp/translate.go create mode 100644 internal/snmp/translate_test.go create mode 100644 internal/snmp/translator.go create mode 100644 internal/type_conversions.go create mode 100644 metric/series_grouper_test.go create mode 100644 models/running_parsers.go create mode 100644 parser.go create mode 100644 plugins/aggregators/basicstats/sample.conf create mode 100644 plugins/aggregators/deprecations.go create mode 100644 plugins/aggregators/derivative/README.md create mode 100644 plugins/aggregators/derivative/derivative.go create mode 100644 plugins/aggregators/derivative/derivative_test.go create mode 100644 plugins/aggregators/derivative/sample.conf create mode 100644 plugins/aggregators/final/sample.conf create mode 100644 plugins/aggregators/histogram/sample.conf create mode 100644 plugins/aggregators/merge/sample.conf create mode 100644 plugins/aggregators/minmax/sample.conf create mode 100644 plugins/aggregators/quantile/README.md create mode 100644 plugins/aggregators/quantile/algorithms.go create mode 100644 plugins/aggregators/quantile/quantile.go create mode 100644 plugins/aggregators/quantile/quantile_test.go 
create mode 100644 plugins/aggregators/quantile/sample.conf create mode 100644 plugins/aggregators/starlark/README.md create mode 100644 plugins/aggregators/starlark/sample.conf create mode 100644 plugins/aggregators/starlark/starlark.go create mode 100644 plugins/aggregators/starlark/starlark_test.go create mode 100644 plugins/aggregators/starlark/testdata/merge.star create mode 100644 plugins/aggregators/starlark/testdata/min_max.star create mode 100644 plugins/aggregators/valuecounter/sample.conf create mode 100644 plugins/common/auth/basic_auth.go create mode 100644 plugins/common/auth/basic_auth_test.go create mode 100644 plugins/common/cookie/cookie.go create mode 100644 plugins/common/cookie/cookie_test.go create mode 100644 plugins/common/http/config.go create mode 100644 plugins/common/kafka/config.go create mode 100644 plugins/common/oauth/config.go rename plugins/{processors/reverse_dns => common}/parallel/ordered.go (100%) rename plugins/{processors/reverse_dns => common}/parallel/parallel.go (100%) rename plugins/{processors/reverse_dns => common}/parallel/parallel_test.go (89%) rename plugins/{processors/reverse_dns => common}/parallel/unordered.go (100%) create mode 100644 plugins/common/proxy/connect.go create mode 100644 plugins/common/proxy/dialer.go create mode 100644 plugins/common/proxy/proxy.go create mode 100644 plugins/common/proxy/socks5.go create mode 100644 plugins/common/proxy/socks5_test.go rename plugins/{processors => common}/starlark/builtins.go (75%) rename plugins/{processors => common}/starlark/field_dict.go (64%) create mode 100644 plugins/common/starlark/logging.go rename plugins/{processors => common}/starlark/metric.go (73%) create mode 100644 plugins/common/starlark/starlark.go rename plugins/{processors => common}/starlark/tag_dict.go (70%) create mode 100644 plugins/inputs/activemq/sample.conf create mode 100644 plugins/inputs/aerospike/sample.conf create mode 100644 plugins/inputs/aliyuncms/README.md create mode 100644 plugins/inputs/aliyuncms/aliyuncms.go create mode 100644 plugins/inputs/aliyuncms/aliyuncms_test.go create mode 100644 plugins/inputs/aliyuncms/discovery.go create mode 100644 plugins/inputs/aliyuncms/sample.conf create mode 100644 plugins/inputs/amd_rocm_smi/README.md create mode 100644 plugins/inputs/amd_rocm_smi/amd_rocm_smi.go create mode 100644 plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go create mode 100644 plugins/inputs/amd_rocm_smi/sample.conf create mode 100644 plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json create mode 100644 plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json create mode 100644 plugins/inputs/amqp_consumer/sample.conf create mode 100644 plugins/inputs/apache/sample.conf create mode 100644 plugins/inputs/apcupsd/sample.conf create mode 100644 plugins/inputs/aurora/sample.conf create mode 100644 plugins/inputs/azure_storage_queue/sample.conf create mode 100644 plugins/inputs/bcache/bcache_windows.go create mode 100644 plugins/inputs/bcache/sample.conf create mode 100644 plugins/inputs/beanstalkd/sample.conf create mode 100644 plugins/inputs/beat/README.md create mode 100644 plugins/inputs/beat/beat.go create mode 100644 plugins/inputs/beat/beat6_info.json create mode 100644 plugins/inputs/beat/beat6_stats.json create mode 100644 plugins/inputs/beat/beat_test.go create mode 100644 plugins/inputs/beat/sample.conf create mode 100644 plugins/inputs/bind/sample.conf create mode 100644 plugins/inputs/bond/sample.conf create mode 100644 plugins/inputs/burrow/sample.conf create mode 100644 
plugins/inputs/cassandra/sample.conf create mode 100644 plugins/inputs/ceph/sample.conf create mode 100644 plugins/inputs/cgroup/sample.conf create mode 100644 plugins/inputs/chrony/sample.conf create mode 100644 plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go create mode 100644 plugins/inputs/cisco_telemetry_mdt/sample.conf create mode 100644 plugins/inputs/clickhouse/dev/init_schema.sql create mode 100644 plugins/inputs/clickhouse/dev/mysql_port.xml create mode 100644 plugins/inputs/clickhouse/sample.conf rename plugins/inputs/cloud_pubsub/{pubsub.go => cloud_pubsub.go} (63%) rename plugins/inputs/cloud_pubsub/{pubsub_test.go => cloud_pubsub_test.go} (84%) create mode 100644 plugins/inputs/cloud_pubsub/sample.conf rename plugins/inputs/cloud_pubsub_push/{pubsub_push.go => cloud_pubsub_push.go} (63%) rename plugins/inputs/cloud_pubsub_push/{pubsub_push_test.go => cloud_pubsub_push_test.go} (92%) create mode 100644 plugins/inputs/cloud_pubsub_push/sample.conf create mode 100644 plugins/inputs/cloudwatch/sample.conf create mode 100644 plugins/inputs/cloudwatch_metric_streams/README.md create mode 100644 plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go create mode 100644 plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams_test.go create mode 100644 plugins/inputs/cloudwatch_metric_streams/sample.conf create mode 100644 plugins/inputs/cloudwatch_metric_streams/testdata/records.gz create mode 100644 plugins/inputs/conntrack/sample.conf create mode 100644 plugins/inputs/consul/sample.conf create mode 100644 plugins/inputs/consul_agent/README.md create mode 100644 plugins/inputs/consul_agent/consul_agent.go create mode 100644 plugins/inputs/consul_agent/consul_agent_test.go create mode 100644 plugins/inputs/consul_agent/consul_structs.go create mode 100644 plugins/inputs/consul_agent/sample.conf create mode 100644 plugins/inputs/consul_agent/testdata/response_key_metrics.json create mode 100644 plugins/inputs/couchbase/couchbase_data.go create mode 100644 plugins/inputs/couchbase/sample.conf create mode 100644 plugins/inputs/couchdb/sample.conf create mode 100644 plugins/inputs/cpu/sample.conf create mode 100644 plugins/inputs/csgo/README.md create mode 100644 plugins/inputs/csgo/csgo.go create mode 100644 plugins/inputs/csgo/csgo_test.go create mode 100644 plugins/inputs/csgo/sample.conf create mode 100644 plugins/inputs/dcos/sample.conf create mode 100644 plugins/inputs/deprecations.go create mode 100644 plugins/inputs/directory_monitor/README.md create mode 100644 plugins/inputs/directory_monitor/directory_monitor.go create mode 100644 plugins/inputs/directory_monitor/directory_monitor_test.go create mode 100644 plugins/inputs/directory_monitor/sample.conf create mode 100644 plugins/inputs/disk/sample.conf create mode 100644 plugins/inputs/disk/testdata/issue_10297/1/mountinfo create mode 100644 plugins/inputs/disk/testdata/success/1/mountinfo create mode 100644 plugins/inputs/diskio/sample.conf create mode 100644 plugins/inputs/disque/sample.conf create mode 100644 plugins/inputs/dmcache/sample.conf create mode 100644 plugins/inputs/dns_query/sample.conf create mode 100644 plugins/inputs/docker/sample.conf create mode 100644 plugins/inputs/docker_log/sample.conf create mode 100644 plugins/inputs/dovecot/sample.conf create mode 100644 plugins/inputs/dpdk/README.md create mode 100644 plugins/inputs/dpdk/dpdk.go create mode 100644 plugins/inputs/dpdk/dpdk_connector.go create mode 100644 plugins/inputs/dpdk/dpdk_connector_test.go create mode 100644 
plugins/inputs/dpdk/dpdk_notlinux.go create mode 100644 plugins/inputs/dpdk/dpdk_test.go create mode 100644 plugins/inputs/dpdk/dpdk_utils.go create mode 100644 plugins/inputs/dpdk/dpdk_utils_test.go create mode 100644 plugins/inputs/dpdk/mocks/conn.go create mode 100644 plugins/inputs/dpdk/sample.conf create mode 100644 plugins/inputs/ecs/sample.conf create mode 100644 plugins/inputs/elasticsearch/sample.conf create mode 100755 plugins/inputs/elasticsearch_query/README.md create mode 100644 plugins/inputs/elasticsearch_query/aggregation_parser.go create mode 100644 plugins/inputs/elasticsearch_query/aggregation_query.go create mode 100644 plugins/inputs/elasticsearch_query/elasticsearch_query.go create mode 100644 plugins/inputs/elasticsearch_query/elasticsearch_query_test.go create mode 100644 plugins/inputs/elasticsearch_query/sample.conf create mode 100644 plugins/inputs/elasticsearch_query/testdata/nginx_logs create mode 100644 plugins/inputs/ethtool/sample.conf create mode 100644 plugins/inputs/eventhub_consumer/sample.conf rename plugins/inputs/{EXAMPLE_README.md => example/README.md} (73%) create mode 100644 plugins/inputs/example/example.go create mode 100644 plugins/inputs/example/example_test.go create mode 100644 plugins/inputs/example/sample.conf create mode 100644 plugins/inputs/exec/sample.conf create mode 100644 plugins/inputs/execd/sample.conf create mode 100644 plugins/inputs/fail2ban/sample.conf create mode 100644 plugins/inputs/fibaro/sample.conf create mode 100644 plugins/inputs/file/sample.conf create mode 100644 plugins/inputs/filecount/sample.conf create mode 100644 plugins/inputs/filestat/sample.conf create mode 100644 plugins/inputs/fireboard/sample.conf create mode 100644 plugins/inputs/fluentd/sample.conf create mode 100644 plugins/inputs/github/sample.conf create mode 100644 plugins/inputs/gnmi/sample.conf create mode 100644 plugins/inputs/graylog/sample.conf create mode 100644 plugins/inputs/haproxy/sample.conf create mode 100644 plugins/inputs/hddtemp/sample.conf create mode 100644 plugins/inputs/http/sample.conf create mode 100644 plugins/inputs/http_listener_v2/sample.conf create mode 100644 plugins/inputs/http_response/sample.conf create mode 100644 plugins/inputs/httpjson/sample.conf create mode 100644 plugins/inputs/hugepages/README.md create mode 100644 plugins/inputs/hugepages/hugepages.go create mode 100644 plugins/inputs/hugepages/hugepages_notlinux.go create mode 100644 plugins/inputs/hugepages/hugepages_test.go create mode 100644 plugins/inputs/hugepages/sample.conf create mode 100644 plugins/inputs/hugepages/testdata/invalid/1/anode3/dir_lock create mode 100644 plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-1048576kB/free_hugepages/dir_lock create mode 100644 plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-1048576kB/nry_hugepages create mode 100644 plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-2048kB create mode 100644 plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-aaaa1048576kB/free_hugepages create mode 100644 plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages1048576kB/free_hugepages create mode 100644 plugins/inputs/hugepages/testdata/invalid/1/node1 create mode 100644 plugins/inputs/hugepages/testdata/invalid/1/node4b/dir_lock create mode 100644 plugins/inputs/hugepages/testdata/invalid/2/node1/hugepages/hugepages-1048576kB/nr_hugepages create mode 100644 plugins/inputs/hugepages/testdata/invalid/meminfo create mode 100644 
plugins/inputs/hugepages/testdata/valid/meminfo create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/free_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_hugepages_mempolicy create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_overcommit_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/resv_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/surplus_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/free_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/resv_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/surplus_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/free_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/nr_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/surplus_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/free_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/nr_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/surplus_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/free_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/nr_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/surplus_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/free_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/nr_hugepages create mode 100644 plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/surplus_hugepages create mode 100644 plugins/inputs/icinga2/sample.conf create mode 100644 plugins/inputs/infiniband/sample.conf create mode 100644 plugins/inputs/influxdb/sample.conf create mode 100644 plugins/inputs/influxdb_listener/sample.conf create mode 100644 plugins/inputs/influxdb_v2_listener/sample.conf create mode 100644 plugins/inputs/intel_pmu/README.md create mode 100644 plugins/inputs/intel_pmu/activators.go create mode 100644 plugins/inputs/intel_pmu/activators_test.go create mode 100644 plugins/inputs/intel_pmu/config.go create mode 100644 plugins/inputs/intel_pmu/config_test.go create mode 100644 plugins/inputs/intel_pmu/intel_pmu.go create mode 100644 plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go create mode 100644 plugins/inputs/intel_pmu/intel_pmu_test.go create mode 100644 plugins/inputs/intel_pmu/mocks.go create 
mode 100644 plugins/inputs/intel_pmu/reader.go create mode 100644 plugins/inputs/intel_pmu/reader_test.go create mode 100644 plugins/inputs/intel_pmu/resolver.go create mode 100644 plugins/inputs/intel_pmu/resolver_test.go create mode 100644 plugins/inputs/intel_pmu/sample.conf create mode 100644 plugins/inputs/intel_powerstat/README.md create mode 100644 plugins/inputs/intel_powerstat/dto.go create mode 100644 plugins/inputs/intel_powerstat/file.go create mode 100644 plugins/inputs/intel_powerstat/file_mock_test.go create mode 100644 plugins/inputs/intel_powerstat/intel_powerstat.go create mode 100644 plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go create mode 100644 plugins/inputs/intel_powerstat/intel_powerstat_test.go create mode 100644 plugins/inputs/intel_powerstat/msr.go create mode 100644 plugins/inputs/intel_powerstat/msr_mock_test.go create mode 100644 plugins/inputs/intel_powerstat/msr_test.go create mode 100644 plugins/inputs/intel_powerstat/rapl.go create mode 100644 plugins/inputs/intel_powerstat/rapl_mock_test.go create mode 100644 plugins/inputs/intel_powerstat/rapl_test.go create mode 100644 plugins/inputs/intel_powerstat/sample.conf create mode 100644 plugins/inputs/intel_powerstat/unit_converter.go create mode 100644 plugins/inputs/intel_rdt/sample.conf create mode 100644 plugins/inputs/internal/sample.conf create mode 100644 plugins/inputs/internet_speed/README.md create mode 100644 plugins/inputs/internet_speed/internet_speed.go create mode 100644 plugins/inputs/internet_speed/internet_speed_test.go create mode 100644 plugins/inputs/internet_speed/sample.conf create mode 100644 plugins/inputs/interrupts/sample.conf rename plugins/inputs/ipmi_sensor/{ipmi.go => ipmi_sensor.go} (60%) rename plugins/inputs/ipmi_sensor/{ipmi_test.go => ipmi_sensor_test.go} (84%) create mode 100644 plugins/inputs/ipmi_sensor/sample.conf create mode 100644 plugins/inputs/ipset/sample.conf create mode 100644 plugins/inputs/iptables/sample.conf create mode 100644 plugins/inputs/ipvs/sample.conf create mode 100644 plugins/inputs/jenkins/sample.conf create mode 100644 plugins/inputs/jolokia/sample.conf rename plugins/inputs/jolokia2/{ => common}/client.go (80%) rename plugins/inputs/jolokia2/{ => common}/gatherer.go (93%) rename plugins/inputs/jolokia2/{ => common}/gatherer_test.go (90%) rename plugins/inputs/jolokia2/{ => common}/metric.go (99%) rename plugins/inputs/jolokia2/{ => common}/point_builder.go (87%) create mode 100644 plugins/inputs/jolokia2/examples/kafka-connect.conf delete mode 100644 plugins/inputs/jolokia2/jolokia.go create mode 100644 plugins/inputs/jolokia2/jolokia2.go create mode 100644 plugins/inputs/jolokia2/jolokia2_agent/README.md create mode 100644 plugins/inputs/jolokia2/jolokia2_agent/jolokia2_agent.go create mode 100644 plugins/inputs/jolokia2/jolokia2_agent/sample.conf create mode 100644 plugins/inputs/jolokia2/jolokia2_proxy/README.md create mode 100644 plugins/inputs/jolokia2/jolokia2_proxy/jolokia_proxy.go create mode 100644 plugins/inputs/jolokia2/jolokia2_proxy/sample.conf rename plugins/inputs/jolokia2/{jolokia_test.go => jolokia2_test.go} (86%) delete mode 100644 plugins/inputs/jolokia2/jolokia_agent.go delete mode 100644 plugins/inputs/jolokia2/jolokia_proxy.go create mode 100644 plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go create mode 100644 plugins/inputs/jti_openconfig_telemetry/gen.go rename plugins/inputs/jti_openconfig_telemetry/{openconfig_telemetry.go => jti_openconfig_telemetry.go} (73%) rename 
plugins/inputs/jti_openconfig_telemetry/{openconfig_telemetry_test.go => jti_openconfig_telemetry_test.go} (75%) create mode 100644 plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go create mode 100644 plugins/inputs/jti_openconfig_telemetry/sample.conf create mode 100644 plugins/inputs/kafka_consumer/sample.conf create mode 100644 plugins/inputs/kafka_consumer_legacy/sample.conf create mode 100644 plugins/inputs/kapacitor/sample.conf create mode 100644 plugins/inputs/kernel/sample.conf create mode 100644 plugins/inputs/kernel_vmstat/sample.conf create mode 100644 plugins/inputs/kibana/sample.conf create mode 100644 plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf create mode 100644 plugins/inputs/kibana/test_environment/docker-compose.yml create mode 100755 plugins/inputs/kibana/test_environment/run_test_env.sh create mode 100644 plugins/inputs/kinesis_consumer/kinesis_consumer_test.go create mode 100644 plugins/inputs/kinesis_consumer/sample.conf create mode 100644 plugins/inputs/knx_listener/README.md create mode 100644 plugins/inputs/knx_listener/knx_dummy_interface.go create mode 100644 plugins/inputs/knx_listener/knx_listener.go create mode 100644 plugins/inputs/knx_listener/knx_listener_test.go create mode 100644 plugins/inputs/knx_listener/sample.conf rename plugins/inputs/kube_inventory/{kube_state.go => kube_inventory.go} (55%) create mode 100644 plugins/inputs/kube_inventory/sample.conf create mode 100644 plugins/inputs/kubernetes/sample.conf create mode 100644 plugins/inputs/lanz/sample.conf create mode 100644 plugins/inputs/leofs/sample.conf create mode 100644 plugins/inputs/linux_sysctl_fs/sample.conf create mode 100644 plugins/inputs/logparser/sample.conf create mode 100644 plugins/inputs/logstash/sample.conf create mode 100644 plugins/inputs/logstash/samples_logstash7.go create mode 100644 plugins/inputs/lustre2/lustre2_windows.go create mode 100644 plugins/inputs/lustre2/sample.conf create mode 100644 plugins/inputs/lvm/README.md create mode 100644 plugins/inputs/lvm/lvm.go create mode 100644 plugins/inputs/lvm/lvm_test.go create mode 100644 plugins/inputs/lvm/sample.conf create mode 100644 plugins/inputs/mailchimp/sample.conf create mode 100644 plugins/inputs/marklogic/sample.conf create mode 100644 plugins/inputs/mcrouter/sample.conf create mode 100644 plugins/inputs/mdstat/README.md create mode 100644 plugins/inputs/mdstat/mdstat.go create mode 100644 plugins/inputs/mdstat/mdstat_notlinux.go create mode 100644 plugins/inputs/mdstat/mdstat_test.go create mode 100644 plugins/inputs/mdstat/sample.conf rename plugins/inputs/mem/{memory.go => mem.go} (74%) rename plugins/inputs/mem/{memory_test.go => mem_test.go} (92%) create mode 100644 plugins/inputs/mem/sample.conf create mode 100644 plugins/inputs/memcached/sample.conf create mode 100644 plugins/inputs/mesos/sample.conf create mode 100644 plugins/inputs/minecraft/sample.conf create mode 100644 plugins/inputs/mock/README.md create mode 100644 plugins/inputs/mock/mock.go create mode 100644 plugins/inputs/mock/mock_test.go create mode 100644 plugins/inputs/mock/sample.conf delete mode 100644 plugins/inputs/mock_Plugin.go create mode 100644 plugins/inputs/modbus/configuration.go create mode 100644 plugins/inputs/modbus/configuration_register.go create mode 100644 plugins/inputs/modbus/configuration_request.go create mode 100644 plugins/inputs/modbus/request.go create mode 100644 plugins/inputs/modbus/sample.conf create mode 100644 plugins/inputs/modbus/sample_general_begin.conf create mode 100644 
plugins/inputs/modbus/sample_general_end.conf create mode 100644 plugins/inputs/modbus/sample_register.conf create mode 100644 plugins/inputs/modbus/sample_request.conf create mode 100644 plugins/inputs/modbus/type_conversions.go create mode 100644 plugins/inputs/modbus/type_conversions16.go create mode 100644 plugins/inputs/modbus/type_conversions32.go create mode 100644 plugins/inputs/modbus/type_conversions64.go create mode 100644 plugins/inputs/mongodb/sample.conf create mode 100644 plugins/inputs/monit/sample.conf create mode 100644 plugins/inputs/mqtt_consumer/sample.conf create mode 100644 plugins/inputs/multifile/sample.conf create mode 100644 plugins/inputs/mysql/sample.conf create mode 100644 plugins/inputs/nats/sample.conf create mode 100644 plugins/inputs/nats_consumer/sample.conf create mode 100644 plugins/inputs/neptune_apex/sample.conf rename plugins/inputs/net/{NET_README.md => README.md} (78%) create mode 100644 plugins/inputs/net/sample.conf create mode 100644 plugins/inputs/net_response/sample.conf rename plugins/inputs/{net/NETSTAT_README.md => netstat/README.md} (77%) rename plugins/inputs/{net => netstat}/netstat.go (73%) create mode 100644 plugins/inputs/netstat/sample.conf create mode 100644 plugins/inputs/nfsclient/README.md create mode 100644 plugins/inputs/nfsclient/nfsclient.go create mode 100644 plugins/inputs/nfsclient/nfsclient_test.go create mode 100644 plugins/inputs/nfsclient/sample.conf create mode 100644 plugins/inputs/nfsclient/testdata/mountstats create mode 100644 plugins/inputs/nginx/sample.conf create mode 100644 plugins/inputs/nginx_plus/sample.conf create mode 100644 plugins/inputs/nginx_plus_api/sample.conf create mode 100644 plugins/inputs/nginx_sts/sample.conf create mode 100644 plugins/inputs/nginx_upstream_check/sample.conf create mode 100644 plugins/inputs/nginx_vts/sample.conf create mode 100644 plugins/inputs/nomad/README.md create mode 100644 plugins/inputs/nomad/nomad.go create mode 100644 plugins/inputs/nomad/nomad_metrics.go create mode 100644 plugins/inputs/nomad/nomad_test.go create mode 100644 plugins/inputs/nomad/sample.conf create mode 100644 plugins/inputs/nomad/testdata/response_key_metrics.json create mode 100644 plugins/inputs/nsd/sample.conf create mode 100644 plugins/inputs/nsq/sample.conf create mode 100644 plugins/inputs/nsq_consumer/sample.conf create mode 100644 plugins/inputs/nstat/sample.conf create mode 100644 plugins/inputs/ntpq/sample.conf create mode 100644 plugins/inputs/nvidia_smi/sample.conf create mode 100644 plugins/inputs/opcua/opcua.go delete mode 100644 plugins/inputs/opcua/opcua_client.go delete mode 100644 plugins/inputs/opcua/opcua_client_test.go create mode 100644 plugins/inputs/opcua/opcua_test.go create mode 100644 plugins/inputs/opcua/sample.conf create mode 100644 plugins/inputs/openldap/sample.conf create mode 100644 plugins/inputs/openntpd/sample.conf create mode 100644 plugins/inputs/opensmtpd/sample.conf create mode 100644 plugins/inputs/openstack/README.md create mode 100644 plugins/inputs/openstack/openstack.go create mode 100644 plugins/inputs/openstack/sample.conf create mode 100644 plugins/inputs/opentelemetry/README.md create mode 100644 plugins/inputs/opentelemetry/grpc_services.go create mode 100644 plugins/inputs/opentelemetry/logger.go create mode 100644 plugins/inputs/opentelemetry/opentelemetry.go create mode 100644 plugins/inputs/opentelemetry/opentelemetry_test.go create mode 100644 plugins/inputs/opentelemetry/sample.conf create mode 100644 plugins/inputs/opentelemetry/writer.go 
create mode 100644 plugins/inputs/openweathermap/sample.conf create mode 100644 plugins/inputs/passenger/sample.conf create mode 100644 plugins/inputs/pf/sample.conf create mode 100644 plugins/inputs/pgbouncer/sample.conf create mode 100644 plugins/inputs/phpfpm/sample.conf create mode 100644 plugins/inputs/ping/sample.conf create mode 100644 plugins/inputs/postfix/postfix_windows.go create mode 100644 plugins/inputs/postfix/sample.conf create mode 100644 plugins/inputs/postgresql/sample.conf create mode 100644 plugins/inputs/postgresql_extensible/sample.conf create mode 100644 plugins/inputs/powerdns/sample.conf create mode 100644 plugins/inputs/powerdns_recursor/sample.conf create mode 100644 plugins/inputs/processes/sample.conf create mode 100644 plugins/inputs/procstat/sample.conf create mode 100644 plugins/inputs/prometheus/consul.go create mode 100644 plugins/inputs/prometheus/sample.conf create mode 100644 plugins/inputs/proxmox/sample.conf create mode 100644 plugins/inputs/puppetagent/sample.conf create mode 100644 plugins/inputs/rabbitmq/sample.conf rename plugins/inputs/rabbitmq/testdata/{ => set1}/exchanges.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/federation-links.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/memory.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/nodes.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/overview.json (100%) rename plugins/inputs/rabbitmq/testdata/{ => set1}/queues.json (100%) create mode 100644 plugins/inputs/rabbitmq/testdata/set2/exchanges.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/federation-links.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/memory.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/nodes.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/overview.json create mode 100644 plugins/inputs/rabbitmq/testdata/set2/queues.json create mode 100644 plugins/inputs/raindrops/sample.conf create mode 100644 plugins/inputs/ras/sample.conf create mode 100644 plugins/inputs/ravendb/README.md create mode 100644 plugins/inputs/ravendb/ravendb.go create mode 100644 plugins/inputs/ravendb/ravendb_dto.go create mode 100644 plugins/inputs/ravendb/ravendb_test.go create mode 100644 plugins/inputs/ravendb/sample.conf create mode 100644 plugins/inputs/ravendb/testdata/collections_full.json create mode 100644 plugins/inputs/ravendb/testdata/collections_min.json create mode 100644 plugins/inputs/ravendb/testdata/databases_full.json create mode 100644 plugins/inputs/ravendb/testdata/databases_min.json create mode 100644 plugins/inputs/ravendb/testdata/indexes_full.json create mode 100644 plugins/inputs/ravendb/testdata/indexes_min.json create mode 100644 plugins/inputs/ravendb/testdata/server_full.json create mode 100644 plugins/inputs/ravendb/testdata/server_min.json create mode 100644 plugins/inputs/redfish/sample.conf create mode 100644 plugins/inputs/redis/sample.conf create mode 100644 plugins/inputs/redis_sentinel/README.md create mode 100644 plugins/inputs/redis_sentinel/redis_sentinel.go create mode 100644 plugins/inputs/redis_sentinel/redis_sentinel_test.go create mode 100644 plugins/inputs/redis_sentinel/redis_sentinel_types.go create mode 100644 plugins/inputs/redis_sentinel/sample.conf create mode 100644 plugins/inputs/redis_sentinel/testdata/sentinel.info.response create mode 100644 plugins/inputs/rethinkdb/sample.conf create mode 100644 plugins/inputs/riak/sample.conf create mode 100644 
plugins/inputs/riemann_listener/README.md create mode 100644 plugins/inputs/riemann_listener/riemann_listener.go create mode 100644 plugins/inputs/riemann_listener/riemann_listener_test.go create mode 100644 plugins/inputs/riemann_listener/sample.conf create mode 100644 plugins/inputs/salesforce/sample.conf create mode 100644 plugins/inputs/sensors/sample.conf create mode 100644 plugins/inputs/sflow/sample.conf create mode 100644 plugins/inputs/sflow/types_test.go create mode 100644 plugins/inputs/slab/README.md create mode 100644 plugins/inputs/slab/sample.conf create mode 100644 plugins/inputs/slab/slab.go create mode 100644 plugins/inputs/slab/slab_notlinux.go create mode 100644 plugins/inputs/slab/slab_test.go create mode 100644 plugins/inputs/slab/testdata/slabinfo create mode 100644 plugins/inputs/smart/sample.conf create mode 100644 plugins/inputs/snmp/gosmi.go create mode 100644 plugins/inputs/snmp/gosmi_test.go create mode 100644 plugins/inputs/snmp/netsnmp.go create mode 100644 plugins/inputs/snmp/sample.conf create mode 100644 plugins/inputs/snmp/testdata/bridgeMib create mode 100644 plugins/inputs/snmp/testdata/bridgeMibImports create mode 100644 plugins/inputs/snmp/testdata/foo create mode 100644 plugins/inputs/snmp/testdata/fooImports create mode 100644 plugins/inputs/snmp/testdata/ifPhysAddress create mode 100644 plugins/inputs/snmp/testdata/ifPhysAddressImports create mode 100644 plugins/inputs/snmp/testdata/server create mode 100644 plugins/inputs/snmp/testdata/serverImports delete mode 100644 plugins/inputs/snmp/testdata/snmpd.conf create mode 100644 plugins/inputs/snmp/testdata/tableBuild create mode 100644 plugins/inputs/snmp/testdata/tableMib create mode 100644 plugins/inputs/snmp/testdata/tableMibImports create mode 100644 plugins/inputs/snmp/testdata/tcpMib create mode 100644 plugins/inputs/snmp/testdata/tcpMibImports delete mode 100644 plugins/inputs/snmp/testdata/test.mib create mode 100644 plugins/inputs/snmp_legacy/sample.conf create mode 100644 plugins/inputs/snmp_trap/gosmi.go create mode 100644 plugins/inputs/snmp_trap/netsnmp.go create mode 100644 plugins/inputs/snmp_trap/sample.conf create mode 100644 plugins/inputs/socket_listener/sample.conf create mode 100644 plugins/inputs/socketstat/README.md create mode 100644 plugins/inputs/socketstat/sample.conf create mode 100644 plugins/inputs/socketstat/socketstat.go create mode 100644 plugins/inputs/socketstat/socketstat_test.go create mode 100644 plugins/inputs/socketstat/socketstat_windows.go create mode 100644 plugins/inputs/socketstat/testdata/tcp_no_sockets.txt create mode 100644 plugins/inputs/socketstat/testdata/tcp_traffic.txt create mode 100644 plugins/inputs/socketstat/testdata/udp_no_sockets.txt create mode 100644 plugins/inputs/socketstat/testdata/udp_traffic.txt create mode 100644 plugins/inputs/solr/sample.conf create mode 100644 plugins/inputs/sql/README.md create mode 100644 plugins/inputs/sql/drivers.go create mode 100644 plugins/inputs/sql/drivers_sqlite.go create mode 100644 plugins/inputs/sql/drivers_sqlite_other.go create mode 100644 plugins/inputs/sql/sample.conf create mode 100644 plugins/inputs/sql/sql.go create mode 100644 plugins/inputs/sql/sql_test.go create mode 100644 plugins/inputs/sql/testdata/clickhouse/expected.sql create mode 100644 plugins/inputs/sql/testdata/mariadb/expected.sql create mode 100644 plugins/inputs/sql/testdata/postgres/expected.sql create mode 100644 plugins/inputs/sqlserver/azuresqldbqueries.go create mode 100644 
plugins/inputs/sqlserver/azuresqldbqueries_test.go create mode 100644 plugins/inputs/sqlserver/azuresqlmanagedqueries.go create mode 100644 plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go create mode 100644 plugins/inputs/sqlserver/azuresqlpoolqueries.go create mode 100644 plugins/inputs/sqlserver/azuresqlpoolqueries_test.go delete mode 100644 plugins/inputs/sqlserver/azuresqlqueries.go create mode 100644 plugins/inputs/sqlserver/connectionstring.go create mode 100644 plugins/inputs/sqlserver/sample.conf create mode 100644 plugins/inputs/stackdriver/sample.conf create mode 100644 plugins/inputs/statsd/sample.conf create mode 100644 plugins/inputs/suricata/sample.conf delete mode 100644 plugins/inputs/suricata/suricata_testutil.go create mode 100644 plugins/inputs/suricata/testdata/test2.json create mode 100644 plugins/inputs/suricata/testdata/test3.json create mode 100644 plugins/inputs/swap/sample.conf create mode 100644 plugins/inputs/synproxy/sample.conf create mode 100644 plugins/inputs/syslog/rfc3164_test.go create mode 100644 plugins/inputs/syslog/sample.conf create mode 100644 plugins/inputs/sysstat/sample.conf create mode 100644 plugins/inputs/system/sample.conf create mode 100644 plugins/inputs/systemd_units/sample.conf rename plugins/inputs/systemd_units/{systemd_units_linux.go => systemd_units.go} (68%) rename plugins/inputs/systemd_units/{systemd_units_linux_test.go => systemd_units_test.go} (92%) create mode 100644 plugins/inputs/tail/sample.conf create mode 100644 plugins/inputs/tcp_listener/sample.conf create mode 100644 plugins/inputs/teamspeak/sample.conf create mode 100644 plugins/inputs/temp/sample.conf create mode 100644 plugins/inputs/tengine/sample.conf create mode 100644 plugins/inputs/tomcat/sample.conf create mode 100644 plugins/inputs/trig/README.md create mode 100644 plugins/inputs/trig/sample.conf create mode 100644 plugins/inputs/twemproxy/README.md create mode 100644 plugins/inputs/twemproxy/sample.conf create mode 100644 plugins/inputs/udp_listener/sample.conf create mode 100644 plugins/inputs/unbound/sample.conf create mode 100644 plugins/inputs/uwsgi/sample.conf create mode 100644 plugins/inputs/varnish/sample.conf create mode 100644 plugins/inputs/varnish/test_data/varnish4_4.json create mode 100644 plugins/inputs/varnish/test_data/varnish6.2.1_reload.json create mode 100644 plugins/inputs/varnish/test_data/varnish6.6.json create mode 100644 plugins/inputs/varnish/test_data/varnish_types.json create mode 100644 plugins/inputs/varnish/test_data/varnish_v1_reload.txt create mode 100644 plugins/inputs/varnish/test_data/varnishadm-200.json create mode 100644 plugins/inputs/varnish/test_data/varnishadm-reload.json create mode 100644 plugins/inputs/vault/README.md create mode 100644 plugins/inputs/vault/sample.conf create mode 100644 plugins/inputs/vault/testdata/response_key_metrics.json create mode 100644 plugins/inputs/vault/vault.go create mode 100644 plugins/inputs/vault/vault_metrics.go create mode 100644 plugins/inputs/vault/vault_test.go create mode 100644 plugins/inputs/vsphere/sample.conf create mode 100644 plugins/inputs/webhooks/artifactory/README.md create mode 100644 plugins/inputs/webhooks/artifactory/artifactory_webhook.go create mode 100644 plugins/inputs/webhooks/artifactory/artifactory_webhook_mock_json.go create mode 100644 plugins/inputs/webhooks/artifactory/artifactory_webhook_models.go create mode 100644 plugins/inputs/webhooks/artifactory/artifactory_webhook_test.go create mode 100644 plugins/inputs/webhooks/sample.conf create 
mode 100644 plugins/inputs/win_eventlog/sample.conf create mode 100644 plugins/inputs/win_perf_counters/sample.conf create mode 100644 plugins/inputs/win_services/sample.conf create mode 100644 plugins/inputs/wireguard/sample.conf create mode 100644 plugins/inputs/wireless/sample.conf create mode 100644 plugins/inputs/x509_cert/sample.conf create mode 100644 plugins/inputs/xtremio/README.md create mode 100644 plugins/inputs/xtremio/sample.conf create mode 100644 plugins/inputs/xtremio/testdata/sample_bbu_response.json create mode 100644 plugins/inputs/xtremio/testdata/sample_get_bbu_response.json create mode 100644 plugins/inputs/xtremio/xtremio.go create mode 100644 plugins/inputs/xtremio/xtremio_test.go create mode 100644 plugins/inputs/xtremio/xtremio_types.go create mode 100644 plugins/inputs/zfs/sample.conf create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/GoUnusedProtection__.go create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore-consts.go create mode 100644 plugins/inputs/zipkin/codec/thrift/gen-go/zipkincore/zipkinCore.go create mode 100644 plugins/inputs/zipkin/sample.conf create mode 100644 plugins/inputs/zookeeper/sample.conf create mode 100644 plugins/outputs/amon/sample.conf create mode 100644 plugins/outputs/amqp/sample.conf create mode 100644 plugins/outputs/application_insights/sample.conf create mode 100644 plugins/outputs/azure_data_explorer/README.md create mode 100644 plugins/outputs/azure_data_explorer/azure_data_explorer.go create mode 100644 plugins/outputs/azure_data_explorer/azure_data_explorer_test.go create mode 100644 plugins/outputs/azure_data_explorer/sample.conf create mode 100644 plugins/outputs/azure_monitor/sample.conf create mode 100644 plugins/outputs/bigquery/README.md create mode 100644 plugins/outputs/bigquery/bigquery.go create mode 100644 plugins/outputs/bigquery/bigquery_test.go create mode 100644 plugins/outputs/bigquery/sample.conf rename plugins/outputs/cloud_pubsub/{pubsub.go => cloud_pubsub.go} (63%) rename plugins/outputs/cloud_pubsub/{pubsub_test.go => cloud_pubsub_test.go} (91%) create mode 100644 plugins/outputs/cloud_pubsub/sample.conf create mode 100644 plugins/outputs/cloudwatch/sample.conf create mode 100644 plugins/outputs/cloudwatch_logs/README.md create mode 100644 plugins/outputs/cloudwatch_logs/cloudwatch_logs.go create mode 100644 plugins/outputs/cloudwatch_logs/cloudwatch_logs_test.go create mode 100644 plugins/outputs/cloudwatch_logs/sample.conf create mode 100644 plugins/outputs/cratedb/sample.conf create mode 100644 plugins/outputs/datadog/sample.conf create mode 100644 plugins/outputs/deprecations.go create mode 100644 plugins/outputs/discard/sample.conf create mode 100644 plugins/outputs/dynatrace/sample.conf create mode 100644 plugins/outputs/elasticsearch/sample.conf create mode 100644 plugins/outputs/event_hubs/README.md create mode 100644 plugins/outputs/event_hubs/event_hubs.go create mode 100644 plugins/outputs/event_hubs/event_hubs_test.go create mode 100644 plugins/outputs/event_hubs/sample.conf create mode 100644 plugins/outputs/exec/sample.conf create mode 100644 plugins/outputs/execd/sample.conf create mode 100644 plugins/outputs/file/sample.conf create mode 100644 plugins/outputs/graphite/sample.conf create mode 100644 plugins/outputs/graylog/sample.conf create mode 100644 plugins/outputs/groundwork/README.md create mode 100644 plugins/outputs/groundwork/groundwork.go create mode 100644 plugins/outputs/groundwork/groundwork_test.go create mode 100644 
plugins/outputs/groundwork/sample.conf create mode 100644 plugins/outputs/health/sample.conf create mode 100644 plugins/outputs/http/sample.conf create mode 100644 plugins/outputs/influxdb/sample.conf rename plugins/outputs/influxdb_v2/{influxdb.go => influxdb_v2.go} (57%) rename plugins/outputs/influxdb_v2/{influxdb_test.go => influxdb_v2_test.go} (97%) create mode 100644 plugins/outputs/influxdb_v2/sample.conf create mode 100644 plugins/outputs/instrumental/sample.conf create mode 100644 plugins/outputs/kafka/sample.conf create mode 100644 plugins/outputs/kinesis/sample.conf create mode 100644 plugins/outputs/librato/sample.conf create mode 100644 plugins/outputs/logzio/sample.conf create mode 100644 plugins/outputs/loki/README.md create mode 100644 plugins/outputs/loki/loki.go create mode 100644 plugins/outputs/loki/loki_test.go create mode 100644 plugins/outputs/loki/sample.conf create mode 100644 plugins/outputs/loki/stream.go create mode 100644 plugins/outputs/loki/stream_test.go create mode 100644 plugins/outputs/mongodb/README.md create mode 100644 plugins/outputs/mongodb/mongodb.go create mode 100644 plugins/outputs/mongodb/mongodb_test.go create mode 100644 plugins/outputs/mongodb/sample.conf create mode 100644 plugins/outputs/mongodb/testdata/auth_scram/setup.js create mode 100644 plugins/outputs/mongodb/testdata/auth_x509/setup.js create mode 100644 plugins/outputs/mqtt/sample.conf create mode 100644 plugins/outputs/nats/sample.conf create mode 100644 plugins/outputs/newrelic/sample.conf create mode 100644 plugins/outputs/nsq/sample.conf create mode 100644 plugins/outputs/opentelemetry/README.md create mode 100644 plugins/outputs/opentelemetry/logger.go create mode 100644 plugins/outputs/opentelemetry/opentelemetry.go create mode 100644 plugins/outputs/opentelemetry/opentelemetry_test.go create mode 100644 plugins/outputs/opentelemetry/sample.conf create mode 100644 plugins/outputs/opentsdb/sample.conf create mode 100644 plugins/outputs/prometheus_client/sample.conf create mode 100644 plugins/outputs/riemann/sample.conf create mode 100644 plugins/outputs/riemann_legacy/README.md rename plugins/outputs/riemann_legacy/{riemann.go => riemann_legacy.go} (65%) create mode 100644 plugins/outputs/riemann_legacy/riemann_legacy_test.go delete mode 100644 plugins/outputs/riemann_legacy/riemann_test.go create mode 100644 plugins/outputs/riemann_legacy/sample.conf create mode 100644 plugins/outputs/sensu/README.md create mode 100644 plugins/outputs/sensu/sample.conf create mode 100644 plugins/outputs/sensu/sensu.go create mode 100644 plugins/outputs/sensu/sensu_test.go create mode 100644 plugins/outputs/signalfx/README.md create mode 100644 plugins/outputs/signalfx/sample.conf create mode 100644 plugins/outputs/signalfx/signalfx.go create mode 100644 plugins/outputs/signalfx/signalfx_test.go create mode 100644 plugins/outputs/socket_writer/sample.conf create mode 100644 plugins/outputs/sql/README.md create mode 100644 plugins/outputs/sql/sample.conf create mode 100644 plugins/outputs/sql/sql.go create mode 100644 plugins/outputs/sql/sql_test.go create mode 100644 plugins/outputs/sql/sqlite.go create mode 100644 plugins/outputs/sql/sqlite_test.go create mode 100644 plugins/outputs/sql/testdata/clickhouse/expected.txt create mode 100644 plugins/outputs/sql/testdata/clickhouse/initdb/init.sql create mode 100644 plugins/outputs/sql/testdata/mariadb/expected.sql create mode 100644 plugins/outputs/sql/testdata/mariadb/initdb/script.sql create mode 100644 
plugins/outputs/sql/testdata/postgres/expected.sql create mode 100644 plugins/outputs/sql/testdata/postgres/initdb/init.sql create mode 100644 plugins/outputs/stackdriver/counter_cache.go create mode 100644 plugins/outputs/stackdriver/counter_cache_test.go create mode 100644 plugins/outputs/stackdriver/sample.conf create mode 100644 plugins/outputs/sumologic/sample.conf create mode 100644 plugins/outputs/syslog/sample.conf create mode 100644 plugins/outputs/timestream/sample.conf create mode 100644 plugins/outputs/warp10/sample.conf create mode 100644 plugins/outputs/wavefront/sample.conf create mode 100644 plugins/outputs/websocket/README.md create mode 100644 plugins/outputs/websocket/sample.conf create mode 100644 plugins/outputs/websocket/websocket.go create mode 100644 plugins/outputs/websocket/websocket_test.go create mode 100644 plugins/outputs/yandex_cloud_monitoring/sample.conf create mode 100644 plugins/parsers/all/all.go create mode 100644 plugins/parsers/collectd/testdata/authfile create mode 100644 plugins/parsers/influx/influx_upstream/README.md create mode 100644 plugins/parsers/influx/influx_upstream/parser.go create mode 100644 plugins/parsers/influx/influx_upstream/parser_test.go create mode 100644 plugins/parsers/json/json_flattener.go create mode 100644 plugins/parsers/json_v2/README.md create mode 100644 plugins/parsers/json_v2/parser.go create mode 100644 plugins/parsers/json_v2/parser_test.go create mode 100644 plugins/parsers/json_v2/testdata/10670/expected.out create mode 100644 plugins/parsers/json_v2/testdata/10670/input.json create mode 100644 plugins/parsers/json_v2/testdata/10670/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/expected.out create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/input.json create mode 100644 plugins/parsers/json_v2/testdata/array_of_objects/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/expected.out create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/input.json create mode 100644 plugins/parsers/json_v2/testdata/complex_nesting/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/expected.out create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/input.json create mode 100644 plugins/parsers/json_v2/testdata/fields_and_tags_complex/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/expected.out create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/input.json create mode 100644 plugins/parsers/json_v2/testdata/large_numbers/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/expected.out create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/input.json create mode 100644 plugins/parsers/json_v2/testdata/measurement_name_int/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/mix_field_and_object/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/multiple_arrays_in_object/expected.out create mode 100644 
plugins/parsers/json_v2/testdata/multiple_arrays_in_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_arrays_in_object/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/expected.out create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/input_1.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/input_2.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_json_input/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/expected.out create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/input.json create mode 100644 plugins/parsers/json_v2/testdata/multiple_timestamps/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_and_nonnested_tags/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_array_of_objects/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_tags/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/expected.out create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/input.json create mode 100644 plugins/parsers/json_v2/testdata/nested_tags_complex/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/null/expected.out create mode 100644 plugins/parsers/json_v2/testdata/null/input.json create mode 100644 plugins/parsers/json_v2/testdata/null/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/object/input.json create mode 100644 plugins/parsers/json_v2/testdata/object/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/object_timestamp/expected.out create mode 100644 plugins/parsers/json_v2/testdata/object_timestamp/input.json create mode 100644 plugins/parsers/json_v2/testdata/object_timestamp/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/optional/expected.out create mode 100644 plugins/parsers/json_v2/testdata/optional/input.json create mode 100644 plugins/parsers/json_v2/testdata/optional/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/optional_objects/expected.out create mode 100644 plugins/parsers/json_v2/testdata/optional_objects/input_1.json create mode 100644 plugins/parsers/json_v2/testdata/optional_objects/input_2.json create mode 100644 plugins/parsers/json_v2/testdata/optional_objects/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/expected.out create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/input.json create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/expected.out create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/input.json create mode 100644 plugins/parsers/json_v2/testdata/subfieldtag_in_object_2/telegraf.conf create mode 100644 
plugins/parsers/json_v2/testdata/timestamp/expected.out create mode 100644 plugins/parsers/json_v2/testdata/timestamp/input.json create mode 100644 plugins/parsers/json_v2/testdata/timestamp/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/timestamp_ns/expected.out create mode 100644 plugins/parsers/json_v2/testdata/timestamp_ns/input.json create mode 100644 plugins/parsers/json_v2/testdata/timestamp_ns/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/timestamp_rfc3339/expected.out create mode 100644 plugins/parsers/json_v2/testdata/timestamp_rfc3339/input.json create mode 100644 plugins/parsers/json_v2/testdata/timestamp_rfc3339/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/types/expected.out create mode 100644 plugins/parsers/json_v2/testdata/types/input.json create mode 100644 plugins/parsers/json_v2/testdata/types/telegraf.conf create mode 100644 plugins/parsers/json_v2/testdata/wrong_path/expected.out create mode 100644 plugins/parsers/json_v2/testdata/wrong_path/input.json create mode 100644 plugins/parsers/json_v2/testdata/wrong_path/telegraf.conf create mode 100644 plugins/parsers/prometheus/README.md create mode 100644 plugins/parsers/prometheus/common/helpers.go create mode 100644 plugins/parsers/prometheus/parser.go create mode 100644 plugins/parsers/prometheus/parser_test.go create mode 100644 plugins/parsers/prometheusremotewrite/README.md create mode 100644 plugins/parsers/prometheusremotewrite/parser.go create mode 100644 plugins/parsers/prometheusremotewrite/parser_test.go create mode 100644 plugins/parsers/registry_test.go create mode 100644 plugins/parsers/temporary/json_v2/types.go create mode 100644 plugins/parsers/temporary/xpath/types.go create mode 100644 plugins/parsers/xpath/README.md create mode 100644 plugins/parsers/xpath/json_document.go create mode 100644 plugins/parsers/xpath/msgpack_document.go create mode 100644 plugins/parsers/xpath/parser.go create mode 100644 plugins/parsers/xpath/parser_test.go create mode 100644 plugins/parsers/xpath/protocolbuffer_document.go create mode 100644 plugins/parsers/xpath/testcases/addressbook.conf create mode 100644 plugins/parsers/xpath/testcases/addressbook.dat create mode 100644 plugins/parsers/xpath/testcases/earthquakes.conf create mode 100644 plugins/parsers/xpath/testcases/earthquakes.quakeml create mode 100644 plugins/parsers/xpath/testcases/field_tag_batch.conf create mode 100644 plugins/parsers/xpath/testcases/field_tag_batch.json create mode 100644 plugins/parsers/xpath/testcases/multisensor.xml create mode 100644 plugins/parsers/xpath/testcases/multisensor_explicit_basic.conf create mode 100644 plugins/parsers/xpath/testcases/multisensor_explicit_batch.conf create mode 100644 plugins/parsers/xpath/testcases/multisensor_selection_batch.conf create mode 100644 plugins/parsers/xpath/testcases/openweathermap_5d.json create mode 100644 plugins/parsers/xpath/testcases/openweathermap_5d.xml create mode 100644 plugins/parsers/xpath/testcases/openweathermap_json.conf create mode 100644 plugins/parsers/xpath/testcases/openweathermap_xml.conf create mode 100644 plugins/parsers/xpath/testcases/protos/addressbook.proto create mode 100644 plugins/parsers/xpath/testcases/protos/person.proto create mode 100644 plugins/parsers/xpath/testcases/protos/phonenumber.proto create mode 100644 plugins/parsers/xpath/testcases/tracker.msg create mode 100644 plugins/parsers/xpath/testcases/tracker_msgpack.conf create mode 100644 plugins/parsers/xpath/xml_document.go create mode 100644 
plugins/processors/aws/ec2/README.md create mode 100644 plugins/processors/aws/ec2/ec2.go create mode 100644 plugins/processors/aws/ec2/ec2_test.go create mode 100644 plugins/processors/aws/ec2/sample.conf create mode 100644 plugins/processors/clone/sample.conf create mode 100644 plugins/processors/converter/sample.conf create mode 100644 plugins/processors/date/sample.conf create mode 100644 plugins/processors/dedup/sample.conf create mode 100644 plugins/processors/defaults/sample.conf create mode 100644 plugins/processors/deprecations.go create mode 100644 plugins/processors/enum/sample.conf create mode 100644 plugins/processors/execd/sample.conf create mode 100644 plugins/processors/filepath/sample.conf create mode 100644 plugins/processors/ifname/sample.conf create mode 100644 plugins/processors/noise/README.md create mode 100644 plugins/processors/noise/noise.go create mode 100644 plugins/processors/noise/noise_test.go create mode 100644 plugins/processors/noise/sample.conf create mode 100644 plugins/processors/override/sample.conf create mode 100644 plugins/processors/parser/sample.conf create mode 100644 plugins/processors/pivot/sample.conf create mode 100644 plugins/processors/port_name/sample.conf create mode 100644 plugins/processors/printer/sample.conf create mode 100644 plugins/processors/regex/sample.conf create mode 100644 plugins/processors/rename/sample.conf create mode 100644 plugins/processors/reverse_dns/reverse_dns.go rename plugins/processors/reverse_dns/{reversedns_test.go => reverse_dns_test.go} (63%) delete mode 100644 plugins/processors/reverse_dns/reversedns.go create mode 100644 plugins/processors/reverse_dns/sample.conf create mode 100644 plugins/processors/s2geo/sample.conf create mode 100644 plugins/processors/starlark/sample.conf create mode 100644 plugins/processors/starlark/testdata/compare_metrics.star create mode 100644 plugins/processors/starlark/testdata/drop_fields_with_unexpected_type.star create mode 100644 plugins/processors/starlark/testdata/drop_string_fields.star create mode 100644 plugins/processors/starlark/testdata/fail.star create mode 100644 plugins/processors/starlark/testdata/iops.star create mode 100644 plugins/processors/starlark/testdata/json_nested.star create mode 100644 plugins/processors/starlark/testdata/logging.star create mode 100644 plugins/processors/starlark/testdata/math.star create mode 100644 plugins/processors/starlark/testdata/multiple_metrics.star create mode 100644 plugins/processors/starlark/testdata/multiple_metrics_with_json.star create mode 100644 plugins/processors/starlark/testdata/rename_prometheus_remote_write.star create mode 100644 plugins/processors/starlark/testdata/schema_sizing.star create mode 100644 plugins/processors/starlark/testdata/sparkplug.star create mode 100644 plugins/processors/starlark/testdata/time_date.star create mode 100644 plugins/processors/starlark/testdata/time_duration.star create mode 100644 plugins/processors/starlark/testdata/time_set_timestamp.star create mode 100644 plugins/processors/starlark/testdata/time_timestamp.star create mode 100644 plugins/processors/starlark/testdata/time_timestamp_nanos.star create mode 100644 plugins/processors/strings/sample.conf create mode 100644 plugins/processors/tag_limit/sample.conf create mode 100644 plugins/processors/template/sample.conf create mode 100644 plugins/processors/topk/sample.conf create mode 100644 plugins/processors/unpivot/sample.conf create mode 100644 plugins/serializers/csv/README.md create mode 100644 
plugins/serializers/csv/csv.go create mode 100644 plugins/serializers/csv/csv_test.go create mode 100644 plugins/serializers/csv/testcases/basic.conf create mode 100644 plugins/serializers/csv/testcases/basic.csv create mode 100644 plugins/serializers/csv/testcases/header.conf create mode 100644 plugins/serializers/csv/testcases/header.csv create mode 100644 plugins/serializers/csv/testcases/nanoseconds.conf create mode 100644 plugins/serializers/csv/testcases/nanoseconds.csv create mode 100644 plugins/serializers/csv/testcases/prefix.conf create mode 100644 plugins/serializers/csv/testcases/prefix.csv create mode 100644 plugins/serializers/csv/testcases/rfc3339.conf create mode 100644 plugins/serializers/csv/testcases/rfc3339.csv create mode 100644 plugins/serializers/csv/testcases/semicolon.conf create mode 100644 plugins/serializers/csv/testcases/semicolon.csv create mode 100644 plugins/serializers/msgpack/README.md create mode 100644 plugins/serializers/msgpack/metric.go create mode 100644 plugins/serializers/msgpack/metric_gen.go create mode 100644 plugins/serializers/msgpack/metric_gen_test.go create mode 100644 plugins/serializers/msgpack/metric_test.go create mode 100644 plugins/serializers/msgpack/msgpack.go create mode 100644 plugins/serializers/msgpack/msgpack_test.go create mode 100644 plugins/serializers/prometheusremotewrite/README.md create mode 100644 plugins/serializers/prometheusremotewrite/prometheusremotewrite.go create mode 100644 plugins/serializers/prometheusremotewrite/prometheusremotewrite_test.go delete mode 100644 scripts/alpine.docker delete mode 100644 scripts/buster.docker create mode 100755 scripts/check-file-changes.sh delete mode 100644 scripts/ci-1.15.docker rename scripts/{ci-1.14.docker => ci.docker} (95%) delete mode 100755 scripts/docker-entrypoint.sh create mode 100755 scripts/generate_config.sh create mode 100644 scripts/generate_versioninfo/main.go create mode 100755 scripts/install_gotestsum.sh create mode 100644 scripts/installgo_linux.sh create mode 100644 scripts/installgo_mac.sh create mode 100644 scripts/installgo_windows.sh create mode 100755 scripts/local_circleci.sh create mode 100644 scripts/mac-signing.sh delete mode 100644 scripts/release.sh delete mode 100644 scripts/stretch.docker create mode 100644 scripts/telegraf_entry_mac create mode 100644 scripts/windows-signing.ps1 create mode 100644 testutil/capturelog.go create mode 100644 testutil/container.go create mode 100644 testutil/container_test.go create mode 100644 testutil/file.go create mode 100644 testutil/pki/client.pem create mode 100644 testutil/pki/clientenc.pem create mode 100644 testutil/pki/clientenckey.pem create mode 100644 testutil/pki/server.pem create mode 100644 testutil/socket.go create mode 100644 tools/package_lxd_test/README.md create mode 100644 tools/package_lxd_test/container.go create mode 100644 tools/package_lxd_test/lxd.go create mode 100644 tools/package_lxd_test/main.go create mode 100644 tools/readme_config_includer/generator.go create mode 100644 tools/readme_linter/README.md create mode 100644 tools/readme_linter/assert.go create mode 100644 tools/readme_linter/main.go create mode 100644 tools/readme_linter/plugin.go create mode 100644 tools/readme_linter/rules.go create mode 100644 tools/readme_linter/set.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 2d3c152fedc8b..74b3c7a4da1cd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,33 +1,190 @@ ---- -defaults: - defaults: &defaults +version: 2.1 +orbs: + win: 
circleci/windows@2.4.0 + aws-cli: circleci/aws-cli@1.4.0 + +executors: + telegraf-ci: working_directory: '/go/src/github.com/influxdata/telegraf' - environment: - GOFLAGS: -p=8 - go-1_14: &go-1_14 - docker: - - image: 'quay.io/influxdb/telegraf-ci:1.14.9' - go-1_15: &go-1_15 + resource_class: large docker: - - image: 'quay.io/influxdb/telegraf-ci:1.15.2' - mac: &mac - macos: - xcode: 11.3.1 + - image: 'quay.io/influxdb/telegraf-ci:1.18.3' + environment: + GOFLAGS: -p=4 + mac: working_directory: '~/go/src/github.com/influxdata/telegraf' + resource_class: medium + macos: + xcode: 13.2.0 environment: HOMEBREW_NO_AUTO_UPDATE: 1 - GOFLAGS: -p=8 + GOFLAGS: -p=4 -version: 2 +commands: + generate-config: + parameters: + os: + type: string + default: "linux" + steps: + - checkout + - attach_workspace: + at: '/build' + - run: ./scripts/generate_config.sh << parameters.os >> + - store_artifacts: + path: './new-config' + destination: 'new-config' + check-changed-files-or-halt: + steps: + - run: ./scripts/check-file-changes.sh + test-go: + parameters: + os: + type: string + default: "linux" + arch: + type: string + default: "amd64" + gotestsum: + type: string + default: "gotestsum" + cache_version: + type: string + default: "v3" + steps: + - checkout + - check-changed-files-or-halt + - when: + condition: + equal: [ linux, << parameters.os >> ] + steps: + - restore_cache: + key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - attach_workspace: + at: '/go' + - when: + condition: + equal: [ darwin, << parameters.os >> ] + steps: + - restore_cache: + key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - run: 'sh ./scripts/installgo_mac.sh' + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - run: rm -rf /c/Go + - restore_cache: + key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + - run: 'sh ./scripts/installgo_windows.sh' + - run: choco install mingw + - run: ./scripts/install_gotestsum.sh << parameters.os >> << parameters.gotestsum >> + - unless: + condition: + equal: [ "386", << parameters.arch >> ] + steps: + - run: echo 'export RACE="-race"' >> $BASH_ENV + - run: | + GOARCH=<< parameters.arch >> ./<< parameters.gotestsum >> -- ${RACE} -short -cover -coverprofile=coverage.out ./... 
+ - when: + condition: + and: + - equal: [ "master", << pipeline.git.branch >> ] + - equal: [ "linux", << parameters.os >> ] + - equal: [ "amd64", << parameters.arch >> ] + steps: + - run: + name: "Installing goveralls" + command: go install github.com/mattn/goveralls@latest + - run: + name: "Remove plugins/parsers/influx/machine.go from coverage" + command: sed -i '/github.com\/influxdata\/telegraf\/plugins\/parsers\/influx\/machine.go/d' coverage.out + - run: + name: "Create report" + command: /go/bin/goveralls -coverprofile=coverage.out -service=circle-ci -repotoken=${COVERALLS_TOKEN} + - when: + condition: + equal: [ linux, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: linux-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - '~/go/src/github.com/influxdata/telegraf/gotestsum' + - when: + condition: + equal: [ darwin, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: darwin-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - '/go/src/github.com/influxdata/telegraf/gotestsum' + - '/usr/local/Cellar/go' + - '/usr/local/bin/go' + - '/usr/local/bin/gofmt' + - when: + condition: + equal: [ windows, << parameters.os >> ] + steps: + - save_cache: + name: 'Saving cache' + key: windows-go-<< parameters.cache_version >>-{{ checksum "go.sum" }} + paths: + - 'C:\Go' + - 'C:\Users\circleci\project\gotestsum.exe' + package-build: + parameters: + type: + type: string + default: "" + nightly: + type: boolean + default: false + steps: + - checkout + - check-changed-files-or-halt + - attach_workspace: + at: '/go' + - when: + condition: + equal: [ windows, << parameters.type >> ] + steps: + - run: make versioninfo + - when: + condition: << parameters.nightly >> + steps: + - run: + command: 'NIGHTLY=1 make package include_packages="$(make << parameters.type >>)"' + no_output_timeout: 30m + - unless: + condition: + or: + - << parameters.nightly >> + steps: + - run: + command: 'make package include_packages="$(make << parameters.type >>)"' + no_output_timeout: 30m + - store_artifacts: + path: './build/dist' + destination: 'build/dist' + - persist_to_workspace: + root: './build' + paths: + - 'dist' jobs: - deps: - <<: [ *defaults, *go-1_15 ] + test-go-linux: + executor: telegraf-ci steps: - checkout - restore_cache: key: go-mod-v1-{{ checksum "go.sum" }} + - check-changed-files-or-halt - run: 'make deps' - run: 'make tidy' + - run: 'make check' + - run: 'make check-deps' + - test-go - save_cache: name: 'go module cache' key: go-mod-v1-{{ checksum "go.sum" }} @@ -37,188 +194,592 @@ jobs: root: '/go' paths: - '*' - macdeps: - <<: [ *mac ] + test-go-linux-386: + executor: telegraf-ci steps: - checkout - restore_cache: - key: mac-go-mod-v1-{{ checksum "go.sum" }} - - run: 'brew install go' # latest + key: go-mod-v1-{{ checksum "go.sum" }} + - check-changed-files-or-halt + - run: 'GOARCH=386 make deps' + - run: 'GOARCH=386 make tidy' + - run: 'GOARCH=386 make check' + - test-go: + arch: "386" + test-integration: + machine: + image: ubuntu-2204:current + resource_class: large + steps: + - checkout + - check-changed-files-or-halt + - run: 'sh ./scripts/installgo_linux.sh' - run: 'make deps' - - run: 'make tidy' - - save_cache: - name: 'go module cache' - key: mac-go-mod-v1-{{ checksum "go.sum" }} - paths: - - '~/go/pkg/mod' - - '/usr/local/Cellar/go' - - '/usr/local/bin/go' - - '/usr/local/bin/gofmt' - - persist_to_workspace: - root: '/' - paths: - - 'usr/local/bin/go' - - 'usr/local/Cellar/go' - - 'usr/local/bin/gofmt' 
- - 'Users/distiller/go' + - run: 'make test-integration' + test-go-mac: + executor: mac + steps: + - test-go: + os: darwin + test-go-windows: + executor: + name: win/default + shell: bash.exe + size: xlarge + steps: + - test-go: + os: windows + gotestsum: "gotestsum.exe" - test-go-1.14: - <<: [ *defaults, *go-1_14 ] + windows-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci steps: - - attach_workspace: - at: '/go' - - run: 'make' - - run: 'make test' - test-go-1.14-386: - <<: [ *defaults, *go-1_14 ] + - package-build: + type: windows + nightly: << parameters.nightly >> + darwin-amd64-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci steps: - - attach_workspace: - at: '/go' - - run: 'GOARCH=386 make' - - run: 'GOARCH=386 make test' - test-go-1.15: - <<: [ *defaults, *go-1_15 ] + - package-build: + type: darwin-amd64 + nightly: << parameters.nightly >> + darwin-arm64-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci steps: - - attach_workspace: - at: '/go' - - run: 'make' - - run: 'make check' - - run: 'make check-deps' - - run: 'make test' - test-go-1.15-386: - <<: [ *defaults, *go-1_15 ] + - package-build: + type: darwin-arm64 + nightly: << parameters.nightly >> + i386-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: i386 + nightly: << parameters.nightly >> + ppc64le-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: ppc64le + nightly: << parameters.nightly >> + riscv64-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: riscv64 + nightly: << parameters.nightly >> + s390x-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: s390x + nightly: << parameters.nightly >> + armel-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: armel + nightly: << parameters.nightly >> + amd64-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: amd64 + nightly: << parameters.nightly >> + arm64-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: arm64 + nightly: << parameters.nightly >> + mipsel-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: mipsel + nightly: << parameters.nightly >> + mips-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: mips + nightly: << parameters.nightly >> + static-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: static + nightly: << parameters.nightly >> + armhf-package: + parameters: + nightly: + type: boolean + default: false + executor: telegraf-ci + steps: + - package-build: + type: armhf + nightly: << parameters.nightly >> + nightly: + executor: telegraf-ci steps: - attach_workspace: - at: '/go' - - run: 'GOARCH=386 make' - - run: 'GOARCH=386 make check' - - run: 'GOARCH=386 make test' - test-go-darwin: - <<: [ *mac ] + at: '/build' + - run: + command: | + aws s3 sync /build/dist 
s3://dl.influxdata.com/telegraf/nightlies/ \ + --exclude "*" \ + --include "*.tar.gz" \ + --include "*.deb" \ + --include "*.rpm" \ + --include "*.zip" \ + --acl public-read + docker-nightly: + machine: + image: ubuntu-2004:current steps: + - run: + name: login to quay.io + command: docker login --username="${QUAY_USER}" --password="${QUAY_PASS}" quay.io + - run: + name: clone influxdata/influxdata-docker + command: git clone https://github.com/influxdata/influxdata-docker + - run: + name: build and push telegraf:nightly + command: | + cd influxdata-docker/telegraf/nightly + docker build -t telegraf . + docker tag telegraf quay.io/influxdb/telegraf-nightly:latest + docker image ls + docker push quay.io/influxdb/telegraf-nightly:latest + - run: + name: build and push telegraf:nightly-alpine + command: | + cd influxdata-docker/telegraf/nightly/alpine + docker build -t telegraf-alpine . + docker tag telegraf-alpine quay.io/influxdb/telegraf-nightly:alpine + docker image ls + docker push quay.io/influxdb/telegraf-nightly:alpine + amd64-package-test-nightly: + machine: + image: ubuntu-2004:current + steps: + - checkout - attach_workspace: - at: '/' - - run: 'make' - - run: 'make check' - - run: 'make test' - - package: - <<: [ *defaults, *go-1_15 ] + at: '.' + - run: sudo apt update && sudo apt install -y snapd + - run: sudo snap install lxd + - run: sudo lxd init --auto + - run: sudo usermod -a -G lxd $(whoami) + - run: cd tools/package_lxd_test && go build + - run: ./tools/package_lxd_test/package_lxd_test --package $(find ./dist -name "*_amd64.deb") + - run: ./tools/package_lxd_test/package_lxd_test --package $(find ./dist -name "*.x86_64.rpm") + package-sign-windows: + executor: + name: win/default + shell: powershell.exe steps: + - checkout + - check-changed-files-or-halt - attach_workspace: - at: '/go' - - run: 'make package' - - store_artifacts: - path: './build/dist' - destination: 'build/dist' - - release: - <<: [ *defaults, *go-1_15 ] + at: '/build' + - run: + name: "Sign Windows Executables" + shell: powershell.exe + command: | + ./scripts/windows-signing.ps1 + - persist_to_workspace: + root: './build' + paths: + - 'dist' + package-sign-mac: + executor: mac + working_directory: /Users/distiller/project + environment: + FL_OUTPUT_DIR: output + FASTLANE_LANE: test + shell: /bin/bash --login -o pipefail steps: + - checkout + - check-changed-files-or-halt - attach_workspace: - at: '/go' - - run: 'make package' - - store_artifacts: - path: './build/dist' - destination: 'build/dist' - nightly: - <<: [ *defaults, *go-1_15 ] + at: '.' + - run: + command: | + sh ./scripts/mac-signing.sh + - persist_to_workspace: + root: './build' + paths: + - 'dist' + package-consolidate: + docker: + - image: alpine steps: - attach_workspace: - at: '/go' - - run: 'NIGHTLY=1 make package' - - run: 'make upload-nightly' + at: '.' + - run: + command: | + cd dist && find . -type f -name '._*' -delete - store_artifacts: - path: './build/dist' + path: './dist' destination: 'build/dist' + - run: + command: | + echo "This job contains all the final artifacts." 
+ share-artifacts: + executor: aws-cli/default + steps: + - checkout + - check-changed-files-or-halt + - run: + command: | + PR=${CIRCLE_PULL_REQUEST##*/} + printf -v payload '{ "pullRequestNumber": "%s" }' "$PR" + curl -X POST "https://182c7jdgog.execute-api.us-east-1.amazonaws.com/prod/shareArtifacts" --data "$payload" + generate-config: + executor: telegraf-ci + steps: + - generate-config + generate-config-win: + executor: + name: win/default + shell: bash.exe + steps: + - generate-config: + os: windows workflows: version: 2 check: jobs: - - 'macdeps': + - 'test-go-linux': + filters: + tags: + only: /.*/ + - 'test-go-linux-386': + filters: + tags: + only: /.*/ + - 'test-go-mac': + filters: + tags: # only runs on tags if you specify this filter + only: /.*/ + - 'test-go-windows': filters: tags: only: /.*/ - - 'deps': + - 'test-integration': filters: tags: only: /.*/ - - 'test-go-1.14': + - 'windows-package': requires: - - 'deps' + - 'test-go-windows' filters: tags: only: /.*/ - - 'test-go-1.14-386': + - 'darwin-amd64-package': requires: - - 'deps' + - 'test-go-mac' filters: tags: only: /.*/ - - 'test-go-1.15': + - 'darwin-arm64-package': requires: - - 'deps' + - 'test-go-mac' filters: tags: only: /.*/ - - 'test-go-1.15-386': + - 'i386-package': requires: - - 'deps' + - 'test-go-linux-386' filters: tags: only: /.*/ - - 'test-go-darwin': + - 'ppc64le-package': requires: - - 'macdeps' + - 'test-go-linux' filters: - tags: # only runs on tags if you specify this filter + tags: + only: /.*/ + - 'riscv64-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 's390x-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 'armel-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 'amd64-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 'arm64-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 'armhf-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 'static-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 'mipsel-package': + requires: + - 'test-go-linux' + filters: + tags: + only: /.*/ + - 'mips-package': + requires: + - 'test-go-linux' + filters: + tags: only: /.*/ - - 'package': + - 'generate-config': + requires: + - 'amd64-package' + filters: + branches: + only: + - master + - 'generate-config-win': + requires: + - 'windows-package' + filters: + branches: + only: + - master + - 'share-artifacts': requires: - - 'test-go-darwin' - - 'test-go-1.14' - - 'test-go-1.14-386' - - 'test-go-1.15' - - 'test-go-1.15-386' - - 'release': + - 'i386-package' + - 'ppc64le-package' + - 'riscv64-package' + - 's390x-package' + - 'armel-package' + - 'amd64-package' + - 'mipsel-package' + - 'mips-package' + - 'darwin-amd64-package' + - 'darwin-arm64-package' + - 'windows-package' + - 'static-package' + - 'arm64-package' + - 'armhf-package' + filters: + branches: + ignore: + - master + - release.* + tags: + ignore: /.*/ + - 'package-sign-windows': requires: - - 'test-go-darwin' - - 'test-go-1.14' - - 'test-go-1.14-386' - - 'test-go-1.15' - - 'test-go-1.15-386' + - 'windows-package' filters: + tags: + only: /.*/ + branches: + ignore: /.*/ + - 'package-sign-mac': + requires: + - 'darwin-amd64-package' + - 'darwin-arm64-package' + filters: + tags: + only: /.*/ + branches: + ignore: /.*/ + - 'package-consolidate': + requires: + - 'i386-package' + - 'ppc64le-package' + - 's390x-package' + - 'armel-package' + - 'amd64-package' + - 
'mipsel-package' + - 'mips-package' + - 'static-package' + - 'arm64-package' + - 'armhf-package' + - 'riscv64-package' + - 'package-sign-mac' + - 'package-sign-windows' + filters: tags: only: /.*/ branches: ignore: /.*/ + nightly: jobs: - - 'deps' - - 'macdeps' - - 'test-go-1.14': + - 'test-go-linux' + - 'test-go-linux-386' + - 'test-go-mac' + - 'test-go-windows' + - 'windows-package': + name: 'windows-package-nightly' + nightly: true + requires: + - 'test-go-windows' + - 'darwin-amd64-package': + name: 'darwin-amd64-package-nightly' + nightly: true + requires: + - 'test-go-mac' + - 'darwin-arm64-package': + name: 'darwin-arm64-package-nightly' + nightly: true + requires: + - 'test-go-mac' + - 'i386-package': + name: 'i386-package-nightly' + nightly: true + requires: + - 'test-go-linux-386' + - 'ppc64le-package': + name: 'ppc64le-package-nightly' + nightly: true + requires: + - 'test-go-linux' + - 'riscv64-package': + name: 'riscv64-package-nightly' + nightly: true + requires: + - 'test-go-linux' + - 's390x-package': + name: 's390x-package-nightly' + nightly: true + requires: + - 'test-go-linux' + - 'armel-package': + name: 'armel-package-nightly' + nightly: true + requires: + - 'test-go-linux' + - 'amd64-package': + name: 'amd64-package-nightly' + nightly: true + requires: + - 'test-go-linux' + - 'arm64-package': + name: 'arm64-package-nightly' + nightly: true + requires: + - 'test-go-linux' + - 'armhf-package': + name: 'armhf-package-nightly' + nightly: true + requires: + - 'test-go-linux' + - 'static-package': + name: 'static-package-nightly' + nightly: true requires: - - 'deps' - - 'test-go-1.14-386': + - 'test-go-linux' + - 'mipsel-package': + name: 'mipsel-package-nightly' + nightly: true requires: - - 'deps' - - 'test-go-1.15': + - 'test-go-linux' + - 'mips-package': + name: 'mips-package-nightly' + nightly: true requires: - - 'deps' - - 'test-go-1.15-386': + - 'test-go-linux' + - nightly: requires: - - 'deps' - - 'test-go-darwin': + - 'amd64-package-test-nightly' + - 'arm64-package-nightly' + - 'armel-package-nightly' + - 'armhf-package-nightly' + - 'darwin-amd64-package-nightly' + - 'darwin-arm64-package-nightly' + - 'i386-package-nightly' + - 'mips-package-nightly' + - 'mipsel-package-nightly' + - 'ppc64le-package-nightly' + - 'riscv64-package-nightly' + - 's390x-package-nightly' + - 'static-package-nightly' + - 'windows-package-nightly' + - docker-nightly: requires: - - 'macdeps' - - 'nightly': + - 'nightly' + - amd64-package-test-nightly: requires: - - 'test-go-darwin' - - 'test-go-1.14' - - 'test-go-1.14-386' - - 'test-go-1.15' - - 'test-go-1.15-386' + - 'amd64-package-nightly' triggers: - schedule: cron: "0 7 * * *" diff --git a/.gitattributes b/.gitattributes index 21bc439bf797e..7769daa83cb06 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3,3 +3,4 @@ README.md merge=union go.sum merge=union plugins/inputs/all/all.go merge=union plugins/outputs/all/all.go merge=union +**/testdata/** test eol=lf diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml new file mode 100644 index 0000000000000..a5f023371d6db --- /dev/null +++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml @@ -0,0 +1,74 @@ +name: Bug Report +description: Create a bug report to help us improve +labels: ["bug"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! We reserve Telegraf issues for reproducible bugs.
Please redirect any questions about Telegraf usage to our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/); we have a lot of talented community members there who can help answer your question more quickly. + - type: textarea + id: config + attributes: + label: Relevant telegraf.conf + description: Place config in the toml code section. This will be automatically formatted into toml, so no need for backticks. + render: toml + validations: + required: true + - type: textarea + id: logs + attributes: + label: Logs from Telegraf + description: Please include the Telegraf logs, ideally with `--debug` enabled. + render: text + validations: + required: true + - type: input + id: system-info + attributes: + label: System info + description: Include Telegraf version, operating system, and other relevant details + placeholder: ex. Telegraf 1.20.0, Ubuntu 20.04, Docker 20.10.8 + validations: + required: true + - type: textarea + id: docker + attributes: + label: Docker + description: If your bug involves third party dependencies or services, it can be very helpful to provide a Dockerfile or docker-compose.yml that reproduces the environment you're testing against. + validations: + required: false + - type: textarea + id: reproduce + attributes: + label: Steps to reproduce + description: Describe the steps to reproduce the bug. + value: | + 1. + 2. + 3. + ... + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: Expected behavior + description: Describe what you expected to happen when you performed the above steps. + validations: + required: true + - type: textarea + id: actual-behavior + attributes: + label: Actual behavior + description: Describe what actually happened when you performed the above steps. + validations: + required: true + - type: textarea + id: additional-info + attributes: + label: Additional info + description: Include gist of relevant config, logs, etc. + validations: + required: false + diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md deleted file mode 100644 index 28c6237ac75d1..0000000000000 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -name: Bug report -labels: bug -about: Create a report to help us improve - ---- - - -### Relevant telegraf.conf: - -```toml - -``` - -### System info: - - - -### Docker - - - -### Steps to reproduce: - - - -1. ... -2. ... - -### Expected behavior: - - - -### Actual behavior: - - - -### Additional info: - - diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 527555bdfc7a8..67b65a26247fb 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,5 +1,26 @@ ### Required for all PRs: -- [ ] Signed [CLA](https://influxdata.com/community/cla/). -- [ ] Associated README.md updated. -- [ ] Has appropriate unit tests. + + +- [ ] Updated associated README.md. +- [ ] Wrote appropriate unit tests.
+- [ ] Pull request title or commits are in [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) + + + +resolves # + + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000..c1de7d8fd2824 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + ignore: + # Dependabot isn't able to update packages that do not match the source, so ignore anything with a version in the name + - dependency-name: "*.v*" diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 0000000000000..a383fe65af3ee --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,36 @@ +name: golangci-lint +on: + push: + branches: + - master + pull_request: + branches: + - master + schedule: + # Trigger every day at 16:00 UTC + - cron: '0 16 * * *' +jobs: + golangci-pr: + if: github.ref != 'refs/heads/master' + name: lint-pr-changes + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v2 + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.46.2 + only-new-issues: true + golangci-master: + if: github.ref == 'refs/heads/master' + name: lint-master-all + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: v1.46.2 + only-new-issues: true + args: --issues-exit-code=0 diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 0000000000000..057a5e0ba25d7 --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,59 @@ +--- +################################# +################################# +## Super Linter GitHub Actions ## +################################# +################################# +name: Lint Code Base + +# +# Documentation: +# https://help.github.com/en/articles/workflow-syntax-for-github-actions +# + +############################# +# Start the job on all push # +############################# +on: + push: + branches-ignore: [master, main] + # Remove the line above to run when pushing to master + pull_request: + branches: [master, main] + +############### +# Set the Job # +############### +jobs: + build: + # Name the Job + name: Lint Code Base + # Set the agent to run on + runs-on: ubuntu-latest + + ################## + # Load all steps # + ################## + steps: + ########################## + # Checkout the code base # + ########################## + - name: Checkout Code + uses: actions/checkout@v2 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + + ################################ + # Run Linter against code base # + ################################ + - name: Lint Code Base + uses: github/super-linter@v4.9.2 + env: + VALIDATE_ALL_CODEBASE: false + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LINTER_RULES_PATH: '.'
+ MARKDOWN_CONFIG_FILE: .markdownlint.yml + VALIDATE_MARKDOWN: true + VALIDATE_BASH: true diff --git a/.github/workflows/readme-linter.yml b/.github/workflows/readme-linter.yml new file mode 100644 index 0000000000000..c0b5dc49cd735 --- /dev/null +++ b/.github/workflows/readme-linter.yml @@ -0,0 +1,23 @@ +name: Lint plugin readmes +on: +# push: +# branches-ignore: master + pull_request: + branches: # Names of target branches, not source branches + - master +jobs: + run-readme-linter: + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v3 + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Get changed files + id: changed-files + uses: tj-actions/changed-files@v18.7 + with: + base_sha: ${{ github.event.pull_request.base.sha }} + files: plugins/**/README.md + - name: Run readme linter on changed files + run: go run ./tools/readme_linter ${{ steps.changed-files.outputs.all_changed_files }} diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml new file mode 100644 index 0000000000000..7dc9f439d0460 --- /dev/null +++ b/.github/workflows/semantic.yml @@ -0,0 +1,15 @@ +--- +name: "Semantic PR and Commit Messages" + +on: + pull_request: + types: [opened, reopened, synchronize, edited] + branches: + - master + +jobs: + semantic: + uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main + with: + CHECK_PR_TITLE_OR_ONE_COMMIT: true + diff --git a/.gitignore b/.gitignore index df2b3d06643c5..293407ff4a6f6 100644 --- a/.gitignore +++ b/.gitignore @@ -3,5 +3,13 @@ /telegraf /telegraf.exe /telegraf.gz +/tools/package_lxd_test/package_lxd_test +/tools/readme_config_includer/generator /vendor .DS_Store +process.yml +/.vscode +/*.toml +/*.conf +resource.syso +versioninfo.json diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000000000..6c0af3db9e9f0 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,135 @@ +linters: + disable-all: true + enable: + # - telegraflinter + - bodyclose + - dogsled + - errcheck + - goprintffuncname + - gosimple + - govet + - ineffassign + - nakedret + - nilerr + - predeclared + - revive + - sqlclosecheck + - staticcheck + - typecheck + - unconvert + - unused + - varcheck + +linters-settings: + revive: + rules: + - name: argument-limit + arguments: [ 6 ] + - name: atomic + - name: bare-return + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: confusing-naming + - name: confusing-results + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: deep-exit + - name: defer + - name: dot-imports + - name: duplicated-imports + - name: early-return + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf +# - name: flag-parameter #disable for now + - name: function-result-limit + arguments: [ 3 ] + - name: identical-branches + - name: if-return + - name: imports-blacklist + arguments: [ "log" ] + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: redefines-builtin-id + - name: string-of-int + - name: struct-tag + - name: superfluous-else + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unhandled-error + arguments: ["fmt.Printf", "fmt.Println", "fmt.Print"] + - name: 
unnecessary-stmt + - name: unreachable-code + # - name: unused-parameter + - name: var-declaration + - name: var-naming + - name: waitgroup-by-value + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 1 + +run: + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 10m + + # which dirs to skip: issues from them won't be reported; + # can use regexp here: generated.*, regexp is applied on full path; + # default value is empty list, but default dirs are skipped independently + # from this option's value (see skip-dirs-use-default). + # "/" will be replaced by current OS file path separator to properly work + # on Windows. + skip-dirs: + - assets + - docs + - etc + - scripts + # - plugins/parsers/influx/machine.go + + # which files to skip: they will be analyzed, but issues from them + # won't be reported. Default value is empty list, but there is + # no need to include all autogenerated files, we confidently recognize + # autogenerated files. If it's not please let us know. + # "/" will be replaced by current OS file path separator to properly work + # on Windows. + skip-files: + - plugins/parsers/influx/machine.go* + +issues: + # Maximum issues count per one linter. Set to 0 to disable. Default is 50. + max-issues-per-linter: 0 + + # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. + max-same-issues: 0 + + exclude: + - don't use an underscore in package name #revive:var-naming + + exclude-rules: + - path: plugins/parsers/influx + linters: + - govet + + - path: _test\.go + text: "parameter.*seems to be a control flag, avoid control coupling" + + - path: (^agent/|^cmd/|^config/|^filter/|^internal/|^logger/|^metric/|^models/|^selfstat/|^testutil/|^plugins/serializers/) + text: "imports-blacklist: should not use the following blacklisted import: \"log\"" + linters: + - revive + +output: + format: tab diff --git a/.markdownlint.yml b/.markdownlint.yml new file mode 100644 index 0000000000000..893179487d310 --- /dev/null +++ b/.markdownlint.yml @@ -0,0 +1,6 @@ +{ + "MD013": false, + "MD033": { + "allowed_elements": ["br"] + } +} diff --git a/CHANGELOG.md b/CHANGELOG.md index 23c7d2d063743..7578c4041cc47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,160 +1,1321 @@ -## v1.16.1 [2020-10-28] + +# Changelog + +## v1.23.0 [2022-06-13] + +### Bugfixes + +- [#11272](https://github.com/influxdata/telegraf/pull/11272) Add missing build constraints for sqlite +- [#11253](https://github.com/influxdata/telegraf/pull/11253) Always build README-embedder for host-architecture +- [#11140](https://github.com/influxdata/telegraf/pull/11140) Avoid calling sadc with invalid 0 interval +- [#11093](https://github.com/influxdata/telegraf/pull/11093) Check net.Listen() error in tests +- [#11181](https://github.com/influxdata/telegraf/pull/11181) Convert slab plugin to new sample.conf. +- [#10979](https://github.com/influxdata/telegraf/pull/10979) Datadog count metrics +- [#11044](https://github.com/influxdata/telegraf/pull/11044) Deprecate useless database config option +- [#11150](https://github.com/influxdata/telegraf/pull/11150) Doc interval setting for internet speed plugin +- [#11120](https://github.com/influxdata/telegraf/pull/11120) Elasticsearch output float handling test +- [#11151](https://github.com/influxdata/telegraf/pull/11151) Improve slab testing without sudo. 
+- [#10995](https://github.com/influxdata/telegraf/pull/10995) Log instance name in skip warnings +- [#11069](https://github.com/influxdata/telegraf/pull/11069) Output erroneous namespace and continue instead of error out +- [#11237](https://github.com/influxdata/telegraf/pull/11237) Re-add event to splunk serializer +- [#11143](https://github.com/influxdata/telegraf/pull/11143) Redis plugin goroutine leak triggered by auto reload config mechanism +- [#11082](https://github.com/influxdata/telegraf/pull/11082) Remove any content type from prometheus accept header +- [#11261](https://github.com/influxdata/telegraf/pull/11261) Remove full access permissions +- [#11179](https://github.com/influxdata/telegraf/pull/11179) Search services file in /etc/services and fall back to /usr/etc/services +- [#11217](https://github.com/influxdata/telegraf/pull/11217) Update sample.conf for prometheus +- [#11241](https://github.com/influxdata/telegraf/pull/11241) Upgrade xpath and fix code +- [#11083](https://github.com/influxdata/telegraf/pull/11083) Use readers over closers in http input +- [#11149](https://github.com/influxdata/telegraf/pull/11149) `inputs.burrow` Move Dialer to variable and run `make fmt` +- [#10812](https://github.com/influxdata/telegraf/pull/10812) `outputs.sql` Table existence cache + +### Features + +- [#10880](https://github.com/influxdata/telegraf/pull/10880) Add ANSI color filter for tail input plugin +- [#11188](https://github.com/influxdata/telegraf/pull/11188) Add constant 'algorithm' to the mock plugin +- [#11159](https://github.com/influxdata/telegraf/pull/11159) Add external huebridge input plugin +- [#11076](https://github.com/influxdata/telegraf/pull/11076) Add field key option to set event partition key +- [#10818](https://github.com/influxdata/telegraf/pull/10818) Add fritzbox as external plugin +- [#11037](https://github.com/influxdata/telegraf/pull/11037) Add influx semantic commits checker, checks only last commit. +- [#11039](https://github.com/influxdata/telegraf/pull/11039) Add mount option filtering to disk plugin +- [#11075](https://github.com/influxdata/telegraf/pull/11075) Add slab metrics input plugin +- [#11056](https://github.com/influxdata/telegraf/pull/11056) Allow other fluentd metrics apart from retry_count, buffer_queu… +- [#10918](https://github.com/influxdata/telegraf/pull/10918) Artifactory Webhook Receiver +- [#11000](https://github.com/influxdata/telegraf/pull/11000) Create and push nightly docker images to quay.io +- [#11102](https://github.com/influxdata/telegraf/pull/11102) Do not error if no nodes found for current config with xpath parser +- [#10886](https://github.com/influxdata/telegraf/pull/10886) Generate the plugins sample config +- [#11084](https://github.com/influxdata/telegraf/pull/11084) Google API Auth +- [#10607](https://github.com/influxdata/telegraf/pull/10607) In Lustre input plugin, support collecting per-client stats. 
+- [#10912](https://github.com/influxdata/telegraf/pull/10912) Migrate aggregator plugins to new sample config format +- [#10924](https://github.com/influxdata/telegraf/pull/10924) Migrate input plugins to new sample config format (A-L) +- [#10926](https://github.com/influxdata/telegraf/pull/10926) Migrate input plugins to new sample config format (M-Z) +- [#10910](https://github.com/influxdata/telegraf/pull/10910) Migrate output plugins to new sample config format +- [#10913](https://github.com/influxdata/telegraf/pull/10913) Migrate processor plugins to new sample config format +- [#11218](https://github.com/influxdata/telegraf/pull/11218) Migrate xpath parser to new style +- [#10885](https://github.com/influxdata/telegraf/pull/10885) Update etc/telegraf.conf and etc/telegraf_windows.conf +- [#6948](https://github.com/influxdata/telegraf/pull/6948) `inputs.burrow` fill more http transport parameters +- [#11141](https://github.com/influxdata/telegraf/pull/11141) `inputs.cpu` Add tags with core id or physical id to cpus +- [#7896](https://github.com/influxdata/telegraf/pull/7896) `inputs.mongodb` Add metrics about files currently open and currently active data handles +- [#10448](https://github.com/influxdata/telegraf/pull/10448) `inputs.nginx_plus_api` Gather slab metrics +- [#11216](https://github.com/influxdata/telegraf/pull/11216) `inputs.sqlserver` Update query store and latch performance counters +- [#10574](https://github.com/influxdata/telegraf/pull/10574) `inputs.vsphere` Collect resource pools metrics and add resource pool tag in VM metrics +- [#11035](https://github.com/influxdata/telegraf/pull/11035) `inputs.intel_powerstat` Add Max Turbo Frequency and introduce improvements +- [#11254](https://github.com/influxdata/telegraf/pull/11254) `inputs.intel_powerstat` Add uncore frequency metrics +- [#10954](https://github.com/influxdata/telegraf/pull/10954) `outputs.http` Support configuration of `MaxIdleConns` and `MaxIdleConnsPerHost` +- [#10853](https://github.com/influxdata/telegraf/pull/10853) `outputs.elasticsearch` Add healthcheck timeout + +### Dependency Updates + +- [#10970](https://github.com/influxdata/telegraf/pull/10970) Update github.com/wavefronthq/wavefront-sdk-go from 0.9.10 to 0.9.11 +- [#11166](https://github.com/influxdata/telegraf/pull/11166) Update github.com/aws/aws-sdk-go-v2/config from 1.15.3 to 1.15.7 +- [#11021](https://github.com/influxdata/telegraf/pull/11021) Update github.com/sensu/sensu-go/api/core/v2 from 2.13.0 to 2.14.0 +- [#11088](https://github.com/influxdata/telegraf/pull/11088) Update go.opentelemetry.io/otel/metric from 0.28.0 to 0.30.0 +- [#11221](https://github.com/influxdata/telegraf/pull/11221) Update github.com/nats-io/nats-server/v2 from 2.7.4 to 2.8.4 +- [#11191](https://github.com/influxdata/telegraf/pull/11191) Update golangci-lint from v1.45.2 to v1.46.2 +- [#11107](https://github.com/influxdata/telegraf/pull/11107) Update gopsutil from v3.22.3 to v3.22.4 to allow for HOST_PROC_MOUNTINFO. 
+- [#11242](https://github.com/influxdata/telegraf/pull/11242) Update moby/ipvs dependency from v1.0.1 to v1.0.2 +- [#11260](https://github.com/influxdata/telegraf/pull/11260) Update modernc.org/sqlite from v1.10.8 to v1.17.3 +- [#11266](https://github.com/influxdata/telegraf/pull/11266) Update github.com/containerd/containerd from v1.5.11 to v1.5.13 +- [#11264](https://github.com/influxdata/telegraf/pull/11264) Update github.com/tidwall/gjson from 1.10.2 to 1.14.1 + +## v1.22.4 [2022-05-16] + +### Bugfixes + +- [#11045](https://github.com/influxdata/telegraf/pull/11045) `inputs.couchbase` Do not assume metrics will all be of the same length +- [#11043](https://github.com/influxdata/telegraf/pull/11043) `inputs.statsd` Do not error when closing statsd network connection +- [#11030](https://github.com/influxdata/telegraf/pull/11030) `outputs.azure_monitor` Re-init azure monitor http client on context deadline error +- [#11078](https://github.com/influxdata/telegraf/pull/11078) `outputs.wavefront` If no "host" tag is provided do not add "telegraf.host" tag +- [#11042](https://github.com/influxdata/telegraf/pull/11042) Have telegraf service wait for network up in systemd packaging + +### Dependency Updates + +- [#10722](https://github.com/influxdata/telegraf/pull/10722) `inputs.internet_speed` Update github.com/showwin/speedtest-go from 1.1.4 to 1.1.5 +- [#11085](https://github.com/influxdata/telegraf/pull/11085) Update OpenTelemetry plugins to v0.51.0 + +## v1.22.3 [2022-04-28] + +### Bugfixes + +- [#10961](https://github.com/influxdata/telegraf/pull/10961) Update Go to 1.18.1 +- [#10976](https://github.com/influxdata/telegraf/pull/10976) `inputs.influxdb_listener` Remove duplicate influxdb listener writes with upstream parser +- [#11024](https://github.com/influxdata/telegraf/pull/11024) `inputs.gnmi` Use external xpath parser for gnmi +- [#10925](https://github.com/influxdata/telegraf/pull/10925) `inputs.system` Reduce log level in disk plugin back to original level + +## v1.22.2 [2022-04-25] + +### Bugfixes + +- [#11008](https://github.com/influxdata/telegraf/pull/11008) `inputs.gnmi` Add mutex to gnmi lookup map +- [#11010](https://github.com/influxdata/telegraf/pull/11010) `inputs.gnmi` Use sprint to cast to strings in gnmi +- [#11001](https://github.com/influxdata/telegraf/pull/11001) `inputs.consul_agent` Use correct auth token with consul_agent +- [#10486](https://github.com/influxdata/telegraf/pull/10486) `inputs.mysql` Add mariadb_dialect to address the MariaDB differences in INNODB_METRICS +- [#10923](https://github.com/influxdata/telegraf/pull/10923) `inputs.smart` Correctly parse various numeric forms +- [#10850](https://github.com/influxdata/telegraf/pull/10850) `inputs.aliyuncms` Ensure aliyuncms metrics accept array, fix discovery +- [#10930](https://github.com/influxdata/telegraf/pull/10930) `inputs.aerospike` Statistics query bug +- [#10947](https://github.com/influxdata/telegraf/pull/10947) `inputs.cisco_telemetry_mdt` Align the default value for msg size +- [#10959](https://github.com/influxdata/telegraf/pull/10959) `inputs.cisco_telemetry_mdt` Remove overly verbose info message from cisco mdt +- [#10958](https://github.com/influxdata/telegraf/pull/10958) `outputs.influxdb_v2` Improve influxdb_v2 error message +- [#10932](https://github.com/influxdata/telegraf/pull/10932) `inputs.prometheus` Moved from watcher to informer +- [#11013](https://github.com/influxdata/telegraf/pull/11013) Also allow 0 outputs when using test-wait parameter +- 
[#11015](https://github.com/influxdata/telegraf/pull/11015) Allow Makefile to work on Windows + +### Dependency Updates + +- [#10966](https://github.com/influxdata/telegraf/pull/10966) Update github.com/Azure/azure-kusto-go from 0.5.0 to 0.6.0 +- [#10963](https://github.com/influxdata/telegraf/pull/10963) Update opentelemetry from v0.2.10 to v0.2.17 +- [#10984](https://github.com/influxdata/telegraf/pull/10984) Update go.opentelemetry.io/collector/pdata from v0.48.0 to v0.49.0 +- [#10998](https://github.com/influxdata/telegraf/pull/10998) Update github.com/aws/aws-sdk-go-v2/config from 1.13.1 to 1.15.3 +- [#10997](https://github.com/influxdata/telegraf/pull/10997) Update github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs +- [#10975](https://github.com/influxdata/telegraf/pull/10975) Update github.com/aws/aws-sdk-go-v2/credentials from 1.8.0 to 1.11.2 +- [#10981](https://github.com/influxdata/telegraf/pull/10981) Update github.com/containerd/containerd from v1.5.9 to v1.5.11 +- [#10973](https://github.com/influxdata/telegraf/pull/10973) Update github.com/miekg/dns from 1.1.46 to 1.1.48 +- [#10974](https://github.com/influxdata/telegraf/pull/10974) Update github.com/gopcua/opcua from v0.3.1 to v0.3.3 +- [#10972](https://github.com/influxdata/telegraf/pull/10972) Update github.com/aws/aws-sdk-go-v2/service/dynamodb +- [#10773](https://github.com/influxdata/telegraf/pull/10773) Update github.com/xdg/scram from 1.0.3 to 1.0.5 +- [#10971](https://github.com/influxdata/telegraf/pull/10971) Update go.mongodb.org/mongo-driver from 1.8.3 to 1.9.0 +- [#10940](https://github.com/influxdata/telegraf/pull/10940) Update starlark 7a1108eaa012->d1966c6b9fcd + +## v1.22.1 [2022-04-06] + +### Bugfixes + +- [#10937](https://github.com/influxdata/telegraf/pull/10937) Update gonum.org/v1/gonum from 0.9.3 to 0.11.0 +- [#10906](https://github.com/influxdata/telegraf/pull/10906) Update github.com/golang-jwt/jwt/v4 from 4.2.0 to 4.4.1 +- [#10931](https://github.com/influxdata/telegraf/pull/10931) Update gopsutil and associated dependencies for improved OpenBSD support +- [#10553](https://github.com/influxdata/telegraf/pull/10553) `inputs.sqlserver` Fix inconsistencies in sql*Requests queries +- [#10883](https://github.com/influxdata/telegraf/pull/10883) `agent` Fix default value for logfile rotation interval +- [#10871](https://github.com/influxdata/telegraf/pull/10871) `inputs.zfs` Fix redundant zfs pool tag +- [#10903](https://github.com/influxdata/telegraf/pull/10903) `inputs.vsphere` Update vsphere info message to debug +- [#10866](https://github.com/influxdata/telegraf/pull/10866) `outputs.azure_monitor` Include body in error message +- [#10830](https://github.com/influxdata/telegraf/pull/10830) `processors.topk` Clarify the k and fields topk params +- [#10858](https://github.com/influxdata/telegraf/pull/10858) `outputs.http` Switch HTTP 100 test case values +- [#10859](https://github.com/influxdata/telegraf/pull/10859) `inputs.intel_pmu` Fix slow running intel-pmu test +- [#10860](https://github.com/influxdata/telegraf/pull/10860) `inputs.cloud_pubsub` Skip longer/integration tests on -short mode +- [#10861](https://github.com/influxdata/telegraf/pull/10861) `inputs.cloud_pubsub_push` Reduce timeouts and sleeps + +### New External Plugins + +- [#10462](https://github.com/influxdata/telegraf/pull/10462) `external.psi` Add psi plugin + +## v1.22.0 + +### Influx Line Protocol Parser + +There is an option to use a faster, more memory-efficient +implementation of the Influx Line Protocol parser.
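+
+As a brief illustrative aside (not part of the upstream release notes): assuming
+the feature flag added in [#10749](https://github.com/influxdata/telegraf/pull/10749)
+is the `influx_parser_type` option of the influx data format, and that the value
+`"upstream"` selects the new implementation, opting in on an input that consumes
+line protocol might look roughly like the sketch below; consult the influx parser
+README for the authoritative setting name.
+
+```toml
+[[inputs.file]]
+  ## Hypothetical sample file containing Influx Line Protocol records.
+  files = ["metrics.lp"]
+  data_format = "influx"
+  ## Assumed option name and value: selects the faster, more memory-efficient
+  ## parser implementation mentioned above instead of the default internal one.
+  influx_parser_type = "upstream"
+```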
+ +### SNMP Translator + +This version introduces an agent setting to select the method of +translating SNMP objects. The agent setting "snmp_translator" can be +"netsnmp" which translates by calling external programs snmptranslate +and snmptable, or "gosmi" which translates using the built-in gosmi +library. + +Before version 1.21.0, Telegraf only used the netsnmp method. Versions +1.21.0 through 1.21.4 only used the gosmi method. Since the +translation method is now configurable and "netsnmp" is the default, +users who wish to continue using "gosmi" must add `snmp_translator = +"gosmi"` in the agent section of their config file. See +[#10802](https://github.com/influxdata/telegraf/pull/10802). + +### New Input Plugins + +- [#3649](https://github.com/influxdata/telegraf/pull/3649) `inputs.socketstat` Add socketstat input plugin +- [#9697](https://github.com/influxdata/telegraf/pull/9697) `inputs.xtremio` Add xtremio input +- [#9782](https://github.com/influxdata/telegraf/pull/9782) `inputs.mock` Add mock input plugin +- [#10042](https://github.com/influxdata/telegraf/pull/10042) `inputs.redis_sentinel` Add redis sentinel input plugin +- [#10106](https://github.com/influxdata/telegraf/pull/10106) `inputs.nomad` Add nomad input plugin +- [#10198](https://github.com/influxdata/telegraf/pull/10198) `inputs.vault` Add vault input plugin +- [#10258](https://github.com/influxdata/telegraf/pull/10258) `inputs.consul_agent` Add consul agent input plugin +- [#10763](https://github.com/influxdata/telegraf/pull/10763) `inputs.hugepages` Add hugepages input plugin + +### New Processor Plugins + +- [#10057](https://github.com/influxdata/telegraf/pull/10057) `processors.noise` Add noise processor plugin + +### Features + +- [#9332](https://github.com/influxdata/telegraf/pull/9332) `agent` HTTP basic auth for webhooks +- [#10307](https://github.com/influxdata/telegraf/pull/10307) `agent` Improve error logging on plugin initialization +- [#10341](https://github.com/influxdata/telegraf/pull/10341) `agent` Check TLSConfig early to catch missing certificates +- [#10404](https://github.com/influxdata/telegraf/pull/10404) `agent` Support headers for http plugin with cookie auth +- [#10545](https://github.com/influxdata/telegraf/pull/10545) `agent` Add a collection offset implementation +- [#10559](https://github.com/influxdata/telegraf/pull/10559) `agent` Add autorestart and restartdelay flags to Windows service +- [#10515](https://github.com/influxdata/telegraf/pull/10515) `aggregators.histogram` Add config option to push only updated values +- [#10520](https://github.com/influxdata/telegraf/pull/10520) `aggregators.histogram` Add expiration option +- [#10137](https://github.com/influxdata/telegraf/pull/10137) `inputs.bond` Add additional stats to bond collector +- [#10382](https://github.com/influxdata/telegraf/pull/10382) `inputs.docker` Update docker client API version +- [#10575](https://github.com/influxdata/telegraf/pull/10575) `inputs.file` Allow for stateful parser handling +- [#7484](https://github.com/influxdata/telegraf/pull/7484) `inputs.gnmi` add dynamic tagging to gnmi plugin +- [#10220](https://github.com/influxdata/telegraf/pull/10220) `inputs.graylog` Add timeout setting option +- [#10530](https://github.com/influxdata/telegraf/pull/10530) `inputs.internet_speed` Add caching to internet_speed +- [#10243](https://github.com/influxdata/telegraf/pull/10243) `inputs.kibana` Add heap_size_limit field +- [#10641](https://github.com/influxdata/telegraf/pull/10641) `inputs.memcached` gather 
additional stats from memcached +- [#10642](https://github.com/influxdata/telegraf/pull/10642) `inputs.memcached` Support client TLS origination +- [#9279](https://github.com/influxdata/telegraf/pull/9279) `inputs.modbus` Support multiple slaves with gateway +- [#10231](https://github.com/influxdata/telegraf/pull/10231) `inputs.modbus` Add per-request tags +- [#10625](https://github.com/influxdata/telegraf/pull/10625) `inputs.mongodb` Add FsTotalSize and FsUsedSize fields +- [#10787](https://github.com/influxdata/telegraf/pull/10787) `inputs.nfsclient` Add new rtt per op field +- [#10705](https://github.com/influxdata/telegraf/pull/10705) `inputs.openweathermap` Add feels_like field +- [#9710](https://github.com/influxdata/telegraf/pull/9710) `inputs.postgresql` Add option to disable prepared statements for PostgreSQL +- [#10339](https://github.com/influxdata/telegraf/pull/10339) `inputs.snmp_trap` Deprecate unused snmp_trap timeout configuration option +- [#9671](https://github.com/influxdata/telegraf/pull/9671) `inputs.sql` Add ClickHouse driver to sql inputs/outputs plugins +- [#10466](https://github.com/influxdata/telegraf/pull/10466) `inputs.statsd` Add option to sanitize collected metric names +- [#9432](https://github.com/influxdata/telegraf/pull/9432) `inputs.varnish` Create option to reduce potentially high cardinality +- [#6501](https://github.com/influxdata/telegraf/pull/6501) `inputs.win_perf_counters` Implemented support for reading raw values, added tests and doc +- [#10535](https://github.com/influxdata/telegraf/pull/10535) `inputs.win_perf_counters` Allow errors to be ignored +- [#9822](https://github.com/influxdata/telegraf/pull/9822) `inputs.x509_cert` Add exclude_root_certs option to x509_cert plugin +- [#9963](https://github.com/influxdata/telegraf/pull/9963) `outputs.datadog` Add the option to use compression +- [#10505](https://github.com/influxdata/telegraf/pull/10505) `outputs.elasticsearch` Add elastic pipeline flags +- [#10499](https://github.com/influxdata/telegraf/pull/10499) `outputs.groundwork` Process group tags +- [#10186](https://github.com/influxdata/telegraf/pull/10186) `outputs.http` Add optional list of non retryable http status codes +- [#10202](https://github.com/influxdata/telegraf/pull/10202) `outputs.http` Support AWS managed service for prometheus +- [#8192](https://github.com/influxdata/telegraf/pull/8192) `outputs.kafka` Add socks5 proxy support +- [#10673](https://github.com/influxdata/telegraf/pull/10673) `outputs.sql` Add unsigned style config option +- [#10672](https://github.com/influxdata/telegraf/pull/10672) `outputs.websocket` Add socks5 proxy support +- [#10267](https://github.com/influxdata/telegraf/pull/10267) `parsers.csv` Add option to skip errors during parsing +- [#10749](https://github.com/influxdata/telegraf/pull/10749) `parsers.influx` Add new influx line protocol parser via feature flag +- [#10585](https://github.com/influxdata/telegraf/pull/10585) `parsers.xpath` Add tag batch-processing to XPath parser +- [#10316](https://github.com/influxdata/telegraf/pull/10316) `processors.template` Add more functionality to template processor +- [#10252](https://github.com/influxdata/telegraf/pull/10252) `serializers.wavefront` Add option to disable Wavefront prefix conversion + +### Bugfixes + +- [#10803](https://github.com/influxdata/telegraf/pull/10803) `agent` Update parsing logic of config.Duration to correctly require time and duration +- [#10814](https://github.com/influxdata/telegraf/pull/10814) `agent` Update the precision 
parameter default value +- [#10872](https://github.com/influxdata/telegraf/pull/10872) `agent` Change name of agent snmp translator setting +- [#10876](https://github.com/influxdata/telegraf/pull/10876) `inputs.consul_agent` Rename consul_metrics -> consul_agent +- [#10711](https://github.com/influxdata/telegraf/pull/10711) `inputs.docker` Keep data type of tasks_desired field consistent +- [#10083](https://github.com/influxdata/telegraf/pull/10083) `inputs.http` Add metadata support to CSV parser plugin +- [#10701](https://github.com/influxdata/telegraf/pull/10701) `inputs.mdstat` Fix parsing output when sync is less than 10% +- [#10385](https://github.com/influxdata/telegraf/pull/10385) `inputs.modbus` Re-enable OpenBSD modbus support +- [#10790](https://github.com/influxdata/telegraf/pull/10790) `inputs.ntpq` Correctly read ntpq long poll output with extra characters +- [#10384](https://github.com/influxdata/telegraf/pull/10384) `inputs.opcua` Accept non-standard OPC UA OK status by implementing a configurable workaround +- [#10465](https://github.com/influxdata/telegraf/pull/10465) `inputs.opcua` Add additional data to error messages +- [#10735](https://github.com/influxdata/telegraf/pull/10735) `inputs.snmp` Log error when loading mibs +- [#10748](https://github.com/influxdata/telegraf/pull/10748) `inputs.snmp` Use the correct path when evaluating symlink +- [#10802](https://github.com/influxdata/telegraf/pull/10802) `inputs.snmp` Add option to select translator +- [#10527](https://github.com/influxdata/telegraf/pull/10527) `inputs.system` Remove verbose logging from disk input plugin +- [#10706](https://github.com/influxdata/telegraf/pull/10706) `outputs.influxdb_v2` Include influxdb bucket name in error messages +- [#10623](https://github.com/influxdata/telegraf/pull/10623) `outputs.groundwork` Set NextCheckTime to LastCheckTime to avoid GroundWork inventing a value +- [#10749](https://github.com/influxdata/telegraf/pull/10749) `parsers.influx` Add new influx line protocol parser via feature flag +- [#10777](https://github.com/influxdata/telegraf/pull/10777) `parsers.json_v2` Allow multiple optional objects +- [#10799](https://github.com/influxdata/telegraf/pull/10799) `parsers.json_v2` Check if gpath exists and support optional in fields/tags +- [#10798](https://github.com/influxdata/telegraf/pull/10798) `parsers.xpath` Correctly handle imports in protocol-buffer definitions +- [#10602](https://github.com/influxdata/telegraf/pull/10602) Update github.com/aws/aws-sdk-go-v2/service/sts from 1.7.2 to 1.14.0 +- [#10604](https://github.com/influxdata/telegraf/pull/10604) Update github.com/aerospike/aerospike-client-go from 1.27.0 to 5.7.0 +- [#10686](https://github.com/influxdata/telegraf/pull/10686) Update github.com/sleepinggenius2/gosmi from v0.4.3 to v0.4.4 +- [#10692](https://github.com/influxdata/telegraf/pull/10692) Update github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.5.0 to 1.13.0 +- [#10693](https://github.com/influxdata/telegraf/pull/10693) Update github.com/gophercloud/gophercloud from 0.16.0 to 0.24.0 +- [#10702](https://github.com/influxdata/telegraf/pull/10702) Update github.com/jackc/pgx/v4 from 4.14.1 to 4.15.0 +- [#10704](https://github.com/influxdata/telegraf/pull/10704) Update github.com/sensu/sensu-go/api/core/v2 from 2.12.0 to 2.13.0 +- [#10713](https://github.com/influxdata/telegraf/pull/10713) Update k8s.io/api from 0.23.3 to 0.23.4 +- [#10714](https://github.com/influxdata/telegraf/pull/10714) Update cloud.google.com/go/pubsub from 1.17.1 to 1.18.0 +- 
[#10715](https://github.com/influxdata/telegraf/pull/10715) Update github.com/newrelic/newrelic-telemetry-sdk-go from 0.5.1 to 0.8.1 +- [#10717](https://github.com/influxdata/telegraf/pull/10717) Update github.com/ClickHouse/clickhouse-go from 1.5.1 to 1.5.4 +- [#10718](https://github.com/influxdata/telegraf/pull/10718) Update github.com/wavefronthq/wavefront-sdk-go from 0.9.9 to 0.9.10 +- [#10719](https://github.com/influxdata/telegraf/pull/10719) Update github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs from 1.12.0 to 1.13.0 +- [#10720](https://github.com/influxdata/telegraf/pull/10720) Update github.com/aws/aws-sdk-go-v2/config from 1.8.3 to 1.13.1 +- [#10721](https://github.com/influxdata/telegraf/pull/10721) Update github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.6.0 to 1.10.0 +- [#10728](https://github.com/influxdata/telegraf/pull/10728) Update github.com/testcontainers/testcontainers-go from 0.11.1 to 0.12.0 +- [#10751](https://github.com/influxdata/telegraf/pull/10751) Update github.com/aws/aws-sdk-go-v2/service/dynamodb from 1.13.0 to 1.14.0 +- [#10752](https://github.com/influxdata/telegraf/pull/10752) Update github.com/nats-io/nats-server/v2 from 2.7.2 to 2.7.3 +- [#10757](https://github.com/influxdata/telegraf/pull/10757) Update github.com/miekg/dns from 1.1.43 to 1.1.46 +- [#10758](https://github.com/influxdata/telegraf/pull/10758) Update github.com/shirou/gopsutil/v3 from 3.21.12 to 3.22.2 +- [#10759](https://github.com/influxdata/telegraf/pull/10759) Update github.com/aws/aws-sdk-go-v2/feature/ec2/imds from 1.10.0 to 1.11.0 +- [#10772](https://github.com/influxdata/telegraf/pull/10772) Update github.com/Shopify/sarama from 1.29.1 to 1.32.0 +- [#10807](https://github.com/influxdata/telegraf/pull/10807) Update github.com/nats-io/nats-server/v2 from 2.7.3 to 2.7.4 + +## v1.21.4 [2022-02-16] + +### Bugfixes + +- [#10491](https://github.com/influxdata/telegraf/pull/10491) `inputs.docker` Update docker memory usage calculation +- [#10636](https://github.com/influxdata/telegraf/pull/10636) `inputs.ecs` Use current time as timestamp +- [#10551](https://github.com/influxdata/telegraf/pull/10551) `inputs.snmp` Ensure folders do not get loaded more than once +- [#10579](https://github.com/influxdata/telegraf/pull/10579) `inputs.win_perf_counters` Add deprecation warning and version to win_perf_counters option +- [#10635](https://github.com/influxdata/telegraf/pull/10635) `outputs.amqp` Check for nil client before closing in amqp +- [#10179](https://github.com/influxdata/telegraf/pull/10179) `outputs.azure_data_explorer` Lower RAM usage +- [#10513](https://github.com/influxdata/telegraf/pull/10513) `outputs.elasticsearch` Add scheme to fix error in sniffing option +- [#10657](https://github.com/influxdata/telegraf/pull/10657) `parsers.json_v2` Fix timestamp change during execution of json_v2 parser +- [#10618](https://github.com/influxdata/telegraf/pull/10618) `parsers.json_v2` Fix incorrect handling of json_v2 timestamp_path +- [#10468](https://github.com/influxdata/telegraf/pull/10468) `parsers.json_v2` Allow optional paths and handle wrong paths correctly +- [#10547](https://github.com/influxdata/telegraf/pull/10547) `serializers.prometheusremotewrite` Use the correct timestamp unit +- [#10647](https://github.com/influxdata/telegraf/pull/10647) Update all go.opentelemetry.io from 0.24.0 to 0.27.0 +- [#10652](https://github.com/influxdata/telegraf/pull/10652) Update github.com/signalfx/golib/v3 from 3.3.38 to 3.3.43 +- [#10653](https://github.com/influxdata/telegraf/pull/10653) 
Update github.com/aliyun/alibaba-cloud-sdk-go from 1.61.1004 to 1.61.1483 +- [#10503](https://github.com/influxdata/telegraf/pull/10503) Update github.com/denisenkom/go-mssqldb from 0.10.0 to 0.12.0 +- [#10626](https://github.com/influxdata/telegraf/pull/10626) Update github.com/gopcua/opcua from 0.2.3 to 0.3.1 +- [#10638](https://github.com/influxdata/telegraf/pull/10638) Update github.com/nats-io/nats-server/v2 from 2.6.5 to 2.7.2 +- [#10589](https://github.com/influxdata/telegraf/pull/10589) Update k8s.io/client-go from 0.22.2 to 0.23.3 +- [#10601](https://github.com/influxdata/telegraf/pull/10601) Update github.com/aws/aws-sdk-go-v2/service/kinesis from 1.6.0 to 1.13.0 +- [#10588](https://github.com/influxdata/telegraf/pull/10588) Update github.com/benbjohnson/clock from 1.1.0 to 1.3.0 +- [#10598](https://github.com/influxdata/telegraf/pull/10598) Update github.com/Azure/azure-kusto-go from 0.5.0 to 0.5.2 +- [#10571](https://github.com/influxdata/telegraf/pull/10571) Update github.com/vmware/govmomi from 0.27.2 to 0.27.3 +- [#10572](https://github.com/influxdata/telegraf/pull/10572) Update github.com/prometheus/client_golang from 1.11.0 to 1.12.1 +- [#10564](https://github.com/influxdata/telegraf/pull/10564) Update go.mongodb.org/mongo-driver from 1.7.3 to 1.8.3 +- [#10563](https://github.com/influxdata/telegraf/pull/10563) Update github.com/google/go-cmp from 0.5.6 to 0.5.7 +- [#10562](https://github.com/influxdata/telegraf/pull/10562) Update go.opentelemetry.io/collector/model from 0.39.0 to 0.43.2 +- [#10538](https://github.com/influxdata/telegraf/pull/10538) Update github.com/multiplay/go-ts3 from 1.0.0 to 1.0.1 +- [#10454](https://github.com/influxdata/telegraf/pull/10454) Update cloud.google.com/go/monitoring from 0.2.0 to 1.2.0 +- [#10536](https://github.com/influxdata/telegraf/pull/10536) Update github.com/vmware/govmomi from 0.26.0 to 0.27.2 + +### New External Plugins + +- [apt](https://github.com/x70b1/telegraf-apt) - contributed by @x70b1 +- [knot](https://github.com/x70b1/telegraf-knot) - contributed by @x70b1 + +## v1.21.3 [2022-01-27] + +### Bugfixes + +- [#10430](https://github.com/influxdata/telegraf/pull/10430) `inputs.snmp_trap` Fix translation of partially resolved OIDs +- [#10529](https://github.com/influxdata/telegraf/pull/10529) Update deprecation notices +- [#10525](https://github.com/influxdata/telegraf/pull/10525) Update grpc module to v1.44.0 +- [#10434](https://github.com/influxdata/telegraf/pull/10434) Update google.golang.org/api module from 0.54.0 to 0.65.0 +- [#10507](https://github.com/influxdata/telegraf/pull/10507) Update antchfx/xmlquery module from 1.3.6 to 1.3.9 +- [#10521](https://github.com/influxdata/telegraf/pull/10521) Update nsqio/go-nsq module from 1.0.8 to 1.1.0 +- [#10506](https://github.com/influxdata/telegraf/pull/10506) Update prometheus/common module from 0.31.1 to 0.32.1 +- [#10474](https://github.com/influxdata/telegraf/pull/10474) `inputs.ipset` Fix panic when command not found +- [#10504](https://github.com/influxdata/telegraf/pull/10504) Update cloud.google.com/go/pubsub module from 1.17.0 to 1.17.1 +- [#10432](https://github.com/influxdata/telegraf/pull/10432) Update influxdata/influxdb-observability/influx2otel module from 0.2.8 to 0.2.10 +- [#10478](https://github.com/influxdata/telegraf/pull/10478) `inputs.opcua` Remove duplicate fields +- [#10473](https://github.com/influxdata/telegraf/pull/10473) `parsers.nagios` Log correct errors when executing commands +- [#10463](https://github.com/influxdata/telegraf/pull/10463) 
`inputs.execd` Add newline in execd for prometheus parsing +- [#10451](https://github.com/influxdata/telegraf/pull/10451) Update shirou/gopsutil/v3 module from 3.21.10 to 3.21.12 +- [#10453](https://github.com/influxdata/telegraf/pull/10453) Update jackc/pgx/v4 module from 4.6.0 to 4.14.1 +- [#10449](https://github.com/influxdata/telegraf/pull/10449) Update Azure/azure-event-hubs-go/v3 module from 3.3.13 to 3.3.17 +- [#10450](https://github.com/influxdata/telegraf/pull/10450) Update gosnmp/gosnmp module from 1.33.0 to 1.34.0 +- [#10442](https://github.com/influxdata/telegraf/pull/10442) `parsers.wavefront` Add missing setting wavefront_disable_prefix_conversion +- [#10435](https://github.com/influxdata/telegraf/pull/10435) Update hashicorp/consul/api module from 1.9.1 to 1.12.0 +- [#10436](https://github.com/influxdata/telegraf/pull/10436) Update antchfx/xpath module from 1.1.11 to 1.2.0 +- [#10433](https://github.com/influxdata/telegraf/pull/10433) Update antchfx/jsonquery module from 1.1.4 to 1.1.5 +- [#10414](https://github.com/influxdata/telegraf/pull/10414) Update prometheus/procfs module from 0.6.0 to 0.7.3 +- [#10354](https://github.com/influxdata/telegraf/pull/10354) `inputs.snmp` Fix panic when mibs folder doesn't exist (#10346) +- [#10393](https://github.com/influxdata/telegraf/pull/10393) `outputs.syslog` Correctly set ASCII trailer for syslog output +- [#10415](https://github.com/influxdata/telegraf/pull/10415) Update aws/aws-sdk-go-v2/service/cloudwatchlogs module from 1.5.2 to 1.12.0 +- [#10416](https://github.com/influxdata/telegraf/pull/10416) Update kardianos/service module from 1.0.0 to 1.2.1 +- [#10396](https://github.com/influxdata/telegraf/pull/10396) `inputs.http` Allow empty http body +- [#10417](https://github.com/influxdata/telegraf/pull/10417) Update couchbase/go-couchbase module from 0.1.0 to 0.1.1 +- [#10413](https://github.com/influxdata/telegraf/pull/10413) `parsers.json_v2` Fix timestamp precision when using unix_ns format +- [#10418](https://github.com/influxdata/telegraf/pull/10418) Update pion/dtls/v2 module from 2.0.9 to 2.0.13 +- [#10402](https://github.com/influxdata/telegraf/pull/10402) Update containerd/containerd module to 1.5.9 +- [#8947](https://github.com/influxdata/telegraf/pull/8947) `outputs.timestream` Fix batching logic with write records and introduce concurrent requests +- [#10360](https://github.com/influxdata/telegraf/pull/10360) `outputs.amqp` Avoid connection leak when writing error +- [#10097](https://github.com/influxdata/telegraf/pull/10097) `outputs.stackdriver` Send correct interval start times for counters + +## v1.21.2 [2022-01-05] + +### Release Notes + +Happy New Year! 
+ +### Features + +- Added arm64 macOS builds +- Added riscv64 Linux builds +- Numerous changes to CircleCI config to ensure more timely completion and clearer execution flow + +### Bugfixes + +- [#10318](https://github.com/influxdata/telegraf/pull/10318) `inputs.disk` Fix missing storage in containers +- [#10324](https://github.com/influxdata/telegraf/pull/10324) `inputs.dpdk` Add note about dpdk and socket availability +- [#10296](https://github.com/influxdata/telegraf/pull/10296) `inputs.logparser` Resolve panic in logparser due to missing Log +- [#10322](https://github.com/influxdata/telegraf/pull/10322) `inputs.snmp` Ensure module load order to avoid snmp marshal error +- [#10321](https://github.com/influxdata/telegraf/pull/10321) `inputs.snmp` Do not require networking during tests +- [#10303](https://github.com/influxdata/telegraf/pull/10303) `inputs.snmp` Resolve SNMP panic due to no gosmi module +- [#10295](https://github.com/influxdata/telegraf/pull/10295) `inputs.snmp` Grab MIB table columns more accurately +- [#10299](https://github.com/influxdata/telegraf/pull/10299) `inputs.snmp` Check index before assignment when floating :: exists to avoid panic +- [#10301](https://github.com/influxdata/telegraf/pull/10301) `inputs.snmp` Fix panic if no mibs folder is found +- [#10373](https://github.com/influxdata/telegraf/pull/10373) `inputs.snmp_trap` Document deprecation of timeout parameter +- [#10377](https://github.com/influxdata/telegraf/pull/10377) `parsers.csv` Empty import of tzdata for Windows binaries to correctly set timezone +- [#10332](https://github.com/influxdata/telegraf/pull/10332) Update github.com/djherbis/times module from v1.2.0 to v1.5.0 +- [#10343](https://github.com/influxdata/telegraf/pull/10343) Update github.com/go-ldap/ldap/v3 module from v3.1.0 to v3.4.1 +- [#10255](https://github.com/influxdata/telegraf/pull/10255) Update github.com/gwos/tcg/sdk module from v0.0.0-20211130162655-32ad77586ccf to v0.0.0-20211223101342-35fbd1ae683c and improve logging + +## v1.21.1 [2021-12-16] + +### Bugfixes + +- [#10288](https://github.com/influxdata/telegraf/pull/10288) Fix panic in parsers due to missing Log for all plugins using SetParserFunc +- [#10247](https://github.com/influxdata/telegraf/pull/10247) Update go-sensu module to v2.12.0 +- [#10284](https://github.com/influxdata/telegraf/pull/10284) `inputs.openstack` Fix typo in openstack neutron input plugin (newtron) + +### Features + +- [#10239](https://github.com/influxdata/telegraf/pull/10239) Enable Darwin arm64 build +- [#10150](https://github.com/influxdata/telegraf/pull/10150) `inputs.smart` Add SMART plugin concurrency configuration option, nvme-cli v1.14+ support and lint fixes + +## v1.21.0 [2021-12-15] + +### Release Notes + +The signing for RPM digest has changed to use sha256 to improve security. Please see the pull request for more details: [#10272](https://github.com/influxdata/telegraf/pull/10272). + +Thank you to @zak-pawel for lots of linter fixes!
+ +### Bugfixes + +- [#10268](https://github.com/influxdata/telegraf/pull/10268) `inputs.snmp` Update snmp plugin to respect number of retries configured +- [#10225](https://github.com/influxdata/telegraf/pull/10225) `outputs.wavefront` Flush wavefront output sender on error to clean up broken connections +- [#9970](https://github.com/influxdata/telegraf/pull/9970) Restart Telegraf service if it is already running and upgraded via RPM +- [#10188](https://github.com/influxdata/telegraf/pull/10188) `parsers.xpath` Handle duplicate registration of protocol-buffer files gracefully +- [#10132](https://github.com/influxdata/telegraf/pull/10132) `inputs.http_listener_v2` Fix panic on close to check that Telegraf is closing +- [#10196](https://github.com/influxdata/telegraf/pull/10196) `outputs.elasticsearch` Implement NaN and inf handling for elasticsearch output +- [#10205](https://github.com/influxdata/telegraf/pull/10205) Print loaded plugins and deprecations for the --once and --test flags +- [#10214](https://github.com/influxdata/telegraf/pull/10214) `processors.ifname` Eliminate MIB dependency for ifname processor +- [#10206](https://github.com/influxdata/telegraf/pull/10206) `inputs.snmp` Optimize locking for SNMP MIBs loading +- [#9975](https://github.com/influxdata/telegraf/pull/9975) `inputs.kube_inventory` Set TLS server name config properly +- [#10230](https://github.com/influxdata/telegraf/pull/10230) Fix sudden close of Telegraf caused by OPC UA input plugin +- [#9913](https://github.com/influxdata/telegraf/pull/9913) Update eclipse/paho.mqtt.golang module from 1.3.0 to 1.3.5 +- [#10221](https://github.com/influxdata/telegraf/pull/10221) `parsers.json_v2` Fix parser timestamp setting order +- [#10209](https://github.com/influxdata/telegraf/pull/10209) `outputs.graylog` Ensure graylog spec fields not prefixed with _ +- [#10099](https://github.com/influxdata/telegraf/pull/10099) `inputs.zfs` Fix pool detection and metrics gathering for ZFS >= 2.1.x +- [#10007](https://github.com/influxdata/telegraf/pull/10007) `processors.ifname` Parallelism fix for ifname processor +- [#10208](https://github.com/influxdata/telegraf/pull/10208) `inputs.mqtt_consumer` Mqtt topic extracting no longer requires all three fields +- [#9616](https://github.com/influxdata/telegraf/pull/9616) Windows Service - graceful shutdown of telegraf +- [#10203](https://github.com/influxdata/telegraf/pull/10203) Revert unintended corruption of the Makefile +- [#10112](https://github.com/influxdata/telegraf/pull/10112) `inputs.cloudwatch` Fix CloudWatch metrics collection +- [#10178](https://github.com/influxdata/telegraf/pull/10178) `outputs.all` Register bigquery to output plugins +- [#10165](https://github.com/influxdata/telegraf/pull/10165) `inputs.sysstat` Sysstat to use unique temp file vs hard-coded +- [#10046](https://github.com/influxdata/telegraf/pull/10046) Update nats-server to support OpenBSD +- [#10091](https://github.com/influxdata/telegraf/pull/10091) `inputs.prometheus` Check error before defer in prometheus k8s +- [#10101](https://github.com/influxdata/telegraf/pull/10101) `inputs.win_perf_counters` Add setting to win_perf_counters input to ignore localization +- [#10136](https://github.com/influxdata/telegraf/pull/10136) `inputs.snmp_trap` Remove snmptranslate from readme and fix default path +- [#10116](https://github.com/influxdata/telegraf/pull/10116) `inputs.statsd` Fix statsd input plugin parse error +- [#10131](https://github.com/influxdata/telegraf/pull/10131) Skip knxlistener when writing the sample config +- 
[#10119](https://github.com/influxdata/telegraf/pull/10119) `inputs.cpu` Update shirou/gopsutil from v2 to v3 +- [#10074](https://github.com/influxdata/telegraf/pull/10074) `outputs.graylog` Fix failing test due to port already in use +- [#9865](https://github.com/influxdata/telegraf/pull/9865) `inputs.directory_monitor` Fix directory monitor input plugin when data format is CSV and csv_skip_rows>0 and csv_header_row_count>=1 +- [#9862](https://github.com/influxdata/telegraf/pull/9862) `outputs.graylog` Fix graylog plugin TLS support and message format +- [#9908](https://github.com/influxdata/telegraf/pull/9908) `parsers.json_v2` Remove dead code +- [#9881](https://github.com/influxdata/telegraf/pull/9881) `outputs.graylog` Mute graylog UDP/TCP tests by marking them as integration +- [#9751](https://github.com/influxdata/telegraf/pull/9751) Update google.golang.org/grpc module from 1.39.1 to 1.40.0 + +### Features + +- [#10200](https://github.com/influxdata/telegraf/pull/10200) `aggregators.deprecations.go` Implement deprecation infrastructure +- [#9518](https://github.com/influxdata/telegraf/pull/9518) `inputs.snmp` Snmp to use gosmi +- [#10130](https://github.com/influxdata/telegraf/pull/10130) `outputs.influxdb_v2` Add retry to 413 errors with InfluxDB output +- [#10144](https://github.com/influxdata/telegraf/pull/10144) `inputs.win_services` Add exclude filter +- [#9995](https://github.com/influxdata/telegraf/pull/9995) `inputs.mqtt_consumer` Enable extracting tag values from MQTT topics +- [#9419](https://github.com/influxdata/telegraf/pull/9419) `aggregators.all` Add support for aggregators as Starlark scripts +- [#9561](https://github.com/influxdata/telegraf/pull/9561) `processors.regex` Extend regexp processor to allow renaming of measurements, tags and fields +- [#8184](https://github.com/influxdata/telegraf/pull/8184) `outputs.http` Add use_batch_format for HTTP output plugin +- [#9988](https://github.com/influxdata/telegraf/pull/9988) `inputs.kafka_consumer` Add max_processing_time config to Kafka Consumer input +- [#9841](https://github.com/influxdata/telegraf/pull/9841) `inputs.sqlserver` Add additional metrics to support elastic pool (sqlserver plugin) +- [#9910](https://github.com/influxdata/telegraf/pull/9910) `common.tls` Filter client certificates by DNS names +- [#9942](https://github.com/influxdata/telegraf/pull/9942) `outputs.azure_data_explorer` Add option to skip table creation in azure data explorer output +- [#9984](https://github.com/influxdata/telegraf/pull/9984) `processors.ifname` Add more details to log messages +- [#9833](https://github.com/influxdata/telegraf/pull/9833) `common.kafka` Add metadata full to config +- [#9876](https://github.com/influxdata/telegraf/pull/9876) Update etc/telegraf.conf and etc/telegraf_windows.conf +- [#9256](https://github.com/influxdata/telegraf/pull/9256) `inputs.modbus` Modbus connection settings (serial) +- [#9860](https://github.com/influxdata/telegraf/pull/9860) `inputs.directory_monitor` Add the ability to create and name a tag containing the filename using the directory monitor input plugin +- [#9740](https://github.com/influxdata/telegraf/pull/9740) `inputs.prometheus` Add ignore_timestamp option +- [#9513](https://github.com/influxdata/telegraf/pull/9513) `processors.starlark` Starlark processor example for processing sparkplug_b messages +- [#9449](https://github.com/influxdata/telegraf/pull/9449) `parsers.json_v2` Support defining field/tag tables within an object table +- [#9827](https://github.com/influxdata/telegraf/pull/9827) 
`inputs.elasticsearch_query` Add debug query output to elasticsearch_query +- [#9241](https://github.com/influxdata/telegraf/pull/9241) `inputs.snmp` Telegraf to merge tables with different indexes +- [#9013](https://github.com/influxdata/telegraf/pull/9013) `inputs.opcua` Allow user to select the source for the metric timestamp. +- [#9706](https://github.com/influxdata/telegraf/pull/9706) `inputs.puppetagent` Add measurements from puppet 5 +- [#9644](https://github.com/influxdata/telegraf/pull/9644) `outputs.graylog` Add graylog plugin TCP support +- [#8229](https://github.com/influxdata/telegraf/pull/8229) `outputs.azure_data_explorer` Add json_timestamp_layout option + +### New Input Plugins + +- [#9724](https://github.com/influxdata/telegraf/pull/9724) Add intel_pmu plugin +- [#9771](https://github.com/influxdata/telegraf/pull/9771) Add Linux Volume Manager input plugin +- [#9236](https://github.com/influxdata/telegraf/pull/9236) Openstack input plugin + +### New Output Plugins + +- [#9891](https://github.com/influxdata/telegraf/pull/9891) Add new groundwork output plugin +- [#9923](https://github.com/influxdata/telegraf/pull/9923) Add mongodb output plugin +- [#9346](https://github.com/influxdata/telegraf/pull/9346) Azure Event Hubs output plugin + +## v1.20.4 [2021-11-17] + +### Release Notes + +- [#10073](https://github.com/influxdata/telegraf/pull/10073) Update go version from 1.17.2 to 1.17.3 +- [#10100](https://github.com/influxdata/telegraf/pull/10100) Update deprecated plugin READMEs to better indicate deprecation + +Thank you to @zak-pawel for lots of linter fixes! + +- [#9986](https://github.com/influxdata/telegraf/pull/9986) Linter fixes for plugins/inputs/[h-j]* +- [#9999](https://github.com/influxdata/telegraf/pull/9999) Linter fixes for plugins/inputs/[k-l]* +- [#10006](https://github.com/influxdata/telegraf/pull/10006) Linter fixes for plugins/inputs/m* +- [#10011](https://github.com/influxdata/telegraf/pull/10011) Linter fixes for plugins/inputs/[n-o]* + +### Bugfixes + +- [#10089](https://github.com/influxdata/telegraf/pull/10089) Update BurntSushi/toml from 0.3.1 to 0.4.1 +- [#10075](https://github.com/influxdata/telegraf/pull/10075) `inputs.mongodb` Update readme with correct connection URI +- [#10076](https://github.com/influxdata/telegraf/pull/10076) Update gosnmp module from 1.32 to 1.33 +- [#9966](https://github.com/influxdata/telegraf/pull/9966) `inputs.mysql` Fix type conversion follow-up +- [#10068](https://github.com/influxdata/telegraf/pull/10068) `inputs.proxmox` Changed VM ID from string to int +- [#10047](https://github.com/influxdata/telegraf/pull/10047) `inputs.modbus` Do not build modbus on openbsd +- [#10019](https://github.com/influxdata/telegraf/pull/10019) `inputs.cisco_telemetry_mdt` Move to new protobuf library +- [#10001](https://github.com/influxdata/telegraf/pull/10001) `outputs.loki` Add metric name with label "__name" +- [#9980](https://github.com/influxdata/telegraf/pull/9980) `inputs.nvidia_smi` Set the default path correctly +- [#10010](https://github.com/influxdata/telegraf/pull/10010) Update go.opentelemetry.io/otel from v0.23.0 to v0.24.0 +- [#10044](https://github.com/influxdata/telegraf/pull/10044) `inputs.sqlserver` Add elastic pool in supported versions in sqlserver +- [#10029](https://github.com/influxdata/telegraf/pull/10029) `inputs.influxdb` Update influxdb input schema docs +- [#10026](https://github.com/influxdata/telegraf/pull/10026) `inputs.intel_rdt` Correct timezone handling + +## v1.20.3 [2021-10-27] + +### Release Notes 
+ +- [#9873](https://github.com/influxdata/telegraf/pull/9873) Update Go to 1.17.2 + +### Bugfixes + +- [#9948](https://github.com/influxdata/telegraf/pull/9948) Update github.com/aws/aws-sdk-go-v2/config module from 1.8.2 to 1.8.3 +- [#9997](https://github.com/influxdata/telegraf/pull/9997) `inputs.ipmi_sensor` Redact IPMI password in logs +- [#9978](https://github.com/influxdata/telegraf/pull/9978) `inputs.kube_inventory` Do not skip resources with zero s/ns timestamps +- [#9998](https://github.com/influxdata/telegraf/pull/9998) Update gjson module to v1.10.2 +- [#9973](https://github.com/influxdata/telegraf/pull/9973) `inputs.procstat` Revert and fix tag creation +- [#9943](https://github.com/influxdata/telegraf/pull/9943) `inputs.sqlserver` Add sqlserver plugin integration tests +- [#9647](https://github.com/influxdata/telegraf/pull/9647) `inputs.cloudwatch` Use the AWS SDK v2 library +- [#9954](https://github.com/influxdata/telegraf/pull/9954) `processors.starlark` Starlark pop operation for non-existing keys +- [#9956](https://github.com/influxdata/telegraf/pull/9956) `inputs.zfs` Check return code of zfs command for FreeBSD +- [#9585](https://github.com/influxdata/telegraf/pull/9585) `inputs.kube_inventory` Fix segfault in ingress, persistentvolumeclaim, statefulset in kube_inventory +- [#9901](https://github.com/influxdata/telegraf/pull/9901) `inputs.ethtool` Add normalization of tags for ethtool input plugin +- [#9957](https://github.com/influxdata/telegraf/pull/9957) `inputs.internet_speed` Resolve missing latency field +- [#9662](https://github.com/influxdata/telegraf/pull/9662) `inputs.prometheus` Decode Prometheus scrape path from Kubernetes labels +- [#9933](https://github.com/influxdata/telegraf/pull/9933) `inputs.procstat` Correct conversion of int with specific bit size +- [#9940](https://github.com/influxdata/telegraf/pull/9940) `inputs.webhooks` Provide more fields for papertrail event webhook +- [#9892](https://github.com/influxdata/telegraf/pull/9892) `inputs.mongodb` Solve compatibility issue for mongodb inputs when using 5.x replicaset +- [#9768](https://github.com/influxdata/telegraf/pull/9768) Update github.com/Azure/azure-kusto-go module from 0.3.2 to 0.4.0 +- [#9904](https://github.com/influxdata/telegraf/pull/9904) Update github.com/golang-jwt/jwt/v4 module from 4.0.0 to 4.1.0 +- [#9921](https://github.com/influxdata/telegraf/pull/9921) Update github.com/apache/thrift module from 0.14.2 to 0.15.0 +- [#9403](https://github.com/influxdata/telegraf/pull/9403) `inputs.mysql` Fix inconsistent metric types in mysql +- [#9905](https://github.com/influxdata/telegraf/pull/9905) Update github.com/docker/docker module from 20.10.7+incompatible to 20.10.9+incompatible +- [#9920](https://github.com/influxdata/telegraf/pull/9920) `inputs.prometheus` Move err check to correct place +- [#9869](https://github.com/influxdata/telegraf/pull/9869) Update github.com/prometheus/common module from 0.26.0 to 0.31.1 +- [#9866](https://github.com/influxdata/telegraf/pull/9866) Update snowflake database driver module to 1.6.2 +- [#9527](https://github.com/influxdata/telegraf/pull/9527) `inputs.intel_rdt` Allow sudo usage +- [#9893](https://github.com/influxdata/telegraf/pull/9893) Update github.com/jaegertracing/jaeger module from 1.15.1 to 1.26.0 + +### New External Plugins + +- [IBM DB2](https://github.com/bonitoo-io/telegraf-input-db2) - contributed by @sranka +- [Oracle Database](https://github.com/bonitoo-io/telegraf-input-oracle) - contributed by @sranka + +## v1.20.2 [2021-10-07] + 
+### Bugfixes + +- [#9878](https://github.com/influxdata/telegraf/pull/9878) `inputs.cloudwatch` Use new session API +- [#9872](https://github.com/influxdata/telegraf/pull/9872) `parsers.json_v2` Fix duplicate line_protocol when using object and fields +- [#9787](https://github.com/influxdata/telegraf/pull/9787) `parsers.influx` Fix memory leak in influx parser +- [#9880](https://github.com/influxdata/telegraf/pull/9880) `inputs.stackdriver` Migrate to cloud.google.com/go/monitoring/apiv3/v2 +- [#9887](https://github.com/influxdata/telegraf/pull/9887) Fix Makefile typo that prevented i386 tar and rpm packages from being built + +## v1.20.1 [2021-10-06] + +### Bugfixes + +- [#9776](https://github.com/influxdata/telegraf/pull/9776) Update k8s.io/apimachinery module from 0.21.1 to 0.22.2 +- [#9864](https://github.com/influxdata/telegraf/pull/9864) Update containerd module to v1.5.7 +- [#9863](https://github.com/influxdata/telegraf/pull/9863) Update consul module to v1.11.0 +- [#9846](https://github.com/influxdata/telegraf/pull/9846) `inputs.mongodb` Fix panic due to nil dereference +- [#9850](https://github.com/influxdata/telegraf/pull/9850) `inputs.intel_rdt` Prevent timeout when logging +- [#9848](https://github.com/influxdata/telegraf/pull/9848) `outputs.loki` Update http_headers setting to match sample config +- [#9808](https://github.com/influxdata/telegraf/pull/9808) `inputs.procstat` Add missing tags +- [#9803](https://github.com/influxdata/telegraf/pull/9803) `outputs.mqtt` Add keep alive config option and documentation around issue with eclipse/mosquitto version +- [#9800](https://github.com/influxdata/telegraf/pull/9800) Fix output buffer never completely flushing +- [#9458](https://github.com/influxdata/telegraf/pull/9458) `inputs.couchbase` Fix insecure certificate validation +- [#9797](https://github.com/influxdata/telegraf/pull/9797) `inputs.opentelemetry` Fix error returned to OpenTelemetry client +- [#9789](https://github.com/influxdata/telegraf/pull/9789) Update github.com/testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 +- [#9791](https://github.com/influxdata/telegraf/pull/9791) Update github.com/Azure/go-autorest/autorest/adal module +- [#9678](https://github.com/influxdata/telegraf/pull/9678) Update github.com/Azure/go-autorest/autorest/azure/auth module from 0.5.6 to 0.5.8 +- [#9769](https://github.com/influxdata/telegraf/pull/9769) Update cloud.google.com/go/pubsub module from 1.15.0 to 1.17.0 +- [#9770](https://github.com/influxdata/telegraf/pull/9770) Update github.com/aws/smithy-go module from 1.3.1 to 1.8.0 + +### Features + +- [#9838](https://github.com/influxdata/telegraf/pull/9838) `inputs.elasticsearch_query` Add custom time/date format field + +## v1.20.0 [2021-09-17] + +### Release Notes + +- [#9642](https://github.com/influxdata/telegraf/pull/9642) Build with Golang 1.17 + +### Bugfixes + +- [#9700](https://github.com/influxdata/telegraf/pull/9700) Update thrift module to 0.14.2 and zipkin-go-opentracing to 0.4.5 +- [#9587](https://github.com/influxdata/telegraf/pull/9587) `outputs.opentelemetry` Use headers config in grpc requests +- [#9713](https://github.com/influxdata/telegraf/pull/9713) Update runc module to v1.0.0-rc95 to address CVE-2021-30465 +- [#9699](https://github.com/influxdata/telegraf/pull/9699) Migrate dgrijalva/jwt-go to golang-jwt/jwt/v4 +- [#9139](https://github.com/influxdata/telegraf/pull/9139) `serializers.prometheus` Update timestamps and expiration time as new data arrives +- 
[#9625](https://github.com/influxdata/telegraf/pull/9625) `outputs.graylog` Output timestamp with fractional seconds +- [#9655](https://github.com/influxdata/telegraf/pull/9655) Update cloud.google.com/go/pubsub module from 1.2.0 to 1.15.0 +- [#9674](https://github.com/influxdata/telegraf/pull/9674) `inputs.mongodb` Change command based on server version +- [#9676](https://github.com/influxdata/telegraf/pull/9676) `outputs.dynatrace` Remove hardcoded int value +- [#9619](https://github.com/influxdata/telegraf/pull/9619) `outputs.influxdb_v2` Increase accepted retry-after header values +- [#9652](https://github.com/influxdata/telegraf/pull/9652) Update tinylib/msgp module from 1.1.5 to 1.1.6 +- [#9471](https://github.com/influxdata/telegraf/pull/9471) `inputs.sql` Make timeout apply to single query +- [#9760](https://github.com/influxdata/telegraf/pull/9760) Update shirou/gopsutil module to 3.21.8 +- [#9707](https://github.com/influxdata/telegraf/pull/9707) `inputs.logstash` Add additional logstash output plugin stats +- [#9656](https://github.com/influxdata/telegraf/pull/9656) Update miekg/dns module from 1.1.31 to 1.1.43 +- [#9750](https://github.com/influxdata/telegraf/pull/9750) Update antchfx/xmlquery module from 1.3.5 to 1.3.6 +- [#9757](https://github.com/influxdata/telegraf/pull/9757) `parsers.registry.go` Fix panic for non-existing metric names +- [#9677](https://github.com/influxdata/telegraf/pull/9677) Update Azure/azure-event-hubs-go/v3 module from 3.2.0 to 3.3.13 +- [#9653](https://github.com/influxdata/telegraf/pull/9653) Update prometheus/client_golang module from 1.7.1 to 1.11.0 +- [#9693](https://github.com/influxdata/telegraf/pull/9693) `inputs.cloudwatch` Fix pagination error +- [#9727](https://github.com/influxdata/telegraf/pull/9727) `outputs.http` Add error message logging +- [#9718](https://github.com/influxdata/telegraf/pull/9718) Update influxdata/influxdb-observability module from 0.2.4 to 0.2.7 +- [#9560](https://github.com/influxdata/telegraf/pull/9560) Update gopcua/opcua module +- [#9544](https://github.com/influxdata/telegraf/pull/9544) `inputs.couchbase` Fix memory leak +- [#9588](https://github.com/influxdata/telegraf/pull/9588) `outputs.opentelemetry` Use attributes setting + +### Features + +- [#9665](https://github.com/influxdata/telegraf/pull/9665) `inputs.systemd_units` Add pattern support +- [#9598](https://github.com/influxdata/telegraf/pull/9598) `outputs.sql` Add bool datatype +- [#9386](https://github.com/influxdata/telegraf/pull/9386) `inputs.cloudwatch` Pull metrics from multiple AWS CloudWatch namespaces +- [#9411](https://github.com/influxdata/telegraf/pull/9411) `inputs.cloudwatch` Support AWS Web Identity Provider +- [#9570](https://github.com/influxdata/telegraf/pull/9570) `inputs.modbus` Add support for RTU over TCP +- [#9488](https://github.com/influxdata/telegraf/pull/9488) `inputs.procstat` Support cgroup globs and include systemd unit children +- [#9322](https://github.com/influxdata/telegraf/pull/9322) `inputs.suricata` Support alert event type +- [#5464](https://github.com/influxdata/telegraf/pull/5464) `inputs.prometheus` Add ability to query Consul Service catalog +- [#8641](https://github.com/influxdata/telegraf/pull/8641) `outputs.prometheus_client` Add Landing page +- [#9529](https://github.com/influxdata/telegraf/pull/9529) `inputs.http_listener_v2` Allow multiple paths and add path_tag +- [#9395](https://github.com/influxdata/telegraf/pull/9395) Add cookie authentication to HTTP input and
output plugins +- [#8454](https://github.com/influxdata/telegraf/pull/8454) `inputs.syslog` Add RFC3164 support +- [#9351](https://github.com/influxdata/telegraf/pull/9351) `inputs.jenkins` Add option to include nodes by name +- [#9277](https://github.com/influxdata/telegraf/pull/9277) Add JSON, MessagePack, and Protocol-buffers format support to the XPath parser +- [#9343](https://github.com/influxdata/telegraf/pull/9343) `inputs.snmp_trap` Improve MIB lookup performance +- [#9342](https://github.com/influxdata/telegraf/pull/9342) `outputs.newrelic` Add option to override metric_url +- [#9306](https://github.com/influxdata/telegraf/pull/9306) `inputs.smart` Add power mode status +- [#9762](https://github.com/influxdata/telegraf/pull/9762) `inputs.bond` Add count of bonded slaves (for easier alerting) +- [#9675](https://github.com/influxdata/telegraf/pull/9675) `outputs.dynatrace` Remove special handling from counters and update dynatrace-oss/dynatrace-metric-utils-go module to 0.3.0 + +### New Input Plugins + +- [#9602](https://github.com/influxdata/telegraf/pull/9602) Add rocm_smi input to monitor AMD GPUs +- [#9101](https://github.com/influxdata/telegraf/pull/9101) Add mdstat input to gather statistics from /proc/mdstat +- [#3536](https://github.com/influxdata/telegraf/pull/3536) Add Elasticsearch query input +- [#9623](https://github.com/influxdata/telegraf/pull/9623) Add Internet Speed Monitor input plugin + +### New Output Plugins + +- [#9228](https://github.com/influxdata/telegraf/pull/9228) Add OpenTelemetry output +- [#9426](https://github.com/influxdata/telegraf/pull/9426) Add Azure Data Explorer (ADX) output + +## v1.19.3 [2021-08-18] + +### Bugfixes + +- [#9639](https://github.com/influxdata/telegraf/pull/9639) Update sirupsen/logrus module from 1.7.0 to 1.8.1 +- [#9638](https://github.com/influxdata/telegraf/pull/9638) Update testcontainers/testcontainers-go module from 0.11.0 to 0.11.1 +- [#9637](https://github.com/influxdata/telegraf/pull/9637) Update golang/snappy module from 0.0.3 to 0.0.4 +- [#9636](https://github.com/influxdata/telegraf/pull/9636) Update aws/aws-sdk-go-v2 module from 1.3.2 to 1.8.0 +- [#9605](https://github.com/influxdata/telegraf/pull/9605) `inputs.prometheus` Fix prometheus kubernetes pod discovery +- [#9606](https://github.com/influxdata/telegraf/pull/9606) `inputs.redis` Improve redis commands documentation +- [#9566](https://github.com/influxdata/telegraf/pull/9566) `outputs.cratedb` Replace dots in tag keys with underscores +- [#9401](https://github.com/influxdata/telegraf/pull/9401) `inputs.clickhouse` Fix panic and improve handling of empty result sets +- [#9583](https://github.com/influxdata/telegraf/pull/9583) `inputs.opcua` Avoid closing session on a closed connection +- [#9576](https://github.com/influxdata/telegraf/pull/9576) `processors.aws` Refactor ec2 init for config-api +- [#9571](https://github.com/influxdata/telegraf/pull/9571) `outputs.loki` Sort logs by timestamp before writing to Loki +- [#9524](https://github.com/influxdata/telegraf/pull/9524) `inputs.opcua` Fix reconnection regression introduced in 1.19.1 +- [#9581](https://github.com/influxdata/telegraf/pull/9581) `inputs.kube_inventory` Fix k8s nodes and pods parsing error +- [#9577](https://github.com/influxdata/telegraf/pull/9577) Update sensu/go module to v2.9.0 +- [#9554](https://github.com/influxdata/telegraf/pull/9554) `inputs.postgresql` Normalize unix socket path +- [#9565](https://github.com/influxdata/telegraf/pull/9565) Update hashicorp/consul/api module to 1.9.1 +- 
[#9552](https://github.com/influxdata/telegraf/pull/9552) `inputs.vsphere` Update vmware/govmomi module to v0.26.0 in order to support vSphere 7.0 +- [#9550](https://github.com/influxdata/telegraf/pull/9550) `inputs.opcua` Do not skip good quality nodes after a bad quality node is encountered + +## v1.19.2 [2021-07-28] + +### Release Notes + +- [#9542](https://github.com/influxdata/telegraf/pull/9542) Update Go to v1.16.6 + +### Bugfixes + +- [#9363](https://github.com/influxdata/telegraf/pull/9363) `outputs.dynatrace` Update dynatrace output to allow optional default dimensions +- [#9526](https://github.com/influxdata/telegraf/pull/9526) `outputs.influxdb` Fix metrics reported as written but not actually written +- [#9549](https://github.com/influxdata/telegraf/pull/9549) `inputs.kube_inventory` Prevent segfault in persistent volume claims +- [#9503](https://github.com/influxdata/telegraf/pull/9503) `inputs.nsq_consumer` Fix connection error when not using server setting +- [#9540](https://github.com/influxdata/telegraf/pull/9540) `inputs.sql` Fix handling bool column +- [#9387](https://github.com/influxdata/telegraf/pull/9387) Linter fixes for plugins/inputs/[fg]* +- [#9438](https://github.com/influxdata/telegraf/pull/9438) `inputs.kubernetes` Attach the pod labels to kubernetes_pod_volume and kubernetes_pod_network metrics +- [#9519](https://github.com/influxdata/telegraf/pull/9519) `processors.ifname` Fix SNMP empty metric name +- [#8587](https://github.com/influxdata/telegraf/pull/8587) `inputs.sqlserver` Add tempdb troubleshooting stats and missing V2 query metrics +- [#9323](https://github.com/influxdata/telegraf/pull/9323) `inputs.x509_cert` Prevent x509_cert from hanging on UDP connection +- [#9504](https://github.com/influxdata/telegraf/pull/9504) `parsers.json_v2` Simplify how nesting is handled +- [#9493](https://github.com/influxdata/telegraf/pull/9493) `inputs.mongodb` Switch to official mongo-go-driver module to fix SSL auth failure +- [#9491](https://github.com/influxdata/telegraf/pull/9491) `outputs.dynatrace` Fix panic caused by uninitialized loggedMetrics map +- [#9497](https://github.com/influxdata/telegraf/pull/9497) `inputs.prometheus` Fix prometheus cadvisor authentication +- [#9520](https://github.com/influxdata/telegraf/pull/9520) `parsers.json_v2` Add support for large uint64 and int64 numbers +- [#9447](https://github.com/influxdata/telegraf/pull/9447) `inputs.statsd` Fix regression that didn't allow integer percentiles +- [#9466](https://github.com/influxdata/telegraf/pull/9466) `inputs.sqlserver` Provide detailed error message in telegraf log +- [#9399](https://github.com/influxdata/telegraf/pull/9399) Update dynatrace-metric-utils-go module to v0.2.0 +- [#8108](https://github.com/influxdata/telegraf/pull/8108) `inputs.cgroup` Allow multiple keys when parsing cgroups +- [#9479](https://github.com/influxdata/telegraf/pull/9479) `parsers.json_v2` Fix json_v2 parser to handle nested objects in arrays properly + +### Features + +- [#9485](https://github.com/influxdata/telegraf/pull/9485) Add option to automatically reload settings when config file is modified + +## v1.19.1 [2021-07-07] + +### Bugfixes + +- [#9388](https://github.com/influxdata/telegraf/pull/9388) `inputs.sqlserver` Require authentication method to be specified +- [#9456](https://github.com/influxdata/telegraf/pull/9456) `inputs.kube_inventory` Fix segfault in kube_inventory +- [#9448](https://github.com/influxdata/telegraf/pull/9448) `inputs.couchbase` Fix panic +- 
[#9444](https://github.com/influxdata/telegraf/pull/9444) `inputs.knx_listener` Fix nil pointer panic +- [#9446](https://github.com/influxdata/telegraf/pull/9446) `inputs.procstat` Update gopsutil module to fix panic +- [#9443](https://github.com/influxdata/telegraf/pull/9443) `inputs.rabbitmq` Fix JSON unmarshall regression +- [#9369](https://github.com/influxdata/telegraf/pull/9369) Update nats-server module to v2.2.6 +- [#9429](https://github.com/influxdata/telegraf/pull/9429) `inputs.dovecot` Exclude read-timeout from being an error +- [#9423](https://github.com/influxdata/telegraf/pull/9423) `inputs.statsd` Don't stop parsing after parsing error +- [#9370](https://github.com/influxdata/telegraf/pull/9370) Update apimachinery module to v0.21.1 +- [#9373](https://github.com/influxdata/telegraf/pull/9373) Update jwt module to v1.2.2 and jwt-go module to v3.2.3 +- [#9412](https://github.com/influxdata/telegraf/pull/9412) Update couchbase module to v0.1.0 +- [#9366](https://github.com/influxdata/telegraf/pull/9366) `inputs.snmp` Add a check for oid and name to prevent empty metrics +- [#9413](https://github.com/influxdata/telegraf/pull/9413) `outputs.http` Fix toml error when parsing insecure_skip_verify +- [#9400](https://github.com/influxdata/telegraf/pull/9400) `inputs.x509_cert` Fix 'source' tag for https +- [#9375](https://github.com/influxdata/telegraf/pull/9375) Update signalfx module to v3.3.34 +- [#9406](https://github.com/influxdata/telegraf/pull/9406) `parsers.json_v2` Don't require tags to be added to included_keys +- [#9289](https://github.com/influxdata/telegraf/pull/9289) `inputs.x509_cert` Fix SNI support +- [#9372](https://github.com/influxdata/telegraf/pull/9372) Update gjson module to v1.8.0 +- [#9379](https://github.com/influxdata/telegraf/pull/9379) Linter fixes for plugins/inputs/[de]* + +## v1.19.0 [2021-06-17] + +### Release Notes + +- Many linter fixes - thanks @zak-pawel and all! +- [#9331](https://github.com/influxdata/telegraf/pull/9331) Update Go to 1.16.5 + +### Bugfixes + +- [#9182](https://github.com/influxdata/telegraf/pull/9182) Update pgx to v4 +- [#9275](https://github.com/influxdata/telegraf/pull/9275) Fix reading config files starting with http: +- [#9196](https://github.com/influxdata/telegraf/pull/9196) `serializers.prometheusremotewrite` Update dependency and remove tags with empty values +- [#9051](https://github.com/influxdata/telegraf/pull/9051) `outputs.kafka` Don't prevent telegraf from starting when there's a connection error +- [#8795](https://github.com/influxdata/telegraf/pull/8795) `parsers.prometheusremotewrite` Update prometheus dependency to v2.21.0 +- [#9295](https://github.com/influxdata/telegraf/pull/9295) `outputs.dynatrace` Use dynatrace-metric-utils +- [#9368](https://github.com/influxdata/telegraf/pull/9368) `parsers.json_v2` Update json_v2 parser to handle null types +- [#9359](https://github.com/influxdata/telegraf/pull/9359) `inputs.sql` Fix import of sqlite and ignore it on all platforms that require CGO. 
+- [#9329](https://github.com/influxdata/telegraf/pull/9329) `inputs.kube_inventory` Fix connecting to the wrong URL +- [#9358](https://github.com/influxdata/telegraf/pull/9358) Upgrade denisenkom/go-mssqldb to v0.10.0 +- [#9283](https://github.com/influxdata/telegraf/pull/9283) `processors.parser` Fix segfault +- [#9243](https://github.com/influxdata/telegraf/pull/9243) `inputs.docker` Close all idle connections +- [#9338](https://github.com/influxdata/telegraf/pull/9338) `inputs.suricata` Support new JSON format +- [#9296](https://github.com/influxdata/telegraf/pull/9296) `outputs.influxdb` Fix endless retries + +### Features + +- [#8987](https://github.com/influxdata/telegraf/pull/8987) Config file environment variable can be a URL +- [#9297](https://github.com/influxdata/telegraf/pull/9297) `outputs.datadog` Add HTTP proxy to datadog output +- [#9087](https://github.com/influxdata/telegraf/pull/9087) Add named timestamp formats +- [#9276](https://github.com/influxdata/telegraf/pull/9276) `inputs.vsphere` Add config option for the historical interval duration +- [#9274](https://github.com/influxdata/telegraf/pull/9274) `inputs.ping` Add an option to specify packet size +- [#9007](https://github.com/influxdata/telegraf/pull/9007) Allow multiple "--config" and "--config-directory" flags +- [#9249](https://github.com/influxdata/telegraf/pull/9249) `outputs.graphite` Allow more characters in graphite tags +- [#8351](https://github.com/influxdata/telegraf/pull/8351) `inputs.sqlserver` Added login_name +- [#9223](https://github.com/influxdata/telegraf/pull/9223) `inputs.dovecot` Add support for unix domain sockets +- [#9118](https://github.com/influxdata/telegraf/pull/9118) `processors.strings` Add UTF-8 sanitizer +- [#9156](https://github.com/influxdata/telegraf/pull/9156) `inputs.aliyuncms` Add config option list of regions to query +- [#9138](https://github.com/influxdata/telegraf/pull/9138) `common.http` Add OAuth2 to HTTP input +- [#8822](https://github.com/influxdata/telegraf/pull/8822) `inputs.sqlserver` Enable Azure Active Directory (AAD) authentication support +- [#9136](https://github.com/influxdata/telegraf/pull/9136) `inputs.cloudwatch` Add wildcard support in dimensions configuration +- [#5517](https://github.com/influxdata/telegraf/pull/5517) `inputs.mysql` Gather all mysql channels +- [#8911](https://github.com/influxdata/telegraf/pull/8911) `processors.enum` Support float64 +- [#9105](https://github.com/influxdata/telegraf/pull/9105) `processors.starlark` Support nanosecond resolution timestamp +- [#9080](https://github.com/influxdata/telegraf/pull/9080) `inputs.logstash` Add support for version 7 queue stats +- [#9074](https://github.com/influxdata/telegraf/pull/9074) `parsers.prometheusremotewrite` Add starlark script for renaming metrics +- [#9032](https://github.com/influxdata/telegraf/pull/9032) `inputs.couchbase` Add ~200 more Couchbase metrics via Buckets endpoint +- [#8596](https://github.com/influxdata/telegraf/pull/8596) `inputs.sqlserver` Add service and save connection pools +- [#9042](https://github.com/influxdata/telegraf/pull/9042) `processors.starlark` Add math module +- [#6952](https://github.com/influxdata/telegraf/pull/6952) `inputs.x509_cert` Wildcard support for cert filenames +- [#9004](https://github.com/influxdata/telegraf/pull/9004) `processors.starlark` Add time module +- [#8891](https://github.com/influxdata/telegraf/pull/8891) `inputs.kinesis_consumer` Add content_encoding option with gzip and zlib support +- 
[#8996](https://github.com/influxdata/telegraf/pull/8996) `processors.starlark` Add an example showing how to obtain IOPS from diskio input +- [#8966](https://github.com/influxdata/telegraf/pull/8966) `inputs.http_listener_v2` Add support for snappy compression +- [#8661](https://github.com/influxdata/telegraf/pull/8661) `inputs.cisco_telemetry_mdt` Add support for events and class based query +- [#8861](https://github.com/influxdata/telegraf/pull/8861) `inputs.mongodb` Optionally collect top stats +- [#8979](https://github.com/influxdata/telegraf/pull/8979) `parsers.value` Add custom field name config option +- [#8544](https://github.com/influxdata/telegraf/pull/8544) `inputs.sqlserver` Add an optional health metric + +### New Input Plugins + +- [Alibaba CloudMonitor Service (Aliyun)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/aliyuncms) - contributed by @i-prudnikov +- [OpenTelemetry](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/opentelemetry) - contributed by @jacobmarble +- [Intel Data Plane Development Kit (DPDK)](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/dpdk) - contributed by @p-zak +- [KNX](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/knx_listener) - contributed by @DocLambda +- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/sql) - contributed by @srebhan + +### New Output Plugins + +- [Websocket](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/websocket) - contributed by @FZambia +- [SQL](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/sql) - contributed by @illuusio +- [AWS Cloudwatch logs](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/cloudwatch_logs) - contributed by @i-prudnikov + +### New Parser Plugins + +- [Prometheus Remote Write](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/prometheusremotewrite) - contributed by @helenosheaa +- [JSON V2](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/json_v2) - contributed by @sspaink + +### New External Plugins + +- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - contributed by @falon +- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - contributed by @jcgonnard +- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - contributed by @machinly +- [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - contributed by @SLedunois + +## v1.18.3 [2021-05-20] + +### Release Notes + +- Added FreeBSD armv7 build + +### Bugfixes + +- [#9271](https://github.com/influxdata/telegraf/pull/9271) `inputs.prometheus` Set user agent when scraping prom metrics +- [#9203](https://github.com/influxdata/telegraf/pull/9203) Migrate from soniah/gosnmp to gosnmp/gosnmp and update to 1.32.0 +- [#9169](https://github.com/influxdata/telegraf/pull/9169) `inputs.kinesis_consumer` Fix repeating parser error +- [#9130](https://github.com/influxdata/telegraf/pull/9130) `inputs.sqlserver` Remove disallowed whitespace from sqlServerRingBufferCPU query +- [#9238](https://github.com/influxdata/telegraf/pull/9238) Update hashicorp/consul/api module to v1.8.1 +- [#9235](https://github.com/influxdata/telegraf/pull/9235) Migrate from docker/libnetwork/ipvs to moby/ipvs +- [#9224](https://github.com/influxdata/telegraf/pull/9224) Update shirou/gopsutil to 3.21.3 +- [#9209](https://github.com/influxdata/telegraf/pull/9209) Update microsoft/ApplicationInsights-Go to 0.4.4 +- 
[#9190](https://github.com/influxdata/telegraf/pull/9190) Update gogo/protobuf to 1.3.2 +- [#8746](https://github.com/influxdata/telegraf/pull/8746) Update Azure/go-autorest/autorest/azure/auth to 0.5.6 and Azure/go-autorest/autorest to 0.11.17 +- [#8745](https://github.com/influxdata/telegraf/pull/8745) Update collectd.org to 0.5.0 +- [#8716](https://github.com/influxdata/telegraf/pull/8716) Update nats-io/nats.go 1.10.0 +- [#9039](https://github.com/influxdata/telegraf/pull/9039) Update golang/protobuf to v1.5.1 +- [#8937](https://github.com/influxdata/telegraf/pull/8937) Migrate from ericchiang/k8s to kubernetes/client-go + +### Features + +- [#8913](https://github.com/influxdata/telegraf/pull/8913) `outputs.elasticsearch` Add ability to enable gzip compression -#### Release Notes +## v1.18.2 [2021-04-28] - - [#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +### Bugfixes -#### Bugfixes +- [#9160](https://github.com/influxdata/telegraf/pull/9160) `processors.converter` Add support for large hexadecimal strings +- [#9195](https://github.com/influxdata/telegraf/pull/9195) `inputs.apcupsd` Fix apcupsd 'ALARMDEL' bug via forked repo +- [#9110](https://github.com/influxdata/telegraf/pull/9110) `parsers.json` Make JSON format compatible with nulls +- [#9128](https://github.com/influxdata/telegraf/pull/9128) `inputs.nfsclient` Fix nfsclient ops map to allow collection of metrics other than read and write +- [#8917](https://github.com/influxdata/telegraf/pull/8917) `inputs.snmp` Log snmpv3 auth failures +- [#8892](https://github.com/influxdata/telegraf/pull/8892) `common.shim` Accept larger inputs from scanner +- [#9045](https://github.com/influxdata/telegraf/pull/9045) `inputs.vsphere` Add MetricLookback setting to handle reporting delays in vCenter 6.7 and later +- [#9026](https://github.com/influxdata/telegraf/pull/9026) `outputs.sumologic` Carbon2 serializer: sanitize metric name +- [#9086](https://github.com/influxdata/telegraf/pull/9086) `inputs.opcua` Fix error handling - - [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix - - [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters - - [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc - - [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 - - [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir - - [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error - - [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers +## v1.18.1 [2021-04-07] +### Bugfixes + +- [#9082](https://github.com/influxdata/telegraf/pull/9082) `inputs.mysql` Fix 'binary logs' query for MySQL 8 +- [#9069](https://github.com/influxdata/telegraf/pull/9069) `inputs.tail` Add configurable option for the 'path' tag override +- [#9067](https://github.com/influxdata/telegraf/pull/9067) `inputs.nfsclient` Fix integer overflow in fields from mountstat +- [#9050](https://github.com/influxdata/telegraf/pull/9050) `inputs.snmp` Fix init when 
no mibs are installed +- [#9072](https://github.com/influxdata/telegraf/pull/9072) `inputs.ping` Always call SetPrivileged(true) in native mode +- [#9043](https://github.com/influxdata/telegraf/pull/9043) `processors.ifname` Get interface name more efficiently +- [#9056](https://github.com/influxdata/telegraf/pull/9056) `outputs.yandex_cloud_monitoring` Use correct compute metadata URL to get folder-id +- [#9048](https://github.com/influxdata/telegraf/pull/9048) `outputs.azure_monitor` Handle error when initializing the auth object +- [#8549](https://github.com/influxdata/telegraf/pull/8549) `inputs.sqlserver` Fix sqlserver_process_cpu calculation +- [#9035](https://github.com/influxdata/telegraf/pull/9035) `inputs.ipmi_sensor` Fix panic +- [#9009](https://github.com/influxdata/telegraf/pull/9009) `inputs.docker` Fix panic when parsing container stats +- [#8333](https://github.com/influxdata/telegraf/pull/8333) `inputs.exec` Don't truncate messages in debug mode +- [#8769](https://github.com/influxdata/telegraf/pull/8769) `agent` Close running outputs when reloading + +## v1.18.0 [2021-03-17] + +### Release Notes + +- Support Go version 1.16.2 +- Added support for code signing in Windows + +### Bugfixes + +- [#7312](https://github.com/influxdata/telegraf/pull/7312) `inputs.docker` CPU stats respect perdevice +- [#8397](https://github.com/influxdata/telegraf/pull/8397) `outputs.dynatrace` Dynatrace Plugin: Make conversion to counters possible / Changed large bulk handling +- [#8655](https://github.com/influxdata/telegraf/pull/8655) `inputs.sqlserver` SqlServer - fix for default server list +- [#8703](https://github.com/influxdata/telegraf/pull/8703) `inputs.docker` Use consistent container name in docker input plugin +- [#8902](https://github.com/influxdata/telegraf/pull/8902) `inputs.snmp` Fix max_repetitions signedness issues +- [#8817](https://github.com/influxdata/telegraf/pull/8817) `outputs.kinesis` outputs.kinesis - log record error count +- [#8833](https://github.com/influxdata/telegraf/pull/8833) `inputs.sqlserver` Bug Fix - SQL Server HADR queries for SQL Versions +- [#8628](https://github.com/influxdata/telegraf/pull/8628) `inputs.modbus` fix: reading multiple holding registers in modbus input plugin +- [#8885](https://github.com/influxdata/telegraf/pull/8885) `inputs.statsd` Fix statsd concurrency bug +- [#8393](https://github.com/influxdata/telegraf/pull/8393) `inputs.sqlserver` SQL Perfmon counters - synced queries from v2 to all db types +- [#8873](https://github.com/influxdata/telegraf/pull/8873) `processors.ifname` Fix mutex locking around ifname cache +- [#8720](https://github.com/influxdata/telegraf/pull/8720) `parsers.influx` fix: remove ambiguity on '\v' from line-protocol parser +- [#8678](https://github.com/influxdata/telegraf/pull/8678) `inputs.redis` Fix Redis output field type inconsistencies +- [#8953](https://github.com/influxdata/telegraf/pull/8953) `agent` Reset the flush interval timer when flush is requested or batch is ready.
+- [#8954](https://github.com/influxdata/telegraf/pull/8954) `common.kafka` Fix max open requests to one if idempotent writes is set to true +- [#8721](https://github.com/influxdata/telegraf/pull/8721) `inputs.kube_inventory` Set $HOSTIP in default URL +- [#8995](https://github.com/influxdata/telegraf/pull/8995) `inputs.sflow` fix segfaults in sflow plugin by checking if protocol headers are set +- [#8986](https://github.com/influxdata/telegraf/pull/8986) `outputs.nats` nats_output: use the configured credentials file + +### Features + +- [#8887](https://github.com/influxdata/telegraf/pull/8887) `inputs.procstat` Add PPID field to procstat input plugin +- [#8852](https://github.com/influxdata/telegraf/pull/8852) `processors.starlark` Add Starlark script for estimating Line Protocol cardinality +- [#8915](https://github.com/influxdata/telegraf/pull/8915) `inputs.cloudwatch` add proxy +- [#8910](https://github.com/influxdata/telegraf/pull/8910) `agent` Display error message on badly formatted config string array (e.g. namepass) +- [#8785](https://github.com/influxdata/telegraf/pull/8785) `inputs.diskio` Non systemd support with unittest +- [#8850](https://github.com/influxdata/telegraf/pull/8850) `inputs.snmp` Support more snmpv3 authentication protocols +- [#8813](https://github.com/influxdata/telegraf/pull/8813) `inputs.redfish` added member_id as tag (as it is a unique value) for redfish plugin and added address of the server when the status is other than 200 for better debugging +- [#8613](https://github.com/influxdata/telegraf/pull/8613) `inputs.phpfpm` Support exclamation mark to create non-matching list in tail plugin +- [#8179](https://github.com/influxdata/telegraf/pull/8179) `inputs.statsd` Add support for datadog distributions metric +- [#8803](https://github.com/influxdata/telegraf/pull/8803) `agent` Add default retry for load config via url +- [#8816](https://github.com/influxdata/telegraf/pull/8816) Code Signing for Windows +- [#8772](https://github.com/influxdata/telegraf/pull/8772) `processors.starlark` Allow to provide constants to a starlark script +- [#8749](https://github.com/influxdata/telegraf/pull/8749) `outputs.newrelic` Add HTTP proxy setting to New Relic output plugin +- [#8543](https://github.com/influxdata/telegraf/pull/8543) `inputs.elasticsearch` Add configurable number of 'most recent' date-stamped indices to gather in Elasticsearch input +- [#8675](https://github.com/influxdata/telegraf/pull/8675) `processors.starlark` Add Starlark parsing example of nested JSON +- [#8762](https://github.com/influxdata/telegraf/pull/8762) `inputs.prometheus` Optimize for bigger kubernetes clusters (500+ pods) +- [#8950](https://github.com/influxdata/telegraf/pull/8950) `inputs.teamspeak` Teamspeak input plugin query clients +- [#8849](https://github.com/influxdata/telegraf/pull/8849) `inputs.sqlserver` Filter data out from system databases for Azure SQL DB only + +### New Inputs + +- [Beat Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/beat) - Contributed by @nferch +- [CS:GO Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/csgo) - Contributed by @oofdog +- [Directory Monitoring Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/directory_monitor) - Contributed by @InfluxData +- [RavenDB Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/ravendb) - Contributed by @ml054 and @bartoncasey +- [NFS Input
Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/nfsclient) - Contributed by @pmoranga + +### New Outputs + +- [Grafana Loki Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/loki) - Contributed by @Eraac +- [Google BigQuery Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/bigquery) - Contributed by @gkatzioura +- [Sensu Output Plugin](https://github.com/influxdata/telegraf/blob/master/plugins/outputs/sensu) - Contributed by @calebhailey +- [SignalFX Output Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/signalfx) - Contributed by @keitwb + +### New Aggregators + +- [Derivative Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/derivative) - Contributed by @KarstenSchnitter +- [Quantile Aggregator Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/aggregators/quantile) - Contributed by @srebhan + +### New Processors + +- [AWS EC2 Metadata Processor Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/processors/aws/ec2) - Contributed by @pmalek-sumo + +### New Parsers + +- [XML Parser Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/parsers/xml) - Contributed by @srebhan + +### New Serializers + +- [MessagePack Serializer Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/serializers/msgpack) - Contributed by @dialogbox + +### New External Plugins + +- [GeoIP Processor Plugin](https://github.com/a-bali/telegraf-geoip) - Contributed by @a-bali +- [Plex Webhook Input Plugin](https://github.com/russorat/telegraf-webhooks-plex) - Contributed by @russorat +- [SMCIPMITool Input Plugin](https://github.com/jhpope/smc_ipmi) - Contributed by @jhpope + +## v1.17.3 [2021-02-17] + +### Bugfixes + +- [#7316](https://github.com/influxdata/telegraf/pull/7316) `inputs.filestat` plugins/filestat: Skip missing files +- [#8868](https://github.com/influxdata/telegraf/pull/8868) Update to Go 1.15.8 +- [#8744](https://github.com/influxdata/telegraf/pull/8744) Bump github.com/gopcua/opcua from 0.1.12 to 0.1.13 +- [#8657](https://github.com/influxdata/telegraf/pull/8657) `outputs.warp10` outputs/warp10: url encode comma in tags value +- [#8824](https://github.com/influxdata/telegraf/pull/8824) `inputs.x509_cert` inputs.x509_cert: Fix timeout issue +- [#8821](https://github.com/influxdata/telegraf/pull/8821) `inputs.mqtt_consumer` Fix reconnection issues mqtt +- [#8775](https://github.com/influxdata/telegraf/pull/8775) `outputs.influxdb` Validate the response from InfluxDB after writing/creating a database to avoid json parsing panics/errors +- [#8804](https://github.com/influxdata/telegraf/pull/8804) `inputs.snmp` Expose v4/v6-only connection-schemes through GosnmpWrapper +- [#8838](https://github.com/influxdata/telegraf/pull/8838) `agent` fix issue with reading flush_jitter output from config +- [#8839](https://github.com/influxdata/telegraf/pull/8839) `inputs.ping` fixes Sort and timeout around deadline +- [#8787](https://github.com/influxdata/telegraf/pull/8787) `inputs.ping` Update README for inputs.ping with correct cmd for native ping on Linux +- [#8771](https://github.com/influxdata/telegraf/pull/8771) Update go-ping to latest version + +## v1.17.2 [2021-01-28] + +### Bugfixes + +- [#8770](https://github.com/influxdata/telegraf/pull/8770) `inputs.ping` Set interface for native +- [#8764](https://github.com/influxdata/telegraf/pull/8764) `inputs.ping` Resolve regression, re-add missing function +
+## v1.17.1 [2021-01-27] + +### Release Notes + +Included a few more changes that add configuration options to plugins as it's been a while since the last release + +- [#8335](https://github.com/influxdata/telegraf/pull/8335) `inputs.ipmi_sensor` Add setting to enable caching in ipmitool +- [#8616](https://github.com/influxdata/telegraf/pull/8616) Add Event Log support for Windows +- [#8602](https://github.com/influxdata/telegraf/pull/8602) `inputs.postgresql_extensible` Add timestamp column support to postgresql_extensible +- [#8627](https://github.com/influxdata/telegraf/pull/8627) `parsers.csv` Added ability to define skip values in csv parser +- [#8055](https://github.com/influxdata/telegraf/pull/8055) `outputs.http` outputs/http: add option to control idle connection timeout +- [#7897](https://github.com/influxdata/telegraf/pull/7897) `common.tls` common/tls: Allow specifying SNI hostnames +- [#8541](https://github.com/influxdata/telegraf/pull/8541) `inputs.snmp` Extended the internal snmp wrapper to support AES192, AES192C, AES256, and AES256C +- [#6165](https://github.com/influxdata/telegraf/pull/6165) `inputs.procstat` Provide method to include core count when reporting cpu_usage in procstat input +- [#8287](https://github.com/influxdata/telegraf/pull/8287) `inputs.jenkins` Add support for an inclusive job list in Jenkins plugin +- [#8524](https://github.com/influxdata/telegraf/pull/8524) `inputs.ipmi_sensor` Add hex_key parameter for IPMI input plugin connection + +### Bugfixes + +- [#8662](https://github.com/influxdata/telegraf/pull/8662) `outputs.influxdb_v2` [outputs.influxdb_v2] add exponential backoff, and respect client error responses +- [#8748](https://github.com/influxdata/telegraf/pull/8748) `outputs.elasticsearch` Fix issue with elasticsearch output being really noisy about some errors +- [#7533](https://github.com/influxdata/telegraf/pull/7533) `inputs.zookeeper` improve mntr regex to match user specific keys. +- [#7967](https://github.com/influxdata/telegraf/pull/7967) `inputs.lustre2` Fix crash in lustre2 input plugin, when field name and value +- [#8673](https://github.com/influxdata/telegraf/pull/8673) Update grok-library to v1.0.1 with dots and dash-patterns fixed. +- [#8679](https://github.com/influxdata/telegraf/pull/8679) `inputs.ping` Use go-ping for "native" execution in Ping plugin +- [#8741](https://github.com/influxdata/telegraf/pull/8741) `inputs.x509_cert` fix x509 cert timeout issue +- [#8714](https://github.com/influxdata/telegraf/pull/8714) Bump github.com/nsqio/go-nsq from 1.0.7 to 1.0.8 +- [#8715](https://github.com/influxdata/telegraf/pull/8715) Bump github.com/Shopify/sarama from 1.27.1 to 1.27.2 +- [#8712](https://github.com/influxdata/telegraf/pull/8712) Bump github.com/newrelic/newrelic-telemetry-sdk-go from 0.2.0 to 0.5.1 +- [#8659](https://github.com/influxdata/telegraf/pull/8659) `inputs.gnmi` GNMI plugin should not take off the first character of field keys when no 'alias path' exists. +- [#8609](https://github.com/influxdata/telegraf/pull/8609) `inputs.webhooks` Use the 'measurement' json field from the particle webhook as the measurement name, or if it's blank, use the 'name' field of the event's json. +- [#8658](https://github.com/influxdata/telegraf/pull/8658) `inputs.procstat` Procstat input plugin should use the same timestamp in all metrics in the same Gather() cycle.
+- [#8391](https://github.com/influxdata/telegraf/pull/8391) `aggregators.merge` Optimize SeriesGrouper & aggregators.merge +- [#8545](https://github.com/influxdata/telegraf/pull/8545) `inputs.prometheus` Using mime-type in prometheus parser to handle protocol-buffer responses +- [#8588](https://github.com/influxdata/telegraf/pull/8588) `inputs.snmp` Input SNMP plugin - upgrade gosnmp library to version 1.29.0 +- [#8502](https://github.com/influxdata/telegraf/pull/8502) `inputs.http_listener_v2` Fix Stop() bug when plugin fails to start + +### New External Plugins + +- [#8646](https://github.com/influxdata/telegraf/pull/8646) [Open Hardware Monitoring](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) Input Plugin + +## v1.17.0 [2020-12-18] + +### Release Notes + +- Starlark plugins can now store state between runs using a global state variable. This lets you make custom aggregators as well as custom processors that are state-aware. +- New input plugins: Riemann-Protobuf Listener, Intel PowerStat +- New output plugins: Yandex.Cloud monitoring, Logz.io +- New parser plugin: Prometheus +- New serializer: Prometheus remote write + +### Bugfixes + +- [#8505](https://github.com/influxdata/telegraf/pull/8505) `inputs.vsphere` Fixed misspelled check for datacenter +- [#8499](https://github.com/influxdata/telegraf/pull/8499) `processors.execd` Adding support for new lines in influx line protocol fields. +- [#8254](https://github.com/influxdata/telegraf/pull/8254) `serializers.carbon2` Fix carbon2 tests +- [#8498](https://github.com/influxdata/telegraf/pull/8498) `inputs.http_response` fixed network test +- [#8414](https://github.com/influxdata/telegraf/pull/8414) `inputs.bcache` Fix tests for Windows - part 1 +- [#8577](https://github.com/influxdata/telegraf/pull/8577) `inputs.ping` fix potential issue with race condition +- [#8562](https://github.com/influxdata/telegraf/pull/8562) `inputs.mqtt_consumer` fix issue with mqtt concurrent map write +- [#8574](https://github.com/influxdata/telegraf/pull/8574) `inputs.ecs` Remove duplicated field "revision" from ecs_task because it's already defined as a tag there +- [#8551](https://github.com/influxdata/telegraf/pull/8551) `inputs.socket_listener` fix crash when socket_listener receiving invalid data +- [#8564](https://github.com/influxdata/telegraf/pull/8564) `parsers.graphite` Graphite tags parser +- [#8472](https://github.com/influxdata/telegraf/pull/8472) `inputs.kube_inventory` Fixing issue with missing metrics when pod has only pending containers +- [#8542](https://github.com/influxdata/telegraf/pull/8542) `inputs.aerospike` fix edge case in aerospike plugin where an expected hex string was converted to integer if all digits +- [#8512](https://github.com/influxdata/telegraf/pull/8512) `inputs.kube_inventory` Update string parsing of allocatable cpu cores in kube_inventory + +### Features + +- [#8038](https://github.com/influxdata/telegraf/pull/8038) `inputs.jenkins` feat: add build number field to jenkins_job measurement +- [#7345](https://github.com/influxdata/telegraf/pull/7345) `inputs.ping` Add percentiles to the ping plugin +- [#8369](https://github.com/influxdata/telegraf/pull/8369) `inputs.sqlserver` Added tags for monitoring readable secondaries for Azure SQL MI +- [#8379](https://github.com/influxdata/telegraf/pull/8379) `inputs.sqlserver` SQL Server HA/DR Availability Group queries +- [#8520](https://github.com/influxdata/telegraf/pull/8520) Add initialization example to mock-plugin.
+- [#8426](https://github.com/influxdata/telegraf/pull/8426) `inputs.snmp` Add support to convert snmp hex strings to integers +- [#8509](https://github.com/influxdata/telegraf/pull/8509) `inputs.statsd` Add configurable Max TTL duration for statsd input plugin entries +- [#8508](https://github.com/influxdata/telegraf/pull/8508) `inputs.bind` Add configurable timeout to bind input plugin http call +- [#8368](https://github.com/influxdata/telegraf/pull/8368) `inputs.sqlserver` Added is_primary_replica for monitoring readable secondaries for Azure SQL DB +- [#8462](https://github.com/influxdata/telegraf/pull/8462) `inputs.sqlserver` sqlAzureMIRequests - remove duplicate column [session_db_name] +- [#8464](https://github.com/influxdata/telegraf/pull/8464) `inputs.sqlserver` Add column measurement_db_type to output of all queries if not empty +- [#8389](https://github.com/influxdata/telegraf/pull/8389) `inputs.opcua` Add node groups to opcua input plugin +- [#8432](https://github.com/influxdata/telegraf/pull/8432) add support for linux/ppc64le +- [#8474](https://github.com/influxdata/telegraf/pull/8474) `inputs.modbus` Add FLOAT64-IEEE support to inputs.modbus (#8361) (by @Nemecsek) +- [#8447](https://github.com/influxdata/telegraf/pull/8447) `processors.starlark` Add the shared state to the global scope to get previous data +- [#8383](https://github.com/influxdata/telegraf/pull/8383) `inputs.zfs` Add dataset metrics to zfs input +- [#8429](https://github.com/influxdata/telegraf/pull/8429) `outputs.nats` Added "name" parameter to NATS output plugin +- [#8477](https://github.com/influxdata/telegraf/pull/8477) `inputs.http` proxy support for http input +- [#8466](https://github.com/influxdata/telegraf/pull/8466) `inputs.snmp` Translate snmp field values +- [#8435](https://github.com/influxdata/telegraf/pull/8435) `common.kafka` Enable kafka zstd compression and idempotent writes +- [#8056](https://github.com/influxdata/telegraf/pull/8056) `inputs.monit` Add response_time to monit plugin +- [#8446](https://github.com/influxdata/telegraf/pull/8446) update to go 1.15.5 +- [#8428](https://github.com/influxdata/telegraf/pull/8428) `aggregators.basicstats` Add rate and interval to the basicstats aggregator plugin +- [#8575](https://github.com/influxdata/telegraf/pull/8575) `inputs.win_services` Added Glob pattern matching for "Windows Services" plugin +- [#6132](https://github.com/influxdata/telegraf/pull/6132) `inputs.mysql` Add per user metrics to mysql input +- [#8500](https://github.com/influxdata/telegraf/pull/8500) `inputs.github` [inputs.github] Add query of pull-request statistics +- [#8598](https://github.com/influxdata/telegraf/pull/8598) `processors.enum` Allow globs (wildcards) in config for tags/fields in enum processor +- [#8590](https://github.com/influxdata/telegraf/pull/8590) `inputs.ethtool` [ethtool] interface_up field added +- [#8579](https://github.com/influxdata/telegraf/pull/8579) `parsers.json` Add wildcard tags json parser support + +### New Parser Plugins + +- [#7778](https://github.com/influxdata/telegraf/pull/7778) `parsers.prometheus` Add a parser plugin for prometheus + +### New Serializer Plugins + +- [#8360](https://github.com/influxdata/telegraf/pull/8360) `serializers.prometheusremotewrite` Add prometheus remote write serializer + +### New Input Plugins + +- [#8163](https://github.com/influxdata/telegraf/pull/8163) `inputs.riemann` Support Riemann-Protobuf Listener +- [#8488](https://github.com/influxdata/telegraf/pull/8488) `inputs.intel_powerstat` New Intel
PowerStat input plugin + +### New Output Plugins + +- [#8296](https://github.com/influxdata/telegraf/pull/8296) `outputs.yandex_cloud_monitoring` #8295 Initial Yandex.Cloud monitoring +- [#8202](https://github.com/influxdata/telegraf/pull/8202) `outputs.logzio` A new Logz.io output plugin + +## v1.16.3 [2020-12-01] + +### Bugfixes + +- [#8483](https://github.com/influxdata/telegraf/pull/8483) `inputs.gnmi` Log SubscribeResponse_Error message and code. #8482 +- [#7987](https://github.com/influxdata/telegraf/pull/7987) update godirwalk to v1.16.1 +- [#8438](https://github.com/influxdata/telegraf/pull/8438) `processors.starlark` Starlark example dropbytype +- [#8468](https://github.com/influxdata/telegraf/pull/8468) `inputs.sqlserver` Fix typo in column name +- [#8461](https://github.com/influxdata/telegraf/pull/8461) `inputs.phpfpm` [php-fpm] Fix possible "index out of range" +- [#8444](https://github.com/influxdata/telegraf/pull/8444) `inputs.apcupsd` Update mdlayher/apcupsd dependency +- [#8439](https://github.com/influxdata/telegraf/pull/8439) `processors.starlark` Show how to return a custom error with the Starlark processor +- [#8440](https://github.com/influxdata/telegraf/pull/8440) `parsers.csv` keep field name as is for csv timestamp column +- [#8436](https://github.com/influxdata/telegraf/pull/8436) `inputs.nvidia_smi` Add DriverVersion and CUDA Version to output +- [#8423](https://github.com/influxdata/telegraf/pull/8423) `processors.starlark` Show how to return several metrics with the Starlark processor +- [#8408](https://github.com/influxdata/telegraf/pull/8408) `processors.starlark` Support logging in starlark +- [#8315](https://github.com/influxdata/telegraf/pull/8315) add kinesis output to external plugins list +- [#8406](https://github.com/influxdata/telegraf/pull/8406) `outputs.wavefront` #8405 add non-retryable debug logging +- [#8404](https://github.com/influxdata/telegraf/pull/8404) `outputs.wavefront` Wavefront output should distinguish between retryable and non-retryable errors +- [#8401](https://github.com/influxdata/telegraf/pull/8401) `processors.starlark` Allow to catch errors that occur in the apply function + +## v1.16.2 [2020-11-13] + +### Bugfixes + +- [#8400](https://github.com/influxdata/telegraf/pull/8400) `parsers.csv` Fix parsing of multiple files with different headers (#6318). 
+- [#8326](https://github.com/influxdata/telegraf/pull/8326) `inputs.proxmox` proxmox: ignore QEMU templates and iron out a few bugs +- [#7991](https://github.com/influxdata/telegraf/pull/7991) `inputs.systemd_units` systemd_units: add --plain to command invocation (#7990) +- [#8307](https://github.com/influxdata/telegraf/pull/8307) fix links in external plugins readme +- [#8370](https://github.com/influxdata/telegraf/pull/8370) `inputs.redis` Fix minor typos in readmes +- [#8374](https://github.com/influxdata/telegraf/pull/8374) `inputs.smart` Fix SMART plugin to recognize all devices from config +- [#8288](https://github.com/influxdata/telegraf/pull/8288) `inputs.redfish` Add OData-Version header to requests +- [#8357](https://github.com/influxdata/telegraf/pull/8357) `inputs.vsphere` Prydin issue 8169 +- [#8356](https://github.com/influxdata/telegraf/pull/8356) `inputs.sqlserver` On-prem fix for #8324 +- [#8165](https://github.com/influxdata/telegraf/pull/8165) `outputs.wavefront` [output.wavefront] Introduced "immediate_flush" flag +- [#7938](https://github.com/influxdata/telegraf/pull/7938) `inputs.gnmi` added support for bytes encoding +- [#8337](https://github.com/influxdata/telegraf/pull/8337) `inputs.dcos` Update jwt-go module to address CVE-2020-26160 +- [#8350](https://github.com/influxdata/telegraf/pull/8350) `inputs.ras` fix plugins/input/ras test +- [#8329](https://github.com/influxdata/telegraf/pull/8329) `outputs.dynatrace` #8328 Fixed a bug with the state map in Dynatrace Plugin + +## v1.16.1 [2020-10-28] + +### Release Notes + +- [#8318](https://github.com/influxdata/telegraf/pull/8318) `common.kafka` kafka sasl-mechanism auth support for SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + +### Bugfixes + +- [#8331](https://github.com/influxdata/telegraf/pull/8331) `inputs.sqlserver` SQL Server Azure PerfCounters Fix +- [#8325](https://github.com/influxdata/telegraf/pull/8325) `inputs.sqlserver` SQL Server - PerformanceCounters - removed synthetic counters +- [#8324](https://github.com/influxdata/telegraf/pull/8324) `inputs.sqlserver` SQL Server - server_properties added sql_version_desc +- [#8317](https://github.com/influxdata/telegraf/pull/8317) `inputs.ras` Disable RAS input plugin on specific Linux architectures: mips64, mips64le, ppc64le, riscv64 +- [#8309](https://github.com/influxdata/telegraf/pull/8309) `inputs.processes` processes: fix issue with stat no such file/dir +- [#8308](https://github.com/influxdata/telegraf/pull/8308) `inputs.win_perf_counters` fix issue with PDH_CALC_NEGATIVE_DENOMINATOR error +- [#8306](https://github.com/influxdata/telegraf/pull/8306) `inputs.ras` RAS plugin - fix for too many open files handlers ## v1.16.0 [2020-10-21] -#### Release Notes - - - New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) - - [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck - - [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 - - [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines run to external plugins with execd - - [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release - - [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code exampels for the Starlark processor - 
-#### Features - - - [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order - - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input - - [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache" - - [#7875](https://github.com/influxdata/telegraf/pull/7875) `inputs.consul` input consul - added metric_version flag - - [#7894](https://github.com/influxdata/telegraf/pull/7894) `inputs.cloudwatch` Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter - - [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` add additional metrics to clickhouse input plugin - - [#7934](https://github.com/influxdata/telegraf/pull/7934) `inputs.sqlserver` Database_type config to Split up sql queries by engine type - - [#8018](https://github.com/influxdata/telegraf/pull/8018) `processors.ifname` Add addTag debugging in ifname plugin - - [#8019](https://github.com/influxdata/telegraf/pull/8019) `outputs.elasticsearch` added force_document_id option to ES output enable resend data and avoiding duplicated ES documents - - [#8025](https://github.com/influxdata/telegraf/pull/8025) `inputs.aerospike` Add set, and histogram reporting to aerospike telegraf plugin - - [#8082](https://github.com/influxdata/telegraf/pull/8082) `inputs.snmp` Add agent host tag configuration option - - [#8113](https://github.com/influxdata/telegraf/pull/8113) `inputs.smart` Add more missing NVMe attributes to smart plugin - - [#8120](https://github.com/influxdata/telegraf/pull/8120) `inputs.sqlserver` Added more performance counters to SqlServer input plugin - - [#8127](https://github.com/influxdata/telegraf/pull/8127) `agent` Sort plugin name lists for output - - [#8132](https://github.com/influxdata/telegraf/pull/8132) `outputs.sumologic` Sumo Logic output plugin: carbon2 default to include field in metric - - [#8133](https://github.com/influxdata/telegraf/pull/8133) `inputs.influxdb_v2_listener` influxdb_v2_listener - add /ready route - - [#8168](https://github.com/influxdata/telegraf/pull/8168) `processors.starlark` add json parsing support to starlark - - [#8186](https://github.com/influxdata/telegraf/pull/8186) `inputs.sqlserver` New sql server queries (Azure) - - [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag - - [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) - - [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands - - [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 - - [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code - - [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting - -#### Bugfixes - - - [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config - - [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags - - [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed - - [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors - - 
[#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements - - [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored - - [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF - - [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds - - [#7909](https://github.com/influxdata/telegraf/pull/7909) fixes issue with rpm /var/log/telegraf permissions - - [#7918](https://github.com/influxdata/telegraf/pull/7918) `inputs.net` fix broken link to proc.c - - [#7927](https://github.com/influxdata/telegraf/pull/7927) `inputs.tail` Fix tail following on EOF - - [#8005](https://github.com/influxdata/telegraf/pull/8005) Fix docker-image make target - - [#8039](https://github.com/influxdata/telegraf/pull/8039) `serializers.splunkmetric` Remove Event field as it is causing issues with pre-trained source types - - [#8048](https://github.com/influxdata/telegraf/pull/8048) `inputs.jenkins` Multiple escaping occurs on Jenkins URLs at certain folder depth - - [#8071](https://github.com/influxdata/telegraf/pull/8071) `inputs.kubernetes` add missing error check for HTTP req failure - - [#8145](https://github.com/influxdata/telegraf/pull/8145) `processors.execd` Increased the maximum serialized metric size in line protocol - - [#8159](https://github.com/influxdata/telegraf/pull/8159) `outputs.dynatrace` Dynatrace Output: change handling of monotonic counters - - [#8176](https://github.com/influxdata/telegraf/pull/8176) fix panic on streaming processers using logging - - [#8177](https://github.com/influxdata/telegraf/pull/8177) `parsers.influx` fix: plugins/parsers/influx: avoid ParseError.Error panic - - [#8199](https://github.com/influxdata/telegraf/pull/8199) `inputs.docker` Fix vulnerabilities found in BDBA scan - - [#8200](https://github.com/influxdata/telegraf/pull/8200) `inputs.sqlserver` Fixed Query mapping - - [#8201](https://github.com/influxdata/telegraf/pull/8201) `outputs.sumologic` Fix carbon2 serializer not falling through to field separate when carbon2_format field is unset - - [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression - - [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 - - [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform - - [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared - - [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd - -#### New Input Plugins - - - [influxdb_v2_listener](/plugins/inputs/influxdb_v2_listener/README.md) Influxdb v2 listener - Contributed by @magichair - - [intel_rdt](/plugins/inputs/intel_rdt/README.md) New input plugin for Intel RDT (Intel Resource Director Technology) - Contributed by @p-zak - - [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode - - [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData - - [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient - - [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak - - 
[win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv - -#### New Output Plugins - - - [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue - - [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo - - [timestream](/plugins/outputs/timestream) Timestream Output Plugin - Contributed by @piotrwest - -#### New External Plugins - - See [EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) for a full list of external plugins - - - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - - [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos - - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. - - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics. +### Release Notes -## v1.15.4 [2020-10-20] +- New [code examples](/plugins/processors/starlark/testdata) for the [Starlark processor](/plugins/processors/starlark/README.md) +- [#7920](https://github.com/influxdata/telegraf/pull/7920) `inputs.rabbitmq` remove deprecated healthcheck +- [#7953](https://github.com/influxdata/telegraf/pull/7953) Add details to connect to InfluxDB OSS 2 and Cloud 2 +- [#8054](https://github.com/influxdata/telegraf/pull/8054) add guidelines to run external plugins with execd +- [#8198](https://github.com/influxdata/telegraf/pull/8198) `inputs.influxdb_v2_listener` change default influxdb port from 9999 to 8086 to match OSS 2.0 release +- [starlark](https://github.com/influxdata/telegraf/tree/release-1.16/plugins/processors/starlark/testdata) `processors.starlark` add various code examples for the Starlark processor + +### Features + +- [#7814](https://github.com/influxdata/telegraf/pull/7814) `agent` Send metrics in FIFO order +- [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input +- [#7870](https://github.com/influxdata/telegraf/pull/7870) `inputs.mongodb` Added new metric "pages written from cache" +- [#7875](https://github.com/influxdata/telegraf/pull/7875) `inputs.consul` input consul - added metric_version flag +- [#7894](https://github.com/influxdata/telegraf/pull/7894) `inputs.cloudwatch` Implement AWS CloudWatch Input Plugin ListMetrics API calls to use Active Metric Filter +- [#7904](https://github.com/influxdata/telegraf/pull/7904) `inputs.clickhouse` add additional metrics to clickhouse input plugin +- [#7934](https://github.com/influxdata/telegraf/pull/7934) `inputs.sqlserver` Database_type config to Split up sql queries by engine type +- [#8018](https://github.com/influxdata/telegraf/pull/8018) `processors.ifname` Add addTag debugging in ifname plugin +- [#8019](https://github.com/influxdata/telegraf/pull/8019) `outputs.elasticsearch` added force_document_id option to ES output enable resend data and avoiding duplicated ES documents +- [#8025](https://github.com/influxdata/telegraf/pull/8025) `inputs.aerospike` Add set, and histogram reporting to aerospike
telegraf plugin +- [#8082](https://github.com/influxdata/telegraf/pull/8082) `inputs.snmp` Add agent host tag configuration option +- [#8113](https://github.com/influxdata/telegraf/pull/8113) `inputs.smart` Add more missing NVMe attributes to smart plugin +- [#8120](https://github.com/influxdata/telegraf/pull/8120) `inputs.sqlserver` Added more performance counters to SqlServer input plugin +- [#8127](https://github.com/influxdata/telegraf/pull/8127) `agent` Sort plugin name lists for output +- [#8132](https://github.com/influxdata/telegraf/pull/8132) `outputs.sumologic` Sumo Logic output plugin: carbon2 default to include field in metric +- [#8133](https://github.com/influxdata/telegraf/pull/8133) `inputs.influxdb_v2_listener` influxdb_v2_listener - add /ready route +- [#8168](https://github.com/influxdata/telegraf/pull/8168) `processors.starlark` add json parsing support to starlark +- [#8186](https://github.com/influxdata/telegraf/pull/8186) `inputs.sqlserver` New sql server queries (Azure) +- [#8189](https://github.com/influxdata/telegraf/pull/8189) `inputs.snmp_trap` If the community string is available, add it as a tag +- [#8190](https://github.com/influxdata/telegraf/pull/8190) `inputs.tail` Semigroupoid multiline (#8167) +- [#8196](https://github.com/influxdata/telegraf/pull/8196) `inputs.redis` add functionality to get values from redis commands +- [#8220](https://github.com/influxdata/telegraf/pull/8220) `build` update to Go 1.15 +- [#8032](https://github.com/influxdata/telegraf/pull/8032) `inputs.http_response` http_response: match on status code +- [#8172](https://github.com/influxdata/telegraf/pull/8172) `inputs.sqlserver` New sql server queries (on-prem) - refactoring and formatting + +### Bugfixes + +- [#7816](https://github.com/influxdata/telegraf/pull/7816) `shim` fix bug with loading plugins in shim with no config +- [#7818](https://github.com/influxdata/telegraf/pull/7818) `build` Fix darwin package build flags +- [#7819](https://github.com/influxdata/telegraf/pull/7819) `inputs.tail` Close file to ensure it has been flushed +- [#7853](https://github.com/influxdata/telegraf/pull/7853) Initialize aggregation processors +- [#7865](https://github.com/influxdata/telegraf/pull/7865) `common.shim` shim logger improvements +- [#7867](https://github.com/influxdata/telegraf/pull/7867) `inputs.execd` fix issue with execd restart_delay being ignored +- [#7872](https://github.com/influxdata/telegraf/pull/7872) `inputs.gnmi` Recv next message after send returns EOF +- [#7877](https://github.com/influxdata/telegraf/pull/7877) Fix arch name in deb/rpm builds +- [#7909](https://github.com/influxdata/telegraf/pull/7909) fixes issue with rpm /var/log/telegraf permissions +- [#7918](https://github.com/influxdata/telegraf/pull/7918) `inputs.net` fix broken link to proc.c +- [#7927](https://github.com/influxdata/telegraf/pull/7927) `inputs.tail` Fix tail following on EOF +- [#8005](https://github.com/influxdata/telegraf/pull/8005) Fix docker-image make target +- [#8039](https://github.com/influxdata/telegraf/pull/8039) `serializers.splunkmetric` Remove Event field as it is causing issues with pre-trained source types +- [#8048](https://github.com/influxdata/telegraf/pull/8048) `inputs.jenkins` Multiple escaping occurs on Jenkins URLs at certain folder depth +- [#8071](https://github.com/influxdata/telegraf/pull/8071) `inputs.kubernetes` add missing error check for HTTP req failure +- [#8145](https://github.com/influxdata/telegraf/pull/8145) `processors.execd` Increased the maximum serialized 
metric size in line protocol +- [#8159](https://github.com/influxdata/telegraf/pull/8159) `outputs.dynatrace` Dynatrace Output: change handling of monotonic counters +- [#8176](https://github.com/influxdata/telegraf/pull/8176) fix panic on streaming processors using logging +- [#8177](https://github.com/influxdata/telegraf/pull/8177) `parsers.influx` fix: plugins/parsers/influx: avoid ParseError.Error panic +- [#8199](https://github.com/influxdata/telegraf/pull/8199) `inputs.docker` Fix vulnerabilities found in BDBA scan +- [#8200](https://github.com/influxdata/telegraf/pull/8200) `inputs.sqlserver` Fixed Query mapping +- [#8201](https://github.com/influxdata/telegraf/pull/8201) `outputs.sumologic` Fix carbon2 serializer not falling through to field separate when carbon2_format field is unset +- [#8210](https://github.com/influxdata/telegraf/pull/8210) update gopsutil: fix procstat performance regression +- [#8162](https://github.com/influxdata/telegraf/pull/8162) Fix bool serialization when using carbon2 +- [#8240](https://github.com/influxdata/telegraf/pull/8240) Fix bugs found by LGTM analysis platform +- [#8251](https://github.com/influxdata/telegraf/pull/8251) `outputs.dynatrace` Dynatrace Output Plugin: Fixed behaviour when state map is cleared +- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd + +### New Input Plugins + +- [influxdb_v2_listener](/plugins/inputs/influxdb_v2_listener/README.md) Influxdb v2 listener - Contributed by @magichair +- [intel_rdt](/plugins/inputs/intel_rdt/README.md) New input plugin for Intel RDT (Intel Resource Director Technology) - Contributed by @p-zak +- [nsd](/plugins/inputs/nsd/README.md) add nsd input plugin - Contributed by @gearnode +- [opcua](/plugins/inputs/opcua/README.md) Add OPC UA input plugin - Contributed by InfluxData +- [proxmox](/plugins/inputs/proxmox/README.md) Proxmox plugin - Contributed by @effitient +- [ras](/plugins/inputs/ras/README.md) New input plugin for RAS (Reliability, Availability and Serviceability) - Contributed by @p-zak +- [win_eventlog](/plugins/inputs/win_eventlog/README.md) Windows eventlog input plugin - Contributed by @simnv + +### New Output Plugins + +- [dynatrace](/plugins/outputs/dynatrace/README.md) Dynatrace output plugin - Contributed by @thschue +- [sumologic](/plugins/outputs/sumologic/README.md) Sumo Logic output plugin - Contributed by @pmalek-sumo +- [timestream](/plugins/outputs/timestream) Timestream Output Plugin - Contributed by @piotrwest + +### New External Plugins + +See [EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md) for a full list of external plugins + +- [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. +- [youtube-telegraf-plugin](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather view and subscriber stats from your youtube videos +- [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics.
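+
+The v1.16.0 feature list above adds JSON parsing support to the Starlark processor ([#8168](https://github.com/influxdata/telegraf/pull/8168)). A minimal, illustrative sketch only, assuming a metric that carries a flat JSON object of scalars in a hypothetical `payload` field and the `json.star` module loading described in the processor's README:
+
+```toml
+[[processors.starlark]]
+  source = '''
+load("json.star", "json")
+
+def apply(metric):
+    # Illustrative only: expand a flat JSON object stored in the
+    # hypothetical "payload" field into individual metric fields.
+    data = json.decode(metric.fields["payload"])
+    for k, v in data.items():
+        metric.fields[k] = v
+    metric.fields.pop("payload")
+    return metric
+'''
+```
+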
-## v1.15.3 [2020-09-11] +## v1.15.4 [2020-10-20] + +### Bugfixes -#### Release Notes +- [#8274](https://github.com/influxdata/telegraf/pull/8274) `common.shim` fix issue with loading processor config from execd +- [#8176](https://github.com/influxdata/telegraf/pull/8176) `agent` fix panic on streaming processors using logging - - Many documentation updates - - New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) + +## v1.15.3 [2020-09-11] -#### Bugfixes +### Release Notes - - [#7999](https://github.com/influxdata/telegraf/pull/7999) `agent` fix minor agent error message race condition - - [#8051](https://github.com/influxdata/telegraf/pull/8051) `build` fix docker build. update dockerfiles to Go 1.14 - - [#8052](https://github.com/influxdata/telegraf/pull/8052) `shim` fix bug in shim logger affecting AddError - - [#7996](https://github.com/influxdata/telegraf/pull/7996) `shim` fix issue with shim use of config.Duration - - [#8006](https://github.com/influxdata/telegraf/pull/8006) `inputs.eventhub_consumer` Fix string to int conversion in eventhub consumer - - [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive - - [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input - - [#7861](https://github.com/influxdata/telegraf/pull/7861) `inputs.ping` Fix Ping Input plugin for FreeBSD's ping6 - - [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec - - [#8026](https://github.com/influxdata/telegraf/pull/8026) `inputs.vsphere` vSphere Fixed missing clustername issue 7878 - - [#8020](https://github.com/influxdata/telegraf/pull/8020) `processors.starlark` improve the quality of starlark docs by executing them as tests - - [#7976](https://github.com/influxdata/telegraf/pull/7976) `processors.starlark` add pivot example for starlark processor - - [#7134](https://github.com/influxdata/telegraf/pull/7134) `outputs.application_insights` Added the ability to set the endpoint url - - [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` fix JSON handling of values NaN and Inf +- Many documentation updates +- New [code examples](https://github.com/influxdata/telegraf/tree/master/plugins/processors/starlark/testdata) for the [Starlark processor](https://github.com/influxdata/telegraf/blob/master/plugins/processors/starlark/README.md) + +### Bugfixes + +- [#7999](https://github.com/influxdata/telegraf/pull/7999) `agent` fix minor agent error message race condition +- [#8051](https://github.com/influxdata/telegraf/pull/8051) `build` fix docker build.
update dockerfiles to Go 1.14 +- [#8052](https://github.com/influxdata/telegraf/pull/8052) `shim` fix bug in shim logger affecting AddError +- [#7996](https://github.com/influxdata/telegraf/pull/7996) `shim` fix issue with shim use of config.Duration +- [#8006](https://github.com/influxdata/telegraf/pull/8006) `inputs.eventhub_consumer` Fix string to int conversion in eventhub consumer +- [#7986](https://github.com/influxdata/telegraf/pull/7986) `inputs.http_listener_v2` make http header tags case insensitive +- [#7869](https://github.com/influxdata/telegraf/pull/7869) `inputs.modbus` extend support of fixed point values on input +- [#7861](https://github.com/influxdata/telegraf/pull/7861) `inputs.ping` Fix Ping Input plugin for FreeBSD's ping6 +- [#7808](https://github.com/influxdata/telegraf/pull/7808) `inputs.sqlserver` added new counter - Lock Timeouts (timeout > 0)/sec +- [#8026](https://github.com/influxdata/telegraf/pull/8026) `inputs.vsphere` vSphere Fixed missing clustername issue 7878 +- [#8020](https://github.com/influxdata/telegraf/pull/8020) `processors.starlark` improve the quality of starlark docs by executing them as tests +- [#7976](https://github.com/influxdata/telegraf/pull/7976) `processors.starlark` add pivot example for starlark processor +- [#7134](https://github.com/influxdata/telegraf/pull/7134) `outputs.application_insights` Added the ability to set the endpoint url +- [#7908](https://github.com/influxdata/telegraf/pull/7908) `outputs.opentsdb` fix JSON handling of values NaN and Inf ## v1.15.2 [2020-07-31] -#### Bug Fixes +### Bug Fixes - [#7905](https://github.com/influxdata/telegraf/issues/7905): Fix RPM /var/log/telegraf permissions - [#7880](https://github.com/influxdata/telegraf/issues/7880): Fix tail following on EOF ## v1.15.1 [2020-07-22] -#### Bug Fixes +### Bug Fixes - [#7877](https://github.com/influxdata/telegraf/pull/7877): Fix architecture in non-amd64 deb and rpm packages. ## v1.15.0 [2020-07-22] -#### Release Notes +### Release Notes - The `logparser` input is deprecated, use the `tail` input with `data_format = "grok"` as a replacement. @@ -176,12 +1337,12 @@ `/etc/telegraf/telegraf.conf.sample`. The tar and zip packages now include the version in the top level directory. -#### New Inputs +### New Inputs - [nginx_sts](/plugins/inputs/nginx_sts/README.md) - Contributed by @zdmytriv - [redfish](/plugins/inputs/redfish/README.md) - Contributed by @sarvanikonda -#### New Processors +### New Processors - [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr - [execd](/plugins/processors/execd/README.md) - Contributed by @influxdata @@ -191,12 +1352,12 @@ - [reverse_dns](/plugins/processors/reverse_dns/README.md) - Contributed by @influxdata - [starlark](/plugins/processors/starlark/README.md) - Contributed by @influxdata -#### New Outputs +### New Outputs - [newrelic](/plugins/outputs/newrelic/README.md) - Contributed by @hsinghkalsi - [execd](/plugins/outputs/execd/README.md) - Contributed by @influxdata -#### Features +### Features - [#7634](https://github.com/influxdata/telegraf/pull/7634): Add support for streaming processors. - [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin. @@ -244,7 +1405,7 @@ - [#7154](https://github.com/influxdata/telegraf/pull/7154): Add v3 metadata support to ecs input. - [#7792](https://github.com/influxdata/telegraf/pull/7792): Support utf-16 in file and tail inputs. 
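+
+The release notes above deprecate the `logparser` input in favor of `tail` with the grok data format. A minimal sketch of that migration, assuming a placeholder log path and the built-in `%{COMBINED_LOG_FORMAT}` pattern from the grok parser documentation:
+
+```toml
+[[inputs.tail]]
+  ## Placeholder path; point this at the file logparser used to read.
+  files = ["/var/log/apache/access.log"]
+  from_beginning = false
+  data_format = "grok"
+  ## Built-in pattern for combined access logs.
+  grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
+```
+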
-#### Bug Fixes +### Bug Fixes - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. @@ -263,7 +1424,7 @@ ## v1.14.5 [2020-06-30] -#### Bug Fixes +### Bug Fixes - [#7686](https://github.com/influxdata/telegraf/pull/7686): Improve the performance of the procstat input. - [#7658](https://github.com/influxdata/telegraf/pull/7658): Fix ping exit code handling on non-Linux. @@ -275,7 +1436,7 @@ ## v1.14.4 [2020-06-09] -#### Bug Fixes +### Bug Fixes - [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL error" with PerformanceCounters query. - [#7579](https://github.com/influxdata/telegraf/pull/7579): Fix numeric to bool conversion in converter processor. @@ -284,7 +1445,7 @@ ## v1.14.3 [2020-05-19] -#### Bug Fixes +### Bug Fixes - [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser. - [#7343](https://github.com/influxdata/telegraf/issues/7343): Handle multiple metrics with the same timestamp in dedup processor. @@ -293,7 +1454,7 @@ ## v1.14.2 [2020-04-28] -#### Bug Fixes +### Bug Fixes - [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input. - [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call. @@ -307,7 +1468,7 @@ ## v1.14.1 [2020-04-14] -#### Bug Fixes +### Bug Fixes - [#7236](https://github.com/influxdata/telegraf/issues/7236): Fix PerformanceCounter query performance degradation in sqlserver input. - [#7257](https://github.com/influxdata/telegraf/issues/7257): Fix error when using the Name field in template processor. @@ -317,7 +1478,7 @@ ## v1.14 [2020-03-26] -#### Release Notes +### Release Notes - In the `sqlserver` input, the `sqlserver_azurestats` measurement has been renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric @@ -326,7 +1487,7 @@ - The `date` processor now uses the UTC timezone when creating its tag. In previous versions the local time was used. -#### New Inputs +### New Inputs - [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov - [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen @@ -338,17 +1499,17 @@ - [sflow](/plugins/inputs/sflow/README.md) - Contributed by @influxdata - [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI -#### New Processors +### New Processors - [dedup](/plugins/processors/dedup/README.md) - Contributed by @igomura - [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern - [s2geo](/plugins/processors/s2geo/README.md) - Contributed by @alespour -#### New Outputs +### New Outputs - [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert -#### Features +### Features - [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. - [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. @@ -387,7 +1548,7 @@ - [#7150](https://github.com/influxdata/telegraf/pull/7150): Add option for explicitly including queries in sqlserver input. - [#7173](https://github.com/influxdata/telegraf/pull/7173): Add support for GNMI DecimalVal type to cisco_telemetry_gnmi. 
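+
+The v1.14 release notes above change the `date` processor to derive its tag in UTC rather than local time. For orientation, a brief sketch of that processor's configuration, using the `tag_key` and `date_format` options from its README (the tag name is arbitrary):
+
+```toml
+[[processors.date]]
+  ## Adds a "month" tag such as "Jan"; as of v1.14 the value is computed in UTC.
+  tag_key = "month"
+  date_format = "Jan"
+```
+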
-#### Bug Fixes +### Bug Fixes - [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. - [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. @@ -403,11 +1564,11 @@ ## v1.13.4 [2020-02-25] -#### Release Notes +### Release Notes - Official packages now built with Go 1.13.8. -#### Bug Fixes +### Bug Fixes - [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. - [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. @@ -419,7 +1580,7 @@ ## v1.13.3 [2020-02-04] -#### Bug Fixes +### Bug Fixes - [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. - [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins. @@ -428,7 +1589,7 @@ ## v1.13.2 [2020-01-21] -#### Bug Fixes +### Bug Fixes - [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. - [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. @@ -440,7 +1601,7 @@ ## v1.13.1 [2020-01-08] -#### Bug Fixes +### Bug Fixes - [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. - [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. @@ -455,7 +1616,7 @@ ## v1.13 [2019-12-12] -#### Release Notes +### Release Notes - Official packages built with Go 1.13.5. This affects the minimum supported version on several platforms, most notably requiring Windows 7 (2008 R2) or @@ -467,7 +1628,7 @@ passthrough metrics will be unchanged. Refer to the `prometheus` input for details about the mapping. -#### New Inputs +### New Inputs - [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn - [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston @@ -476,15 +1637,15 @@ - [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream - [systemd_units](/plugins/inputs/systemd_units/README.md) - Contributed by @benschweizer -#### New Processors +### New Processors - [clone](/plugins/processors/clone/README.md) - Contributed by @adrianlzt -#### New Aggregators +### New Aggregators - [merge](/plugins/aggregators/merge/README.md) - Contributed by @influxdata -#### Features +### Features - [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. - [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. @@ -526,7 +1687,7 @@ - [#6740](https://github.com/influxdata/telegraf/pull/6740): Add base64decode operation to string processor. - [#6790](https://github.com/influxdata/telegraf/pull/6790): Add option to control collecting global variables to mysql input. -#### Bug Fixes +### Bug Fixes - [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. - [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. 
@@ -543,7 +1704,7 @@ ## v1.12.6 [2019-11-19] -#### Bug Fixes +### Bug Fixes - [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. - [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. @@ -552,7 +1713,7 @@ ## v1.12.5 [2019-11-12] -#### Bug Fixes +### Bug Fixes - [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. - [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. @@ -566,11 +1727,11 @@ ## v1.12.4 [2019-10-23] -#### Release Notes +### Release Notes - Official packages built with Go 1.12.12. -#### Bug Fixes +### Bug Fixes - [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method. - [#6541](https://github.com/influxdata/telegraf/issues/6541): Exclude alias tag if unset from plugin internal stats. @@ -578,7 +1739,7 @@ ## v1.12.3 [2019-10-07] -#### Bug Fixes +### Bug Fixes - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. @@ -590,7 +1751,7 @@ ## v1.12.2 [2019-09-24] -#### Bug Fixes +### Bug Fixes - [#6386](https://github.com/influxdata/telegraf/issues/6386): Fix detection of layout timestamps in csv and json parser. - [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input. @@ -600,7 +1761,7 @@ ## v1.12.1 [2019-09-10] -#### Bug Fixes +### Bug Fixes - [#6344](https://github.com/influxdata/telegraf/issues/6344): Fix depends on GLIBC_2.14 symbol version. - [#6329](https://github.com/influxdata/telegraf/issues/6329): Fix filecount for paths with trailing slash. @@ -613,14 +1774,14 @@ ## v1.12 [2019-09-03] -#### Release Notes +### Release Notes - The cluster health related fields in the elasticsearch input have been split out from the `elasticsearch_indices` measurement into the new `elasticsearch_cluster_health_indices` measurement as they were originally combined by error. -#### New Inputs +### New Inputs - [apcupsd](/plugins/inputs/apcupsd/README.md) - Contributed by @jonaz - [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu @@ -630,22 +1791,22 @@ - [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer - [uwsgi](/plugins/inputs/uwsgi/README.md) - Contributed by @blaggacao -#### New Parsers +### New Parsers - [form_urlencoded](/plugins/parsers/form_urlencoded/README.md) - Contributed by @byonchev -#### New Processors +### New Processors - [date](/plugins/processors/date/README.md) - Contributed by @influxdata - [pivot](/plugins/processors/pivot/README.md) - Contributed by @influxdata - [tag_limit](/plugins/processors/tag_limit/README.md) - Contributed by @memory - [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata -#### New Outputs +### New Outputs - [exec](/plugins/outputs/exec/README.md) - Contributed by @Jaeyo -#### Features +### Features - [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer. - [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values. @@ -697,7 +1858,7 @@ - [#6207](https://github.com/influxdata/telegraf/pull/6207): Add ability to label inputs for logging. 
- [#6300](https://github.com/influxdata/telegraf/pull/6300): Add TLS support to nginx_plus, nginx_plus_api and nginx_vts. -#### Bug Fixes +### Bug Fixes - [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. - [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. @@ -714,7 +1875,7 @@ ## v1.11.5 [2019-08-27] -#### Bug Fixes +### Bug Fixes - [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues. - [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error. @@ -727,7 +1888,7 @@ ## v1.11.4 [2019-08-06] -#### Bug Fixes +### Bug Fixes - [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field. - [#6191](https://github.com/influxdata/telegraf/issues/6191): Skip floats that are NaN or Inf in Datadog output. @@ -735,7 +1896,7 @@ ## v1.11.3 [2019-07-23] -#### Bug Fixes +### Bug Fixes - [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. - [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output. @@ -748,7 +1909,7 @@ ## v1.11.2 [2019-07-09] -#### Bug Fixes +### Bug Fixes - [#6056](https://github.com/influxdata/telegraf/pull/6056): Fix source address ping flag on BSD. - [#6059](https://github.com/influxdata/telegraf/issues/6059): Fix value out of range error on 32-bit systems in bind input. @@ -759,7 +1920,7 @@ ## v1.11.1 [2019-06-25] -#### Bug Fixes +### Bug Fixes - [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. - [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. @@ -773,7 +1934,7 @@ ## v1.11 [2019-06-11] -#### Release Notes +### Release Notes - The `uptime_format` field in the system input has been deprecated, use the `uptime` field instead. @@ -781,7 +1942,7 @@ requires `GetMetricData` permissions instead of `GetMetricStatistics`. The `units` tag is not available from this API and is no longer collected. -#### New Inputs +### New Inputs - [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek - [cisco_telemetry_gnmi](/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx @@ -791,20 +1952,20 @@ - [openweathermap](/plugins/inputs/openweathermap/README.md) - Contributed by @regel - [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje -#### New Aggregators +### New Aggregators - [final](/plugins/aggregators/final/README.md) - Contributed by @oplehto -#### New Outputs +### New Outputs - [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo - [health](/plugins/outputs/health/README.md) - Contributed by @influxdata -#### New Serializers +### New Serializers - [wavefront](/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck -#### Features +### Features - [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. - [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. @@ -836,7 +1997,7 @@ - [#5547](https://github.com/influxdata/telegraf/pull/5547): Add file rotation support to the file output. 
- [#5955](https://github.com/influxdata/telegraf/pull/5955): Add source tag to hddtemp plugin. -#### Bug Fixes +### Bug Fixes - [#5692](https://github.com/influxdata/telegraf/pull/5692): Temperature input plugin stops working when WiFi is turned off. - [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. @@ -862,7 +2023,7 @@ ## v1.10.4 [2019-05-14] -#### Bug Fixes +### Bug Fixes - [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. - [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. @@ -876,20 +2037,20 @@ ## v1.10.3 [2019-04-16] -#### Bug Fixes +### Bug Fixes - [#5680](https://github.com/influxdata/telegraf/pull/5680): Allow colons in metric names in prometheus_client output. - [#5716](https://github.com/influxdata/telegraf/pull/5716): Set log directory attributes in rpm spec. ## v1.10.2 [2019-04-02] -#### Release Notes +### Release Notes - String fields no longer have leading and trailing quotation marks removed in the grok parser. If you are capturing quoted strings you may need to update the patterns. -#### Bug Fixes +### Bug Fixes - [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators. - [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input. @@ -909,7 +2070,7 @@ ## v1.10.1 [2019-03-19] -#### Bug Fixes +### Bug Fixes - [#5448](https://github.com/influxdata/telegraf/issues/5448): Show error when TLS configuration cannot be loaded. - [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins. @@ -921,7 +2082,7 @@ ## v1.10 [2019-03-05] -#### New Inputs +### New Inputs - [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye - [cloud_pubsub_push](/plugins/inputs/cloud_pubsub_push/README.md) - Contributed by @influxdata @@ -932,16 +2093,16 @@ - [multifile](/plugins/inputs/multifile/README.md) - Contributed by @martin2250 - [stackdriver](/plugins/inputs/stackdriver/README.md) - Contributed by @WuHan0608 -#### New Outputs +### New Outputs - [cloud_pubsub](/plugins/outputs/cloud_pubsub/README.md) - Contributed by @emilymye -#### New Serializers +### New Serializers - [nowmetric](/plugins/serializers/nowmetric/README.md) - Contributed by @JefMuller - [carbon2](/plugins/serializers/carbon2/README.md) - Contributed by @frankreno -#### Features +### Features - [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. - [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. @@ -979,7 +2140,7 @@ - [#5490](https://github.com/influxdata/telegraf/pull/5490): Add tag based routing in influxdb/influxdb_v2 outputs. - [#5533](https://github.com/influxdata/telegraf/pull/5533): Allow grok parser to produce metrics with no fields. -#### Bug Fixes +### Bug Fixes - [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. - [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. @@ -995,7 +2156,7 @@ ## v1.9.5 [2019-02-26] -#### Bug Fixes +### Bug Fixes - [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output. 
- [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output. @@ -1009,7 +2170,7 @@ ## v1.9.4 [2019-02-05] -#### Bug Fixes +### Bug Fixes - [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser. - [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input. @@ -1018,7 +2179,7 @@ ## v1.9.3 [2019-01-22] -#### Bug Fixes +### Bug Fixes - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. @@ -1029,7 +2190,7 @@ ## v1.9.2 [2019-01-08] -#### Bug Fixes +### Bug Fixes - [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout. - [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non Azure managed instances and add server version. @@ -1048,7 +2209,7 @@ ## v1.9.1 [2018-12-11] -#### Bug Fixes +### Bug Fixes - [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer. - [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input. @@ -1063,7 +2224,7 @@ ## v1.9 [2018-11-20] -#### Release Notes +### Release Notes - The `http_listener` input plugin has been renamed to `influxdb_listener` and use of the original name is deprecated. The new name better describes the @@ -1081,7 +2242,7 @@ the new option `max_undelivered_messages` to limit the number of outstanding unwritten metrics. -#### New Inputs +### New Inputs - [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5 - [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe @@ -1090,11 +2251,11 @@ - [nginx_vts](/plugins/inputs/nginx_vts/README.md) - Contributed by @monder - [wireless](/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment -#### New Outputs +### New Outputs - [stackdriver](/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment -#### Features +### Features - [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor. - [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input. @@ -1117,7 +2278,7 @@ - [#4920](https://github.com/influxdata/telegraf/pull/4920): Add scraping for Prometheus endpoint in Kubernetes. - [#4938](https://github.com/influxdata/telegraf/pull/4938): Add per output flush_interval, metric_buffer_limit and metric_batch_size. -#### Bug Fixes +### Bug Fixes - [#4950](https://github.com/influxdata/telegraf/pull/4950): Remove the time_key from the field values in JSON parser. - [#3968](https://github.com/influxdata/telegraf/issues/3968): Fix input time rounding when using a custom interval. @@ -1501,7 +2662,6 @@ - The new `http` input configured with `data_format = "json"` can perform the same task as the, now deprecated, `httpjson` input. - ### New Inputs - [http](./plugins/inputs/http/README.md) - Thanks to @grange74 @@ -1620,6 +2780,7 @@ ## v1.5 [2017-12-14] ### New Plugins + - [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to @toni-moreno - [bond](./plugins/inputs/bond/README.md) - Thanks to @ildarsv - [cratedb](./plugins/outputs/cratedb/README.md) - Thanks to @felixge @@ -1950,7 +3111,7 @@ machines. 
Telegraf < 1.3: -``` +```text # field_name value active+clean 123 active+clean+scrubbing 3 @@ -1958,7 +3119,7 @@ active+clean+scrubbing 3 Telegraf >= 1.3: -``` +```text # field_name value tag count 123 state=active+clean count 3 state=active+clean+scrubbing @@ -2268,7 +3429,7 @@ that pertain to node vs. namespace statistics. This means that the default github_webhooks config: -``` +```toml # A Github Webhook Event collector [[inputs.github_webhooks]] ## Address and port to host Webhook listener on @@ -2277,7 +3438,7 @@ This means that the default github_webhooks config: should now look like: -``` +```toml # A Webhooks Event collector [[inputs.webhooks]] ## Address and port to host Webhook listener on @@ -2332,7 +3493,7 @@ consistent with the behavior of `collection_jitter`. - [#1265](https://github.com/influxdata/telegraf/pull/1265): Make dns lookups for chrony configurable. Thanks @zbindenren! - [#1275](https://github.com/influxdata/telegraf/pull/1275): Allow wildcard filtering of varnish stats. - [#1142](https://github.com/influxdata/telegraf/pull/1142): Support for glob patterns in exec plugin commands configuration. -- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL (http://localhost:15672) if not specified +- [#1278](https://github.com/influxdata/telegraf/pull/1278): RabbitMQ input: made url parameter optional by using DefaultURL `http://localhost:15672` if not specified - [#1197](https://github.com/influxdata/telegraf/pull/1197): Limit AWS GetMetricStatistics requests to 10 per second. - [#1278](https://github.com/influxdata/telegraf/pull/1278) & [#1288](https://github.com/influxdata/telegraf/pull/1288) & [#1295](https://github.com/influxdata/telegraf/pull/1295): RabbitMQ/Apache/InfluxDB inputs: made url(s) parameter optional by using reasonable input defaults if not specified - [#1296](https://github.com/influxdata/telegraf/issues/1296): Refactor of flush_jitter argument. @@ -2430,8 +3591,8 @@ to "stdout". ### Release Notes -- **Breaking change** in jolokia plugin. See -https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md +- **Breaking change** in jolokia plugin. See the +[jolokia README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/jolokia/README.md) for updated configuration. The plugin will now support proxy mode and will make POST requests. @@ -2536,14 +3697,16 @@ It is not included on the report path. This is necessary for reporting host disk ## v0.12.1 [2016-04-14] ### Release Notes + - Breaking change in the dovecot input plugin. See Features section below. -- Graphite output templates are now supported. See -https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +- Graphite output templates are now supported. See the +[Output Formats README](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite) - Possible breaking change for the librato and graphite outputs. Telegraf will no longer insert field names when the field is simply named `value`. This is because the `value` field is redundant in the graphite/librato context. ### Features + - [#1009](https://github.com/influxdata/telegraf/pull/1009): Cassandra input plugin. Thanks @subhachandrachandra! - [#976](https://github.com/influxdata/telegraf/pull/976): Reduce allocations in the UDP and statsd inputs. - [#979](https://github.com/influxdata/telegraf/pull/979): Reduce allocations in the TCP listener. 
@@ -2556,6 +3719,7 @@ because the `value` field is redundant in the graphite/librato context. - [#1008](https://github.com/influxdata/telegraf/pull/1008): Adding memstats metrics to the influxdb plugin. ### Bug Fixes + - [#968](https://github.com/influxdata/telegraf/issues/968): Processes plugin gets unknown state when spaces are in (command name) - [#969](https://github.com/influxdata/telegraf/pull/969): ipmi_sensors: allow : in password. Thanks @awaw! - [#972](https://github.com/influxdata/telegraf/pull/972): dovecot: remove extra newline in dovecot command. Thanks @mrannanj! @@ -2564,6 +3728,7 @@ because the `value` field is redundant in the graphite/librato context. ## v0.12.0 [2016-04-05] ### Features + - [#951](https://github.com/influxdata/telegraf/pull/951): Parse environment variables in the config file. - [#948](https://github.com/influxdata/telegraf/pull/948): Cleanup config file and make default package version include all plugins (but commented). - [#927](https://github.com/influxdata/telegraf/pull/927): Adds parsing of tags to the statsd input when using DataDog's dogstatsd extension @@ -2583,6 +3748,7 @@ because the `value` field is redundant in the graphite/librato context. - [#945](https://github.com/influxdata/telegraf/pull/945): KAFKA output: codec, acks, and retry configuration. Thanks @framiere! ### Bug Fixes + - [#890](https://github.com/influxdata/telegraf/issues/890): Create TLS config even if only ssl_ca is provided. - [#884](https://github.com/influxdata/telegraf/issues/884): Do not call write method if there are 0 metrics to write. - [#898](https://github.com/influxdata/telegraf/issues/898): Put database name in quotes, fixes special characters in the database name. @@ -2597,21 +3763,23 @@ because the `value` field is redundant in the graphite/librato context. ## v0.11.1 [2016-03-17] ### Release Notes + - Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859) ### Features + - [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @PierreF! - [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou! ### Bug Fixes + - [#852](https://github.com/influxdata/telegraf/issues/852): Windows zip package fix - [#859](https://github.com/influxdata/telegraf/issues/859): httpjson plugin panic ## v0.11.0 [2016-03-15] -### Release Notes - ### Features + - [#692](https://github.com/influxdata/telegraf/pull/770): Support InfluxDB retention policies - [#771](https://github.com/influxdata/telegraf/pull/771): Default timeouts for input plugins. Thanks @PierreF! - [#758](https://github.com/influxdata/telegraf/pull/758): UDP Listener input plugin, thanks @whatyouhide! @@ -2629,6 +3797,7 @@ because the `value` field is redundant in the graphite/librato context. - [#847](https://github.com/influxdata/telegraf/pull/847): `ntpq`: Input plugin for running ntp query executable and gathering metrics. ### Bug Fixes + - [#748](https://github.com/influxdata/telegraf/issues/748): Fix sensor plugin split on ":" - [#722](https://github.com/influxdata/telegraf/pull/722): Librato output plugin fixes. Thanks @chrusty! - [#745](https://github.com/influxdata/telegraf/issues/745): Fix Telegraf toml parse panic on large config files. Thanks @titilambert! @@ -2644,15 +3813,18 @@ because the `value` field is redundant in the graphite/librato context. ## v0.10.4.1 ### Release Notes + - Bug in the build script broke deb and rpm packages.
### Bug Fixes + - [#750](https://github.com/influxdata/telegraf/issues/750): deb package broken - [#752](https://github.com/influxdata/telegraf/issues/752): rpm package broken ## v0.10.4 [2016-02-24] ### Release Notes + - The pass/drop parameters have been renamed to fielddrop/fieldpass parameters, to more accurately indicate their purpose. - There are also now namedrop/namepass parameters for passing/dropping based @@ -2660,6 +3832,7 @@ on the metric _name_. - Experimental Windows builds now available. ### Features + - [#727](https://github.com/influxdata/telegraf/pull/727): riak input, thanks @jcoene! - [#694](https://github.com/influxdata/telegraf/pull/694): DNS Query input, thanks @mjasion! - [#724](https://github.com/influxdata/telegraf/pull/724): username matching for procstat input, thanks @zorel! @@ -2667,12 +3840,14 @@ on the metric _name_. - [#737](https://github.com/influxdata/telegraf/pull/737): Support multiple fields for statsd input. Thanks @mattheath! ### Bug Fixes + - [#701](https://github.com/influxdata/telegraf/pull/701): output write count shouldn't print in quiet mode. - [#746](https://github.com/influxdata/telegraf/pull/746): httpjson plugin: Fix HTTP GET parameters. ## v0.10.3 [2016-02-18] ### Release Notes + - Users of the `exec` and `kafka_consumer` (and the new `nats_consumer` and `mqtt_consumer` plugins) can now specify the incoming data format that they would like to parse. Currently supports: "json", "influx", and @@ -2689,6 +3864,7 @@ points and only flushing on a set time interval. This will default to `true` and is in the `[agent]` config section. ### Features + - [#652](https://github.com/influxdata/telegraf/pull/652): CouchDB Input Plugin. Thanks @codehate! - [#655](https://github.com/influxdata/telegraf/pull/655): Support parsing arbitrary data formats. Currently limited to kafka_consumer and exec inputs. - [#671](https://github.com/influxdata/telegraf/pull/671): Dovecot input plugin. Thanks @mikif70! @@ -2703,6 +3879,7 @@ and is in the `[agent]` config section. - [#682](https://github.com/influxdata/telegraf/pull/682): Mesos input plugin. Thanks @tripledes! ### Bug Fixes + - [#443](https://github.com/influxdata/telegraf/issues/443): Fix Ping command timeout parameter on Linux. - [#662](https://github.com/influxdata/telegraf/pull/667): Change `[tags]` to `[global_tags]` to fix multiple-plugin tags bug. - [#642](https://github.com/influxdata/telegraf/issues/642): Riemann output plugin issues. @@ -2712,6 +3889,7 @@ and is in the `[agent]` config section. ## v0.10.2 [2016-02-04] ### Release Notes + - Statsd timing measurements are now aggregated into a single measurement with fields. - Graphite output now inserts tags into the bucket in alphabetical order. @@ -2721,6 +3899,7 @@ doing the opposite of what it claimed to do (yikes). It's been replaced by `insecure_skip_verify` ### Features + - [#575](https://github.com/influxdata/telegraf/pull/575): Support for collecting Windows Performance Counters. Thanks @TheFlyingCorpse! - [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type. - [#603](https://github.com/influxdata/telegraf/pull/603): Aggregate statsd timing measurements into fields. Thanks @marcinbunsch! @@ -2730,6 +3909,7 @@ doing the opposite of what it claimed to do (yikes).
It's been replaced by - [#628](https://github.com/influxdata/telegraf/pull/628): Windows perf counters: pre-vista support ### Bug Fixes + - [#595](https://github.com/influxdata/telegraf/issues/595): graphite output should include tags to separate duplicate measurements. - [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working. - [#600](https://github.com/influxdata/telegraf/issues/600): datadog measurement/field name parsing is wrong. @@ -2751,6 +3931,7 @@ for the latest measurements, fields, and tags. There is also now support for specifying a docker endpoint to get metrics from. ### Features + - [#509](https://github.com/influxdata/telegraf/pull/509): Flatten JSON arrays with indices. Thanks @psilva261! - [#512](https://github.com/influxdata/telegraf/pull/512): Python 3 build script, add lsof dep to package. Thanks @Ormod! - [#475](https://github.com/influxdata/telegraf/pull/475): Add response time to httpjson plugin. Thanks @titilambert! @@ -2775,6 +3956,7 @@ specifying a docker endpoint to get metrics from. - [#471](https://github.com/influxdata/telegraf/pull/471): httpjson request headers. Thanks @asosso! ### Bug Fixes + - [#506](https://github.com/influxdata/telegraf/pull/506): Ping input doesn't return response time metric when timeout. Thanks @titilambert! - [#508](https://github.com/influxdata/telegraf/pull/508): Fix prometheus cardinality issue with the `net` plugin - [#499](https://github.com/influxdata/telegraf/issues/499) & [#502](https://github.com/influxdata/telegraf/issues/502): php fpm unix socket and other fixes, thanks @kureikain! @@ -2787,6 +3969,7 @@ specifying a docker endpoint to get metrics from. ## v0.10.0 [2016-01-12] ### Release Notes + - Linux packages have been taken out of `opt`, the binary is now in `/usr/bin` and configuration files are in `/etc/telegraf` - **breaking change** `plugins` have been renamed to `inputs`. This was done because @@ -2807,13 +3990,14 @@ instead of only `cpu_` - The prometheus plugin schema has not been changed (measurements have not been aggregated). -### Packaging change note: +### Packaging change note RHEL/CentOS users upgrading from 0.2.x to 0.10.0 will probably have their configurations overwritten by the upgrade. There is a backup stored at /etc/telegraf/telegraf.conf.$(date +%s).backup. ### Features + - Plugin measurements aggregated into a single measurement. - Added ability to specify per-plugin tags - Added ability to specify per-plugin measurement suffix and prefix. @@ -2825,17 +4009,20 @@ configurations overwritten by the upgrade. There is a backup stored at ## v0.2.5 [unreleased] ### Features + - [#427](https://github.com/influxdata/telegraf/pull/427): zfs plugin: pool stats added. Thanks @allenpetersen! - [#428](https://github.com/influxdata/telegraf/pull/428): Amazon Kinesis output. Thanks @jimmystewpot! - [#449](https://github.com/influxdata/telegraf/pull/449): influxdb plugin, thanks @mark-rushakoff ### Bug Fixes + - [#430](https://github.com/influxdata/telegraf/issues/430): Network statistics removed in elasticsearch 2.1. Thanks @jipperinbham! - [#452](https://github.com/influxdata/telegraf/issues/452): Elasticsearch open file handles error. Thanks @jipperinbham! ## v0.2.4 [2015-12-08] ### Features + - [#412](https://github.com/influxdata/telegraf/pull/412): Additional memcached stats. Thanks @mgresser! - [#410](https://github.com/influxdata/telegraf/pull/410): Additional redis metrics. Thanks @vlaadbrain! 
- [#414](https://github.com/influxdata/telegraf/issues/414): Jolokia plugin auth parameters @@ -2846,12 +4033,14 @@ configurations overwritten by the upgrade. There is a backup stored at - [#401](https://github.com/influxdata/telegraf/pull/401): Support pass/drop/tagpass/tagdrop for outputs. Thanks @oldmantaiter! ### Bug Fixes + - [#405](https://github.com/influxdata/telegraf/issues/405): Prometheus output cardinality issue - [#388](https://github.com/influxdata/telegraf/issues/388): Fix collection hangup when cpu times decrement. ## v0.2.3 [2015-11-30] ### Release Notes + - **breaking change** The `kafka` plugin has been renamed to `kafka_consumer`, and most of the config option names have changed. This only affects the kafka consumer _plugin_ (not the @@ -2861,7 +4050,7 @@ functional. - Plugins can now be specified as a list, and multiple plugin instances of the same type can be specified, like this: -``` +```toml [[inputs.cpu]] percpu = false totalcpu = true @@ -2876,6 +4065,7 @@ same type can be specified, like this: - Aerospike plugin: tag changed from `host` -> `aerospike_host` ### Features + - [#379](https://github.com/influxdata/telegraf/pull/379): Riemann output, thanks @allenj! - [#375](https://github.com/influxdata/telegraf/pull/375): kafka_consumer service plugin. - [#392](https://github.com/influxdata/telegraf/pull/392): Procstat plugin can now accept pgrep -f pattern, thanks @ecarreras! @@ -2883,21 +4073,25 @@ same type can be specified, like this: - [#354](https://github.com/influxdata/telegraf/pull/354): Add ability to specify multiple metrics in one statsd line. Thanks @MerlinDMC! ### Bug Fixes + - [#371](https://github.com/influxdata/telegraf/issues/371): Kafka consumer plugin not functioning. - [#389](https://github.com/influxdata/telegraf/issues/389): NaN value panic ## v0.2.2 [2015-11-18] ### Release Notes + - 0.2.1 has a bug where all lists within plugins get duplicated; this includes lists of servers/URLs. 0.2.2 is being released solely to fix that bug. ### Bug Fixes + - [#377](https://github.com/influxdata/telegraf/pull/377): Fix for duplicate slices in inputs. ## v0.2.1 [2015-11-16] ### Release Notes + - Telegraf will no longer use docker-compose for "long" unit tests; it has been changed to just run docker commands in the Makefile. See `make docker-run` and `make docker-kill`. `make test` will still run all unit tests with docker. @@ -2910,6 +4104,7 @@ changed to just run docker commands in the Makefile. See `make docker-run` and same type. ### Features + - [#325](https://github.com/influxdata/telegraf/pull/325): NSQ output. Thanks @jrxFive! - [#318](https://github.com/influxdata/telegraf/pull/318): Prometheus output. Thanks @oldmantaiter! - [#338](https://github.com/influxdata/telegraf/pull/338): Restart Telegraf on package upgrade. Thanks @linsomniac! @@ -2922,6 +4117,7 @@ same type. - [#372](https://github.com/influxdata/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC! ### Bug Fixes + - [#331](https://github.com/influxdata/telegraf/pull/331): Don't overwrite host tag in redis plugin. - [#336](https://github.com/influxdata/telegraf/pull/336): Mongodb plugin should take 2 measurements. - [#351](https://github.com/influxdata/telegraf/issues/317): Fix continual "CREATE DATABASE" in writes @@ -2930,6 +4126,7 @@ same type. ## v0.2.0 [2015-10-27] ### Release Notes + - The -test flag will now only output 2 collections for plugins that need it - There is a new agent configuration option: `flush_interval`.
This option tells Telegraf how often to flush data to InfluxDB and other output sinks. For example, @@ -2946,6 +4143,7 @@ be controlled via the `round_interval` and `flush_jitter` config options. - Telegraf will now retry metric flushes twice ### Features + - [#205](https://github.com/influxdata/telegraf/issues/205): Include per-db redis keyspace info - [#226](https://github.com/influxdata/telegraf/pull/226): Add timestamps to points in Kafka/AMQP outputs. Thanks @ekini - [#90](https://github.com/influxdata/telegraf/issues/90): Add Docker labels to tags in docker plugin @@ -2970,6 +4168,7 @@ of metrics collected and from how many inputs. - [#322](https://github.com/influxdata/telegraf/issues/322): Librato output. Thanks @jipperinbham! ### Bug Fixes + - [#228](https://github.com/influxdata/telegraf/pull/228): New version of package will replace old one. Thanks @ekini! - [#232](https://github.com/influxdata/telegraf/pull/232): Fix bashism run during deb package installation. Thanks @yankcrime! - [#261](https://github.com/influxdata/telegraf/issues/260): RabbitMQ panics if wrong credentials given. Thanks @ekini! @@ -2982,6 +4181,7 @@ of metrics collected and from how many inputs. ## v0.1.9 [2015-09-22] ### Release Notes + - InfluxDB output config change: `url` is now `urls`, and is a list. Config files will still be backwards compatible if only `url` is specified. - The -test flag will now output two metric collections @@ -3003,6 +4203,7 @@ have been renamed for consistency. Some measurements have also been removed from re-added in a "verbose" mode if there is demand for it. ### Features + - [#143](https://github.com/influxdata/telegraf/issues/143): InfluxDB clustering support - [#181](https://github.com/influxdata/telegraf/issues/181): Makefile GOBIN support. Thanks @Vye! - [#203](https://github.com/influxdata/telegraf/pull/200): AMQP output. Thanks @ekini! @@ -3013,6 +4214,7 @@ re-added in a "verbose" mode if there is demand for it. and filtering when specifying a config file. ### Bug Fixes + - [#170](https://github.com/influxdata/telegraf/issues/170): Systemd support - [#175](https://github.com/influxdata/telegraf/issues/175): Set write precision before gathering metrics - [#178](https://github.com/influxdata/telegraf/issues/178): redis plugin, multiple server thread hang bug @@ -3028,10 +4230,12 @@ and filtering when specifying a config file. ## v0.1.8 [2015-09-04] ### Release Notes + - Telegraf will now write data in UTC at second precision by default - Now using Go 1.5 to build telegraf ### Features + - [#150](https://github.com/influxdata/telegraf/pull/150): Add Host Uptime metric to system plugin - [#158](https://github.com/influxdata/telegraf/pull/158): Apache Plugin. Thanks @KPACHbIuLLIAnO4 - [#159](https://github.com/influxdata/telegraf/pull/159): Use second precision for InfluxDB writes @@ -3045,6 +4249,7 @@ and filtering when specifying a config file. ## v0.1.7 [2015-08-28] ### Features + - [#38](https://github.com/influxdata/telegraf/pull/38): Kafka output producer. - [#133](https://github.com/influxdata/telegraf/pull/133): Add plugin.Gather error logging. Thanks @nickscript0! - [#136](https://github.com/influxdata/telegraf/issues/136): Add a -usage flag for printing usage of a single plugin. @@ -3054,6 +4259,7 @@ and filtering when specifying a config file. - Indent the toml config file for readability ### Bug Fixes + - [#128](https://github.com/influxdata/telegraf/issues/128): system_load measurement missing. 
- [#129](https://github.com/influxdata/telegraf/issues/129): Latest pkg url fix. - [#131](https://github.com/influxdata/telegraf/issues/131): Fix memory reporting on linux & darwin. Thanks @subhachandrachandra! @@ -3062,11 +4268,13 @@ and filtering when specifying a config file. ## v0.1.6 [2015-08-20] ### Features + - [#112](https://github.com/influxdata/telegraf/pull/112): Datadog output. Thanks @jipperinbham! - [#116](https://github.com/influxdata/telegraf/pull/116): Use godep to vendor all dependencies - [#120](https://github.com/influxdata/telegraf/pull/120): Httpjson plugin. Thanks @jpalay & @alvaromorales! ### Bug Fixes + - [#113](https://github.com/influxdata/telegraf/issues/113): Update README with Telegraf/InfluxDB compatibility - [#118](https://github.com/influxdata/telegraf/pull/118): Fix for disk usage stats in Windows. Thanks @srfraser! - [#122](https://github.com/influxdata/telegraf/issues/122): Fix for DiskUsage segv fault. Thanks @srfraser! @@ -3075,6 +4283,7 @@ and filtering when specifying a config file. ## v0.1.5 [2015-08-13] ### Features + - [#54](https://github.com/influxdata/telegraf/pull/54): MongoDB plugin. Thanks @jipperinbham! - [#55](https://github.com/influxdata/telegraf/pull/55): Elasticsearch plugin. Thanks @brocaar! - [#71](https://github.com/influxdata/telegraf/pull/71): HAProxy plugin. Thanks @kureikain! @@ -3093,6 +4302,7 @@ and filtering when specifying a config file. - [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! ### Bug Fixes + - [#85](https://github.com/influxdata/telegraf/pull/85): Fix GetLocalHost testutil function for mac users - [#89](https://github.com/influxdata/telegraf/pull/89): go fmt fixes - [#94](https://github.com/influxdata/telegraf/pull/94): Fix for issue #93, explicitly call sarama.v1 -> sarama @@ -3102,31 +4312,37 @@ and filtering when specifying a config file. ## v0.1.4 [2015-07-09] ### Features + - [#56](https://github.com/influxdata/telegraf/pull/56): Update README for Kafka plugin. Thanks @EmilS! ### Bug Fixes + - [#50](https://github.com/influxdata/telegraf/pull/50): Fix init.sh script to use telegraf directory. Thanks @jseriff! - [#52](https://github.com/influxdata/telegraf/pull/52): Update CHANGELOG to reference updated directory. Thanks @benfb! ## v0.1.3 [2015-07-05] ### Features + - [#35](https://github.com/influxdata/telegraf/pull/35): Add Kafka plugin. Thanks @EmilS! - [#47](https://github.com/influxdata/telegraf/pull/47): Add RethinkDB plugin. Thanks @jipperinbham! ### Bug Fixes + - [#45](https://github.com/influxdata/telegraf/pull/45): Skip disk tags that don't have a value. Thanks @jhofeditz! - [#43](https://github.com/influxdata/telegraf/pull/43): Fix bug in MySQL plugin. Thanks @marcosnils! ## v0.1.2 [2015-07-01] ### Features + - [#12](https://github.com/influxdata/telegraf/pull/12): Add Linux/ARM to the list of built binaries. Thanks @voxxit! - [#14](https://github.com/influxdata/telegraf/pull/14): Clarify the S3 buckets that Telegraf is pushed to. - [#16](https://github.com/influxdata/telegraf/pull/16): Convert Redis to use URI, support Redis AUTH. Thanks @jipperinbham! - [#21](https://github.com/influxdata/telegraf/pull/21): Add memcached plugin. Thanks @Yukki! ### Bug Fixes + - [#13](https://github.com/influxdata/telegraf/pull/13): Fix the packaging script. - [#19](https://github.com/influxdata/telegraf/pull/19): Add host name to metric tags. Thanks @sherifzain! 
- [#20](https://github.com/influxdata/telegraf/pull/20): Fix race condition with accumulator mutex. Thanks @nkatsaros! diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 897ac1377e6e7..4323b07cabcc5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,39 +1,64 @@ -### Contributing +# Contributing to Telegraf + +There are many ways to get involved in the Telegraf project, from opening issues and creating pull requests to joining the conversation in Slack. We would love to see you contribute your expertise and join our community. To get started, review this document to learn best practices. + +![tiger](assets/GopherAndTiger.png "tiger") + +## Opening Issues + +### Bug reports + +Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please ensure you include all the requested details (e.g. Telegraf config and logs, platform, etc.). + +Please note that issues are not the place to file general support requests such as "How do I use the mongoDB plugin?" Questions of this nature should be sent to the [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/), not filed as issues. + +### Feature requests + +We really like to receive feature requests as it helps us prioritize our work. Before you file a feature request, please search existing issues; you can filter issues that have the label `feature request`. Please be clear about your requirements and goals, and help us understand what you would like to see added to Telegraf with examples and the reasons why it is important to you. If you find your feature request already exists as a GitHub issue, please indicate your support for that feature by using the "thumbs up" reaction. + +### Support questions + +We recommend posting support questions in our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/); we have a lot of talented community members there who can help answer your question more quickly. + +## Contributing code + +### Creating a pull request 1. [Sign the CLA][cla]. -1. Open a [new issue][] to discuss the changes you would like to make. This is +2. Open a [new issue][] to discuss the changes you would like to make. This is not strictly required but it may help reduce the amount of rework you need to do later. -1. Make changes or write plugin using the guidelines in the following +3. Make changes or write a plugin using the guidelines in the following documents: - [Input Plugins][inputs] - [Processor Plugins][processors] - [Aggregator Plugins][aggregators] - [Output Plugins][outputs] -1. Ensure you have added proper unit tests and documentation. -1. Open a new [pull request][]. +4. Ensure you have added proper unit tests and documentation. +5. Open a new [pull request][]. +6. The pull request title needs to follow [conventional commit format](https://www.conventionalcommits.org/en/v1.0.0/#summary) -#### Contributing an External Plugin *(experimental)* -Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](plugins/inputs/execd), [Execd Output](/plugins/inputs/execd), and [Execd Processor](plugins/processors/execd) Plugins without having to change the plugin code. **Note:** If you have a pull request with only one commit, then that commit needs to follow the conventional commit format or the `Semantic Pull Request` check will fail.
This is because GitHub uses the pull request title when there are multiple commits, but when there is only one commit it uses the commit message instead. -Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. -Check out our [guidelines](docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. +### When will your contribution get released? +We have two kinds of releases: patch releases, which happen every few weeks, and feature releases, which happen once a quarter. If your contribution is a bug fix, it will be released in the next patch release after it is merged to master. If your contribution is a new plugin or other feature, it will be released in the next quarterly release after it is merged to master. Quarterly releases are on the third Wednesday of March, June, September, and December. -#### Security Vulnerability Reporting -InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our -open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about -security vulnerability reporting, -including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). +### Contributing an External Plugin -### GoDoc +Input, output, and processor plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input](/plugins/inputs/execd), [Execd Output](/plugins/outputs/execd), and [Execd Processor](/plugins/processors/execd) Plugins without having to change the plugin code. -Public interfaces for inputs, outputs, processors, aggregators, metrics, -and the accumulator can be found in the GoDoc: +Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](/plugins/common/shim) to easily compile it as a separate app and run it with the respective `execd` plugin. +Check out our [guidelines](/docs/EXTERNAL_PLUGINS.md#external-plugin-guidelines) on how to build and set up your external plugins to run with `execd`. -[![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) +## Security Vulnerability Reporting + +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our +open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about +security vulnerability reporting, +including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). -### Common development tasks +## Common development tasks **Adding a dependency:** @@ -46,7 +71,7 @@ Telegraf uses Go modules. Assuming you can already build the project, run this i Before opening a pull request you should run the linter checks and the short tests. -``` +```shell make check make test ``` @@ -55,19 +80,28 @@ make test (Optional) -Running the integration tests requires several docker containers to be -running. You can start the containers with: -``` -docker-compose up -``` +To run only the integration tests use: -And run the full test suite with: +```shell +make test-integration ``` + +To run the full test suite use: + +```shell make test-all ``` -Use `make docker-kill` to stop the containers.
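When iterating on a single plugin it is usually faster to run only that package's tests instead of the whole suite. A sketch using standard Go tooling (the plugin path shown is just an example):

```shell
# Run the short unit tests for a single plugin package
go test -short ./plugins/inputs/mysql/...
```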
+### For more developer resources +- [Code Style][codestyle] +- [Deprecation][deprecation] +- [Logging][logging] +- [Metric Format Changes][metricformat] +- [Packaging][packaging] +- [Profiling][profiling] +- [Reviews][reviews] +- [Sample Config][sample config] [cla]: https://www.influxdata.com/legal/cla/ [new issue]: https://github.com/influxdata/telegraf/issues/new/choose [pull request]: https://github.com/influxdata/telegraf/compare [inputs]: /docs/INPUTS.md [processors]: /docs/PROCESSORS.md [aggregators]: /docs/AGGREGATORS.md [outputs]: /docs/OUTPUTS.md +[codestyle]: /docs/developers/CODE_STYLE.md +[deprecation]: /docs/developers/DEPRECATION.md +[logging]: /docs/developers/LOGGING.md +[metricformat]: /docs/developers/METRIC_FORMAT_CHANGES.md +[packaging]: /docs/developers/PACKAGING.md +[profiling]: /docs/developers/PROFILING.md +[reviews]: /docs/developers/REVIEWS.md +[sample config]: /docs/developers/SAMPLE_CONFIG.md diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md index 1aea58dac3070..9d060d095803f 100644 --- a/EXTERNAL_PLUGINS.md +++ b/EXTERNAL_PLUGINS.md @@ -1,15 +1,42 @@ # External Plugins -This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd). -Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin. +This is a list of plugins that can be compiled outside of Telegraf and used via the `execd` [input](plugins/inputs/execd), [output](plugins/outputs/execd), or [processor](plugins/processors/execd). +Check out the [external plugin documentation](/docs/EXTERNAL_PLUGINS.md) for more information on writing and contributing a plugin. Pull requests welcome. - ## Inputs -- [rand](https://github.com/ssoroka/rand) - Generate random numbers -- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts -- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels + - [awsalarms](https://github.com/vipinvkmenon/awsalarms) - Simple plugin to gather/monitor alarms generated in AWS. - [octoprint](https://github.com/BattleBas/octoprint-telegraf-plugin) - Gather 3d print information from the octoprint API. +- [opcda](https://github.com/lpc921/telegraf-execd-opcda) - Gather data from [OPC Foundation's Data Access (DA)](https://opcfoundation.org/about/opc-technologies/opc-classic/) protocol for industrial automation. +- [open-hardware-monitor](https://github.com/marianob85/open_hardware_monitor-telegraf-plugin) - Gather sensor data provided by [Open Hardware Monitor](http://openhardwaremonitor.org) +- [plex](https://github.com/russorat/telegraf-webhooks-plex) - Listens for events from Plex Media Server [Webhooks](https://support.plex.tv/articles/115002267687-webhooks/). +- [rand](https://github.com/ssoroka/rand) - Generate random numbers +- [SMCIPMITool](https://github.com/jhpope/smc_ipmi) - Python script to parse the output of [SMCIPMITool](https://www.supermicro.com/en/solutions/management-software/ipmi-utilities) into [InfluxDB line protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/). - [systemd-timings](https://github.com/pdmorrow/telegraf-execd-systemd-timings) - Gather systemd boot and unit timestamp metrics.
+- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts +- [youtube](https://github.com/inabagumi/youtube-telegraf-plugin) - Gather account information from YouTube channels +- [Big Blue Button](https://github.com/SLedunois/bigbluebutton-telegraf-plugin) - Gather meeting information from a [Big Blue Button](https://bigbluebutton.org/) server +- [dnsmasq](https://github.com/machinly/dnsmasq-telegraf-plugin) - Gather dnsmasq statistics from dnsmasq +- [ldap_org and ds389](https://github.com/falon/CSI-telegraf-plugins) - Gather statistics from 389ds and from LDAP trees. +- [x509_crl](https://github.com/jcgonnard/telegraf-input-x590crl) - Gather information from your X509 CRL files +- [s7comm](https://github.com/nicolasme/s7comm) - Gather information from Siemens PLC +- [net_irtt](https://github.com/iAnatoly/telegraf-input-net_irtt) - Gather information from IRTT network test +- [dht_sensor](https://github.com/iAnatoly/telegraf-input-dht_sensor) - Gather temperature and humidity from DHTXX sensors +- [oracle](https://github.com/bonitoo-io/telegraf-input-oracle) - Gather statistics from Oracle RDBMS +- [db2](https://github.com/bonitoo-io/telegraf-input-db2) - Gather statistics from DB2 RDBMS +- [apt](https://github.com/x70b1/telegraf-apt) - Check Debian for package updates. +- [knot](https://github.com/x70b1/telegraf-knot) - Collect stats from Knot DNS. +- [fritzbox](https://github.com/hdecarne-github/fritzbox-telegraf-plugin) - Gather statistics from [FRITZ!Box](https://avm.de/produkte/fritzbox/) router and repeater +- [linux-psi-telegraf-plugin](https://github.com/gridscale/linux-psi-telegraf-plugin) - Gather pressure stall information ([PSI](https://facebookmicrosites.github.io/psi/)) from the Linux Kernel +- [huebridge](https://github.com/hdecarne-github/huebridge-telegraf-plugin) - Gather smart home statistics from [Hue Bridge](https://www.philips-hue.com/) devices + +## Outputs + +- [kinesis](https://github.com/morfien101/telegraf-output-kinesis) - Aggregation and compression of metrics to send to Amazon Kinesis. + +## Processors + +- [geoip](https://github.com/a-bali/telegraf-geoip) - Add GeoIP information to IP addresses. +- [metadata](https://github.com/lawdt/metadata) - Appends metadata gathered from Openstack to metrics. diff --git a/Makefile b/Makefile index eebd15c30bffc..958006a392683 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,11 @@ -next_version := $(shell cat build_version.txt) -tag := $(shell git describe --exact-match --tags 2>git_describe_error.tmp; rm -f git_describe_error.tmp) +ifeq ($(OS),Windows_NT) + next_version := $(shell type build_version.txt) + tag := $(shell git describe --exact-match --tags 2> nul) +else + next_version := $(shell cat build_version.txt) + tag := $(shell git describe --exact-match --tags 2>/dev/null) +endif + branch := $(shell git rev-parse --abbrev-ref HEAD) commit := $(shell git rev-parse --short=8 HEAD) glibc_version := 2.17 @@ -44,6 +50,8 @@ HOSTGO := env -u GOOS -u GOARCH -u GOARM -- go LDFLAGS := $(LDFLAGS) -X main.commit=$(commit) -X main.branch=$(branch) -X main.goos=$(GOOS) -X main.goarch=$(GOARCH) ifneq ($(tag),) LDFLAGS += -X main.version=$(version) +else + LDFLAGS += -X main.version=$(version)-$(commit) endif # Go built-in race detector works only for 64-bit architectures.
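To illustrate the `else` branch added above: `git describe --exact-match --tags` only succeeds when HEAD is tagged, so untagged development builds now carry the short commit hash in their version string. The output below is hypothetical:

```shell
# On a tagged release commit, the tag alone becomes the version:
$ git describe --exact-match --tags
v1.23.0

# On an untagged commit the command fails, and the Makefile falls
# back to version-commit, e.g. main.version=1.24.0-8aa2603c
$ git describe --exact-match --tags
fatal: no tag exactly matches '...'
```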
@@ -62,33 +70,66 @@ localstatedir ?= $(prefix)/var pkgdir ?= build/dist .PHONY: all -all: - @$(MAKE) deps - @$(MAKE) telegraf +all: deps docs telegraf .PHONY: help help: @echo 'Targets:' - @echo ' all - download dependencies and compile telegraf binary' - @echo ' deps - download dependencies' - @echo ' telegraf - compile telegraf binary' - @echo ' test - run short unit tests' - @echo ' fmt - format source files' - @echo ' tidy - tidy go modules' - @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' - @echo ' clean - delete build artifacts' + @echo ' all - download dependencies and compile telegraf binary' + @echo ' deps - download dependencies' + @echo ' docs - embed sample-configurations into READMEs' + @echo ' telegraf - compile telegraf binary' + @echo ' test - run short unit tests' + @echo ' fmt - format source files' + @echo ' tidy - tidy go modules' + @echo ' lint - run linter' + @echo ' lint-branch - run linter on changes in current branch since master' + @echo ' lint-install - install linter' + @echo ' check-deps - check docs/LICENSE_OF_DEPENDENCIES.md' + @echo ' clean - delete build artifacts' + @echo ' package - build all supported packages, override include_packages to only build a subset' + @echo ' e.g.: make package include_packages="amd64.deb"' + @echo '' + @echo 'Possible values for include_packages variable' + @$(foreach package,$(include_packages),echo " $(package)";) @echo '' - @echo 'Package Targets:' - @$(foreach dist,$(dists),echo " $(dist)";) + @echo 'Resulting package name format (where arch will be the arch of the package):' + @echo ' telegraf_$(deb_version)_arch.deb' + @echo ' telegraf-$(rpm_version).arch.rpm' + @echo ' telegraf-$(tar_version)_arch.tar.gz' + @echo ' telegraf-$(tar_version)_arch.zip' + .PHONY: deps deps: - go mod download + go mod download -x -.PHONY: telegraf -telegraf: +.PHONY: version +version: + @echo $(version)-$(commit) + +.PHONY: versioninfo +versioninfo: + go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.4.0; \ + go run scripts/generate_versioninfo/main.go; \ + go generate cmd/telegraf/telegraf_windows.go; \ + +build_tools: + $(HOSTGO) build -o ./tools/readme_config_includer/generator ./tools/readme_config_includer/generator.go + +embed_readme_%: + go generate -run="readme_config_includer/generator$$" ./plugins/$*/... + +.PHONY: docs +docs: build_tools embed_readme_inputs embed_readme_outputs embed_readme_processors embed_readme_aggregators + +.PHONY: build +build: go build -ldflags "$(LDFLAGS)" ./cmd/telegraf +.PHONY: telegraf +telegraf: build + # Used by dockerfile builds .PHONY: go-install go-install: @@ -98,6 +139,10 @@ go-install: test: go test -short $(race_detector) ./... +.PHONY: test-integration +test-integration: + go test -run Integration $(race_detector) ./... + .PHONY: fmt fmt: @gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) @@ -112,15 +157,6 @@ fmtcheck: exit 1 ;\ fi -.PHONY: test-windows -test-windows: - go test -short $(race_detector) ./plugins/inputs/ping/... - go test -short $(race_detector) ./plugins/inputs/win_perf_counters/... - go test -short $(race_detector) ./plugins/inputs/win_services/... - go test -short $(race_detector) ./plugins/inputs/procstat/... - go test -short $(race_detector) ./plugins/inputs/ntpq/... - go test -short $(race_detector) ./plugins/processors/port_name/... - .PHONY: vet vet: @echo 'go vet $$(go list ./... 
| grep -v ./plugins/parsers/influx)' @@ -131,18 +167,48 @@ vet: exit 1; \ fi +.PHONY: lint-install +lint-install: + @echo "Installing golangci-lint" + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.46.2 + + @echo "Installing markdownlint" + npm install -g markdownlint-cli + +.PHONY: lint +lint: + @which golangci-lint >/dev/null 2>&1 || { \ + echo "golangci-lint not found, please run: make lint-install"; \ + exit 1; \ + } + golangci-lint run + + @which markdownlint >/dev/null 2>&1 || { \ + echo "markdownlint not found, please run: make lint-install"; \ + exit 1; \ + } + markdownlint . + +.PHONY: lint-branch +lint-branch: + @which golangci-lint >/dev/null 2>&1 || { \ + echo "golangci-lint not found, please run: make lint-install"; \ + exit 1; \ + } + + golangci-lint run --new-from-rev master + .PHONY: tidy tidy: go mod verify go mod tidy @if ! git diff --quiet go.mod go.sum; then \ - echo "please run go mod tidy and check in changes"; \ + echo "please run go mod tidy and check in changes, you might have to use the same version of Go as the CI"; \ exit 1; \ fi .PHONY: check check: fmtcheck vet - @$(MAKE) --no-print-directory tidy .PHONY: test-all test-all: fmtcheck vet @@ -157,6 +223,10 @@ clean: rm -f telegraf rm -f telegraf.exe rm -rf build + rm -rf tools/readme_config_includer/generator + rm -rf tools/readme_config_includer/generator.exe + rm -rf tools/package_lxd_test/package_lxd_test + rm -rf tools/package_lxd_test/package_lxd_test.exe .PHONY: docker-image docker-image: @@ -165,20 +235,10 @@ docker-image: plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl ragel -Z -G2 $^ -o $@ -.PHONY: plugin-% -plugin-%: - @echo "Starting dev environment for $${$(@)} input plugin..." - @docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up - -.PHONY: ci-1.15 -ci-1.15: - docker build -t quay.io/influxdb/telegraf-ci:1.15.2 - < scripts/ci-1.15.docker - docker push quay.io/influxdb/telegraf-ci:1.15.2 - -.PHONY: ci-1.14 -ci-1.14: - docker build -t quay.io/influxdb/telegraf-ci:1.14.9 - < scripts/ci-1.14.docker - docker push quay.io/influxdb/telegraf-ci:1.14.9 +.PHONY: ci +ci: + docker build -t quay.io/influxdb/telegraf-ci:1.18.3 - < scripts/ci.docker + docker push quay.io/influxdb/telegraf-ci:1.18.3 .PHONY: install install: $(buildbin) @@ -201,190 +261,196 @@ install: $(buildbin) # the bin between deb/rpm/tar packages over building directly into the package # directory. 
$(buildbin): + echo $(GOOS) @mkdir -pv $(dir $@) go build -o $(dir $@) -ldflags "$(LDFLAGS)" ./cmd/telegraf -debs := telegraf_$(deb_version)_amd64.deb -debs += telegraf_$(deb_version)_arm64.deb -debs += telegraf_$(deb_version)_armel.deb -debs += telegraf_$(deb_version)_armhf.deb -debs += telegraf_$(deb_version)_i386.deb -debs += telegraf_$(deb_version)_mips.deb -debs += telegraf_$(deb_version)_mipsel.deb -debs += telegraf_$(deb_version)_s390x.deb - -rpms += telegraf-$(rpm_version).aarch64.rpm -rpms += telegraf-$(rpm_version).armel.rpm -rpms += telegraf-$(rpm_version).armv6hl.rpm -rpms += telegraf-$(rpm_version).i386.rpm -rpms += telegraf-$(rpm_version).s390x.rpm -rpms += telegraf-$(rpm_version).x86_64.rpm - -tars += telegraf-$(tar_version)_darwin_amd64.tar.gz -tars += telegraf-$(tar_version)_freebsd_amd64.tar.gz -tars += telegraf-$(tar_version)_freebsd_i386.tar.gz -tars += telegraf-$(tar_version)_linux_amd64.tar.gz -tars += telegraf-$(tar_version)_linux_arm64.tar.gz -tars += telegraf-$(tar_version)_linux_armel.tar.gz -tars += telegraf-$(tar_version)_linux_armhf.tar.gz -tars += telegraf-$(tar_version)_linux_i386.tar.gz -tars += telegraf-$(tar_version)_linux_mips.tar.gz -tars += telegraf-$(tar_version)_linux_mipsel.tar.gz -tars += telegraf-$(tar_version)_linux_s390x.tar.gz -tars += telegraf-$(tar_version)_static_linux_amd64.tar.gz - -zips += telegraf-$(tar_version)_windows_amd64.zip -zips += telegraf-$(tar_version)_windows_i386.zip - -dists := $(debs) $(rpms) $(tars) $(zips) +# Define packages Telegraf supports, organized by architecture with a rule to echo the list to limit include_packages +# e.g. make package include_packages="$(make amd64)" +mips += linux_mips.tar.gz mips.deb +.PHONY: mips +mips: + @ echo $(mips) +mipsel += mipsel.deb linux_mipsel.tar.gz +.PHONY: mipsel +mipsel: + @ echo $(mipsel) +arm64 += linux_arm64.tar.gz arm64.deb aarch64.rpm +.PHONY: arm64 +arm64: + @ echo $(arm64) +amd64 += freebsd_amd64.tar.gz linux_amd64.tar.gz amd64.deb x86_64.rpm +.PHONY: amd64 +amd64: + @ echo $(amd64) +static += static_linux_amd64.tar.gz +.PHONY: static +static: + @ echo $(static) +armel += linux_armel.tar.gz armel.rpm armel.deb +.PHONY: armel +armel: + @ echo $(armel) +armhf += linux_armhf.tar.gz freebsd_armv7.tar.gz armhf.deb armv6hl.rpm +.PHONY: armhf +armhf: + @ echo $(armhf) +s390x += linux_s390x.tar.gz s390x.deb s390x.rpm +.PHONY: riscv64 +riscv64: + @ echo $(riscv64) +riscv64 += linux_riscv64.tar.gz riscv64.rpm riscv64.deb +.PHONY: s390x +s390x: + @ echo $(s390x) +ppc64le += linux_ppc64le.tar.gz ppc64le.rpm ppc64el.deb +.PHONY: ppc64le +ppc64le: + @ echo $(ppc64le) +i386 += freebsd_i386.tar.gz i386.deb linux_i386.tar.gz i386.rpm +.PHONY: i386 +i386: + @ echo $(i386) +windows += windows_i386.zip windows_amd64.zip +.PHONY: windows +windows: + @ echo $(windows) +darwin-amd64 += darwin_amd64.tar.gz +.PHONY: darwin-amd64 +darwin-amd64: + @ echo $(darwin-amd64) + +darwin-arm64 += darwin_arm64.tar.gz +.PHONY: darwin-arm64 +darwin-arm64: + @ echo $(darwin-arm64) + +include_packages := $(mips) $(mipsel) $(arm64) $(amd64) $(static) $(armel) $(armhf) $(riscv64) $(s390x) $(ppc64le) $(i386) $(windows) $(darwin-amd64) $(darwin-arm64) .PHONY: package -package: $(dists) - -rpm_amd64 := amd64 -rpm_386 := i386 -rpm_s390x := s390x -rpm_arm5 := armel -rpm_arm6 := armv6hl -rpm_arm647 := aarch64 -rpm_arch = $(rpm_$(GOARCH)$(GOARM)) - -.PHONY: $(rpms) -$(rpms): - @$(MAKE) install - @mkdir -p $(pkgdir) - fpm --force \ - --log info \ - --architecture $(rpm_arch) \ - --input-type dir \ - --output-type rpm \ - 
--vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/rpm/post-install.sh \ - --before-install scripts/rpm/pre-install.sh \ - --after-remove scripts/rpm/post-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --depends coreutils \ - --depends shadow-utils \ - --rpm-posttrans scripts/rpm/post-install.sh \ - --name telegraf \ - --version $(version) \ - --iteration $(rpm_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -deb_amd64 := amd64 -deb_386 := i386 -deb_s390x := s390x -deb_arm5 := armel -deb_arm6 := armhf -deb_arm647 := arm64 -deb_mips := mips -deb_mipsle := mipsel -deb_arch = $(deb_$(GOARCH)$(GOARM)) - -.PHONY: $(debs) -$(debs): - @$(MAKE) install - @mkdir -pv $(pkgdir) - fpm --force \ - --log info \ - --architecture $(deb_arch) \ - --input-type dir \ - --output-type deb \ - --vendor InfluxData \ - --url https://github.com/influxdata/telegraf \ - --license MIT \ - --maintainer support@influxdb.com \ - --config-files /etc/telegraf/telegraf.conf.sample \ - --config-files /etc/logrotate.d/telegraf \ - --after-install scripts/deb/post-install.sh \ - --before-install scripts/deb/pre-install.sh \ - --after-remove scripts/deb/post-remove.sh \ - --before-remove scripts/deb/pre-remove.sh \ - --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ - --name telegraf \ - --version $(version) \ - --iteration $(deb_iteration) \ - --chdir $(DESTDIR) \ - --package $(pkgdir)/$@ - -.PHONY: $(zips) -$(zips): - @$(MAKE) install - @mkdir -p $(pkgdir) - (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/$@ +package: docs $(include_packages) -.PHONY: $(tars) -$(tars): +.PHONY: $(include_packages) +$(include_packages): @$(MAKE) install @mkdir -p $(pkgdir) - tar --owner 0 --group 0 -czvf $(pkgdir)/$@ -C $(dir $(DESTDIR)) . -.PHONY: upload-nightly -upload-nightly: - aws s3 sync $(pkgdir) s3://dl.influxdata.com/telegraf/nightlies/ \ - --exclude "*" \ - --include "*.tar.gz" \ - --include "*.deb" \ - --include "*.rpm" \ - --include "*.zip" \ - --acl public-read + @if [ "$(suffix $@)" = ".rpm" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type rpm \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/rpm/post-install.sh \ + --before-install scripts/rpm/pre-install.sh \ + --after-remove scripts/rpm/post-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." 
\ + --depends coreutils \ + --depends shadow-utils \ + --rpm-digest sha256 \ + --rpm-posttrans scripts/rpm/post-install.sh \ + --name telegraf \ + --version $(version) \ + --iteration $(rpm_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf-$(rpm_version).$@ ;\ + elif [ "$(suffix $@)" = ".deb" ]; then \ + fpm --force \ + --log info \ + --architecture $(basename $@) \ + --input-type dir \ + --output-type deb \ + --vendor InfluxData \ + --url https://github.com/influxdata/telegraf \ + --license MIT \ + --maintainer support@influxdb.com \ + --config-files /etc/telegraf/telegraf.conf.sample \ + --config-files /etc/logrotate.d/telegraf \ + --after-install scripts/deb/post-install.sh \ + --before-install scripts/deb/pre-install.sh \ + --after-remove scripts/deb/post-remove.sh \ + --before-remove scripts/deb/pre-remove.sh \ + --description "Plugin-driven server agent for reporting metrics into InfluxDB." \ + --name telegraf \ + --version $(version) \ + --iteration $(deb_iteration) \ + --chdir $(DESTDIR) \ + --package $(pkgdir)/telegraf_$(deb_version)_$@ ;\ + elif [ "$(suffix $@)" = ".zip" ]; then \ + (cd $(dir $(DESTDIR)) && zip -r - ./*) > $(pkgdir)/telegraf-$(tar_version)_$@ ;\ + elif [ "$(suffix $@)" = ".gz" ]; then \ + tar --owner 0 --group 0 -czvf $(pkgdir)/telegraf-$(tar_version)_$@ -C $(dir $(DESTDIR)) . ;\ + fi + +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOOS := linux +amd64.deb x86_64.rpm linux_amd64.tar.gz: export GOARCH := amd64 + +static_linux_amd64.tar.gz: export cgo := -nocgo +static_linux_amd64.tar.gz: export CGO_ENABLED := 0 + +i386.deb i386.rpm linux_i386.tar.gz: export GOOS := linux +i386.deb i386.rpm linux_i386.tar.gz: export GOARCH := 386 + +armel.deb armel.rpm linux_armel.tar.gz: export GOOS := linux +armel.deb armel.rpm linux_armel.tar.gz: export GOARCH := arm +armel.deb armel.rpm linux_armel.tar.gz: export GOARM := 5 -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOOS := linux -%amd64.deb %x86_64.rpm %linux_amd64.tar.gz: export GOARCH := amd64 +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOOS := linux +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARCH := arm +armhf.deb armv6hl.rpm linux_armhf.tar.gz: export GOARM := 6 -%static_linux_amd64.tar.gz: export cgo := -nocgo -%static_linux_amd64.tar.gz: export CGO_ENABLED := 0 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOOS := linux +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARCH := arm64 +arm64.deb aarch64.rpm linux_arm64.tar.gz: export GOARM := 7 -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOOS := linux -%i386.deb %i386.rpm %linux_i386.tar.gz: export GOARCH := 386 +mips.deb linux_mips.tar.gz: export GOOS := linux +mips.deb linux_mips.tar.gz: export GOARCH := mips -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOOS := linux -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARCH := arm -%armel.deb %armel.rpm %linux_armel.tar.gz: export GOARM := 5 +mipsel.deb linux_mipsel.tar.gz: export GOOS := linux +mipsel.deb linux_mipsel.tar.gz: export GOARCH := mipsle -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOOS := linux -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARCH := arm -%armhf.deb %armv6hl.rpm %linux_armhf.tar.gz: export GOARM := 6 +riscv64.deb riscv64.rpm linux_riscv64.tar.gz: export GOOS := linux +riscv64.deb riscv64.rpm linux_riscv64.tar.gz: export GOARCH := riscv64 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOOS := linux -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: export GOARCH := arm64 -%arm64.deb %aarch64.rpm %linux_arm64.tar.gz: 
export GOARM := 7 +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOOS := linux +s390x.deb s390x.rpm linux_s390x.tar.gz: export GOARCH := s390x -%mips.deb %linux_mips.tar.gz: export GOOS := linux -%mips.deb %linux_mips.tar.gz: export GOARCH := mips +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOOS := linux +ppc64el.deb ppc64le.rpm linux_ppc64le.tar.gz: export GOARCH := ppc64le -%mipsel.deb %linux_mipsel.tar.gz: export GOOS := linux -%mipsel.deb %linux_mipsel.tar.gz: export GOARCH := mipsle +freebsd_amd64.tar.gz: export GOOS := freebsd +freebsd_amd64.tar.gz: export GOARCH := amd64 -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOOS := linux -%s390x.deb %s390x.rpm %linux_s390x.tar.gz: export GOARCH := s390x +freebsd_i386.tar.gz: export GOOS := freebsd +freebsd_i386.tar.gz: export GOARCH := 386 -%freebsd_amd64.tar.gz: export GOOS := freebsd -%freebsd_amd64.tar.gz: export GOARCH := amd64 +freebsd_armv7.tar.gz: export GOOS := freebsd +freebsd_armv7.tar.gz: export GOARCH := arm +freebsd_armv7.tar.gz: export GOARM := 7 -%freebsd_i386.tar.gz: export GOOS := freebsd -%freebsd_i386.tar.gz: export GOARCH := 386 +windows_amd64.zip: export GOOS := windows +windows_amd64.zip: export GOARCH := amd64 -%windows_amd64.zip: export GOOS := windows -%windows_amd64.zip: export GOARCH := amd64 +darwin_amd64.tar.gz: export GOOS := darwin +darwin_amd64.tar.gz: export GOARCH := amd64 -%darwin_amd64.tar.gz: export GOOS := darwin -%darwin_amd64.tar.gz: export GOARCH := amd64 +darwin_arm64.tar.gz: export GOOS := darwin +darwin_arm64.tar.gz: export GOARCH := arm64 -%windows_i386.zip: export GOOS := windows -%windows_i386.zip: export GOARCH := 386 +windows_i386.zip: export GOOS := windows +windows_i386.zip: export GOARCH := 386 -%windows_i386.zip %windows_amd64.zip: export prefix = -%windows_i386.zip %windows_amd64.zip: export bindir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export sysconfdir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export localstatedir = $(prefix) -%windows_i386.zip %windows_amd64.zip: export EXEEXT := .exe +windows_i386.zip windows_amd64.zip: export prefix = +windows_i386.zip windows_amd64.zip: export bindir = $(prefix) +windows_i386.zip windows_amd64.zip: export sysconfdir = $(prefix) +windows_i386.zip windows_amd64.zip: export localstatedir = $(prefix) +windows_i386.zip windows_amd64.zip: export EXEEXT := .exe %.deb: export pkg := deb %.deb: export prefix := /usr diff --git a/README.md b/README.md index ca969132d5eb2..4344ba7727dba 100644 --- a/README.md +++ b/README.md @@ -1,39 +1,27 @@ -# Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) -[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack) -Telegraf is an agent for collecting, processing, aggregating, and writing metrics. +# Telegraf -Design goals are to have a minimal memory footprint with a plugin system so -that developers in the community can easily add support for collecting -metrics. 
+![tiger](assets/TelegrafTiger.png "tiger")
 
-Telegraf is plugin-driven and has the concept of 4 distinct plugin types:
+[![Contribute](https://img.shields.io/badge/Contribute%20To%20Telegraf-orange.svg?logo=influx&style=for-the-badge)](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md) [![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=for-the-badge)](https://www.influxdata.com/slack) [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/)
 
-1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs
-2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics
-3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
-4. [Output Plugins](#output-plugins) write metrics to various destinations
+Telegraf is an agent for collecting, processing, aggregating, and writing metrics. It is based on
+a plugin system that enables developers in the community to easily add support for additional
+metric collection. There are four distinct types of plugins:
 
-New plugins are designed to be easy to contribute, pull requests are welcomed
-and we work to incorporate as many pull requests as possible.
+1. [Input Plugins](/docs/INPUTS.md) collect metrics from the system, services, or 3rd party APIs
+2. [Processor Plugins](/docs/PROCESSORS.md) transform, decorate, and/or filter metrics
+3. [Aggregator Plugins](/docs/AGGREGATORS.md) create aggregate metrics (e.g. mean, min, max, quantiles, etc.)
+4. [Output Plugins](/docs/OUTPUTS.md) write metrics to various destinations
 
-## Try in Browser :rocket:
-
-You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/).
-
-## Contributing
-
-There are many ways to contribute:
-- Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new)
-- [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation)
-- [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls)
-- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/)
-- [Contribute plugins](CONTRIBUTING.md)
-- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)*
+New plugins are designed to be easy to contribute; pull requests are welcomed, and we work to
+incorporate as many pull requests as possible. Consider looking at the
+[list of external plugins](EXTERNAL_PLUGINS.md) as well.
 
 ## Minimum Requirements
 
 Telegraf shares the same [minimum requirements][] as Go:
+
 - Linux kernel version 2.6.23 or later
 - Windows 7 or later
 - FreeBSD 11.2 or later
@@ -41,410 +29,130 @@ Telegraf shares the same [minimum requirements][] as Go:
 
 [minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements
 
-## Installation:
+## Obtaining Telegraf
+
+View the [changelog](/CHANGELOG.md) for the latest updates and changes by version.
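Referring back to the four plugin types listed above, here is a hedged sketch of the shape an input plugin takes in Go. The `example` package, measurement name, and field/tag values are hypothetical, and the real `telegraf.Input` interface carries a few more methods than shown:

```go
// Hypothetical "example" input plugin: an illustrative sketch only, not
// shipped Telegraf code. The method set is abbreviated; see /docs/INPUTS.md.
package example

import "github.com/influxdata/telegraf"

type Example struct{}

// SampleConfig returns the plugin's sample TOML configuration snippet.
func (*Example) SampleConfig() string {
	return "# example input, no options"
}

// Gather runs once per collection interval and hands metrics to the accumulator.
func (e *Example) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("example",
		map[string]interface{}{"value": 42}, // fields
		map[string]string{"source": "demo"}, // tags
	)
	return nil
}
```

Processor, aggregator, and output plugins follow the same pattern against their own interfaces (see the documentation links above).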
+
+### Binary Downloads
+
+Binary downloads are available from the [InfluxData downloads](https://www.influxdata.com/downloads)
+page or from each [GitHub Releases](https://github.com/influxdata/telegraf/releases) page.
+
+### Package Repository
 
-You can download the binaries directly from the [downloads](https://www.influxdata.com/downloads) page
-or from the [releases](https://github.com/influxdata/telegraf/releases) section.
+InfluxData also provides a package repo that contains both DEB and RPM downloads.
 
-### Ansible Role:
+For deb-based platforms (e.g. Ubuntu and Debian) run the following to add the
+repo key and set up a new sources.list entry:
 
-Ansible role: https://github.com/rossmcdonald/telegraf
+```shell
+# influxdb.key GPG Fingerprint: 05CE15085FC09D18E99EFB22684A14CF2582E0C5
+wget -q https://repos.influxdata.com/influxdb.key
+echo '23a1c8836f0afc5ed24e0486339d7cc8f6790b83886c4c96995b88a061c5bb5d influxdb.key' | sha256sum -c && cat influxdb.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdb.gpg > /dev/null
+echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdb.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list
+sudo apt-get update && sudo apt-get install telegraf
+```
+
+For RPM-based platforms (e.g. RHEL, CentOS) use the following to create a repo
+file and install telegraf:
+
+```shell
+# influxdb.key GPG Fingerprint: 05CE15085FC09D18E99EFB22684A14CF2582E0C5
+cat <<EOF | sudo tee /etc/yum.repos.d/influxdata.repo
+[influxdata]
+name = InfluxData Repository - Stable
+baseurl = https://repos.influxdata.com/stable/\$basearch/main
+enabled = 1
+gpgcheck = 1
+gpgkey = https://repos.influxdata.com/influxdb.key
+EOF
+sudo yum install telegraf
+```
+
 ### From Source
 
-1. [Install Go](https://golang.org/doc/install) >=1.13 (1.15 recommended)
+1. [Install Go](https://golang.org/doc/install) >=1.18 (1.18.0 recommended)
 2. Clone the Telegraf repository:
-   ```
-   cd ~/src
+
+   ```shell
    git clone https://github.com/influxdata/telegraf.git
    ```
+
 3. Run `make` from the source directory
-   ```
-   cd ~/src/telegraf
+
+   ```shell
+   cd telegraf
    make
    ```
 
-### Changelog
+### Nightly Builds
 
-View the [changelog](/CHANGELOG.md) for the latest updates and changes by
-version.
+[Nightly](/docs/NIGHTLIES.md) builds are available, generated from the master branch.
 
-### Nightly Builds
+### 3rd Party Builds
+
+Builds for other platforms or package formats are provided by members of the Telegraf community.
+These packages are not built, tested, or supported by the Telegraf project or InfluxData.
Please +get in touch with the package author if support is needed: + +- [Ansible Role](https://github.com/rossmcdonald/telegraf) +- [Chocolatey](https://chocolatey.org/packages/telegraf) by [ripclawffb](https://chocolatey.org/profiles/ripclawffb) +- [Scoop](https://github.com/ScoopInstaller/Main/blob/master/bucket/telegraf.json) +- [Snap](https://snapcraft.io/telegraf) by Laurent Sesquès (sajoupa) -These builds are generated from the master branch: -- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) -- [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) -- [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) -- [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) -- [telegraf_nightly_armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) -- [telegraf-nightly.armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) -- [telegraf_nightly_armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) -- [telegraf-nightly.armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) -- [telegraf-nightly_freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) -- [telegraf-nightly_freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) -- [telegraf_nightly_i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) -- [telegraf-nightly.i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) -- [telegraf-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) -- [telegraf-nightly_linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) -- [telegraf-nightly_linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) -- [telegraf-nightly_linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) -- [telegraf-nightly_linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) -- [telegraf-nightly_linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) -- [telegraf_nightly_s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) -- [telegraf-nightly.s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) -- [telegraf-nightly_windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) -- [telegraf-nightly_windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) -- [telegraf-nightly.x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) -- [telegraf-static-nightly_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-static-nightly_linux_amd64.tar.gz) - -## How to use it: +## Getting Started See usage with: -``` +```shell telegraf --help ``` -#### Generate a telegraf config file: +### Generate a telegraf config file -``` +```shell telegraf config > telegraf.conf ``` -#### Generate config with only cpu input & influxdb output plugins defined: +### Generate config with only cpu input & influxdb 
output plugins defined
 
-```
-telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config
+```shell
+telegraf config --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb
 ```
 
-#### Run a single telegraf collection, outputting metrics to stdout:
+### Run a single telegraf collection, outputting metrics to stdout
 
-```
+```shell
 telegraf --config telegraf.conf --test
 ```
 
-#### Run telegraf with all plugins defined in config file:
+### Run telegraf with all plugins defined in config file
 
-```
+```shell
 telegraf --config telegraf.conf
 ```
 
-#### Run telegraf, enabling the cpu & memory input, and influxdb output plugins:
+### Run telegraf, enabling the cpu & memory input, and influxdb output plugins
 
-```
+```shell
 telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb
 ```
 
+## Contribute to the Project
+
+Telegraf is an MIT licensed open source project and we love our community. The fastest way to get something fixed is to open a PR. Check out our [contributing guide](CONTRIBUTING.md) if you're interested in helping out. Also, join us on our [Community Slack](https://influxdata.com/slack) or [Community Page](https://community.influxdata.com/) if you have questions or comments for our engineering teams.
+
+If you're completely new to Telegraf and InfluxDB, you can also enroll for free at [InfluxDB University](https://www.influxdata.com/university/) to take courses and learn more.
+
 ## Documentation
 
-[Latest Release Documentation][release docs].
-
-For documentation on the latest development code see the [documentation index][devel docs].
-
-[release docs]: https://docs.influxdata.com/telegraf
-[devel docs]: docs
-
-## Input Plugins
-
-* [activemq](./plugins/inputs/activemq)
-* [aerospike](./plugins/inputs/aerospike)
-* [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq)
-* [apache](./plugins/inputs/apache)
-* [apcupsd](./plugins/inputs/apcupsd)
-* [aurora](./plugins/inputs/aurora)
-* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch)
-* [azure_storage_queue](./plugins/inputs/azure_storage_queue)
-* [bcache](./plugins/inputs/bcache)
-* [beanstalkd](./plugins/inputs/beanstalkd)
-* [bind](./plugins/inputs/bind)
-* [bond](./plugins/inputs/bond)
-* [burrow](./plugins/inputs/burrow)
-* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2))
-* [ceph](./plugins/inputs/ceph)
-* [cgroup](./plugins/inputs/cgroup)
-* [chrony](./plugins/inputs/chrony)
-* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) (deprecated, renamed to [gnmi](/plugins/inputs/gnmi))
-* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt)
-* [clickhouse](./plugins/inputs/clickhouse)
-* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub
-* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint
-* [conntrack](./plugins/inputs/conntrack)
-* [consul](./plugins/inputs/consul)
-* [couchbase](./plugins/inputs/couchbase)
-* [couchdb](./plugins/inputs/couchdb)
-* [cpu](./plugins/inputs/cpu)
-* [DC/OS](./plugins/inputs/dcos)
-* [diskio](./plugins/inputs/diskio)
-* [disk](./plugins/inputs/disk)
-* [disque](./plugins/inputs/disque)
-* [dmcache](./plugins/inputs/dmcache)
-* [dns query time](./plugins/inputs/dns_query)
-* [docker](./plugins/inputs/docker)
-* [docker_log](./plugins/inputs/docker_log)
-* [dovecot](./plugins/inputs/dovecot)
-* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate)
-* 
[elasticsearch](./plugins/inputs/elasticsearch) -* [ethtool](./plugins/inputs/ethtool) -* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub) -* [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) -* [execd](./plugins/inputs/execd) (generic executable "daemon" processes) -* [fail2ban](./plugins/inputs/fail2ban) -* [fibaro](./plugins/inputs/fibaro) -* [file](./plugins/inputs/file) -* [filestat](./plugins/inputs/filestat) -* [filecount](./plugins/inputs/filecount) -* [fireboard](/plugins/inputs/fireboard) -* [fluentd](./plugins/inputs/fluentd) -* [github](./plugins/inputs/github) -* [gnmi](./plugins/inputs/gnmi) -* [graylog](./plugins/inputs/graylog) -* [haproxy](./plugins/inputs/haproxy) -* [hddtemp](./plugins/inputs/hddtemp) -* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener)) -* [http_listener_v2](./plugins/inputs/http_listener_v2) -* [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) -* [http_response](./plugins/inputs/http_response) -* [icinga2](./plugins/inputs/icinga2) -* [infiniband](./plugins/inputs/infiniband) -* [influxdb](./plugins/inputs/influxdb) -* [influxdb_listener](./plugins/inputs/influxdb_listener) -* [influxdb_v2_listener](./plugins/inputs/influxdb_v2_listener) -* [intel_rdt](./plugins/inputs/intel_rdt) -* [internal](./plugins/inputs/internal) -* [interrupts](./plugins/inputs/interrupts) -* [ipmi_sensor](./plugins/inputs/ipmi_sensor) -* [ipset](./plugins/inputs/ipset) -* [iptables](./plugins/inputs/iptables) -* [ipvs](./plugins/inputs/ipvs) -* [jenkins](./plugins/inputs/jenkins) -* [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) -* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) -* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) -* [kafka_consumer](./plugins/inputs/kafka_consumer) -* [kapacitor](./plugins/inputs/kapacitor) -* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis) -* [kernel](./plugins/inputs/kernel) -* [kernel_vmstat](./plugins/inputs/kernel_vmstat) -* [kibana](./plugins/inputs/kibana) -* [kubernetes](./plugins/inputs/kubernetes) -* [kube_inventory](./plugins/inputs/kube_inventory) -* [lanz](./plugins/inputs/lanz) -* [leofs](./plugins/inputs/leofs) -* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) -* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail)) -* [logstash](./plugins/inputs/logstash) -* [lustre2](./plugins/inputs/lustre2) -* [mailchimp](./plugins/inputs/mailchimp) -* [marklogic](./plugins/inputs/marklogic) -* [mcrouter](./plugins/inputs/mcrouter) -* [memcached](./plugins/inputs/memcached) -* [mem](./plugins/inputs/mem) -* [mesos](./plugins/inputs/mesos) -* [minecraft](./plugins/inputs/minecraft) -* [modbus](./plugins/inputs/modbus) -* [mongodb](./plugins/inputs/mongodb) -* [monit](./plugins/inputs/monit) -* [mqtt_consumer](./plugins/inputs/mqtt_consumer) -* [multifile](./plugins/inputs/multifile) -* [mysql](./plugins/inputs/mysql) -* [nats_consumer](./plugins/inputs/nats_consumer) -* [nats](./plugins/inputs/nats) -* [neptune_apex](./plugins/inputs/neptune_apex) -* [net](./plugins/inputs/net) -* [net_response](./plugins/inputs/net_response) -* [netstat](./plugins/inputs/net) -* [nginx](./plugins/inputs/nginx) -* 
[nginx_plus_api](./plugins/inputs/nginx_plus_api) -* [nginx_plus](./plugins/inputs/nginx_plus) -* [nginx_sts](./plugins/inputs/nginx_sts) -* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) -* [nginx_vts](./plugins/inputs/nginx_vts) -* [nsd](./plugins/inputs/nsd) -* [nsq_consumer](./plugins/inputs/nsq_consumer) -* [nsq](./plugins/inputs/nsq) -* [nstat](./plugins/inputs/nstat) -* [ntpq](./plugins/inputs/ntpq) -* [nvidia_smi](./plugins/inputs/nvidia_smi) -* [opcua](./plugins/inputs/opcua) -* [openldap](./plugins/inputs/openldap) -* [openntpd](./plugins/inputs/openntpd) -* [opensmtpd](./plugins/inputs/opensmtpd) -* [openweathermap](./plugins/inputs/openweathermap) -* [pf](./plugins/inputs/pf) -* [pgbouncer](./plugins/inputs/pgbouncer) -* [phpfpm](./plugins/inputs/phpfpm) -* [phusion passenger](./plugins/inputs/passenger) -* [ping](./plugins/inputs/ping) -* [postfix](./plugins/inputs/postfix) -* [postgresql_extensible](./plugins/inputs/postgresql_extensible) -* [postgresql](./plugins/inputs/postgresql) -* [powerdns](./plugins/inputs/powerdns) -* [powerdns_recursor](./plugins/inputs/powerdns_recursor) -* [processes](./plugins/inputs/processes) -* [procstat](./plugins/inputs/procstat) -* [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server)) -* [proxmox](./plugins/inputs/proxmox) -* [puppetagent](./plugins/inputs/puppetagent) -* [rabbitmq](./plugins/inputs/rabbitmq) -* [raindrops](./plugins/inputs/raindrops) -* [ras](./plugins/inputs/ras) -* [redfish](./plugins/inputs/redfish) -* [redis](./plugins/inputs/redis) -* [rethinkdb](./plugins/inputs/rethinkdb) -* [riak](./plugins/inputs/riak) -* [salesforce](./plugins/inputs/salesforce) -* [sensors](./plugins/inputs/sensors) -* [sflow](./plugins/inputs/sflow) -* [smart](./plugins/inputs/smart) -* [snmp_legacy](./plugins/inputs/snmp_legacy) -* [snmp](./plugins/inputs/snmp) -* [snmp_trap](./plugins/inputs/snmp_trap) -* [socket_listener](./plugins/inputs/socket_listener) -* [solr](./plugins/inputs/solr) -* [sql server](./plugins/inputs/sqlserver) (microsoft) -* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) -* [statsd](./plugins/inputs/statsd) -* [suricata](./plugins/inputs/suricata) -* [swap](./plugins/inputs/swap) -* [synproxy](./plugins/inputs/synproxy) -* [syslog](./plugins/inputs/syslog) -* [sysstat](./plugins/inputs/sysstat) -* [systemd_units](./plugins/inputs/systemd_units) -* [system](./plugins/inputs/system) -* [tail](./plugins/inputs/tail) -* [temp](./plugins/inputs/temp) -* [tcp_listener](./plugins/inputs/socket_listener) -* [teamspeak](./plugins/inputs/teamspeak) -* [tengine](./plugins/inputs/tengine) -* [tomcat](./plugins/inputs/tomcat) -* [twemproxy](./plugins/inputs/twemproxy) -* [udp_listener](./plugins/inputs/socket_listener) -* [unbound](./plugins/inputs/unbound) -* [uwsgi](./plugins/inputs/uwsgi) -* [varnish](./plugins/inputs/varnish) -* [vsphere](./plugins/inputs/vsphere) VMware vSphere -* [webhooks](./plugins/inputs/webhooks) - * [filestack](./plugins/inputs/webhooks/filestack) - * [github](./plugins/inputs/webhooks/github) - * [mandrill](./plugins/inputs/webhooks/mandrill) - * [papertrail](./plugins/inputs/webhooks/papertrail) - * [particle](./plugins/inputs/webhooks/particle) - * [rollbar](./plugins/inputs/webhooks/rollbar) -* [win_eventlog](./plugins/inputs/win_eventlog) -* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) -* [win_services](./plugins/inputs/win_services) -* 
[wireguard](./plugins/inputs/wireguard) -* [wireless](./plugins/inputs/wireless) -* [x509_cert](./plugins/inputs/x509_cert) -* [zfs](./plugins/inputs/zfs) -* [zipkin](./plugins/inputs/zipkin) -* [zookeeper](./plugins/inputs/zookeeper) - -## Parsers - -- [InfluxDB Line Protocol](/plugins/parsers/influx) -- [Collectd](/plugins/parsers/collectd) -- [CSV](/plugins/parsers/csv) -- [Dropwizard](/plugins/parsers/dropwizard) -- [FormUrlencoded](/plugins/parser/form_urlencoded) -- [Graphite](/plugins/parsers/graphite) -- [Grok](/plugins/parsers/grok) -- [JSON](/plugins/parsers/json) -- [Logfmt](/plugins/parsers/logfmt) -- [Nagios](/plugins/parsers/nagios) -- [Value](/plugins/parsers/value), ie: 45 or "booyah" -- [Wavefront](/plugins/parsers/wavefront) - -## Serializers - -- [InfluxDB Line Protocol](/plugins/serializers/influx) -- [JSON](/plugins/serializers/json) -- [Graphite](/plugins/serializers/graphite) -- [ServiceNow](/plugins/serializers/nowmetric) -- [SplunkMetric](/plugins/serializers/splunkmetric) -- [Carbon2](/plugins/serializers/carbon2) -- [Wavefront](/plugins/serializers/wavefront) - -## Processor Plugins - -* [clone](/plugins/processors/clone) -* [converter](/plugins/processors/converter) -* [date](/plugins/processors/date) -* [dedup](/plugins/processors/dedup) -* [defaults](/plugins/processors/defaults) -* [enum](/plugins/processors/enum) -* [execd](/plugins/processors/execd) -* [ifname](/plugins/processors/ifname) -* [filepath](/plugins/processors/filepath) -* [override](/plugins/processors/override) -* [parser](/plugins/processors/parser) -* [pivot](/plugins/processors/pivot) -* [port_name](/plugins/processors/port_name) -* [printer](/plugins/processors/printer) -* [regex](/plugins/processors/regex) -* [rename](/plugins/processors/rename) -* [reverse_dns](/plugins/processors/reverse_dns) -* [s2geo](/plugins/processors/s2geo) -* [starlark](/plugins/processors/starlark) -* [strings](/plugins/processors/strings) -* [tag_limit](/plugins/processors/tag_limit) -* [template](/plugins/processors/template) -* [topk](/plugins/processors/topk) -* [unpivot](/plugins/processors/unpivot) - -## Aggregator Plugins - -* [basicstats](./plugins/aggregators/basicstats) -* [final](./plugins/aggregators/final) -* [histogram](./plugins/aggregators/histogram) -* [merge](./plugins/aggregators/merge) -* [minmax](./plugins/aggregators/minmax) -* [valuecounter](./plugins/aggregators/valuecounter) - -## Output Plugins - -* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) -* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb)) -* [amon](./plugins/outputs/amon) -* [amqp](./plugins/outputs/amqp) (rabbitmq) -* [application_insights](./plugins/outputs/application_insights) -* [aws kinesis](./plugins/outputs/kinesis) -* [aws cloudwatch](./plugins/outputs/cloudwatch) -* [azure_monitor](./plugins/outputs/azure_monitor) -* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub -* [cratedb](./plugins/outputs/cratedb) -* [datadog](./plugins/outputs/datadog) -* [discard](./plugins/outputs/discard) -* [dynatrace](./plugins/outputs/dynatrace) -* [elasticsearch](./plugins/outputs/elasticsearch) -* [exec](./plugins/outputs/exec) -* [execd](./plugins/outputs/execd) -* [file](./plugins/outputs/file) -* [graphite](./plugins/outputs/graphite) -* [graylog](./plugins/outputs/graylog) -* [health](./plugins/outputs/health) -* [http](./plugins/outputs/http) -* [instrumental](./plugins/outputs/instrumental) -* [kafka](./plugins/outputs/kafka) -* 
[librato](./plugins/outputs/librato) -* [logz.io](./plugins/outputs/logzio) -* [mqtt](./plugins/outputs/mqtt) -* [nats](./plugins/outputs/nats) -* [newrelic](./plugins/outputs/newrelic) -* [nsq](./plugins/outputs/nsq) -* [opentsdb](./plugins/outputs/opentsdb) -* [prometheus](./plugins/outputs/prometheus_client) -* [riemann](./plugins/outputs/riemann) -* [riemann_legacy](./plugins/outputs/riemann_legacy) -* [socket_writer](./plugins/outputs/socket_writer) -* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) -* [syslog](./plugins/outputs/syslog) -* [tcp](./plugins/outputs/socket_writer) -* [udp](./plugins/outputs/socket_writer) -* [warp10](./plugins/outputs/warp10) -* [wavefront](./plugins/outputs/wavefront) -* [sumologic](./plugins/outputs/sumologic) -* [yandex_cloud_monitoring](./plugins/outputs/yandex_cloud_monitoring) +[Latest Release Documentation](https://docs.influxdata.com/telegraf/latest/) + +For documentation on the latest development code see the [documentation index](/docs). + +- [Input Plugins](/docs/INPUTS.md) +- [Output Plugins](/docs/OUTPUTS.md) +- [Processor Plugins](/docs/PROCESSORS.md) +- [Aggregator Plugins](/docs/AGGREGATORS.md) diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000000..5b72cf8634467 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,6 @@ +# Security Policy + +## Reporting a Vulnerability + +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our open source projects, +please responsibly disclose it by contacting security@influxdata.com. More details about security vulnerability reporting, including our GPG key, can be found [here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). diff --git a/accumulator.go b/accumulator.go index 1ea5737a84a99..53ce0642bed02 100644 --- a/accumulator.go +++ b/accumulator.go @@ -41,7 +41,7 @@ type Accumulator interface { // AddMetric adds an metric to the accumulator. AddMetric(Metric) - // SetPrecision sets the timestamp rounding precision. All metrics addeds + // SetPrecision sets the timestamp rounding precision. All metrics // added to the accumulator will have their timestamp rounded to the // nearest multiple of precision. SetPrecision(precision time.Duration) diff --git a/agent/accumulator.go b/agent/accumulator.go index 65000fd98a541..7e3ae9ceebed2 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -9,7 +9,7 @@ import ( type MetricMaker interface { LogName() string - MakeMetric(metric telegraf.Metric) telegraf.Metric + MakeMetric(m telegraf.Metric) telegraf.Metric Log() telegraf.Logger } @@ -90,10 +90,7 @@ func (ac *accumulator) addFields( tp telegraf.ValueType, t ...time.Time, ) { - m, err := metric.New(measurement, tags, fields, ac.getTime(t), tp) - if err != nil { - return - } + m := metric.New(measurement, tags, fields, ac.getTime(t), tp) if m := ac.maker.MakeMetric(m); m != nil { ac.metrics <- m } diff --git a/agent/agent.go b/agent/agent.go index e7ffee322ff20..ffa5d6bd06425 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -13,6 +13,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/serializers/influx" ) @@ -23,9 +24,9 @@ type Agent struct { } // NewAgent returns an Agent for the given Config. 
-func NewAgent(config *config.Config) (*Agent, error) { +func NewAgent(cfg *config.Config) (*Agent, error) { a := &Agent{ - Config: config, + Config: cfg, } return a, nil } @@ -98,8 +99,8 @@ type outputUnit struct { func (a *Agent) Run(ctx context.Context) error { log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ "Flush Interval:%s", - a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, - a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) + time.Duration(a.Config.Agent.Interval), a.Config.Agent.Quiet, + a.Config.Agent.Hostname, time.Duration(a.Config.Agent.FlushInterval)) log.Printf("D! [agent] Initializing plugins") err := a.initPlugins() @@ -126,10 +127,7 @@ func (a *Agent) Run(ctx context.Context) error { } } - next, au, err = a.startAggregators(aggC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(aggC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -149,29 +147,20 @@ func (a *Agent) Run(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - err := a.runOutputs(ou) - if err != nil { - log.Printf("E! [agent] Error running outputs: %v", err) - } + a.runOutputs(ou) }() if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -179,20 +168,14 @@ func (a *Agent) Run(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.runInputs(ctx, startTime, iu) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } + a.runInputs(ctx, startTime, iu) }() wg.Wait() @@ -204,38 +187,49 @@ func (a *Agent) Run(ctx context.Context) error { // initPlugins runs the Init function on plugins. func (a *Agent) initPlugins() error { for _, input := range a.Config.Inputs { + // Share the snmp translator setting with plugins that need it. 
+ if tp, ok := input.Input.(snmp.TranslatorPlugin); ok { + tp.SetTranslator(a.Config.Agent.SnmpTranslator) + } err := input.Init() if err != nil { return fmt.Errorf("could not initialize input %s: %v", input.LogName(), err) } } + for _, parser := range a.Config.Parsers { + err := parser.Init() + if err != nil { + return fmt.Errorf("could not initialize parser %s::%s: %v", + parser.Config.DataFormat, parser.Config.Parent, err) + } + } for _, processor := range a.Config.Processors { err := processor.Init() if err != nil { return fmt.Errorf("could not initialize processor %s: %v", - processor.Config.Name, err) + processor.LogName(), err) } } for _, aggregator := range a.Config.Aggregators { err := aggregator.Init() if err != nil { return fmt.Errorf("could not initialize aggregator %s: %v", - aggregator.Config.Name, err) + aggregator.LogName(), err) } } for _, processor := range a.Config.AggProcessors { err := processor.Init() if err != nil { return fmt.Errorf("could not initialize processor %s: %v", - processor.Config.Name, err) + processor.LogName(), err) } } for _, output := range a.Config.Outputs { err := output.Init() if err != nil { return fmt.Errorf("could not initialize output %s: %v", - output.Config.Name, err) + output.LogName(), err) } } return nil @@ -288,32 +282,38 @@ func (a *Agent) runInputs( ctx context.Context, startTime time.Time, unit *inputUnit, -) error { +) { var wg sync.WaitGroup for _, input := range unit.inputs { // Overwrite agent interval if this plugin has its own. - interval := a.Config.Agent.Interval.Duration + interval := time.Duration(a.Config.Agent.Interval) if input.Config.Interval != 0 { interval = input.Config.Interval } // Overwrite agent precision if this plugin has its own. - precision := a.Config.Agent.Precision.Duration + precision := time.Duration(a.Config.Agent.Precision) if input.Config.Precision != 0 { precision = input.Config.Precision } // Overwrite agent collection_jitter if this plugin has its own. - jitter := a.Config.Agent.CollectionJitter.Duration + jitter := time.Duration(a.Config.Agent.CollectionJitter) if input.Config.CollectionJitter != 0 { jitter = input.Config.CollectionJitter } + // Overwrite agent collection_offset if this plugin has its own. + offset := time.Duration(a.Config.Agent.CollectionOffset) + if input.Config.CollectionOffset != 0 { + offset = input.Config.CollectionOffset + } + var ticker Ticker if a.Config.Agent.RoundInterval { - ticker = NewAlignedTicker(startTime, interval, jitter) + ticker = NewAlignedTicker(startTime, interval, jitter, offset) } else { - ticker = NewUnalignedTicker(interval, jitter) + ticker = NewUnalignedTicker(interval, jitter, offset) } defer ticker.Stop() @@ -334,8 +334,6 @@ func (a *Agent) runInputs( close(unit.dst) log.Printf("D! [agent] Input channel closed") - - return nil } // testStartInputs is a variation of startInputs for use in --test and --once @@ -344,7 +342,7 @@ func (a *Agent) runInputs( func (a *Agent) testStartInputs( dst chan<- telegraf.Metric, inputs []*models.RunningInput, -) (*inputUnit, error) { +) *inputUnit { log.Printf("D! [agent] Starting service inputs") unit := &inputUnit{ @@ -364,13 +362,12 @@ func (a *Agent) testStartInputs( if err != nil { log.Printf("E! [agent] Starting input %s: %v", input.LogName(), err) } - } unit.inputs = append(unit.inputs, input) } - return unit, nil + return unit } // testRunInputs is a variation of runInputs for use in --test and --once mode. 
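The `runInputs` hunk above overrides interval, precision, jitter, and the new `collection_offset` per plugin, then builds an aligned or unaligned ticker from them. A hedged sketch of what the offset does to an aligned schedule, in plain Go rather than the Telegraf implementation (`nextAligned` is a hypothetical helper, and epoch-aligned interval boundaries are assumed):

```go
// Plain-Go sketch of collection_offset scheduling; not Telegraf code.
package main

import (
	"fmt"
	"time"
)

// nextAligned mirrors the shape of AlignedTicker.next: wait until the next
// interval boundary (never zero), then shift the whole schedule by offset.
func nextAligned(now time.Time, interval, offset time.Duration) time.Duration {
	d := now.Truncate(interval).Add(interval).Sub(now)
	if d == 0 {
		d = interval
	}
	return d + offset
}

func main() {
	now := time.Unix(7, 0) // 7s after the epoch
	// Next 10s boundary is t=10s; a 3s offset pushes the tick to t=13s.
	fmt.Println(nextAligned(now, 10*time.Second, 3*time.Second)) // prints 6s
}
```

From t=7s this prints `6s`, so the tick fires at t=13s; compare TestAlignedTickerOffset further down, whose first expected tick is also at 13 seconds.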
@@ -379,7 +376,7 @@ func (a *Agent) testRunInputs( ctx context.Context, wait time.Duration, unit *inputUnit, -) error { +) { var wg sync.WaitGroup nul := make(chan telegraf.Metric) @@ -394,13 +391,13 @@ func (a *Agent) testRunInputs( defer wg.Done() // Overwrite agent interval if this plugin has its own. - interval := a.Config.Agent.Interval.Duration + interval := time.Duration(a.Config.Agent.Interval) if input.Config.Interval != 0 { interval = input.Config.Interval } // Overwrite agent precision if this plugin has its own. - precision := a.Config.Agent.Precision.Duration + precision := time.Duration(a.Config.Agent.Precision) if input.Config.Precision != 0 { precision = input.Config.Precision } @@ -435,7 +432,6 @@ func (a *Agent) testRunInputs( close(unit.dst) log.Printf("D! [agent] Input channel closed") - return nil } // stopServiceInputs stops all service inputs. @@ -447,6 +443,13 @@ func stopServiceInputs(inputs []*models.RunningInput) { } } +// stopRunningOutputs stops all running outputs. +func stopRunningOutputs(outputs []*models.RunningOutput) { + for _, output := range outputs { + output.Close() + } +} + // gather runs an input's gather function periodically until the context is // done. func (a *Agent) gatherLoop( @@ -547,7 +550,7 @@ func (a *Agent) startProcessors( // closed and all metrics have been written. func (a *Agent) runProcessors( units []*processorUnit, -) error { +) { var wg sync.WaitGroup for _, unit := range units { wg.Add(1) @@ -567,8 +570,6 @@ func (a *Agent) runProcessors( }(unit) } wg.Wait() - - return nil } // startAggregators sets up the aggregator unit and returns the source channel. @@ -576,7 +577,7 @@ func (a *Agent) startAggregators( aggC chan<- telegraf.Metric, outputC chan<- telegraf.Metric, aggregators []*models.RunningAggregator, -) (chan<- telegraf.Metric, *aggregatorUnit, error) { +) (chan<- telegraf.Metric, *aggregatorUnit) { src := make(chan telegraf.Metric, 100) unit := &aggregatorUnit{ src: src, @@ -584,7 +585,7 @@ func (a *Agent) startAggregators( outputC: outputC, aggregators: aggregators, } - return src, unit, nil + return src, unit } // runAggregators beings aggregating metrics and runs until the source channel @@ -592,7 +593,7 @@ func (a *Agent) startAggregators( func (a *Agent) runAggregators( startTime time.Time, unit *aggregatorUnit, -) error { +) { ctx, cancel := context.WithCancel(context.Background()) // Before calling Add, initialize the aggregation window. This ensures @@ -628,8 +629,8 @@ func (a *Agent) runAggregators( go func(agg *models.RunningAggregator) { defer wg.Done() - interval := a.Config.Agent.Interval.Duration - precision := a.Config.Agent.Precision.Duration + interval := time.Duration(a.Config.Agent.Interval) + precision := time.Duration(a.Config.Agent.Precision) acc := NewAccumulator(agg, unit.aggC) acc.SetPrecision(getPrecision(precision, interval)) @@ -644,8 +645,6 @@ func (a *Agent) runAggregators( // processor chain will close the outputC when it finishes processing. close(unit.aggC) log.Printf("D! [agent] Aggregator channel closed") - - return nil } func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) { @@ -738,12 +737,12 @@ func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) // written one last time and dropped if unsuccessful. 
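+// With this change, runOutputs also closes every running output on return,
+// via stopRunningOutputs above.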
func (a *Agent) runOutputs( unit *outputUnit, -) error { +) { var wg sync.WaitGroup // Start flush loop - interval := a.Config.Agent.FlushInterval.Duration - jitter := a.Config.Agent.FlushJitter.Duration + interval := time.Duration(a.Config.Agent.FlushInterval) + jitter := time.Duration(a.Config.Agent.FlushJitter) ctx, cancel := context.WithCancel(context.Background()) @@ -785,7 +784,8 @@ func (a *Agent) runOutputs( cancel() wg.Wait() - return nil + log.Println("I! [agent] Stopping running outputs") + stopRunningOutputs(unit.outputs) } // flushLoop runs an output's flush function periodically until the context is @@ -919,10 +919,7 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel } } - next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(procC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -933,30 +930,20 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel } } - iu, err := a.testStartInputs(next, a.Config.Inputs) - if err != nil { - return err - } + iu := a.testStartInputs(next, a.Config.Inputs) var wg sync.WaitGroup - if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -964,20 +951,14 @@ func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- tel wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.testRunInputs(ctx, wait, iu) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } + a.testRunInputs(ctx, wait, iu) }() wg.Wait() @@ -1037,10 +1018,7 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { } } - next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) - if err != nil { - return err - } + next, au = a.startAggregators(procC, next, a.Config.Aggregators) } var pu []*processorUnit @@ -1051,38 +1029,26 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { } } - iu, err := a.testStartInputs(next, a.Config.Inputs) - if err != nil { - return err - } + iu := a.testStartInputs(next, a.Config.Inputs) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - err := a.runOutputs(ou) - if err != nil { - log.Printf("E! [agent] Error running outputs: %v", err) - } + a.runOutputs(ou) }() if au != nil { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(apu) - if err != nil { - log.Printf("E! [agent] Error running processors: %v", err) - } + a.runProcessors(apu) }() wg.Add(1) go func() { defer wg.Done() - err := a.runAggregators(startTime, au) - if err != nil { - log.Printf("E! [agent] Error running aggregators: %v", err) - } + a.runAggregators(startTime, au) }() } @@ -1090,20 +1056,14 @@ func (a *Agent) once(ctx context.Context, wait time.Duration) error { wg.Add(1) go func() { defer wg.Done() - err := a.runProcessors(pu) - if err != nil { - log.Printf("E! 
[agent] Error running processors: %v", err) - } + a.runProcessors(pu) }() } wg.Add(1) go func() { defer wg.Done() - err := a.testRunInputs(ctx, wait, iu) - if err != nil { - log.Printf("E! [agent] Error running inputs: %v", err) - } + a.testRunInputs(ctx, wait, iu) }() wg.Wait() diff --git a/agent/agent_posix.go b/agent/agent_posix.go index 09552cac07026..e43c3a7817a88 100644 --- a/agent/agent_posix.go +++ b/agent/agent_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package agent diff --git a/agent/agent_windows.go b/agent/agent_windows.go index 94ed9d006acb2..3196dc70e78e2 100644 --- a/agent/agent_windows.go +++ b/agent/agent_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package agent diff --git a/agent/tick.go b/agent/tick.go index 91b99712a73b4..ee4cc41223f80 100644 --- a/agent/tick.go +++ b/agent/tick.go @@ -9,8 +9,6 @@ import ( "github.com/influxdata/telegraf/internal" ) -type empty struct{} - type Ticker interface { Elapsed() <-chan time.Time Stop() @@ -33,36 +31,38 @@ type Ticker interface { type AlignedTicker struct { interval time.Duration jitter time.Duration + offset time.Duration minInterval time.Duration ch chan time.Time cancel context.CancelFunc wg sync.WaitGroup } -func NewAlignedTicker(now time.Time, interval, jitter time.Duration) *AlignedTicker { - return newAlignedTicker(now, interval, jitter, clock.New()) -} - -func newAlignedTicker(now time.Time, interval, jitter time.Duration, clock clock.Clock) *AlignedTicker { - ctx, cancel := context.WithCancel(context.Background()) +func NewAlignedTicker(now time.Time, interval, jitter, offset time.Duration) *AlignedTicker { t := &AlignedTicker{ interval: interval, jitter: jitter, + offset: offset, minInterval: interval / 100, - ch: make(chan time.Time, 1), - cancel: cancel, } + t.start(now, clock.New()) + return t +} + +func (t *AlignedTicker) start(now time.Time, clk clock.Clock) { + t.ch = make(chan time.Time, 1) + + ctx, cancel := context.WithCancel(context.Background()) + t.cancel = cancel d := t.next(now) - timer := clock.Timer(d) + timer := clk.Timer(d) t.wg.Add(1) go func() { defer t.wg.Done() t.run(ctx, timer) }() - - return t } func (t *AlignedTicker) next(now time.Time) time.Duration { @@ -76,6 +76,7 @@ func (t *AlignedTicker) next(now time.Time) time.Duration { if d == 0 { d = t.interval } + d += t.offset d += internal.RandomDuration(t.jitter) return d } @@ -120,42 +121,48 @@ func (t *AlignedTicker) Stop() { type UnalignedTicker struct { interval time.Duration jitter time.Duration + offset time.Duration ch chan time.Time cancel context.CancelFunc wg sync.WaitGroup } -func NewUnalignedTicker(interval, jitter time.Duration) *UnalignedTicker { - return newUnalignedTicker(interval, jitter, clock.New()) -} - -func newUnalignedTicker(interval, jitter time.Duration, clock clock.Clock) *UnalignedTicker { - ctx, cancel := context.WithCancel(context.Background()) +func NewUnalignedTicker(interval, jitter, offset time.Duration) *UnalignedTicker { t := &UnalignedTicker{ interval: interval, jitter: jitter, - ch: make(chan time.Time, 1), - cancel: cancel, + offset: offset, } + t.start(clock.New()) + return t +} - ticker := clock.Ticker(t.interval) - t.ch <- clock.Now() +func (t *UnalignedTicker) start(clk clock.Clock) *UnalignedTicker { + t.ch = make(chan time.Time, 1) + ctx, cancel := context.WithCancel(context.Background()) + t.cancel = cancel + + ticker := clk.Ticker(t.interval) + if t.offset == 0 { + // Perform initial trigger to stay backward compatible + t.ch <- clk.Now() + } t.wg.Add(1) 
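+	// The tick loop runs on its own goroutine; cancelling the context ends it
+	// and the WaitGroup lets Stop() wait for the loop to exit.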
go func() { defer t.wg.Done() - t.run(ctx, ticker, clock) + t.run(ctx, ticker, clk) }() return t } -func sleep(ctx context.Context, duration time.Duration, clock clock.Clock) error { +func sleep(ctx context.Context, duration time.Duration, clk clock.Clock) error { if duration == 0 { return nil } - t := clock.Timer(duration) + t := clk.Timer(duration) select { case <-t.C: return nil @@ -165,7 +172,7 @@ func sleep(ctx context.Context, duration time.Duration, clock clock.Clock) error } } -func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clock clock.Clock) { +func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clk clock.Clock) { for { select { case <-ctx.Done(): @@ -173,13 +180,13 @@ func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clock c return case <-ticker.C: jitter := internal.RandomDuration(t.jitter) - err := sleep(ctx, jitter, clock) + err := sleep(ctx, t.offset+jitter, clk) if err != nil { ticker.Stop() return } select { - case t.ch <- clock.Now(): + case t.ch <- clk.Now(): default: } } @@ -219,20 +226,22 @@ type RollingTicker struct { } func NewRollingTicker(interval, jitter time.Duration) *RollingTicker { - return newRollingTicker(interval, jitter, clock.New()) -} - -func newRollingTicker(interval, jitter time.Duration, clock clock.Clock) *RollingTicker { - ctx, cancel := context.WithCancel(context.Background()) t := &RollingTicker{ interval: interval, jitter: jitter, - ch: make(chan time.Time, 1), - cancel: cancel, } + t.start(clock.New()) + return t +} + +func (t *RollingTicker) start(clk clock.Clock) *RollingTicker { + t.ch = make(chan time.Time, 1) + + ctx, cancel := context.WithCancel(context.Background()) + t.cancel = cancel d := t.next() - timer := clock.Timer(d) + timer := clk.Timer(d) t.wg.Add(1) go func() { diff --git a/agent/tick_test.go b/agent/tick_test.go index 5b8db7e93d4c6..397a56ed8bd62 100644 --- a/agent/tick_test.go +++ b/agent/tick_test.go @@ -10,17 +10,22 @@ import ( "github.com/stretchr/testify/require" ) -var format = "2006-01-02T15:04:05.999Z07:00" - func TestAlignedTicker(t *testing.T) { interval := 10 * time.Second jitter := 0 * time.Second + offset := 0 * time.Second - clock := clock.NewMock() - since := clock.Now() + clk := clock.NewMock() + since := clk.Now() until := since.Add(60 * time.Second) - ticker := newAlignedTicker(since, interval, jitter, clock) + ticker := &AlignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + minInterval: interval / 100, + } + ticker.start(since, clk) defer ticker.Stop() expected := []time.Time{ @@ -34,13 +39,13 @@ func TestAlignedTicker(t *testing.T) { actual := []time.Time{} - clock.Add(10 * time.Second) - for !clock.Now().After(until) { + clk.Add(10 * time.Second) + for !clk.Now().After(until) { select { case tm := <-ticker.Elapsed(): actual = append(actual, tm.UTC()) } - clock.Add(10 * time.Second) + clk.Add(10 * time.Second) } require.Equal(t, expected, actual) @@ -49,16 +54,23 @@ func TestAlignedTicker(t *testing.T) { func TestAlignedTickerJitter(t *testing.T) { interval := 10 * time.Second jitter := 5 * time.Second + offset := 0 * time.Second - clock := clock.NewMock() - since := clock.Now() + clk := clock.NewMock() + since := clk.Now() until := since.Add(61 * time.Second) - ticker := newAlignedTicker(since, interval, jitter, clock) + ticker := &AlignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + minInterval: interval / 100, + } + ticker.start(since, clk) defer ticker.Stop() last := since - for 
!clock.Now().After(until) { + for !clk.Now().After(until) { select { case tm := <-ticker.Elapsed(): dur := tm.Sub(last) @@ -68,24 +80,69 @@ func TestAlignedTickerJitter(t *testing.T) { last = last.Add(interval) default: } - clock.Add(1 * time.Second) + clk.Add(1 * time.Second) } } +func TestAlignedTickerOffset(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + offset := 3 * time.Second + + clk := clock.NewMock() + since := clk.Now() + until := since.Add(61 * time.Second) + + ticker := &AlignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + minInterval: interval / 100, + } + ticker.start(since, clk) + defer ticker.Stop() + + expected := []time.Time{ + time.Unix(13, 0).UTC(), + time.Unix(23, 0).UTC(), + time.Unix(33, 0).UTC(), + time.Unix(43, 0).UTC(), + time.Unix(53, 0).UTC(), + } + + actual := []time.Time{} + + clk.Add(10*time.Second + offset) + for !clk.Now().After(until) { + tm := <-ticker.Elapsed() + actual = append(actual, tm.UTC()) + clk.Add(10 * time.Second) + } + + require.Equal(t, expected, actual) +} + func TestAlignedTickerMissedTick(t *testing.T) { interval := 10 * time.Second jitter := 0 * time.Second + offset := 0 * time.Second - clock := clock.NewMock() - since := clock.Now() + clk := clock.NewMock() + since := clk.Now() - ticker := newAlignedTicker(since, interval, jitter, clock) + ticker := &AlignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + minInterval: interval / 100, + } + ticker.start(since, clk) defer ticker.Stop() - clock.Add(25 * time.Second) + clk.Add(25 * time.Second) tm := <-ticker.Elapsed() require.Equal(t, time.Unix(10, 0).UTC(), tm.UTC()) - clock.Add(5 * time.Second) + clk.Add(5 * time.Second) tm = <-ticker.Elapsed() require.Equal(t, time.Unix(30, 0).UTC(), tm.UTC()) } @@ -93,13 +150,19 @@ func TestAlignedTickerMissedTick(t *testing.T) { func TestUnalignedTicker(t *testing.T) { interval := 10 * time.Second jitter := 0 * time.Second + offset := 0 * time.Second - clock := clock.NewMock() - clock.Add(1 * time.Second) - since := clock.Now() + clk := clock.NewMock() + clk.Add(1 * time.Second) + since := clk.Now() until := since.Add(60 * time.Second) - ticker := newUnalignedTicker(interval, jitter, clock) + ticker := &UnalignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + } + ticker.start(clk) defer ticker.Stop() expected := []time.Time{ @@ -113,13 +176,13 @@ func TestUnalignedTicker(t *testing.T) { } actual := []time.Time{} - for !clock.Now().After(until) { + for !clk.Now().After(until) { select { case tm := <-ticker.Elapsed(): actual = append(actual, tm.UTC()) default: } - clock.Add(10 * time.Second) + clk.Add(10 * time.Second) } require.Equal(t, expected, actual) @@ -128,13 +191,19 @@ func TestUnalignedTicker(t *testing.T) { func TestRollingTicker(t *testing.T) { interval := 10 * time.Second jitter := 0 * time.Second + offset := 0 * time.Second - clock := clock.NewMock() - clock.Add(1 * time.Second) - since := clock.Now() + clk := clock.NewMock() + clk.Add(1 * time.Second) + since := clk.Now() until := since.Add(60 * time.Second) - ticker := newUnalignedTicker(interval, jitter, clock) + ticker := &UnalignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + } + ticker.start(clk) defer ticker.Stop() expected := []time.Time{ @@ -148,13 +217,13 @@ func TestRollingTicker(t *testing.T) { } actual := []time.Time{} - for !clock.Now().After(until) { + for !clk.Now().After(until) { select { case tm := <-ticker.Elapsed(): actual = append(actual, tm.UTC()) default: } - 
clock.Add(10 * time.Second) + clk.Add(10 * time.Second) } require.Equal(t, expected, actual) @@ -169,13 +238,46 @@ func TestAlignedTickerDistribution(t *testing.T) { interval := 10 * time.Second jitter := 5 * time.Second + offset := 0 * time.Second - clock := clock.NewMock() - since := clock.Now() + clk := clock.NewMock() + since := clk.Now() - ticker := newAlignedTicker(since, interval, jitter, clock) + ticker := &AlignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + minInterval: interval / 100, + } + ticker.start(since, clk) defer ticker.Stop() - dist := simulatedDist(ticker, clock) + dist := simulatedDist(ticker, clk) + printDist(dist) + require.True(t, 350 < dist.Count) + require.True(t, 9 < dist.Mean() && dist.Mean() < 11) +} + +func TestAlignedTickerDistributionWithOffset(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + interval := 10 * time.Second + jitter := 5 * time.Second + offset := 3 * time.Second + + clk := clock.NewMock() + since := clk.Now() + + ticker := &AlignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + minInterval: interval / 100, + } + ticker.start(since, clk) + defer ticker.Stop() + dist := simulatedDist(ticker, clk) printDist(dist) require.True(t, 350 < dist.Count) require.True(t, 9 < dist.Mean() && dist.Mean() < 11) @@ -190,12 +292,42 @@ func TestUnalignedTickerDistribution(t *testing.T) { interval := 10 * time.Second jitter := 5 * time.Second + offset := 0 * time.Second + + clk := clock.NewMock() + + ticker := &UnalignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + } + ticker.start(clk) + defer ticker.Stop() + dist := simulatedDist(ticker, clk) + printDist(dist) + require.True(t, 350 < dist.Count) + require.True(t, 9 < dist.Mean() && dist.Mean() < 11) +} + +func TestUnalignedTickerDistributionWithOffset(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + interval := 10 * time.Second + jitter := 5 * time.Second + offset := 3 * time.Second - clock := clock.NewMock() + clk := clock.NewMock() - ticker := newUnalignedTicker(interval, jitter, clock) + ticker := &UnalignedTicker{ + interval: interval, + jitter: jitter, + offset: offset, + } + ticker.start(clk) defer ticker.Stop() - dist := simulatedDist(ticker, clock) + dist := simulatedDist(ticker, clk) printDist(dist) require.True(t, 350 < dist.Count) require.True(t, 9 < dist.Mean() && dist.Mean() < 11) @@ -211,11 +343,15 @@ func TestRollingTickerDistribution(t *testing.T) { interval := 10 * time.Second jitter := 5 * time.Second - clock := clock.NewMock() + clk := clock.NewMock() - ticker := newRollingTicker(interval, jitter, clock) + ticker := &RollingTicker{ + interval: interval, + jitter: jitter, + } + ticker.start(clk) defer ticker.Stop() - dist := simulatedDist(ticker, clock) + dist := simulatedDist(ticker, clk) printDist(dist) require.True(t, 275 < dist.Count) require.True(t, 12 < dist.Mean() && 13 > dist.Mean()) @@ -239,22 +375,22 @@ func printDist(dist Distribution) { fmt.Printf("Count: %d\n", dist.Count) } -func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution { - since := clock.Now() +func simulatedDist(ticker Ticker, clk *clock.Mock) Distribution { + since := clk.Now() until := since.Add(1 * time.Hour) var dist Distribution - last := clock.Now() - for !clock.Now().After(until) { + last := clk.Now() + for !clk.Now().After(until) { select { case tm := <-ticker.Elapsed(): - dist.Buckets[tm.Second()] += 1 + dist.Buckets[tm.Second()]++ dist.Count++ dist.Waittime 
+= tm.Sub(last).Seconds() last = tm default: - clock.Add(1 * time.Second) + clk.Add(1 * time.Second) } } diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index b454c8dc8d9dd..0000000000000 --- a/appveyor.yml +++ /dev/null @@ -1,35 +0,0 @@ -version: "{build}" - -image: Visual Studio 2019 - -cache: - - C:\gopath\pkg\mod -> go.sum - - C:\ProgramData\chocolatey\bin -> appveyor.yml - - C:\ProgramData\chocolatey\lib -> appveyor.yml - -clone_folder: C:\gopath\src\github.com\influxdata\telegraf - -environment: - GOPATH: C:\gopath - -stack: go 1.14 - -platform: x64 - -install: - - choco install make - - cd "%GOPATH%\src\github.com\influxdata\telegraf" - - git config --system core.longpaths true - - go version - - go env - -build_script: - - make deps - - make telegraf - -test_script: - - make check - - make test-windows - -artifacts: - - path: telegraf.exe diff --git a/assets/GopherAndTiger.png b/assets/GopherAndTiger.png new file mode 100644 index 0000000000000000000000000000000000000000..d76a4242f8636902f78949e831a386d66ee1cbd3 GIT binary patch literal 74224 zcmV)YK&-!sP) zaB^>EX>4U6ba`-PAZ2)IW&i+q+O3^ilI=E?4r3#0z!aen{*m&f?~W1W9;V=-po^HRLxN+I3n@Az}2 zU>|$>ml`jUBB!1KK|_j&iUtK{?q#1f81I6$8!99&>xRg|F%xQ zuZQxt*VCVukNA^y2@@vnDOa{67te_g-5=d<_vdAgD7#d6i(hx+3Y&v(4>WjWLN zwaTBy{|n!@^QZIYxySvnYm-NPulwa+dWfv}4mr#)!Vc?v{ldpB=D1?xYm6W3?rTqK zvB!@M2r2s~?AR`LW2T4Q_}IC`%kj^93Fo@q>v6G%E6>0?W8lNW+1bDRx&Hpb|Mt&6 zebC;Tc?d2hTOWQlIJ)56hCAoqyo(J9`*%&tS>JH|e7XPrj~koV!E%=Q;Q`0}{(Qt+ z!ar;)oqbMsxp-^7fBYr1`TGGZ5!Wu<3=omST|+9NhP%bsLSP*q_GfVCG3EHO1EE|c zPVO@1lq#F^M6b>7v~b=VOZ<5mcwr+-s;RLxxC@(=bIBEddoZDKl~i&mrIuEn(#xo+ z=2~j4t%_5lrIuT1wYB!N-o}%j{FJ9Y?RlR5j2?Re#>&-eZ~f`LkHJMp9(>l|{eu_Y z_@+0%<*jdfpSQo`!+gH{m9Kv7d%pgS8E2Y6z%}b^^O=2)6&9wn@+zyYww~43*x1@m zJMXgVZu{ANk3VuSiJ>$z`u=wC;c>>>z_vv}h)#m*A{?aHn&veiJ z;f`3|EZ85*-maQry?3i0Ai~|TEvY@M`zYK%&3s7AIqg|rd@=xkjmv&3?@ZkK(jH;- z5+;~k?r-4LJjAOKVI^Tvp^#BC19mJB--cY{s=G3stXSAntEj}@)>;7iD z%Zix%`_}%2vBtc6dn=b6tTw9`{N!_wU#1{@V-x+pm65nSDQ* z$B$Tf%V*^=f;JbFfoGU+V*6Lwh3A^<>4W>l#=bdbf5HbBy5}|5H!%1;?o`%%E02&_ zwY|r&OU3~A+#3{30e zT?2pX8^%Nct7YQU@w?uJpYHGhUw}nVp<`(HMcX?FY_0E`&jG;sQI8d@Dih;c_|ckR z)e`#0zQ$ZUGFy8m+$Z7Hy~Yaia{J&dSi)UCK=5`+p4$hS3DoOtgZspl-FRoeAY|PT z%jWc!w#Xi_j8t&s+E%zH_tHXZBXT1z+^zNZ#$s@t%sX)wyrg_r_;_l(jyqtL2R6U) zoQ41Kyu_q^&pNOxnEHHi{rd+0(_r(t_I6bk?$n+OoS0zRgd`HYW*UT@or%z^gm*o7 zau2n}a!`le_|LvxKi`De+Lv$}LIh7t(116U51S<@VAExdR{N7GTp*3s>x&n@-QEslAEx9k=Kee}CE-Wgb<)d-q?#fgck3<9J|I50ADpcQ9c9B6Gh>$4KLm3akkc zj&M*G*pDI30g9ITT-*r3U?uMV;YvjP;Oek9sAjw%S7K=vkW-!znE=Z2T+lY4cfY9@ z&@(;l`Y;|BESCBLQy4?xH&Z3(V51NG`dMGXbqP9fl(Gj(0Nj|luP=gdH+ zZ}s`OP=xU}y#7 zH^dG=1wjq@(1#93V#)p2Zk=9U#^nSb@RynW;f^Jr5 zFP4bUe+%UAVvBL19V!bvHge}lVE}XG0YkLI4ZKbece|Oa@q#eH#+F#IG&OdArG6oR zk(opfU3Z1F_*6(mWl3Q}@4hiH-@XPsr#AMc?MZ0CUBH=1w2-%!d9hmW0Ntz1Ykb5| zITXAV9gBPdwbNwVb^=!tQ5UGi#<95hq+A+!`LN${T~XSIwG9H?{MyUHHEfIbWXv@0 z+b4$K_;gdN%KdQ7pz6x$ZE(T|<4K7C>NK-7B+0ji#O64-t4NZ*oO1Q8jh%n+dJ$0Fp2vtvK-@RzXpRqS14`#w#Q zpQ9+gjiD#77Uo|olnJ7^!pmJ?-T3+l&jSb#^jG^>!V%i2&@n)O3u1zB@74*C1Z*g6 z+U?h=8imzM1@r7gnS1oxfQjf5xtl?N<=Mh>v4| zEWA0MYee9V2vpQIu^>Gg*a-)CpGkaeM9vMZ`tZ8;BBXexJ(d_)Evn3#Rl1VfJd1pduvYa)eow)++mzX4TPUE^+ z&>FCJQG5`+!F2X8vq(^R3<(8HbaC&o3bp`ig8RE-Na2b4K-t3=EO*i-ean=G3sDjko zB8c)pV#1NDQ%uAV5XR+$S-u&|zwZ1zY$Mh^)#1cl*b2+AaMob7#6MtP3!4vkJoX7- zW{hMF;zIl6atZc#?M#3NZjsBGHB4Uf4&Nq*@j5$zjYe2<2I`637#}j^js+406G!Ip zA?#dW#q}uW0AnCZHk_daaA8!9Yr&R2(0e=)maBbyBfM`|7xWiTFn26F3Lg07H_|Wt8tDH1J-+(fT%V6OA?z9eC&$;9=+qC5=LEvF~^V z#DP#c)qnwOFR&`OdNA#?7jT64SBW*BE2D?=<@Nxhd_%M+0!;Z3j|n)y-|xH1PrJDf 
z21AAnVe(CavDempyj?pbOFl99?%Pwbwk~w<(H$gYW$k3oo&$g>V={W2LiZl6cZnQ7 zCVBkVRf+huH&l)vlWf^+W*ZAn!gWGnw8c&@)lN_=HEJI+G~V&Km@jq4TOEuGk0ilr zCOArToh2n^F#htv=I`>u=JPtu^lZuOHw?1()%^FCasz8ZQIX`e=>}Olnst~x6|vl7 z=XxUsEUbn>VhaA{0P`C1yB#}rSPKd1ZmB6YbSiNErQglS{TFIjc6E!?HCO8i;K*TFL+r)u z#cQ7=7=8uhWJ{KPVQ~LOH~lmZlT;fSs-1R<+M>jjq*$2Z2xXM!<7K~_M@}5!=fUB` zTCKcOa*_jfgNJ@?W=R`rZE6~<%QSdnhOD&~E~Xtk@|Z&V_AO6V`}av^{>R|R;btA? zFlO@USXps~ZKVs4qFQS_m;}D| zyZM9eWzwWcj2kzu#th%Ob?aEZd^r;)3W7r#FqlDskPtH<-@RwmM|^@HB&2FPiIk9g zz(G*3fzPM#?ZzP5#nzmKst*MPrqxtd3TrcEHLK(5v8M^aW`lOIf{QOvaC>0QURb3H zKK8khhE9xP%0;&n6a1|vUH1wyE>;>mHM+D!vgjkT@w~SF zyqerVuU8>^pM%IKy;-UcyR#&J{ZrKj>L$8@V`zZ^r~$${ci4(Kk9lZaFWRzYOZauy zU6&se6ck#MTiB;hA5In)^6#u2gh`z^p<)vP3jz(vNn!Ab-^J6KkFjIN^5BCH)>wn( za=Ccwsi#Q2sD!~o6_i$}50DZL=GZ7WVId+)=Ww2tuy6yrUDo?~e!uAwt@%ds{Bs7M zFOe+&QnLIDS)+@TKG19$K6A4g27K|k!DW|B;QssXCn7$c1&4B(RZ@sBG~z0TcC?g@ za?H?Ks(aaYh7GrA0|?brdbQi@{-i?(C9`H2tX*CEYhv2LuPf@Tq=ch~Z3G7!oH%YJ zKi@)Pdw-J%OG?PvVet3oWrI2#>hdNluJPS%Y4ZhQSWS1O|ARR$tX52%II(ETlqo^f z(&kfKT+GjY_OnWtE?o%Hgu+f~u(9H{eEaRUbIUEa)Yz6Igkavhc}$x&t(p>LOT+IM zeE(e#c88?2SV7mJYxmj*Ef@yu+%EZau~|)Rx7)e>_S@;zs}~6g@dO8lRJhK^C@3f( zGc%I~3l;z{@=C$TD@CQ7GJY&wr}6gXP-o1OdPq8r8>LgKV zdX&QeHuydKNplmC2F@VZyrH^PbjRI-)LzxCEyFOC7G8ZtqG@%wpB>xM4a zv@wWwu{wqU#f2*U2bQDx1wOw*P*B5|$LobP-x$n(quI`~2Xj2vCv9!x1qiEor1d8C z^in7Z;P6naK+;ynw{Ry}vM33cOyEu%3D58B$VGC@wDM*s){e=H{|u#R?7{G=~a@ z0QV?XuCh9bX=FW@Pf9-Um-CYDXWV!*d0|mu>vslkzHSZ%ITIsB!MO1X(G{&WE*C8Q z#9+}!b(2N=BzW)@^sR&T$hler4+xZ_T4yY04GdlYNKi4!My;e{9Y=9_Q0<#s{(K*6IA z8H^r1nnQ;Uv17-MI?F~PfyXU&23wps8t|K!N`u$DWh}7j-1`eb`oOB>UsNPn@ulSB z537=q)e3jrBWT}2u;6`zEt~89oWvw}s0Ok+<;s2cZ7}8Kg2+e%zu$~Crlz^;jTPn` zko@h>M&)Fs=G>Ej>wquX>ShbXu$qrW6?-J3wj03m<;#`C#6+2#oGdJsx?Psi($Y%Z ztD2x`8mm^VV#<^$^dD$SkA#Mvz59;CZctcgB|1hUG)%AN5`FlA!HO^8Pk(-nNs}gF zsi+{=G!3uUi?snMwxFOOh7B7Az?`?X@c9yR!|vU?mt)6{)oP^yTyez}bnn}n={t6C zQ1{VEs6^FaZRP43&-zOFOd1Ru0hbL{s>$>?9D<&yg8u1(!a_;@(bL~|(*{_vOp>2Z zofS$+9cs3$PP_Nu^{SKC?IR;)^LRF2?0@ z;c~gqb-l*#d`hG+D=UjbhYrd zQdDTFVI(E{iH@#45SE)Gd1to4;)P)7&0f~sz!2d3w!YZ{E@8{!A>g0&y=c^^QCxrh z^>pjjtzNYRuh+}IefwCvco7R1eoRm>+;Y2_>bJC^Dmqs=Jn^Vb*Dn2d@x>P_NvO26 zlpQ;EuxQaDVq#+0wrv|bcb?9YM@B|+=bd*la^y%NBdhBs=H}*d^UXI`u|&=^u*QxZ z%g~`iYi#QF`Fw2LxRE(?=8*1o^N=-|&Z-p&Wcoc!)!fuI(|E`*xb$*Cw;t7NuL@5} zUi_;;UOimeJ_#QBwb{B;AN+nnZjPO!hb{C?cM}q#*XjcPYns7{<1Mmu8-eSA-1A|B z1qiF<@iX96%ouQkEQ6jsd(ypocTTyBhGDRK_inPYvzb4CJ^(SXFkzyg{{V$1sx7xF zc5IjY{m%w(zWFBIx^=6`!er{ysl)(xt2Q!J8(t(!H4k$PovvNF@cdu?LSkZKwf8Pw zyqM>of4=hn!NI|d9zB|jj0_SI66%k;9XZ0hdGpw}c{6{sI4MIiRcp8p)u}U#9w$gm z6O|V2w(kvI{EKWcvN|)CRbH;vNLGG7e7D75#=m7%>#pgTS#gu~oG+UzKv=DVZkVd2 zNiA`({sZ9FJ52Gn1`P?26+(b#adkgkY_RkDG+ue-m1;ru<0lSr-Gmz%ZBcl{5<-lq zTBW{A_wl5!oIYd6GIi?IYVPvEg9o|krkl(u*hO}Frgr1L`|cwwEUZE2=J9w~ym&E- z7A>lL?v~pGsTT=0tv7r88Um{gCSEV-)`RLSU5?^@ZiIZBdfYLX@0(B;R1s- zEhp;o8t@>dR^)kuKn$yOa1vAD;VK}ixd%IX1hV%_A|fFwN>E?M{^(H~c?WGo$7mHU zgae1uXi{Z*gEo|ae@!zOF=8~AUV7;d{wDZ%(fh1l_bty`f@`dkicwUetdiGuW-(~c zpeow+VAgOgU%s4?SCukyRA)Z^D2uggGf7HHA~rU*j`I@a#wtV^=huaQJ@=e z)6WE#4^{{d7xd^U7%@uFAsG%Hq|Qv6p~LoV@XbnzyAEG6JOY06Sp6}phTzyS$+|U? 
zS6`9r-`8SG_7d=GOhst|gcSgoS(mqgc0k|eTC)WOkhvQ6?3TpEiMmw+1Z3~C5gDx` z5M=GJ6BVu3s6N?Jgtcg)lL(iP<^Kn>UYl-+dRa?-+OA zD;PRVVaPDEZ?Dq&6jX}q4I81*>mq^MZBET@3K9~{M&`t3_6B-AuyVPsnBPIpO97z&IZ_RiOE~^)0QtfrXf&XEDt^VAc-!^RosMCJC+_U2{AuI9SISBq=&+ zAw1H&iPn`KZzbaSP@dEoo7KSzC2C zaPWNIPyxa^JNOtF27Cx~Z;1mgTp-!@z0UQM6}tDVs;nRcL`Lcq7OGfn5{m_Nt#t!h z4hL-6vYFw-hgU1=lu~lQs5?~R@=6Z+BzTk?(UN##a zP+Fo;Uak-q?{Bq0yb!QqT^XZCkEtvct@J}mN|?2A17lPxp|z+hto3=At!vzL(@oVf ze|dR%y!-CETzg&hsWav*K0&)!L43R*vAy|kSeR&$vJaoQ@} zMoD~vDaoSw74naolYdne()zfuT3gR8)HGQ1Q4yC9x`epcs=DzB2?@-7?>$th(^s`s zE8ske&WnB*T?Y-~r$7B^wJhPXWy{FS%;bif6m0f$vv3wG#Kj9P8>BGsVnJ{SWbcQ> z_Dy#Kj~|zOu~ahsWy$VcEq==11J?j^ImarY0m3@}u@%??Tnz-Z)IrP2AafPCToS8Q z;4Zf!6hU%|pQxzT&0dFu2sUnzY}&AktFF4LN@+t#NC=6EiA?|MD>iCgq7+GlP$`uL zOKom`>2=`=4d-{i`(3pR;oiM_dE$vD7&k%Cx4$^6r5d$alHuWkUTJ2JcBgJEF;XHdh>>6AtF@{5u!n`y3IRgH!&@CV=# z&ZA<`M%^$1ywN7s;AgMC>WS7cP{F8RU_4L+JL`R#9~GIp%lPgvD; z)X2@vtqjbsT)C3y=xCCYlNmU0U}dJ!?RN9(tFQ9G2OltHsvzwm<-A;6zaR1rNhy^D zZU6uT%1J~)RKEXCvS^{16lf)E#LNP2z0fyH0Br-wzzpEZ)>;=uf$Od}U8}_QqE&hk zeLk4~p3WCbVbY|D+;`voH4fuAh5Y<{0OI1}DxW)eFqdbaeU=?Nc5=@z1eXm~&bte> ztPHkoHTdLX$)VOQB=7>y0e|H@t6C2b)`hViHv@kLI=1EqrS*oxi2du*Q% z)~+#lV+M5Y(uq6oxRdnsbRr`oYpvBX41@gqe7^eXD_(i!6(XbHo?j^ROg;B;s|GEc zfbw!P{pn*fZzX^Hb?+%4?)14z%xK}Ykp8lCm23b(6g60DaT@I zeBDw_kkV`?`h2Oux;19S`-BM-7%*S}iHV7oR#$$0KF5zA=gTj@WX+m208F@EFnFjS zCZ_FQo<5)H^6l6r$y_7JJ3D2WUjlyvmT{g|tOp3|!bBWqd(RcEy;ed%P%sP`CP?co z=-RdFnDwT@FwAjYyLU;xS}rLlFem1miI{d~Ez_VOg3g^qwOXsT1}UMmRPx=oX8ly& z*{dBt0Q?qs@5e?|0m3?epaOScW`B~-)`xcN1S75xbnPaHjT6{x&9}T11xic7H(7lbhuO~c8!o__ssLgAc!=v(%$AF& zb8-UV(4!}0Txw<;LqpB>81+1@>ht@}V%y>(bB5x#n@lN}W)2~3mM2}|_Pq?O=K`u! z4-nRm7b(CrU>t3riM>l#=s!Tvsf!>c=Cqrqw4w4TT?4ProXYI+z{vtLw)XvZ)GKv- z7V#bM0I()-i3bSlLR`Pzz)cm8PHpMr`VWMp4ua@th=>H6RbaJ2SeRKQZg*5Cw8q4- zgs7^js_Rf*CMhWeR~dLblCm<>QYt8b+yj!Voo#X*{lF?<7BCO+1upFXVO{93R=A3P z0^-~H$+n9#$G$t9W^bOu0U@Cf5@OE#tMAG*bhG*RWWi~M^LSwYJ}50YZ`Mr5lpcB( zv$r&W0Dg3E0(Su4RK!AL;L)ZY25<;7<9AWuat$DW(-;qY29yPeqD?$DVop{Lcs2n9 zPzU0Hr+~ZwX`J=PAxu?{@dVUS0tlc{2nHqq9|5ianY7xY5O@n13fKadT>t?zABmW9 zB(pK6{s!V*&5uoegDIRBn3d2<2>8+g04SL1r;{;fDYg&%Bn`qbU>D}l+>e3HfuAHm zSOJ_}Sb)naT*u*q_X&t_x1${D}c6keFRllU88`(Kq@UXJmWlo z1lD58xqJz10?GrIMS!pZIDgOyb4*!pAg#h0i?47i&u937>9Jh7?*p1o4w?D9)0m2I4#|JAAiJ6IPU-7S_ z!Xg`p*|;6mir3!}OxfV?G3%g?S6ELum~p66g|q<#2rGa9>b>SpJsgFNSXb0000EQ<(L%7|#VrslxVyVkT!R+3?|1(n zzW4Uc?#y8?bDG^Kbyay>?048mNJzMf3No5VNXWqd<`GQP|8P6Sc<8_O#zyjsBob0h zJmAR;?LUtEO;cV9scM|+@IRuguA(h_VF>!)l6O)0U-AD{YMho>|65DpuBoc^LKeG= zfr5v0b~~exT|K-;%iZB5?8l;Q>N$|fg@i=yrYIw+?Y+Eec$o30gh=+ig8J&hZQ%6C z7J7)LKZjFs`U{VVRfau*>sBxNLoerY%#n496ejST+dVIEO;qh_RuJp{34<*9iRMr? zI>5}8Sx-MjsQ9%4niQP3VN`Ljxj~}KAoOsZ^5vWhJH|Ys9a{UE;Vg^g@Dg1?z&;SD z2J*O}2GesMbT9*X&$+Mwg)<$@6-81e27Q4_^#q3)@IAJ(OyLVrVf70!FuiyVPxkxk z#hn>Z91zY}rXNx_$HGn$JE?hEMh49xzaL}T~Rlxrx>tH~CYTn*V^{z{~K%zIJ? 
zjIdjMqnmnAUd|`TpgpWu$F^O?Z33*2H3#$T)cfLM=C+Wq8@xORp6BpiGJA-VEGdSj z__is7!u?oR_wRw%9mx+J681Vc{%d;l4{L%W?4u@SrDvm$wQ!UC0&N`cXM8>6hsg00+!c{b|rHH?zwXq=T73x+HLQ5l4hY2+jl&!X*sy zKb-T`M5Jtv@BFQQ4bf=?3_isoiXpg76f3Ki&LR)Zc6Jhye&hF}G=!dj& zaJ)1}0YCNjUgty@3wx(RKdQCM->yC`CdmQ+M7^CRjl~aR#ByReYI6fps~vxf5DiR+ zl1wK5`+3?x7@aFN2{-Jm2U8x#yj^G^kRpy0|4n3oJwL6-gTV%5b^o}ZUyfKUk9kk^ z5fJ+qTx=^u5oynow>kQP>#v@fHusen60N&RM3e5+_l`3da+5i=S=s}r;+vWQaA>|+ zYexHR^clcKJTH%2dK_y$xt33;b=doz`GYGM)Ab;JTF>0=TN~fW{GIP1K>{^ zjECHyzee}zvLJW$QmYjdpnLoyTducO-ylblfICYHlfNEG8>aGOPaAKprOlbwd$*HB zFGoI&1aVezVx@1-MrOz<*^$?h?(t8h(sLDsHNyuk8!73{gXaumAy^24oW<*~vM5>b z3~}Db55Ks!!94t9X;yFA^&(IVRNJ=Zqztv$_J^MnU9$)@&0Uzj;Yw+X>h^s@vWP0B zccC@Ks}1&{Dxa?C+)#Sy>p@A9{K#VI*1Lj zQ>c9#+wm}t@9X#I&BLcI+4+(YXj~4~NXN=CErDO}MF zjgG`(qgd$fA`CA>*@txE`JXocQVErT=B1u;Yc0F)g|k-oQ|eA0ejG9QY3dFogbUm7 z@x=f83C8on@9X~D-=AIiuTK!*4^3m6)CfWDecwRilpU;Z=ur*BR#UEii{1Ac;LalE z%wadvR~LAa|7a647LP-hE(2EN?)-GGXnq{bPud7e+|B*QWvdn5E$Owv5xWAUHnHKz z8}}PGJsdHz@-DIFp1P~WS3Nj4_fdr3PQ{^@7SN~D7K54nhu zHCfzbe-h(~B$Oo)mRu%=o2A*kZVlYu*Dl!}M8{*t{QPbTBA00675%v6M zAoqT7#QlA`5csK>}n1oyQPc4Ov;-d1jf*;~F^dymUkUAH76OvWE zG1chi@%w+G?`#)zJLrVlydZ=p8ikbf`*EzgOE({p&0;0eZAk}HH7@0>756TR4?Ujc zDeZW&cs^d0#{Tm0%O?@2p|vOdt?$$pyB=jz#?dxJKz_%SF3-}IUD{mC)d2wG)twW0 zU9IBgtH1}S^CwCv%Fm0Rl7jlJJ%l2{*#4%qWnAao;s06=JOO0Qmzil36NJ52lpv{U zttBj#cR%7Fm?z?TZd!y-2Gw_1;#yqW^qre9;F+&Tsq+aKB`$->cQ(iK{`{i7=FB3! zlfU(#A95fSpz~`1FXnlto8k>^7CG z=-6>B)y9kJ$VEPsP^epqp%w9`t6XAtYKoKRU!H#aM^iFEA!_&qLXssjdELjP4iAfl zh?N-k*r^)tBdbB4vcDb86=y`+wxLmaJwu1k8NS=VbjO&oQc+D;?r|(M0pTE_`?7ZCQFDz;Y_r|MTrTn^EIKGn!F?tIP~1 zUZ1${cLBd3Srg4sYZ|QJ!Dw!}4`ZMqW7-R9hhT@yw3ry@Ag2RPX7&di{fuY24`He^ zYb_t56F%Ier!e8m#ESgsvR$_CHl8Msx>gTpqPEFuvO{fB$|dVg|>Ui}DpS3-Yn+?XAF}57UH2id0h1gCl$NEUg-T|AkN>s{YSRSJfd*$NY!gWpK_j2 zGHucBjhKfq@^wz@sPJ*F>`C3+N%P2P6r(CuwZARz%u+#l0*248-J)I`BELpy30;Ga zW1z7_Fr~E%r`OWn*JV^e@&XPlotlzq`eMG_8dAez!4vi!$@S53VLIgRIGP^%EB2wM zy-_dh8$*$Q*#Ml5rds;5oC2!MFE-oQuZ>VDkych+C!>$CuL_Kd`0Vz@VR79K^w~Ws zOQjUfb}_llBM!km_8|Uwgo@9p%+~?D7^uW**e`Ga!k|OhQEcZuKeBP{%e`x%@!?1fI~-0OhGEaNXV#)FR#j60+6oD)3D#)?3S}v299}uT7lmw z7SuOqR>D&0YbSqmS=Rn5*Z7;A62#_4nK3Xa*FJ@~*sUNt;d}S7{tEmnGN>@shKYoGa}8xhxrkgW8+@A?qCP1>7@krDN2LFt*AiuP)uaDk*P~&_z7d;E6G$~fd1YP7Q;%4R8dUe>r(PF z{GjacB%ZDe4=x7rmoBPu&6>!rpD}t5tIqhT)d)5`Ect`B(6FNa6?iS!bMkGuh#3nQ z`Plh1zkC?+L&s4fnh4M4!kyYO@vF$hnU9?7B|_O=rGBKJ<*QxCWjbC{RtJYuE3x=` z=UG+9kwKY{h@4(ztK>-$CvtCVvB89i)I@RD6w^Er64R{`iPQ)O=-;lj9NBzcn=73C zPRW%TC;c6h{-FgarY*uNfr7RjlU=Vwr=D8GjL2=FZkD`4tN-_=Zmd_m^mo42lk75m z$Z|!O>GOyBPg!UJ<(@&ZZ8}-PFc*IK5yctHY$`;+7+Vb*Ao6WBC1hk-A2)0SV*H*2 zsJYxi^UeBGh(!W)bj_4a0KfwWX`g$S(DIdcm#Y3si^S;P*IJRw^o>+$!Z#9v)$MT? 
z+QX>s*&sURk{Q^M8jJIp!m z{jn*mu;;Aj9-aE?C1A3yz5IL7dU~PnA|(iUG$h?*qvApO z7+j&WKFSX_M{oAQa#6KSA^?nBgZL@$7HgL5; zU5?fpD zo67ADCH7k1&X0wh2Nc7;6_$+<7{+329iL zq9VaK+XHpaW(`S5W-!Y&JXOS{k@g(Yh1HJ<=XWsujf?7xT`%%gRLrd5M}yBVv?vBL zHo7;>Ha0QydMEK>O;>*9?A7fSOP7={e-SLkEE0Cok!lE@R}MOK^H#oPGd zBR5C2)c{3+qfe@+9Dvhfeh#}y1jbvVktavADv%#^_cC(K3&tsx*mxVJE0#KCx{3Mg z2jjsggZ5sFk8h;T4<1yl>hx9Gi7)g%NP0l&iag>&JseV6Xm}?>e~aiU_Kzz8AGxhA zhF^BJA>$QUaujR>w{+~iHzW)rgGp_-S>3Lj=@cHQ=aITC)cWlB;NoRWGgp`;QYQhJr2a#)dKfsOf&`5+jAhM9#(``xRXCzSAG2G+kV+zxc2nk7ns6R-Yb^N z9_|OSQzTqb4|mKJjUqUfIT6600xa$;YZ^M z0tv->OgCGknEvkBwO&IxCE0s2jgWJj@l|T-`8dwm-dP}_0D;Yij@WZXSZYve#-f&x zFo*hVqZ4Bb)AkLXJSsptA0I4#R;*=Asz&kS-e)~KI-YKW$dCWLgnueeHPlV)KPD3r z>C;(a0E&+Vt0*0Hi9ojhQfe*orwhdB*7FJ#CynyPWPj9MjKz?Z^v!9k8OC9e`T?vH z)w-nc;x!c{twU`!?rF(ahCgX9+ZO05wJnWZlTU(*;g8UMgGJ!1t8vzmv@yE%uD2Z# z@oPv48mER5?%Y`EWxm2UkH<;t3+j%Ntin659tz{7%k-e=R%z+3h6<&>HK<%PsMv z#taW;9X@PT%1b6;ke7pQ-?2j0Us)d3hXcY|O6o=#ZQ0Z@tSWDC zzw@Py0x*~K;5va(85PD{~NJ?v;xQ;O9{uF}~t;!hmOS#)wD5k%q>X&sc) z2_G9?_})~U&E>74RPg2rrG#c`ACHz5q@l%I404)6nB1RBJsM?`O9GlT1Cjd~QvzL$ zk=#Ey!T@Qq6>VUN+EW1xeXDtM1uE6OmXNNa?b}Hg>&@=5YS3*TY1zs3plQ+7kLB&i z^^T$zBA`^+^@#1;8w*Dp1w}1d&{P+Ku27Q*Ch)rXtkc5Z%IAP%h}Cpfsi>}+6F$rR zs(=Do0QUGdi9ILn+An*!g%RM6O66stZOqB=#2Dg|ejR%Xi6Fl(GxRAo64R+dKv}CG zo@X|ngu5BxG=$xzgrgl^Nn_3Cf}+2VX~sLObZWV~FUG4+o~n$CdGCjRRT*4c!cxKC zhMk2h0~dk|fu0NxN+<{+lzp$a`x3VuDVQ1U6Q$GdoM8m7nw`<(5wgK(M1nJ{)zu!- z8n6@wi$YsQniW@hofTQ(Lg^shdkbjpb9do3#$1&U!|&2CkPoLkoP%AHeIjLTqtG)6BEyo;ahmt2;gp5JHxYY$;)Tjuo_L-dwN0DhkgLmB)#Xu8gkr*_FQ^7#)U3> zU%+rt{R0(%gD5PR7ff|N>jO(zQ^?S34H&G?MgDeO&HT%cpt36o=ZP|*O*E~99vZLC z24pNa`>WYt`FrGC{TF;E6zZYXtX^nF<`_|pt*)k$j7eKZ*#nN*fbJlEoPp+%F^#x= zp%Uul3w`S@nt!1~kcIl=h&cZX^GF3Kovu`%XxwckSr|9xph9Nomw1-@uU$#m(mwnk z7nkqP9it($>nc^~4)EtrpQalkyB^^eANdfbwhj76lNyxU{l^F3an!Th{-!Xrd0Qxe z2jsHc;XOZ%FVK_M{2M$iTD2P(O={Ul{6Hwr2cQ{^mm@QQQV6WaeKN7oRk62}s7o$f zq2#)b`Mtgw+TtM%whGpcT-Liz>qO-H}GL6l-o}Kqv4J zxrInEgJ6zXn}x>|46m^CJ({a0*~R~}LpB;xTRAW5JsZAKbreWSXu1?YH+{FmIUTe= z`TUFmB-ojcL#TUth02vTSh3Q&Z~I*VrT?h7Gk-U3?44D82kQkd^N(XpBYKp?tPIp6 zHumGnZ%#TuVtXZ7L1p8A(w(qOgAcx44=fm){&j2#o@L&Mx@MQvoJA%*T@CbdF%GJK zLI);L2t}~=Y&D~Tboty+M#g6Zeu8Ch&mpLvNAtX2`a7k08pZIqJdVADsKEHNq$P~YSW*n|=kHUqv#JcR)Aby|w?Dw;u|h7KuRp;k6&l>j>+)(4g_$G4QIy?? 
zZFkmGv-)O4rlZfcrS4x|NBJ&E3|Qxq8g@)EH=sefMTQ*bDGGe*KjWhbuz?T|f11wd zMs~dH*a%`;Xq(FlzS=f}`Hi|^Ewu>%()g2&ttEqsvIE~h9C0o1bC|e~&xozjLeZRc zf4jUeLS+;N%YKV^O6QEBY^G#_gGbautX^Fcn$?c!6HrJk*G~?iHiW~#8p?nuaA7={ zE}F96WCc8#J)Lm@e8b3T9FTHDJ8SBBoJ0V6cEG zo|k0hQynUDIs-ltlULh6dQK)Yxo$bU1DP+>+;_=DXY5kx#>iIwIhnpL$t#3diG*=F zc~KD5gtyYGWGwozVgYThO#!vYCJ&l%ef6NkTv7n9`d4w@prPefH}J6edhv)_2zY2T zgfoJ)0IiP)UhQq*DeYdKQ^-2nWFs{jUMbe7fr6$kAAkiZ{;{v!P50e$-AK~di-|pv z$+W0I#IZ^6)X*bNpju~A?NJxucjjbI2ui|FPy^+;#lt zS8#uLTjO+!X%|eOK_|plupgBIoBxoV$wJgsnPEWYpfX@5a9PA@NZ22t3Z_UgE=s|K z?1V|uqQ?7v?fc;HL1fMT%Rp8y@sRV>=MW1*DTxPv_9`m1nSTZMk%hz@6cw6fe%u=a z#zRsgX1`;LBon?7wekcNOnj?M9!a#wjq8+~cl~Ocg#p$9w6T@E5CNBxzey_}-f+}B z=&iylIWSQ)gu-~xHAJ@`CG57=UDbJhwb!n&x#HR}ADgJnhlq{y-*&r=!vpMQUGIxQ z!YUHSPOBx)7`@xr(VIT(ma)nO9N+GmL2=8~%v-`@OC)?Popb%U=cA7(pBXRSrZ6|H z)xA6SWXXl;CNGdVZa47rqPrSxt!GY=FUgk(4+8ev#eD9tT?|h(?XS-%~4vJoME`i$avlD7Qj@W2Z{yUr2F-!cql(35l=51sA4k>OD;qfJDC1= r)W|CfnsBizzSUymj_diCh(O+T^__llv_=2#MT?{;t143^Wg7B-hxs2Q literal 0 HcmV?d00001 diff --git a/assets/windows/icon.icns b/assets/windows/icon.icns new file mode 100644 index 0000000000000000000000000000000000000000..339a8daefdc240792904062c3acc42f1dfa3926d GIT binary patch literal 508472 zcmeFX^N%i0@TmEYZQJ%4+qUNn&e*nX+qP}nwr$(m&v&!gyFct-aC1AU>PmMfophzE zEA_Owp^YN|Vt~Wkkm(NqfOQ?NASVt7jSUR|0N^AgM3nyP1^!1M!T-}0=Jq=P08qTS zu&{!purQ&5gRP0Vl`#N78Sfo8DGjWMI>J9%04ZD$hE50mM;+TFGo{=g65Ig%FI`4z zVcsEQjLO^}vqd4ZdBa7^`4VSqohG<|0>}cLUx8ZLF{c_sGg17uEP`F$(=A7SzfEK( z-d!XBbcQ);p}r4acb_Y7pfJJKoq5sjvH&00w^lA+e2t|T<019pSlBvxp^=!xf1 zfD9MAb>j%fq=wbGs#aSZgLE|SI)Y9F8~fzNIV?#8rb!$C0m=9%E))>)KuJS5Oyn4O02i*a2o7H7>6ufm{fU{Q72c}N^`iI88nbuI z^`C0Owz;P@>Vy}|%OVE;H!$V|b6xS+UZYSm{8pI$9zn%W2{34znt^q!+-&s+N4 z@0-@$TRxE=wI5&QzBj%UU*G;|5WEnYRf?WlkStI-$Am#U*!rOem|0Zi0Kv*GQ6=>% zs=tf)t;*~)4B-H2m=hTkrks`lB|Xdpx_+A(4}TkSKdiP6>N6s~$=(B_nA84ht=5mu zi%r$c!pf1F4RnOGgd+)wt-U{Zhk^Zi4tJlLyyRrGG;aBm$ff6)sS6e=Xsaq-+h{d9 z&FMOF;@qjG=E;enM@PJ^U(KeGNmf8S=c025P;L*@NT$(qdCs`%YN=p(>nJ z0(VjR*qM~rKB0xA^X?rd&D))YNuK9z=IJR6Z;!W<2c1LNj!q@8NvINnyMD^}04CxUXqbIC7gcW&QN`bd-B>bh#hAPK5BJ&5zX+4 z88j6HmG>3UpADto+2nrHGk2J}n0%-91~GRE`KWqVhSIfJ%^r3HyY&$pNA@Oj8G*y_ z!jo`__g#7VWE|0PEy_oNEcH<@*>12%0s$t040{OR0VyHU)b|r3wrn@=O=BS%*00C2 z+we{>9QE|tXLDDeLt>070N4D zoHX>MRIl>*vJe1%A3-v#$8J$-kMm9IlV48p^vQ zdHD7~tf;^k)6P@Q-=5B}#F;~)oVd3lJcf`g3q}=0VjGmz6zt_C{n!aNw@vzB1j6-m z<{^^-UR?JOH{AlwDxsK?WtslZ<>BiiKk#}Er*AS?tb~6@@GTNvd(7hkRq?SBj#D;A zj3StkjQa8}$a}B&)`?*r7K`(jYvT%<2~sk{f^i&*T6E|qohDUN$!H3avn>);b#s%U zm*j^BZhJ~nj|#f+4O{VT&|8^@7$j)ZSplxGXiXD5K)2`;fcYw5vKC67z83R_k+C8r= zo{5!0Jl7Mx+WCWTI)T}?OZjGI9s{T7;lL@-B(}4iY zT_a(g1!TO0qd%mH$jHcKRG%{>G_Kjn4HmI7U9_SpGG}Dpo}Ehr$zy`atBYs(lRb<(@i-=FLXt$?RRVpx7HzMMiho0@8(2BWP0uhx&E*Pyi-EzS z^)}v)fNXE9Icfr;hTkKVw-0on0Nv|>!w@&I{xuA^i9r4^%jLRGgr*@cvPt&e2}Q_T zJCNPQln6J+DX^5vTt#{^{}QxaMI)ZZox)c!k`P|mCN4KGi<-9+JSm<=mZjH$FZ8Ui z0O9NQ1*$x3RE)BVP)`uMJ|^Rx+KKoeTibH?ymG8WU~>xbN3K7oIUNr!h~z4S|HPk| zpCFf5LPE>SD-xqlJ_YmoiGYJK#b9_5z^9AMM$qLDj_5&JQ?`?6UjHp>O;~n4Tzycl z+vO=TJvL~!!us9ws4dbn2uba%>oAb$Bjn_4s-K`24O#-gSvgzssA&eNm_v%G(UzvA zpkWPP;+Zx}FSvHHLs~Hy@kO`F!4(b{*eCcc477p(W#8BZcW%nkuY<^{p1; zxVXT)#q&v?Hxz=MXBFh{g6;^?2w9cy4Odupz(X7mNzNMVnAr)QG2VE!1* zwpKrA@0$-l3(nnBn7$@&ju=xL3?F;YTJ;rCZ^IIUa=nG*qmzzisl@*-f88OvIke;a zsaVpH3@53R4pdI|dM`QAm|WY+U*(2%gS&n2qrdhoGXC?6sYD;_ zvr!B%q(GQ(^OoqnRLb^$(FK`S8TN)s(K5MPKe&KK+ZH4BBk5mqM(Pc4RYJL~1*|P! 
z{Pi!*EV?NzFJsbPF3iNRsR6YY;;Qm(TusnnG{I06@Jk|9-Zx-J`_GY%Dw!0gF+ndU z=W_l}0i=5L)1Mc~!#G+`UW%Z1pHXymApf?@ZZJ)KDPCdnUJH|U5hR1UzXN9LAarr9 zmt<7xC{Nb*~;i?nwlOVGUF>+>{C$A&X; zt{Dp<8JoNw_Vb9MAg;U%a>lj5*MFe_)99OY*0Q&Lvqpw)HLC(!V0l$P$=$~-IvV6V>wo^18^QL zEO)D;7g{NS?er36+o|>5D-FX1^i7vazkWm=deK564&-!w(GX3t1c+&|vx#Ka;;JB! z%);nPIP|qK`a0*hhtXFw6s${UH3&9c@PYnuH?^R94wyD^FM*uB!XUOsUrgLOk|R^V zBUk+%m{n$RLEL~SrgBG7?!kxRwV`v5$|?j_G&0WAAHK`Abqx%%{EO$_i@G6*;Grsi z34lZjpd;v3?D>i7&Y3+$B2+PNJOAUPxCMdiC_eBoN?gjFpF?!jKcPldX_df+;E;?B zdQ)8u^w+gcG5p*u%qwY9__I3fOTy@-K9(ee;$p*?rpR!0-*I6J7^iywqJ7ZypheG# zOPg+6&&i()S5(%@xheQG&KIkWqns}P8;-~1e242oiv^vjx5mg+htZzN>t0%`!r0S0 z;HmeU-h3L1a-{q)050g@taR^2mnNhBbyg`ey)1}9&xDv?0YUx_>qDlmg$v{smT70# zRqo%q8p$Y`@&|(^ZD|#YWKFFK4i&HWQOwWZFcumj(NRH`PAny^SvG((De1x1IPfC%-h3hvqM9R?2X&o?Quyb`q*0MO-gq7U~&06 z3EiBxgA1yNj{WXU6DJT%TrNMg>@X&Q0gepo8#4IgtVF?FH=XwZf${ufi+jXlT8~Pb#$7IAjY6NZmcV0U@FOTcc8i^}|TtCuB}TlyQ;KZIt-YqtlpN zNQ?f9F@dNF>*~r+!qmjHdf=hd-0^-Lh@u&k zU=J2jyDWP_F=Pi{eQZW#5!|$>t7Vv%86-!C-Ldq)A?F|14SzIjJ)EXy@iNv0Wj@_7 zNrinn+rjVMDii$tb{U|?B?q{Xc8?H6H$n?)*D1C>Cm& zKDZx7n{%nk#Q)Twe1ifEY+TaUbZXBL?{U{cav5+2gFL^0uh1cnfp$J*pN&>$b#jL~ ze#oZYh~N#?T3xRCMQCCYAzjm#H6i5!4gr$}R+|iCXaNH9kwITSRr<^*>R0^!_+gNE zX9x<@!Pl@$;7wFNJW)zgga`X=>=Kg51icCxPAYpg*s08zgD+t1IQW-biWrVaHRBFr z!1$zybQ%GYkQ2uqxWrUQJf!#Nui1;K075%eQ41!1dHj+=ZcF{pPZ#>~F zEU^cQ7RDQt#A&S*G;kjWjEihdGUWSNk4`q;p~ca|nVoeNQVA7`I+cSvy?f6GyEjla z2(N(TYrzybYD&n7wbQssRZNtaRK^csy9d6IIhk>6UDSr3yNoeXEBy7E`yyeUte%mF z9Z~eQPtJ-oK7VCIaUzuvg?ffo)>>R8`R}_y?&p03HyyI_hVTz*0y+b4Yfqx&8+RDz z3L(k#5H;D{47W+wK9WykL6I3&Xuf9|EmhmQW+?}IKM0}3Ev1a9Vw1T9IWw{>|Pjb{%B4OB7&%ehuVu{b%}o|2T5Y~#6o<$z^+TywBp)+bFb)c-(f&t zj3b~+`pAjt`DHMVqyo^?i2d0XeKm+_WCGdf2D^4$`1y`TF{RlrOlP)`t_q3USfy-|t~ z>qVID!o%Q8hqprWRCS0YTj=TrzkFEkU6yv3O!(<~4zJ3%{@x(F@`db>?wn+{aqZ2F zJf8jM1oGo4+C?=aw1BJ~#uy&%B8`LRu7Qtm#vEp`BC!`-a#Avoi#njpe3h~tiK*Fk zY(J3auV_4HYUpB*pQ8=hc3{OzB*f!w+nIt)6EYnxCw?>D=7uV>MDGJi;5vQn+0@<^^cfGRm^G+JO01ugoBmPXz>_7 z3;R291qlXa3RXS-74g9@z5&Vd{!A)Wha5>f5;IUmbLreiV&=20)(;OYn(x@7hU(Aq z@u9Y3YU^Yb=m`4i*A=NKYq@iPhPY42n+m*thxy&l;swaWr$~Cb)A1G(g6KV8X~^-w z0Iq1!7-2p_6z!v!xKQjQXzV8IwSI4~BdPc^qp8_8zhd|cg;1x|Z>drHwRtWAWWxMK zon<6`Qoyzjo6`0aRF=QsqB>p_^$X<~acf262;JJ>5}}8b0k&;t=aMHmeHj3tFOmA5 z!Yc2IThM_3)Xm-9eLmEK&l@3LqBr<X+G&d?g@B`6gjr zv5oMYsCUyr3@_{A7FB2~HPL4g_jTsS+0m1KlR> z$6JwKG5}OWGyrofa2bQYJO(jQSO-4n#J}&+HW`%{4IIrm@M?&mL^lN@YlVc71nUT4 z^#H7SHJN3fhrdgJH)#Y5b0Jn<%)%0zxd}C}2Ks~9d_F*X50B%Ie1L^0tVx$WA!3k% zTuKv6hy*f}5e<(CLl8|-^}?1Z`I_n9QQFCmz-r4Sl3)&-zyDf(+z^paV$zAPqoB${ zAq3AUUqkV+3D8^@nUq;NyM%PD?G)**F)nZ{PD3%1kZII<$Gq2az58StawHb#3T`JW zrP#A#Xo*kq4V{_byhPED;XdR-DmoZ>UzmuwHaRP7F6MPM!;Mmfjf0Z!+W?>#PKcNM zv4Eo=u04^POQEcA?w&q3t<~X9S4qEPvS3xj>(g}A+Zc=ZW@X1QsUJ>hU#aJm1IznP(Kni~{{(Fv~fHLNDJ zPrA#xZCe!klQ)$Kswha&#HU_Dftf(2W(hxX{Y8<&@v{YTBO;k6U}4(}49jam0XG2I zz2TnF_)>6Nz^Cev&Z@IZFJX@_?YaU2lxGS>O-J0qF)RLhaG9Jm(6SN|A^ZWfIw2Yr z;&DO++j-=ykr>ewFYv2*>sipT;2oM)88AyV(x=po;i)SiHT#qL2L_t~@l^w}{uZw? z!n(d2XQmH=gM}B}(?lcet_cqa4ntzFNXfj#nHo{R0V1J5mrw(_P5J~#bN{19WLpGO ziD#*ZhQ|e5#7_irU>k(d>~=qqQ&u|~`VcUaZwTg)n^3m99?S6&xH8`|67sun*wNt{ z3E+_fzmQ)8npY8E5?Dm! 
z>Wt&x3b*S3E0so>=K~Q5hi(DHAPY!74t6g*_(nYj0!W2CsV?|Pn}@9KzxdF9XLdSe z{R&7#{{@Xr`U%H_i$(e(a}UqbXsQqghA`RyH5}Gc`X~>9wP6jf3!mEeUna;S##NsSyfgHU2j9zVb~K>Fl%)TY)t?LJ^+&a@=k zpWYTVh=XbGF}j#MRKx2M}#(#G@a{*H>v+Y(NZo{fWK& zNa(V2SU7@=gyi344p<RbdQgCN0z>gD{X)9EeD{5f%6~R*kk-WTit7WkZrl8vtHczj zJshaY=MqD2kN955JVTF~eyB>yH)}Cu0W*k@Y4V^$7SRVTt))FrF)TfGkPQ5eP`}nU z36OVk=KD|@Me0|fRH;2xH-dXvK9ju+d#nr@pIPS#kOBZmF1eOs9{`<3=Jts8Bm1Q9~xcXPZ*Coqc`)i z{>JCahja6I$_&qO3V>uh37r8C6hs!(6j2Z|Z?4Q*l*)XZk@I-{eZ9)`W`0~Cr(?4) zciHJ$)!Eij)%l+$A2xaZ2b|t;?|-SmFrd-U`o-<@$7^PcQ_s!B-b%`Cs_nlchjx*E zG*6YKFEyLn?ybKyAK5@^@}2f&i%3slc@2G3Te=9;{Xc^t=F>5#v6H+?PjNuF@S%`2 z`;l*5cSh?Svzcn{Q1~x6AYSe_)l|;1bOZ=A`MI39GgidtZSQ^L3p(fLceiJF>3zp` z5eke?)2t9HzrR0MO_jaW1gV@~;iRpn`wARcRq)i|M#LW=Z|BxL!TmBerIzPMU-Qnz zs^YD$Y+jvrAI4MLoc6!^|0R;qi6!`_nGX^YkUSY05?5_muEgpGdt z2Ljr00Hc|mfQst;!Q*;neDbtC*pdsCpRNqy+Zpj~q+;);#pM3iFA7CRbgcS7O@01= zx_mafYF1ADt7n%Aem^?&(7vN?1HKo;`;dtzk%X}VbcfS?7`S$_o3jVex6t1?t*QXj z6^de|VM4t=i2KN;bWc!geX+Q<`7BxWcH85!x+du%QQ_hc;hVigRH;Y= z>VyAhsh{1l3emw;@%}0fIRzQl*f+5FX^xSIav*qFwC1n7FJ;?Z`ZvywNlw{+{1U-7 z4(AZ1jg(@5A)Z1X30*GKF&agUt`s={{m{$Od49qhzf{?;O)H0b_s)tZD(kR^G z`cljI$$gC>W4H`^g0%hJ@S(f{ubp$~Tx^{W&1Wdxy-)hkNj%^|jbZ^xrNT`z3@N5{ zDE6R=cRFs`Uq+aRgA7eN^%Ri^Hm1zc<0SV0KZ4(_$POMkPUv6)v^scesrx=Ttp3)n zX=$TW>%BAQeepIJ5;OI2%+88UVbS*n4mZ? zJf@!6GxPLNlf(Y#bgPMxlpsNPXSqj|mcYa6m;p-)(m=yhbxzYp_^Apl;c<=6Zu1co zjHA4Ip>L#1QTbE@2t#;XYw&+j1X=bKp`&rqXh$$e1!w&A(jv0(cgyvmq`>>`fLg_Vbk1ji3-KQBbJ z6a*uqQIZ7_Jq;MIIDl8;v4Et}iDY~!^|aeE@ntfYrezAQ zf=)Qk(?jBa!I;x5cyZqkAxI+`=;#skmm2RS8sBij=GV|c?V+T?CQ9^K>F+4zU>m-io5yOfM~)W3u*aQ(wH<_?ALJK<8C=AvgefhE zAuUe>-7m#p->;2O+qX;*oRf!joYM}fv8jXE2c!riT`_wc8@RU z9+F=-Mh=M;*yAKo9&(ewMy8=zr%Q>QL~J)VL!1jCD(Y1J3Bzlzw8UOfX$^g@q+|ek*DQ| z{YQ^Hhp+YlmBiShFfd&Q{i5y%wVpV`Cx+7n%LECCt~u6d7&w6kCscn8X3CPn+SDos zUYiBnusQbaoGVfL9mRwU5I++(!s(PWC{Q+-C>Ke3+8A|{bnYU75q$;#v3dP!e~ z7C2&$FvZaaLc$_k@%deG^wOh6;jwwLSX6{OdZG!$VtKaAwGv3TdAOzcl_Cd$eaBLi z+5=rTd9RnT1Rmw%6}Y*(Ozmwo${p#cVMH7zc|pyFUiP7j^DQR3AE5`gyP8gw?Bn~H z`JgJ+fHxOv{?W2Dv|792)aWm@A?HJ8Q444>Z1IK+Kaq$(;>P|^GSJ}pKfRa!d%{S)4SBUdkAx-kr<7O1b2FkKEg zhzC<32Cty+;_8Of99EB4iupoI$@KI~AdHBNM4jcn~-7Zyh zx-jmvEf%@e)S5q@XDN+OOuOlc298!f5pBqzYNAf3%GyZz5{D2J;kEQMh`%7lm(~&> z{Nd0;J+57%xPiznIFO8BX=qx()RXv82Rbf%@+bwLYUm|Ie-3d(-SK zgJu>yHNRQcK>}#F8h&8ORD=L@4(3K-dA>o0hJt}U_=yvun)Wc_?~a-5hMGkU72l|U zc}DTfcZO+4DjuK~H=equ!O5@9L?&cuWjsrp%i;L0P*(Lo01Olj3UhY4%oe_A=xV>5 z7)%+aG?mPdnUTxig041zYA~+o7S9%K*RxT{igTUzJaj?#m`@plxlJebfOaC|Qq-A5 zlkXR`s=U(%-N(hxSgrT;(yb~kkI8DjvxgMePdTe){yT)ldKzo+gCEV>a()(D3md#x zhnXAR>pno$O4C;3JQdKr3dr)8nC%p#h5| zCoIRJAGX>Ol<&4;*inuV?KgSKXcbNr$CcalNBwIu33i-^Nd^T>&xeU}o8%?yIh=fr zX3EJQ(nmXe?qPIxf=;uw^8~T*ljDyrM1!Z6bWSDnMZQK) zS!PKh(y=BYs)80?O6rTiyk=mkvap5-7kki=qWr)3l+J0*EG*>8uv0H#KC?}R=vm~rr82AnCkHo(FO6(>uz|y*=Mwi-4b(??5YIkVYn7_*O=;zqeKZgsuX85t9 zTdVubbF3|voE*6VgK%t3&=5YD)Cfl-f?!BF^|8)=UoA=^&~K3ot*X8qlf z5ulHz|81ev^|I1zkq8j1vpl=4_rZl)JMG~+>B6KBhZq6(cgBzIy>)8uu^!>D@Yz$38rkA^ z#x%i+Slp2-^TYlbEisOm)lFm&J0TxeoYraAq{YZ}tCivbdA!a^tUge`T?*?>UJY|u zdm2v?XiD>U@iv$A*8o>$DfPdh^hnVW;vF|TG`K>u>m7iGaky}f2+)gY)j)V5IY25U zxoX3@mR}4+(+7YNQ?QXS3XB@+7{+R

af~)-5NheMNfQ)=;ref!@=J_fEyC{e!sw zuZZ=cU7c6CI|R(fT-Wu5{en`Hn?&~-v*uExnJ4_U zIRIRRSZ6UDHuN7(0ywADjZ><}YIBMVGOwQW0V(A~$Ff-MlZN zT-TXe#(*Q1!EIs^{&{*Dxvw!Yk1Dk7Cq{^!&%s45Z2AU$;&Fkv#dDFUSi1;;QDQ

=)I*1ixCkxf!?w)^_s7&-CJimUfaEI149gz1}7hZ)5ztm~?q>3^6)GEt9A< zJ=h1TA|&WK9=LVmt>=T^^zh%?ddMBV! zU-AxcAHOBk!5C~C$qX{iU5+8TG{c^#c>+U~ z2!>G~d#IiONB-6C@3^oncKGH#XBeL-GijrC3K3>n?&(;0>nA^GstG% z)gJjK7fj-V zdcd*-5}-C4?q9Npetxr|;(;)3_rl$918#G!HC`Qijy-%iOA4GgysLsX)wtLa$4)}^ zAvf%?Y4ncHQ$bq`ar8aMLVXK@5zpc};AL=Ukei(2(ot{jmglY$I6)xm`=t=(g0X!Q z574!0wm{EgMUJ}M0XIgN+qmb6;KgC*EJFR`hT8t(GVwcoNZEeyIZxC#Wc{nnHw7DB zZ)CJXH$`}z0CC<^@9z282u|<7K3#UpAWo3BEIt{6P!3%ecBFknrdM6#qw7VTK9@=V z(->{ou!adX&hEw6U(Jj>ZnnI=1M-^OIHHC9V5!-|!m(B~d@&{ICM|K>6A)I6J}wT# zC`4bje46DrlmEU7WO+_cr2py5EyB<1@{0x~A!iPx%dIC`Ef6l}UB7Rv8s-qDJVLx= zdS%zIOfv^ADL5b0lT$aQ?KyYj@>MzI`kEe`E~-DnfI`O&!<|g1NpS zUx4kHftkf!Q_u$=OajtipN$d{uL0-T!TAB#LgRw znq=DGsDzGs|SzY&fHfFr9TbA1V{Z2>n5g+r|Y1=(t$YRdzG6x&Bq2HWGm^wECUf@*X6&qm;!jL>~P zO(i}PpR-Hwbaq;NKZL*;V}WKV^p%ly-qv2F2IK4!8$xeTLT~+3mb;p{Pt0n*6uz2a z*knMRP}3d(wF+c%FJM436uoG$bNMHImZ@| zh}r?B=J-7Q1#nUF2UGRbh%gnH9VU8?vI|=TB$C!cUT*_D6FB^4S$|JXr^N#tN`1xB zJ&SBr$j5-tRoC;_g*d5$R*D$bKSzlN_S$LWoW+vg4}Y<#?+9&q`JwZz@IW^M2vjy{ zx2(i{bZI@(g8L+`VOs)AWYauUO@oMQksmf(8SB3GpcNN@q1N(C=Cb7zEMc3r4jWVg zYDA^#BPRz@c_tBZtDZmcy3pJ zj+@trjz%;>y29OwZdR|PRg)Rh7VO$c#IXXqMhUi^s zEY0JEfwR{(hzvv_K&NIm%p8cvhU*uZL*KDKp=q54{%sX*;diI#Va}JXVFkFb@`b8l z_R2?zJXcy-!KfV3B$%~3FgelhYHnqv0&rQogz=BMd9qk7n-24#|gqYq{AXmL_B$FbKZ=42){vk8_ zfQh+VUyzmC9z6;AY|L~+YWO5VQ!PX=(9PkBrT%YZ zTm_w2HicGFg8zsFlg*0J{W!x8#R4<&Pb%v7V4h%?sqKQmCjS+BD!TMrc?>`m6rAd7#=%j<9X+GTElI7V*glzc^-q!+zEk^8E z5=-sSR-yJyyE-sv@(=K;d6lknJ>{t#UaS_g9oYDU9Ji#~iFfAV6O!nnos4S4gQ{^DrYRQdavTGBU&S%0!;>e}av;u?^xJY7~jfbW5VR zru|0NtRridG7{$fb&rK6da_soZ9=hY{RBvS*fX#^&YN^Wft?82Pm-&&*ngHl8Px4x z-prp(r(V+yDUK!95Xj=!)?lW4aZdq%dElTWn1))pXHEz}`eqRREX)}(9Y&0m6-vGv zpJ4!5VGz_nkAqNToJdJJ?=St;8B;B#(aVAk9QQ_AZJe>WI9cru_lX&zI%y9sZ)u5y z0=f1COzCN45`lPG96Xcc8B8}GA`WmmAiaLN&-LJd#E_hNp&*zHxojpLq22a0NaAiN zqd)F#jV~TVpXv^m#K2IvqGj!dwj#)X3OpZ=?Zxb3QWkC-jHG4cDGnyI8)#=lJrDZe zms?Z2yMAZyp|1i%7|4S(Oz+`^fr^{n&Ct=g=H_y4k)l*(bfYPl3k`P z9zgz@_)dohVG1KHYbyJthMn?*WXuc#W!j&&g%k676aDE-3=`opxgZ<)t>pAn{CYN( zk33QtLU?M9^qY;9Fu>>R%k(0;CzpG-`<5qJGD^!GB`#BLjjLP*Y#m;dV8Uk>+hHHG z*g?+}TRC=t>e;XFFk=iq#`01r<->x1`OL3GU7NX*QbU@OjWAp&3h|#3)d)hdRnD*r zAk15al>q-SX&8OnZF5dNY3W+BhF)3O4L+l+Bc?!_AyH8ukTNMx9$Kw>pDtd}3u8f6 zWN7EL!784FFh>+4S7jnaIy+g0Ww)JifE^b*MKQ;zv*`LKD)!1K8a?8_h*bxoa8UoR z@n|3+S#eYo8*wyj3L3+*C3agn)F6Fld%jjJqQ}^9)d7h#Jlns2;dcky^>x9E#k>cnJK(Is_HH1J zdBLJLdl_iwQOJ6FUL!lh2_@CKr&*K*SB8%*w@7x++U}V8K~r1PAVSZ2R9Q%CX$#jl za;1kEGi>T5*Hyh10MRtR8(;zi>h7(xIpA9Lp_-0WXk=o>2tbBAx@_Q89E#3Jx63XtwXJ0;46?d|nCvSiBrT;;q1#M_Cx)GPnt&r} z;ardlsveB+pK3QyO4&*%S&DKQ19V+)0#sv*&mtl7Ke1b;s#5}kBFpi>ukUC!xC~%x z7r~?2kqiv5#oRzUH?lG;so7-by0DHfSgbZ-8`6da!i_+a4Klu37!*@XAI$@MfkjU0 zLrj9~h{lWvYY~C)ZPQc0l*@2T-w~zoNg^*VmLZQ5AnL6&*gb{860;_43lVa~`A4ed z2DN9#@{2V>Q81{?KnBO>EYhG*oWVE5kFB5rWOk#f?}zPT?ibRFDcTe>16v-mj@7ap zJb<-MZgb31de^NQheq{74<2PV?(V-oxwVj$uU_ok_-okTdGI~2nvxGv9f=N~s)rb#zA3cgW{M){&e1YKIel7!erv8D` zf$J`0DF)69G3XQMIV3H#5ahOrogyb}c1Y^ASzTGSoh@SS5*!(d)Qj(y4fzOkT0sQL z`kZ9&a2Ogy2};EiI&e-{P*QtjKtfn^D_&#N_WkNX4D2f}XsiR4EX?%+dA2t+ck7(4 z*Xh;-!izA46^0QN`MWf~z$g14u;8Ys>l0aQ9q4|(Zm{ov&Q0tWmteDi2W&8Lwzhp5 zaUrF!iw&EKE5&5C+)g!Uh`mL5yv@Bj^c~RSqJi(7^Jz;XTM^i##Uo&*-5QZfs@cMf zNx<`N3EA)rc3?#qSuCZ6paJz=!wqVE*rOmV{rGsE`}H-WEr7m;&C4NLqIDdaR}79_lkx{IXgn^pQ1wLiBONy zBF-*ULu9KoMaYYdhzG~xTAA8C?m*yzEkS_QK86lAEtvW=^VAs+um;y0{DxXa%gB!c z^`Lvtpvb-{`8LdegAswAj*sz@& zE5X_+5cGJZ^y=t1ss}atNJYR&2k{ymj1`60cHBzDQL{biswMDa*{&mt5w)Xm`l9-0Awc!iDJjtSzrYY4SB?!}N zYQy)h^B`hII%?m=t2Brano~3?*kQtig=k@H@)L&$PQlRi!Wc_sENzs5 zkn{(EO>N*3QjVgFK{PoIh><`$E*7!%1wM*V2r}a(Qt+Cp1HJ$X*>9|)`lqOu$XBLw 
z^!l$DOoHkGx3|rp4Iv0Yi(X|TaHofg!dU(3{&)w>g5Sh}9*d?8_W9gY6MWygQxV*w zZz67gRDu1fRZvjzKsf{qv^JI+r8d=?YQofoDcaV36i3?7$&mEN(S$Mk7ZN2a{eW`?&Irt49knZHBsqy688$;3x2*!#w zIBN^m#+<8Q9~Qr`K0CD0XO?GN$(@jvjgrqYJBYa!s2WQ4705X|ROGzr5J zQBxNubz3`RKpeFT{S9A2of2RJHGNHboo{+#atBX2(aPzAzT9%{&t0zFlcIODOSPtd}>~uf-tI^X_8RQMtWRn?FW?lN=?+ z7BoMb0JBNv{$^C~tY6G`$3JY&Yas4$vD!Tle*YJH@AO@X7kzu4*tTukKCvsdZQHi( zN>Z_HtCCcVigQx2?WDiI?uYI%?rT5YabNa7u=W^h?=?SjuG{yTM#R!+%21`%Hy3Mg zIr4T%n2~7nO3I(E#HczDMSix2>Z}bzd|yzln7LfA4#)6i@HHtTuwTn*Ho**0RW~g9 zAqL9x@**WnJ}SQxHDE@4dMBc*(4cE;Oz79@dLWce`3AIG`jBIpl-X{f@=>2Fok)$h zVI$u$k=K9*8>%o`kaJh|gC`xHNY5ZtoK-4i5L3O#nv?s#dOC=7O%l6E4`*7?%3KuQ zLghO;dW;_!jqPcC4jf%EdL=-<{I4i5FjqQ1V%>HeAuW26+W&gE*9Y8`1EuMsJd(%= z0=3yOC~f%*%<$hY@6m0oLyFYQ`YZfR<$uRKa_4BXBi0a~j3i-NBp`EE` zw}YuLk~L#FP*^ftu6T-8X^BvY%BcUr?;82uYFofSZnLOC8^=_IA#{kNd@u%6zwz6N zHmka)fc#})QfRwx^FT!`3Z~JWncR~4kgnj`ocABIKD3HG!JI+!WIWmC4>!?>qpj$f z_vGaWF-^|y(8CqQc=SIwA8GW-m3`?O6I5GpOw*KT3>4M%8zj`#pc7VGxJCyh0?a}1 zor$W$Y`5EzT0=3EQwa9KV%DoWh`VDntY^<7fud3={%|@%*r|7psD%Qxr`bm!EuQ7l zFGvDd2aN6?l1sQ#0CAt=(`F=*#jVx8R&#)}im~q33F6FgT&+789h9g_mt~}N5*a2O z_|h4pH}XAGe%m}AJsBIhF0ztJ!Q$f4}1|Ya2$fZwVQ(#8_fbByNH`FwhTa5%oMNaTgFgF3a2Aw@4csXP0*yT=&ADBcr6SJI8es&)a_1;2ckY~6x ze?og{PM&oUOrzQGxQvNenzP#llJ`a@6jJEttq1n&HR6WQ;lU*Tvae#b;TSW5fDFs~ z1T@qgVB(>02n+D|f9wx>6*BMM6y46n2X3YUnptT zv>Y$%Fxp=Sr(af#FRCT9J`hBop$QyrOsRyVE@jK#>3vCNn-NymeeG2<`Y~#?&%g@K zK*mSa%)*VHpgji?8p}|rt;=jeM2yJt%@S1W-&Z9lF4eh^4^AXE={Oj;(YyiCLWUv4 zZvG(r#W?YF!^s-Lln7(0?MOR!AGXn$K*}!Njd)Y61n2kz*{3^g`NxOlKQw&fXl?eL zPj4uDXiO=gjmcApS8|12FiEIi+(4%2cfz=Nvs>F&dYcX_&C|x8N2=4=0cx@0_ac** z)&!r2Y^;mph-Eu1%p8E)>Gzwb(@W7UjRL|#*xZ6&SPl^4zc7dRCvAotZTHdaDH8*s zrEq!X)vsF-+Bry~bCPH&f3%kB*#Bki6=f$l_clO2VYGRVb<}g;v@E838jm9NGmyP( ztg;U$S0MaxmrZ4JuO;p`*#ACrcPccZu+}!(5gS4k1b<#^A;#@bV)(liqeL_Qvhv87 zGKDwh=?F%2rCdmLnxEI;(UORea1m~~*Y8c4O*@(c^e6TOtu%=jx%|;rmLMQH(yM0% z5T1VF(HFgzH(*VZ%8S-SZ1wElcr;aFxt=cn0%-l!C!SYX{_Hs{$C?}JnU%UHLn0-z z>tIZ6qA{rc)o3xWO;6pJEApkcH2NYvK7bd>xr`k+sn06;d&wZj>1;31XY|Sj@%e}F zex(%%_NVyOF^K?CEN07x{5E++dvId@S%-yKIz!lwJK~=rPyN1f$a}io)^NMz5$rO~WH0ZJjD*G@* z>=oSfv(C?%FC=Bi^m(m1cQEeEdprv7l_9kWW(-s-cFW{38CE9?kQ(uJeW*cUl!_Fo z9Pcj1Xj}VXvOJ`PcT_2kF!_Rcl^JOe!qa~oG`b^D$0LWA=1MlqnrgfA1P!857&SVx z!nX!`DRH*%+RGxD@|~#-AMT0F;~!9>>UPcSAO=QJDijQNK9^Y06>HuicpR#g!Dxe1 z0fX1_I7hQH@^90dtjr@(Cl8d}W8(zkjrZxBgczFdUpX9Oi9v|<6B}$U+<;6*$CW!` z!G_Q%bS*H6=G48p;|%H9GqyK#SY8IDE6YAk2Lft#s$Znty_>Scx}U*rpdF>a7C|9P z0JvA=*z-+fEPPFYt?l@6?ngHKyebBgbhics^YPpyeAaK-$lyF?L>h5#L+O~le(5o~pfjp5V$PY$z zKp;!kc&Oge8LZMM^+U9VHRncF(AsZ+EEew|Ng#k?&l?nC1R~{aP@!K~T8(GB)R@na z=iw#Vs$&j5A{cL^_raND%ef#2 zbajaxPr(DhvI6H_9$TZjnPYWMO;DdqDYWoh04qhLq}z+9T0DWGu@C>sv7)b$&2N4^ zFZ{A(hI!0}br9CaLrv;8q)Hr-G3!bNL*$ACtI3KlZI}3zA|??mGz?o&?oi??dSiMn z9ljQB@-xWrt*O|RsyxHq3SN3ttkIgpV0EV3-RpbRM(CtM+dQjm+J+4_ZVWGSs>cr6 z`oV-^OPwxRX!&w-``D=e#T4$Bqw6O8@nUH@@MSObhto7o#%mWmdm~|c_L8@Fsv}ks zfku7VM`aY%<)HTH5z(U~soM{pm7_43$~e3DZGjx=iZW3H;R@IEp{ z2GU2xnYfodblwX|KDF>~??HPE5&h}?B{>n1t-0{w$qWgtG?pDyK%?S~uVEC9-aOQ7 z?3cbp$s|h5LH(*jrjc1S>$K(d-#@Xj&NeFxirxc*6Rbu^?DzQMR{#F;T&4aA#()i) zhE$jjxi^m5e8_O7BAFYZPQRs&sc>_3vCX`OWgO8jjF)s}Za0Id+=GRE#*CgThCd*E zyOyL};Cul9O~5^gL04^qq^lm-YoFP8m zMc&ks+pgxn+oc}mE` z&y`Kc1GlPqJwk}0pZY2r4dN+MW>dsSpiMet(YE=HEg9?L~HpH z|3@3r3kzlKn-4w{er|*p@f;u;;v$ZCLDz2^Bvmx|P$Md!)ztq^Yl=tDH)v~gug7yE z%^gI2Y>ajkoarj^$acdhhxoNRS4(H4g&wgWj}tHqQ0lmQPByX`?#&d#XjJ_fZjnBR zP9*h-ZSIaKlm8nEVkCl%D>d z!6W!tYfa3${WvUd;b4{JJ3>CMOiEFHOq5`RG6@(+LOr+Ki4Zu8hp?7^9Ph=XCH*xn z{|(XIiNe}&k#5t?_BUf$j!GED9?`ND~U(-rS76L_+x!{w&f8*7`!yTQmW3Iu6AL)+yxL5KLcTh=YO>c 
z><-o}yfHu0K{Wn#*F@wScG_5gI)S}~-WipwDdcy(Ag#HcU*eb)e8$mum`Cf!DhKaL z-1Arh&Vng*B$UE`d|Tu;;SfLZK{O`ZqpM=C&ROZ!gq$$ zfM=Y8I3{;oZtBvECrRw-zQ0jOnCTX9QQ~h`(`d57!NC92)`?MdrbuVipQ;eA0Dm~gI0y%1>YpwiI|Q9A4=Yokb!4LsO%QzACD``7KB!hWN3JL#n-BnCpt zJKwwCikOPG_(&y_P=6&n$?q(Dhxvnb`Uaw_SzXjjJhVK&Tv3?-+V7?U8v1ZuAqOlb zJQOam(8jfAzbj+T{cB!@`<-~bbhw&}>y&E)Z#{0kinPHW=|Mu$^IfX9v>Ga|H{$eu z6ne~At*%RkY^BtDfhYj?a$4NAKhbO4oKuHGc*BD{#B_8arK9DtS?OSf{GEZ*)ju=* z1eK1PNvZj2fKavsMmfw0vVwCHGGql64%&q^z-}YGg~&NPi%})RK2KFtoxhhWuCm73 zj%;oS8_XEaD-Z*}!yId~)p@f(@2TtnfilPpn6RrGyJYZ@x-tNgjh}N_{Jxp`3SS_f z?C^R4l?-B!4@UBCM6<2X7L1v`?6oxM zh!J^y3=^wn=_Ypa&gAk+Gf$jjf-4IKQ>#`h?u$nTfs)WUg-EQmM9%{(mRg=5sNUr|CXs4pW! zX%UBRMd$yCyaZ7COqWF?p0`qe0!WU4Ee&ce!s9CbpCM6!X2CSdnWUAz-Vo5iLXyQI zjY|_AsV^$wc6(Ue{Yf?Kkh@NZgVww4vfKfFP+2tztsN{RP>;nx=u~wcE*SNFUJWO z^cT+Y9Q+KfE8W(A{>uN4-Ik350PyVmU+uR46Uzw(_`mJ8`mU9h|Hp3ozm44%4DkQj zZU3jr005Aj*X@78bpIc_?M0uCJE4{@wYS@D<23TWe>J?_KXVs4G z73LPdt%?a0H>Jl@oL8IbBK><-t22|Mmobj0A8qzsD-KyT<|&?4`ei^_OpVmekUj8F z?U(=I4|o6f}9mP;bvehQv|^f_4QxO zrf8Xk*^-6hO_<>tByO zLIZ^3n6XJlQ%l{s_shyI9vFAu4cziCxhwczm=K#%Um#zBSNz!)>ZOeerwd;X`GFw$ ze$5{3H$Jz$hxd}`7l`-@;-EjKcxz9*cm3d&Cg@b$HLn?;9s&b>Y%3y#&uExpQF@6G zu+rocq|dEbLmN&gCG!lZXo7$wy^+;zOaug>eDQ~YLs(HVY@ePvaw9Oef1fwGB(b}7 zFnZ&NFXLZnNX%*X0cbItq9X&3I}cCs9a^GhQ0<5?#t1z`^N)pqJ15Rp8^+&H<#iq4 zDbdyfEzj<%J}gHEO!j!-$w^~dS7!J10PkTAlW6cfFX)^EfBBnXr^|rH>@m}Kz{PjF zmyzYWd7K~?T!;iA9Ms3e?s*SuGZyvd=sDoeg@_0A#dgdj_nlM3XZ|6fVk>;Br2Q3> z8F3IuGPqGTKW2|*x}fRv1zQ))H@cr}XB{257efQkv)*-7UMa?E)F#i;f6p>xt?c^g zNL>Q!e6&}^0h?;0Z1rH7c2m^n@)L!J?p2Z)82_N^wRB|%8HtZM^NN$mm zj0qEK*=~hg@TJO2I5nvQ_reGG5UkpTwx4b zWmhw$xS=!W?z`Q2baRPSR74)c$0i{Yv@J@+(Hq~(b-(JkDiWad9aZ6fcHC|01BcT4 zuTm`Jza}LgzI}zqgY}_;OxYuLm6mbR*I{)R1sgDge)fJby5Qr0kZlyfQ7;wVWElu# z-}tuER?V9~A5l>9W2eW#xex8hxv6X>@I~YrL`*&bz*^wJ@umDGL-HT^bRh8G3kebL zV+ju^=sv6vVfs`s{frp8GyBpf%PG+QhisL&9efnsHOtRapx^t$n^;Nh44^%khbx%Sl(#Vn|)lmXM zmJShUjG_SuHK9U!F@k|DkO2Y^{2o%DJyVp}qxD_f3rRs8CVo)dL-6Vw3P{v$FT>Qh z)wKBF$<|G@iJkiG=OSA{wh$?ZP)EppYtM&2-?}*7OPIJMzowr~8w_(muS%O7uY+b81s`Fv;y?Oac+o;Tt6{C0MqC?tuvsoe!F1D#B)no%)$Tt6_rRrg`Jt0S7 z_G1K)6cRCY=aZ@K7b|WF86(?U zYb@XjDcH8Q+#AT2&nfJxnW7mt`-uLemoEw65F`h7I!H*IIwm8bU>!|UlL9snATVa5 zRNd3x?ZGV5+Z*{l*1XAFM#Xy~9_oK5#C{LXkSR0u>lyLJD`KM!R;{Cg@DJQRYUrBa zvSt};*B`g!JV(hl6ke2;$GvK>zV{R08~?uQD*Qk=k9{H!D$v)*D7~*SHD^O5H4GZbTav}2U|_B4`@D8woU? zAT0X3kIv!+^k`6${&;egAR1${0bk9e`C9%Vqwl+i5!NEs#CfpPn}IXMm%Q%7x|`*h z85@!ljBBKewKQ!fdz{laWG+IgBICD?`#>G}!qW;zgJH3heaE+G|8Uc#V}~aDAw8EB z!(Jg-IM_kGIx6zO9G$V3OPW^=b;tgt7HYLoeF2K#=26ll7vc+^D2_n9+X4=pA$i#e zk}|4DQ&9kb@aVzEx^jgNrPWEtza9L)i=O&hsYAr>9592uq%2V~nbZii!99ULRTtUhxYa5Z zza6S}snR0BskI(KaKYB;Y{IPvBvq%sQhFOhrlpzdHIP}t*Ez4<5*jy%eysQAzAf7UP4 z>x#h&l4@TJw84iPlTi}SP#>kHCRo&b{_4v6V<_^hwtn^vkRGEZ19bT~jH)^wK`1K4 zsPJl&Eg>|u{HB-FCpH25A(V|AZSkG^53$oE=0Pso&@keHX>@s*o|{fd#t)bd;H%p+ zARDK3Qhfaj0a*(biq_DkWx_&ks-b3tJ_65$q^xr}yrpK;cw1#!ZWv7EOCT(y24igwo87`jb9CyHch@*j zS0f7K5{(RM-=xUgGyq+t>wSPWer9XK$smgfS%IcQMJT@}+VE~yk|i#j++!;Sk#7FU zVQ84AW$Xbq>}GzJFOo%aar55Rm;R>@L1&I(Vy3IBQ-^WS_yvXO z`BzNCiJL*89o7;<3|xwLT}&qp3~!CDwTRIvBO1;X6Lp&|C7$KD$i7BCQk0_Ak}Y7BVZn$mw?1tX<+MYuhlCy5&FkAJk<6|BONYaNg(@dXJUZEP1&ThiEw?r+<4`DP>IIOrb}#Uu|o^a zx>#({gjw%K%nvC7n+fVn@e*e7#iZh=Wq&;hPHiOBJcS`fT%O3EvjOHkw_$+BSiUu3QiN7M=E6GhLJ_40USNqcrulXjB? 
z&-BgmPN@RLR5~g<`a`GrfAI%MPRTlZVXl`}9dkwsug)o3$!_3-KK*O&w-z5uf96gB z`GL^RMtc7}vCj&_6dju?cF1xYQ#$d~z}j{uy!O-WTg4Bq(2sq!fODNZN`{GT94O+q z=K=^I_}0~i3uY*qJxqrT)k~iX;<}*27+BRkltliq=TJ}Dm*7U+VuZY%+C%l?6`4>w z;8&Z5?Yt}`$ZGaURv(#L=eB~SgFYn)Qm)@feB>KY@$HW8MjmAaGqC_D7{0z@{*TIP zgs))wtdom4=3vsi9V^E8B_@Iy8xx}&S}t?$%1gPkr7&5~RrR2yaehi@z(IkD5oNVV z1bs~M0``x3eInMZjNrmit3Uz*DOH*OH&XW40IxNA&+SLX7goNv-EiFYPbh}Yud%&m zTtoQicH2B}n<$tI!F+GfEz6Zl)uGV<8=5@fvq~Wgq%&mNdVtVB93zwlQRQZ6WLiFP zPo50$5Eqcd$S(A4=+7j>#*uUv{|f0Dv8}ry{T`$Z292q{DeKq)nh104W!fIiC$oq*j9~h{SWd6u&0H zA$1h<>E*za|NQQCyZ<<6)i50Fqgm`SniEp)$QUlOnDf4GV#$5&wxz4l8>2MeG1@gi z!+8gZ;}9d+nNU$;->&go;Y{quJa0QDQHnF)edM8r&JR8-4w=9+eoy+V(7&ZBhP`E4 zM(Z_PGNhenPE|G77fnks$b0pvoyj;R`DBO?+~dVM{LAl+9#_I&GOpKUGZ6B--^$ai zqoD*GMZYReX3+psj^w^eAu1ZA$@d+ZbTsUKp6Va`q8m9?&S@cha;FH^y*qMkc^N8U z&;B9=mF`%uwRggYN8`YGv!_yYBDSpGT+p|kd)~Tc>DfNYs<1~?j)8Ye%{YKDx{#OS z9vg*V@DSumQ0+nz`Y>-j#}q~xHlf4X*}EWrA5M<-Q(gJ9j0y_4u82MN^i|TME+W{0t3Bk9fn>Y#xgo z3EQm^fh2m9DiyF%vylIuqbV8}mI^WnIQ5JP^dW4o|5BT30`U~0)GHTwNdG7VBd-;& zmqrFaF+QN9MnAa}CY!%}!m+Q9@*`=x0XOGkQJ0%&&zZ<3D_HzIA(mekPCk$Sl&CxfLbNsj*ZL#9; zG5n05$+Lw2)(#NbyauY152jdVx%(V?QH1z1z9ABZ9jM&*5iOB`=tq@>v(-+Za@RP<|Cv0lk-k4TZe&WOBr+?EbU4 z(uXWLE6~T_A$}vf_QM#LaW>!!%)RHgEdT+%Vk#x@1KkTSiapA(9cSM6=2sxtf;BOi zLenLWFV}YklLJ#F3Ft^7%s4-7Hh%0>-bdzR016K1)-N?w-Y!EiW_S4IrF4(oVU*lV z^fI83nKWHs$%jOC46r+-`b=ZCf}xXnAL6~oP?eTH6PRU-3vGYl=!+jImSUmvV30!^ zEJ}L9{*igmQUUL~K<4h2)Ji~tt^T08wPM8z4gBz;)*Rd2(FS-m;d`9OhMIW5*78TM z$6BEgwzbe#@AKrYS4-=|Q(Y`EVc*Vn8g%1_)jN*7{TJ*AeshhS&u_yf*`GAXHc|q; z0aAiYU1#5K@<@z?VeCra=P_3gpVI{Jy_*n_L^JZBPHh^tfs>o?_$iSzfy}#cQR`9`@WwLIzJpTt1sUq?IP#vd zlfZdh#fSGt(p5`)t$>TU2?b2BPV@uX8*|aH2&I?;=Kuzor*m}Vl$5a5xe$My?dT$0 z{B0s``m5;6e{Z-qMDe({stl6!Y{-fr6y`PY#BRo05cwDWnw)%Xg`^uURlhMQE zw=b}1L}4}wAuN}q+K7=Z;3iF_3C8hsI(5AZ}%=!5(I4C^0QkKhbW ztKT^p72jS-6ORlhm5zb)-!8iSpKm=2-;kc;ygR4fKWWm?gI9KbHzX%rTl`y#8W*bh zc!OI)r@_}2Zs?G}-bR`z;wBaA4}o1nlM?C`E$A!A0Vj|u7)Kp+Xabk_D=7RpD7 z{(g=t_Qr?rQOe(%Ya`cHr7#q;W1c~qhg{o$jwY2}4XB&ERU68Nf32FAa4vlTiHZ9A z+i5Zmw<{f*vhQtNg`eNV7!WT2#Is_)$aUUNzof-4(9&`0v4YJguLwCR1 z$o(~|e&A*a=~B(d;)+gcUW~tcQ`4$ z9b8?nd4OIu{n!d<_pZ(9M>f@d4<5O3J!rzdi0}Q|^X*M!+4VIWX5(lbtvx=g_!ibzKHumEyauX#T+TwVK^>DZHD#<> zy5JY}u`}@#5P@i!pprfC*s_^a^n}6QU!^;;Mx96)Wwj2?9oTgy9Id%gc>G4=1X9YQ zw2id8@p<%dqA}dy8DArKckO!nk!|Nmbi_e0C!Ip|_vF3F(%`5zCh11Bdm{B2hI|Vf zy(_Q&IV(CIaE3v})#I1c8+}!HUrJf><5>i2u0L~5dX$Vj>EV)w{a0dX#i;*uCe-pl z$o?=u3YH|9pbNkQllgBb5PD3u=)Dj^ON3Nn>TPG7;571UZs|*{lZ8ceRXMXL>E}|R zFAw>%oVm6u)i@K^$5?yixm%gl(Y_^tI~X#p1O!|FHm3aDXy8mKEa}1fCOLOy?h>T$ zC)+2_zoG)suht^@3LpOQWrd3l=Add9bf*}Z^dc|PU%^w7IhZ3*EhX4WOJoxW zJ%abBi>tdy-tT;6Yrizeh>8P;5Ilo0L8GLt?<26g1w;4CZ}&W0$zi6eW!S)rOL0ny zwKOpG2flgyqPJJ^fa6YiHpg7@NAwBKiHkuKuMec#$Pz#9+5XZFGB*NYeR!SK)U%t# ze*gt*%J>b_*MK^up3CSd#*sjeS)r9#!EEb0{=ZWS1AkxpE6%2)cdIJAZL$cupb0@_ z7gqzFU6g5t&>`#2XzRiGKo1to@Nah~Lw_MtjDd~{%t;%Zo>bm#l}was&#c(bM;Ujec3TWUN32da$5-WhJbH3h{7-}1#LT|_wl%dJO0|AA&PLoAZ6oNrq-vN>oq??2 zM<*t)XbDB$&wKeLc0+b@13vq211_aok|GwIT@e*1&%8u=IApv%=9WwUWe%3FO(&~S z7SwRg%Q$qzM^fxTE`I?+Op$>zjB9^Br0GzWlHNI&Jpk53ve!*gAsvpBkOp+uZo5sg z2)9=^>{9p}cxs%hK?1aMU9I(FRlxODOHTW&)nDk%->nJibLEr(YtlQ%IhLrK1~P7p zJia>$b#1;BTVt!IB7QS&=|5%NENQzr!gf3gL09DJKdAChY_OvA;Tj}!*Al&aN7saJWKZRrDa$+p1!t6;*_g^-4i9zXZ7 z1AP*>MX-!<%)TXO{WMsQ+r;vD|7ws+WVm%OX@pHKXt@SmMh6&Uf+$y0;B?|6qv`a; z^#a-TdNqsusnL$Fp5%}oDEfhVqLADI>U%)umQ6{5BUjiM zmal#M!IcBhHj*R9mIzvu9CP<4UWGYn;>rumG zTDDQ{w=+V< z#KcMz#?pwl`Gfy9_D_)^*Pwkr@e0mSu61&k&y3&bz*d5O0@BQak;cHuZ4vx;@b)Jt zDb?4jLh=DZ&=``}3lu4nbGD-C_g-X&JPcop8ac#q%J}o?Q$HCVP-oC4jXMe3>Q?N7 
zSDKfqc#@yuce{M9cw19MR?Zwa^eNa0rAT4zCS2S^VnkVZsF(d6iu69Frnx|E<6t)VM{o9mg|zYXB{>W zG8&oB6*4D4#hhFo>}eHVgbF^fUqN{gOBN^y%&+3JTGBbQsOWh^{W2R^gfiZN#JL`wJjQ`TJZ zXU?cfG$(6J9ZUBvM&xnxw0`X1868$4fVH4QmJnyb7>18o5BJn`W*h>nz=*VJu zF~Pi_8ulnA8pAcI!WtMmNZ#W9yuJg0y40_*FjiXB0U5(0K{jQzDKEJ~ee>q`V2Su5 zIwwf|&Kq4uF-Laq$WFSp7eZ`;lJA=zk+`YIJx};0bxi6SbGR4IT~iGK{BO>Qxu+{% z5eInruoFRo!w|LI{zQJYn|lkdnc(D`{1w)oivDJ-Nf?e-`GLkVAbrmzy$G} zso-Ze(9L~_vC_r<)Ejf=ZY6771o@4|f0AP)I491a(4W4-xONU3H5{;FBNs}0{HZdE zODh>Gin@6g5vRm#WQix~(V`|5q@r(K?3o!+Jj)9i_$BEwy-fJdpG$M$c6bcXy;!PzHf(RE(Hl^g0nB&8(()aeY1b?ivM z194BsUasq)?TP^%DGfz7!G6R2}?Ew4Ze- zRykT!^ui{L-9?zk(0RpLD=v2d6bWxnG>pyduE8 z-6(nU^s%GiFVjI^*H8yOEB}o!e8K~%9c!9FWeJ9%6OC#>b~Ce@Pglv!j}a#re-M0r zRJNEG1GtwWr%gOe6ZV(`cKDss!m*w}yt-evo9F1jtKwHXAK8YR5pawMJ+uzmnH+4f zAx8O%g(5@C-y!6Qp5}qvQ-^3WVU(68WugIpzws=Al>jzY?7ziGBJr!tTejzuuNy=7 zTtlJm;QMp+gx-?kn6zRxnvspbPhf}6nL&QHL(rFq);-y$k|lDr%aH{zvW(@IMzrrf*Dc8QFu4W&G{qh zgTW*imDr)L#qc3dXj@l;&moFP@${i|;ihHOWo_5_iw&VS`2RG{COG|WB>usjerqS` zJWqB}JWb~}d5=^_>*|Auzhwy=y@hh*!Zm=6bCO!!`)Fa?WXcNtdBM<=pKSM^u4ovl zUxZJ2>+eEi`e~ z_=~sX!v!JWm(Gm_P?9}Ce<4IxKM`mj#ZI(_AqlW0ZBTVl3w52>Lp%C>c#9)N*^?S0 zQuZZ)1+7~DWIssQu=!itZdx0y0#7)^SL1bT2jRJrLNL;Cncdy zobz8?kN$9p36_iKP#cV-pz;-cMj)8}9Bw1$s}Lq-SCsWIo;ut{wLRcKw!5jrv$Q(d zHNf;-y*Ma~EwlE9g`osG0_K|+Jux5kNJgwL*L!%yFZsNIIPkk%592lG?!D#9hb_e2 z$hLCyH1KDsx}}&>t55l&y0#=wEo4ATV2+5&-#?PkU?s_V>bwRwP0`1g>CeJ3MO`)g z$wV{(x{2qr5(VbiOoTYG5Z(H6mdWbXZcQG9kB|0QO4 zB55xl9CeDEJy!g-pI$z0OTHASN!x5bWs-e*l8IAug`=#N%Z4;D+397|#%ZT-3}&pO zekd^ZJQasIZ{IFA%%**otp)o z-qi?a(C|)vWd7QStr);z9$-B72(XrH z2|kXaCB8!^)Tx|Of@*!d7EMVx-juUIS9$#Os`#KKXs7)}>Zo$uh!q0AgtJ-qvwT@c z#W9UndTaWL$XpSy%8`T}4^g~?f)%vNbW$ZQT}0(wu?7CF3`0t#l!{gjalRYQILXD| zyq#Kl7FpwrRama00Vk%8t}7ZObO1JFTv`XKPeQG`jiMAoXa#0MR!}=oHEuPDcyUB^ z6)9F3-C`;G14;X}2SUaOsi8frac-DI_jM`rg)lfF5c&^4Pe`(DvW64=B<`V_>0 zSZ|Sb^P#&D>-%QQ3)oQDStbkV1uK6P`@kbngT-ruv1kH$%)jeRAO8Fv2}OOaNAVe( z-G6b_L=5Hc9tm6Cd5S+&r$-X7-3O1;4c65d5IHxBG!j(_Z?|4VCI6!enJ=_*hQ1Z& z6Yf&Et^7;vk*0vn;t;0z3NXZzWs^GW+FBLF@#cp_sCUw(31;+E2ivmKm@T@PsG|YP z`%Dfe3WJ(eGM{s$C_H^8?oi3e*^i1@OHmn)_76UL9QU_A8l{2iSF?xkn&~$aOf9V7 z2tJ3+$6E-j%Ykil`r?fgiJMD>Q+SEumyGiDAdKloNQe*`?IJr}^-Vk+&#c^!@B^WgFf zId(pLAusAKfzcw{9b72|hWJ8CY4yY6m;`E$;6O}+FrvX9#J8#kg=|}c?*?`;QX&(iC^9g zZ8!fBIFa-lNHl;h|G!(y>54`}G3v3wB(4y8aMsUOWxEa6Ow+B~wjBC>6UbRcG#4-9 z5p|2xsUV*hBHW+>Q%}fEDCLQ)(C#zo-VGrgbT0!va&ijSolM2*%=-HB67HtbLT?@zMe# zZwk2*wMWxR6(X~%5r%Hh*Ydxf(R(mBxpQ)!yd?Si2dUqH#0JfCFLQ!)IzeT9o z)K5!AL)3o~avX`^O?M~USV@yn@b^E;%Xp3CP+%9~j~$$5j;yw#H4+DUZTn;ps^;-- z$t;3YyWNW;pZb~)v~ZOZ0iR5}hTTch1uluNxsF$t_5L`E0c|y2ogX1!x*K zc{A_gcoQSVG(eX~gA+4OMv;X@fV#--4(3A*j+&e*o@ub2%q9onS_%d=HuW5j1C9L4 zvk(R77^zQj-Eq*_F2+xHBW*h(z%_rHOx5UhNja2Qx_QBV|Yvwhd|Urr?#% zgC((zZ*&YcQ5XC-+Hvz^CBTLd4Udt`SR;88U$u26EPEMNowZxh%7ft$)r zqaUo(=sAjnX%GZGo5tDkybMq~uQ7<4mKm8broSopCbX0NDERXJ8CL~ol=y;GW0B~& zN;Ap3dW@N<2)A5|*7)(X&x09NjwAG!B@zkr&Q+222*_)`(PYRg@;`17MriV{oZKAY?~c6GNd^Q#$BqRZmd^r=i1h3 z3_uTtqXZdekWe{uSr=JB5ikk#-5F8{rwukn%OEV8r9@ZlLXVMJqFeIcx`#kIpy^|# zB?tndI5=@;MPPFq!CnYe%mof2Kc2}pE`~h^;B-I(Az&gn*ITynF(|BV&IYK zbml{~{?O>MPHY>kQ|`7)1qn|NY`^1kXR^%_{01w-^_CKGTOyLE7r}0wvyV<{{uLiejy z$WCIaGr~5bGBXNrz$8pdcYu zt4(UgZ_|`J0`VUo+h*E_E2CM5{}a-@>cHyk`GdmxiMY1Y1Q;Plgrc;%_LLP1&0Bh6 zQP84;_xoM_#9w$12WM!59ocX;uq6P&bxezEEv6&zVIaD_uVC0fmwbkvAp#0=*g%s! zS2u4g%V-;2emZb}@O50Fcj}2od5F##Kj{6EFyCQNsrvr{m_TR0CiDzs-$TfRw1@TU z4CUoi9Vxy_ZU--hAqaFxB6p$$S_GEC0qjAL7M-}K6KD&DYwZ#9(4HxnmTtvxykfc4%LexL72gM zjbPk~A_a*KK5$l2q?!&$&MJ}FjB8ijSR*

=q5JEy7b)`WX3RlLirRdY}gZacF>U zz|~MMvlvg~{S_jWIf-5w9M#$okmUx)L?V_0IBp49%d>4a>vQ7`@dmeF)&o&bgp0-s ztuG+Zq81T<_+9`zZ*G~XR?K52#RzYws$-ii~}4&uDgDnpXC z0wVC62g}d}kZV$p9125lR}3aPe|tg3GbB49tstn+ynaHo?dcF@SifZ8DK8WIpFW5+ z{w@fB4-QJJvQC1=5Rx@x$ZzCYUTAkhcV(5n3WhYQY9wL`myr8bUDowAYXV^3JNK;1 zW&2cDuW8)uaUv@XPPnIAsL@S~SZOq+`<7W0!MKDVmoE|Qk_wzIm)OzKmg=@gzc=sP zAu6gW#WWOVAKqGA7cJtEf9+EalW`e zF9zoV(pTfr3&_(4asf?vKSx_Ua9)dFBO`_ZOUGY$Os>Dkrmd_9z?uE0&R9*>{{-2; zHJd?bJ==x)^(ZAUF<-J*qgBf^~C`69C9!e%A>!3GWzSl-M(Rx@P+EI#aC*GA3INkuP9XG~( zIDyw713$)c&TSkD%{~w}p8U$WDcgvB=KM-{A-XZppC zd;sKIhVll+q@>fZzypmZq1CpL4NmqD@3mUNw!=$s87%VsX%c{q{V@6FeaOcB&=aV^ zhP7$M$!F0cyo+;V(cu0+rJS9~JiN97$nOJuk* zv;;Yr4nB{OJV3Uaa%h-7Z~zklyQ2{@0!zdGVPiNNzLIR?brKm;(I1+X?( z2K`vi$KH#RC@=O9F%2_{cmm}d<36LE0Gxdk%M*8t@vwP{zCH%)wJ{V~yK0?Ks(Udtufp z1I5g@>`!@g8vf_hWNV1saYNR(1ch$W1Iqq+h%Gw?yA(C#J>uzy76IoSsce1c>~+=i|RQC+=Op zy`ecqzAXSL$OU{p5R_hjYSS1uPCM4+XlrgX${Plx^9wn(XLmp$h-4&^I4Pbd%1(pr zzj$XwuFnL-9hMnO7wF~p;rkzohYyB@51dmnAnSkg+Z*B^e*vE~1;8a=j4dW79*Wxy z|Hy-^%xM$<9I4t^b}a`qpp4#uw!JGYdIzst$cEjq0bYL}qN7gy8Ad?3DCJ=Le{i@$ z7PC^UGl$ltzrXPp=8Gs6oiO3oA>ay1Gi(>^5PCmR|`$oL(b$A7CT@ zT-=8zpnu6LLs?_@?5K3YebfrK+OYWf)j5O|$9o+<*H#9vKbMS#G@Ol$4J3R%FoW&{b<&)Se03s#hMnV-+m3P?Q%O`A*bu~2h2$=a6h0*bxC z-hSb6yQSGR80Zi#xLix(fVEr28b7>0DDHmKBPJkX1wdexz%~??Naas?{Y>$;9VNz@ z*kxqS^eCFrs6jvkI%bN9^LpCTO#t*XuIJtz$Ugh@juOF`>Oz6sD73pP5HOvwP);4| z=@);6zozb1(R!c*ZoeJ!(a{G(QYO9g#f^kbG2o~Vg*WGMDpGr`1mG1sA?Z-!5Y@6& z*T&zK_ZIpyqXawaoMId%@*1`MbMI@aK^U9+KUg%ikDo`_9)3;yTQLF|EAb{%GU3B0wk>6A%J& zhu|6)gt;&gG?q9-kU9d$#&rBq0#A*9b)-rh?|=glKI?j6%7*gvc@8K10BrAn{>e04 zm>-Aba@;Z5mnf>DbLou^jnGGgA{HHfB%mR#FO{(oApgeIi{8=zho$cys1z^mEPMqM z&YO;Amj}J#qh8}{>SBk?W0Ul}bMxHsbQU#j4&cn*)5pMK9LD)*DDrcVl{ne>M;WP* zz|)qlwfr|zSX+bV8~*eF#SRhxHS$x|XCl-(Xpm`c%VJo1hLhF3ET7GaLJZ&vEdwAUXuG+z#V%DcfjItxqym{+b5@%8*SpD-T?N(Ur$pDaVYrv_=h5%g0a zmw)w;QhsN?fJSWY?{t(&q|5VC#*L3}k`|O=Dj`>m+6{cC-LRVErmkSbl&*JR+5{kO zWevE%&Cys6pZ`XOsE6{e&_E<>sepTLL09GRDv&FB1!9rCSO)ZXXh}aYDaO7=i)cUw zpIq3zlXkB9(FXC7@g#M~C{-*#g(;JUoWQ%4IZKQ`Z<^s9_~~1n&!km>}h0zO0`|$xJ`m6;-1gBeYU?K-5@MI`wp-(QBJZF$iBM2im%w_^|b>%?1L(L8# zU&<|()2UTM{;$Pas*9TBw9_;ir#^>PlMZkJ#*4lt3&izqi=*;S-`sPmJ=t)QzMa#Z zryb2scNfIdKZWaA#_on%1K=hh%YU#D&LhZWpL8Hv4dmb&^GH_0x4jhO$+sJ_aymr! zT|j=;`D4|YO@{w+Do#-J!!1sk!krvJA%ML$RX9X5EKIf{`3m58AItLlQ zKy*fV8;R%Ik~N*Q-&#lHS*tbtDH=%0N+jz<>o+Es+=CR#yWk#4U+H21j)Fxs5AvlH z@Z_SxZ!ZYr?gr1Ht>=^7*S4zL=qC$T;WTqmc);Ql;h`AD=he&{Ghlz+KuM(}`=DAKnUjt<( z%X#GsxQ>h~X@dMuPEHx6n6ELU+YkL3801xfH6mUZ4a8yMa-wzRQuG8oBFj-(G~E`mZ!OZV?pf>-Jp7|#LV0jo4$-iO-nr5m!pv=fkz~rQ$TUqNWl5sl>_$dXu%yio% z1Wyvs3Q&W!lof^W{xt!YldX^DGpdWFNSM;Zcm!mWYAG*FO#s!9Qhp61m}v#E5sZ#x z-hzBX-nAnx*xCXJ8D&VQ&4SQbY>sSSd@dG^+q4|)>x(;KLb6nfpI@61^N46;o(aA* zJ;3>q$`h{{W1#hP6};!VzyfxmwjIrpPZAL4DdT>3Z>bo(52Lhq4)q2rW$WskB!v0e;F0Y(C^z5Ye)^}m6@Q7YyDA6 z%UGg~ky(#^g3d|tIWHe)1FPZ_l-lZ<^ozYsZt!C+aHddZ3cUH+U-$c6W z!O6!t?$TK_nml(Q9-YyAmr?mvnItpzd?(n}SK3ik=t;x=^$C&-*i&ak#UMArg8j&M z_&8plw-E75;jn)Ubs%mb75dncU)Dp2t&i5fBmgdCM&5x?=@adxa3`nZkx4j6Ns1Vl z|8W^f0s4>-_8(9iVG{Kp_&p~@Ov0u=Moo%~iIyYp1b_h{EF7_hxOaivcVR?w?X7o- zRP?)Z#0=j1qykaW0kg)wdOON~uZd|80Oe9}U3l(S!YlNZ4kxN1 zS!Iz?rt)HXG%x;*AlRMeJtv3>%c@bs@4;UK*?z>?Q{ji7iWZo%j9HS$ksggnU;7ai zIEu98UgSH-RTMM296#IhQNr?J33nXU5y`P6>yTCDkwkBUdEE6a$J?tRG;)j(xt(B@c;3tn1h! 
zDkPya8k4mmle#pOl}nDwj#BIN^EXRTjfA(Dfdr0q6Gsrep zSt}XUlT&o{=(Nq0v}XF8dQ7aA^Hhror@I-YyEewddx0Q-cViZwgY}}lf@R0(5{;8T z5-p^{wm1$fX;fdpe<(2c001lc#YZdHU0(6w<`YoX}Bnhr_+-iV~l62?+on3Y&rfoMS`yv?EG)lQosyZwNS&x!bRfc*XE^ zG&4>ss;Zcw(Jh1hm}aTPQ7B#Jg&f_6UqeVv+J!(j-ZbG!$2gewC zVv-PTCg5D~TBOcm46^=jkz{sgc2(S;-1z=;Jtw@2fY-P7lMnho-pFRDI!;OeR(mVI z61(PJcDt?DL7fea22_n>!&h1g&ff`21y2{kg}>K{(%R6H%MH2V&f@;U&2Sr=`8F&E zL{=rpXfPnsv+XQ`LViLYu(eUtyc@y^^C=3Ma+492kXTr9C9*fXH2$mXIRJu?4oXF{ z_pOf!yU8RT%d5mmBD(_|P$k5+r`tT@)ovGR6@p7hV;bZ_C3d(V#-(4oi%LFy zlX%}~s||<1W&$q=?Tj}pK1WsOFZ!3D;LF>IpV2ZUJ$G)2Tao>hyM)bV{?BMVuOVGz zlX!B?;WX<*HrZ($XRAm_tQ!5Xeo4O~;K!e|-x!Td_h2GR+81=xNA*toEY>o`6(%?nJn(UW*4e6J`8+@I|0Z zNh?I5fIEvsU=~{XyHh4HfFhjjcqR0Aptz6>$N`nXZMO}|)JYJq0EmEcr;?zQLBqd* zB%>4=QZ$#WtDOBNEf?IQcpoEktK!Z?P^LZ~o@Z6(jqhAJh}g;~=d1iZo6l%_0Bir{ z*_-Epv#B8|2f)W~O%1GGo9z3`!*yMMjIa5N=unM)4zg`OD(3b)^x*`842dESQ7^*S za;hFE@K+zQ0sl?!q8J5>u6FRC?7Odtjz8|SPD|xfsMzZrUlo6IeO7Kf*IdW38>BvVk}a-61FDAA3Y#-nzK@HfE@ z{?i@EfE=I-4m7yLOYr*3Bu;%94ly5Go0r~W1z;^zGuilc5y;>IZ(j6cHV&MYT;P}-$xh8&ob3yzQ*!`;`D^Rv-rzDTfhlkak z1`k}zOTqQA2<=*K2!Ie5u1)n_jP;x9Um|o`@ZR-vBk*LJPwQr#H<4nZbec4WF@Zxh zoM^}Ago5g7RPWa7u9EG1({M3^D<_ zk+QlDy~>IWXp!(k3C7@-bb*T9n8}Y81|`JIn=C9!ZlKUW!cK9Hir0R4^05Tk4E7;K zGLm_%Av&;+`L0`$;AI!Y9p)+EG_~%{c79;}FGEc>sHYvPo|~cKg%iAK5yLk$Mc9mF zo9xi|TA&rL5m%r{`Rx8O*eRF;f$+;|-lwdGRA%_07LuGI&wt@wP;S!6k z+DodZbN4j>#l;k4;@|WwOFO)>Cyr@%gIjc?!f!){S&)hM+;GJTc$Hv_)o&~LXc-zl z0fIR^1B60~^caxX({1XD=GolL{=>X~r>_t!r zv+D3gD*z>lII`?(i$VI*v^fG!SzP6en#K7vd^hL3*{Y%K#LMixS12RMOd zrgK7wpbeN)Q-%N>C;-X&a`BbG7eP}CpMNj~a~K?N#-V-R-4GUCNIXJc@qC>c3|OIl zq8Cmd>eGSVa=VcPwE^b8Y*LMpkhn0XMMVITay<(DnJhi_x-Q_H=nY9^`EtZ6<{%%_ z0z}|WqeyiNib7_QX8G}a=zDw+WvG|94-vBu@AD446X$SpFRS*S9OU|cI6j(pnTl*T zT)Ua&hx4NRPhHT>3BMFoV{=nJo8{*KPL!W`ecq%_KU#h0d{tm=f-uNCoP&5Ztx-lt zt6BZm7*#h7*7+7pDfQ3+5OMj(f(}*qP8bE7NacwSi^+|_KPAlKH~!t53V@Y_`zm%L zs}0jjO(q}3M$0A4f(Q-46!rk&u1eqrXa5Hqk)jHx0XIaZKpT6h2hoVsoFzMkGVLJC z=NQ(VNlX({ptZp}kO@(ivBlJy^j;8vI_!fs=!n4Fh{lj&#dFaJ9?&;K}+=XDc+ z_xdi3qU_LNcn1z*{~78X$kUzi(cK6c-3bpcZYC?(<~?<0d@qCi6^4UFP+Wj{Z6Qhh zy&z7EtEf(U8`{~U%(#k8^?a-I(F@6Bq0)Kg8X~-gA+E3}rV?oLt|>*XzgDXZCQ|;U z_cPa|9WvzuL{e_-MQ=;Fxt}TC%Mrh!7icyt1reav=Y4ojGEz`U-zF9VIEmpt42#cm zt+{bWA@Wbdl0hBR7x1p64vOPQmH>vxPZ8#Kc2|jGZ4CXk$c#?P7zT2N_ol$1B7&4I z@5KbEZ8aLBy*1Y#I9%I#2}jUV-~tk69MyO<gQedNr3iC5t`Tk)K?Ec_V z)OLTeg|BuaVMVP&x7}YoM!6ms|8J2&rdT4%Vq+{pOu#|Z0XU35MosDkIuChZ7Mw*; z*fiv1Wrb!eF+zkRa108Gqs=HemIW839v4$_4MF_8_An^^9;Urp2;7-n?_Vv-#hxXJ zV_l0Q&2I7ffimPgjFxZbyh-q5N*=cT5ROhC;?SA_cWOLDuA;L)gz0Z6leRgJjNsj# z^OvCN)Gc!)>x?g%6mkL!fJgOJZ5B7n7{nTpCoqkty484x3S#Hn=@!~>@IZiHNzj*D!nF_9QDLXvp{jU-~J z`c3C|5@;sq$TX)RNd^&!iBY7;rs|X~!&5K`(&Wo`n=P=3s54JUj6rWi4L_C9(@^x$ z!%?e?*ch4ays5$}JsFwfpc?y-*@(GMmB9At%{YmC21WyV8#kNGcO))m*my=9K&-FB zb)9GMxjpvXsDH;6R)OF)iSTq3Q399Oh9kTko`Cys((Hj*%>w_V2Ne1f05=N}^VYae z^o+pk49Lou0%jme$73x)vyU!QfAQ|sd8vF_MIXl552B ziLZwO;;+A55TC;A%!o~O(3ZMc5~#ziCF0*4Er(MN3y-jXjP?PG`EL;6a??hrx&O#z6bH* zW7vkNdr+g}%ezV>TTBi>eLCpvD|SD}Ndop@3Jy?^S+zu<*uDAqm#?P`qSdKACDOlv z&Mw&i9fEMk%M*1J?@0+!Zfi{zm8FtRFA-pj2Ay|qAU)fMw-&^=!(RAptj39L%0BtI z6;-UCZgHd5K#4d87w!fq{74KEY)#3b%tKlIF|t72#=+yug}fFqoxMKYH>e(~p6|Mq zfXc>^YwRkLhLmUbx|J?G&UU>b)NzUN3FvIJzLn7}{E zTWUoTTPJI`3J!oTgc4b`18f{VFxQ_O{qd!y3serRzz4p6CJ2X>jcCY?Q4U=OE5O!+ zr8NrqV`F30U=kGuOHD%bC?t?TlVf877b7AyDJr#qpwo1AZ6umHjC+EB*C*xkK?FjP z4Yz?CN@s2PIITxhVDBFyKLgEr_rPT=!_g?#w(LOhALu1z{=sVKaA?+J)@FM1)$n~P z?$i3n{-(h8v%iJh1Z-UjS=O=^!L#B3IPt?ZUG3n&&+xO6z>Q5EKvTI>+ENp70qm2= zMp2;)O15^GiD-u%g$ajlo9p`~ZfFeFVyN+ToCK4I-)19jSR3hWmM&1f8;05MZ*DBg zX<(1g98)Z4U^y+4RFdk^w)Pk9?Z7g^mluLo|-GV#+@ef 
z4+;*yb2wQ!&fjc}Z?B$uZ2ar~6OQl>p$R@N9soF*h<@1TTDg%)bGElO=^shgbZP;c zbI+Bh!9+oSQN=v7#yX^b_r&$39x40T_T;f91)wX-33UT#I=wX7kxwNC47}{1>?1+d z-607g7@ZxozKxsxdyKlUWmtwU0o4f0v9d<~>vssZrIhTfVC%l^(W{?b7evpXr}Wpo z$Dvl2e^oAyku0zZvS(WLg&X$V&1Vz1=lFGBNMZB8XQWh}=_y(pJ~bXm-+uYe>_qhS zVX!0j^%b3*#dp(g=6tXGzW#iBWE4b;&!FfmhzB}D3-9Q~2beM%-&G&0Lnodcgc1Tn z2@R)$gIiaL-X@Tp5FCyRN8dO$El0pD|9R3kM>BQoph=5a5s+1J+BW>3lV-d?bz>|K zT3Lu^AXtaAPaYqzfqqOp>y*xvT$k*6_$v;9SRCB^FMHJtMf4NOVe?nR}za6 zWUjMiY1U-y_?uql>Tn2Q9`A8cx&W;3{*L)?FM{7>pbe>rD-X-#*N4)MuRI}V;WwK> z_}jfDWip~Q92ot5#$Rl@;Qv1w>GLk^wfPE4s= zxN9kuj?>{<>!4u7lY3P@A-W)a9$K{C&Xd4S9T#Z_u(K`eK3vGlAwqM(_}(DsX#lZ9 z-VhT{&+xNvclq}ef-xvmhs6nw7BK)9aL;&HdzjaW@P&$X5B<437MbPR)TTRwZf z^n7_SZM>h4s4kp<|AGFCqzzd~(IzG`v(2vHqTe5X2~Dd|*x^?pthK#1K}pC(J73h} zZS%{?ozmq|Nju?IOKKxcSY~3-)+Y-r9S+ckV5rKyrPmu&@@Y-=VCuz3DzIDM>B%#YvD`eme0k+PYjfnq)0H@%zwCjD?Xc)d0jP1R}wr7aeFc50G zhQAWmV*W?*P*6g6c8|R4)olC^Vs3^%E3;yPtSwNWLTHR4=wgMPBVL<=eCb8gIAO6Q zOnvod;iuhk_kWZ-pAsYdvE5}WHHdwxm#a_BO*2TXvb3x-Rdsd-_jJPiuAm>Vj(gQB zN?6;b^&hM;2D^trC+AO5dWxOhlI7U~)9J|@=4$K;F;=!|&c9(8jHGeYTMkdEA@ZgI z3cTA+bw;1&uhMD;KAxf-&EAr{**Id5%r*9!TF!+~2kuCj?qU)>SO=vKo8kELe_=@zjal3>ym=tT#m^bo{1S@fj)ga&~xgmAlfhUE6^9JZ-|| zE&tk8iWX>@`G07p008a*3AUB!2xbgCw5`9+!KUK$0eyx$dTVX|GwS~vhrf2j%f6Ku zJFI*I=WSSih3bZ9%`2fxykT-Kg?hhGLtd#dw1AnNWs{<`o#^#R5A~4g1|d4QH|Jvx z8@BGFgV1ve`)=TZ`__NxqbDJ`zB_BdBUOBLX5&JEvxS>3zNb8}$BO?K{;8 zSO>#+#c$@&>22iR(E9pK$3%iHMgLz=o=XTG0+e(k_*L}YUb9AidG^!8ulNH@JTR{- zB6DsjBzTKyr8SD)={4*%Ww4pYJ$h`EWx*L6c@LjYFjQrhi1p5dLu6QL5=@wDWuX_D zVFa$QNim1v;bLDS+W^Ub8JHE%7p{2x`_RYT>wag%){5TqG|ab`+d4e^h8*9{ojg*T zyVpho&7gN_7SgA8(BGaLf4o|UQTNWNvlJQy>HSPI8t|&~Z>Wl> zeWR_h8>`QL8g5LjRg4fT2ajSgz>29ZqRz`?p)Qo|2)Yzk_f}tVRoBb?m5dHdU@oHr;v(vJw zM?`JR0pP&G_FiB2EM5NU_nq(?7Z$LG3d~h2`3^;dhjeBKwWS~%>T+~s70hx$5?P;2FxblghG-UAl4=jMdib*eoj zewMr0^MhtDsURcm^)m?^IR6N<s|?u*ODX*T%Q$8?I+Cz@15h!D@?^KZU2B5SY$0WjufS>s>>VRiS>2yW_F^!3CE zLXHifvqx#+TY`3%o9>=RBG?qj)EjiXE5TX_G5d@_o*YqBROGCs1v5b)@WX5%=RF$K z5yCgen|(~bIU7)LJ0XYcJ<`?!8aT(mS;)pIenFXSY~TORWwU$BKfvq{-EBB$Z?btT zud!yXe1ytk5*etjGs5p}qXhBP@yQBQ_kX^q``SA3kb>V*MWDs&^_FzlXC#*UpP4;$ zul4Z~=4cIXSq{bz6ks_I{7->*heNrHj-NHVr{CgdE3uJ(o^RQij%OAJqOLbjvJ==M z*!Si&mB7M~UnGthUGb~LO>0q;-n8kVy%kU+KD#3oszVp;ESuir*AaFdbDkpD_7gMO zM`$i%WT}8cry3Z&3=ir$>{a@^`M!whG~bE!g?_&U48ISI3HC(wr_FSDEz}dd78QP3 zFofcnSPCY^kXJL=b0YMS$wQF59bdga(6B=n@F6n5w&2r$sd`EYTi{^BS*yMFX~cU{ zaw2r3Q9~M~DQXygOt-RwSKF`tSeqZ6oQ<^IEHDy;jv#Dr#8kItGCVQ2f398!lwDI$q;85&a*q z{r~=+SpqF01U|iNfYuwRh(hbk*{FMuTgj` z@SNI2mbn>~Az*k@cwLs9%;KVL_5_U?PV0FI>shmTyV)`i2ZI*W(68XH1`x4&#ZmV< zGizzGjy&=Wk^MC{(co1qcaaTD08)+(O|ePf`|dt3kuXW}wR2OIBspJ(C~&2rKKKh zAM8rQHUGuC~6U(hLz$@&YP~8%JVViF(+BQnU0()66bxHtl9hb1mFbN$+M03ysdvJ z*&opOnH3e+U|4xWzC@Po2>{l1em#`35=a=h&TVlLZtkYpE5W`jiA|d_a$&Xv?I6MC zD`(sYBDp)jw#HZlkm+ijWaW^DR9X5hWqnTdqi97NtPk!E=&PQ~m4|pQ7~EBCNv`HZ zBpNxwdHuqtxG;du5x<3wtfitNfC+jG2Eatx06@?q80d=#`hren5%7Qi0RoC(|G(q6 z4;k1qdjSA3z+3p_4RhSR>}0T%f7W@MjgCGxpt>L?*YuU&QbTFy1M7+h<|ffU;XxQy zV_05iA#u1jIx<>(%DR^fkJ=Ysr@}bu1i<|Nziqaz zrxsJPW?kAM+xw(4XpzBA&$TtOC1|1pFMla8#QHb@j`ZB&ec$)~c%RPQ+E-f6p(=aQ z^Ml=CfCWy&()auR8-*%&+Xfv&=kEWq|5eGdJ{w;=DJ#(ae8(<4u{xH`4_}lzL$cVz z$}&yF_7q5UYDBik_{~R}1lSX87k)L&C9wU`zb1gZpf~Hh3;o5r_THW0=eu(pyi)@( z%m^Sn$+yVf5eReltue|lx3SAGke-q zDu+(f>UT3frS$>o%pxO?34+mJ-IKl%E1oIIwL-Kr`j!Ygth!o2^_h8xt!PPW%4k5< zh+r$j%fEZ|+=J=fHMgn{bC(}uuztr}CjI&S4IO&t?&QPC-C>$1BeoSt)4zyI7NQ@Q zWg$1lJ5X}XC~na0B(T70(E{~8osl7HuGI`3Ch}L}7@i7Nx<70Fr?vW1d`^BF)7M^n zFplAFPFQeQA4hOs20?IQRS!h9e^84oZK__h^L6aK)ekcO4BrqvhBHwxeG$PaUuBE# z_4c0R^xb~pv=2nD|3^@<+0w*D 
z@8kCouYA!y7u~?AdCV|M!(%mv$!g32w$|R30N?bf4W9q#01PtB5iCt}QBYeI_e4KF zwrJP=9)FG`LeVx1j1`cd?C99?A5`o&DV!-lJIKK{=Qs8rTwn@(HrX5@YoEB<(S(-+ z1T-+5QTkL^%5&qOqaGY4R*i8!&xzSX_xT)-3RYPCQ9%SM)0*sPn-2*v1bxzW+~ykI zm9hGrYPEUVXyoNT?Lh2z-^2PEBZ5bJozf0hr?Cme24@=KM>ul2%m5M2jj6G#S7TW| zLZ|e5+}-6a5J`S*KHC4@KSIdu+dSNE;{B_}YFr6D^AW`}3erk`EoO}jD#@Hc{VCoD zZAEMt&Z>X_7(XwkL+$R4l-D@<{Cx$$AGp>dsA&JDz{uQmzC?)K5WQP3J;N5?nHxf1 zMLNq(Gy~ki40L;7I71u%I>z}(R~DpOs_S+mTj#{jDKBW?0T$&V!kI3Z((n`Ce}^;# z51RaJ-V(xk(qtQbcIC~u@2q2M`5h?)U7-!VQyATf4+CrRPeS@t*)5-ZRsRT_K|Vkq z>j_mturLA)Az+7(=5OLv8M`lo;{vkZe@efwfOAaYVzc2KN!xwDPWqWAJ0 zwhN>Qf8o_r^EJZ4;g2v0=n)7L#6ed7GToTt<;7|6ASQX=#Wvu~cR~arft}%*xoz8q zLKRNGpX%|GMK{$;xvvn=i|-9Mw?c#Yn4l(84Solk?!DkAH@9XOAau6Q1I|xBpY6Tj zsVx(-%eFECqDWSn+dAoasmFj9#sM2BO$z02JxF|83h%~?{q*!taIhKR8_X}V+w3vA z_1msXA;fJ(`AZ}B@ZQKiuXfk}j3j|E6mg7$@e{xJ^$5_LKvrX(@XJq2USW)s`27Ei z%?096Bo%>e{hs_End%dh5E}~Znh3fW&!r=~TO`MMv16q&rdYc8EG`7U376Et_NE4J ztw5r$P{Xz8uEg=vl2>73nO8Kp19I(k=cqyn9e!~G0Y1n7yM$Pu{6y~oCy%sklK`C^ zHLH-Z^5*(WZp$QK)1yG19>2Qh-#wl4Vd);;^Hi);+5X(RY3t#B`t^QSiYGt%pFOet zpa1R80TRfbvrU^9-`0g|yNZShMC)ic@SHHeSkL#UubUAnl1;xcCsYYbDIQ<;2v zm#5+8>#`0(726B+{Lj!IFoEP+IoM7gu=|ItJg5;ZyY0S?!LO_xlIEHM3RDFqh&7@q z^?Fp*VbvM*`|s-~R7gQO^mr(;j366>E37C>ORo} z|Ic;yGAx%HN9`F4R0ub_J#PQGNoQybmB71hE3EZWxp{FGiAeWkV^Do{sW8c9$ih}? zPp;7PT0_ONp6P6uCedM|?yHU>cBb*uaK(M0z0c@Q!JlYk$P%9mSN^f|sCX61ub041 zIkn!NeYL6d(QY@6#AT#)?D2;MFPDN>0(6}FW+aT!Kl)iJNRRBjbDYwu(yj~qR?BFX z7^mTPH%DA-$u}!pr%*y?7kCm!HyaVHKY#aRk&xaaQ~|Wp<0$1D0!Z0P>l4H+aQy4o z?@pbf>G^eR@po(SW$2_Z*N#rDiX{r7(BDOtNdNi?S){yn(T9H0GGyoHTW9$fF#zr3 z%O@UNi@h<~gDf0C(Zrle+`MeASD{vgtHHHYR0#jp;ulLmO?@c-Q-NkI_d`cai2kJl zVybuvK``SLzr$bb`Uk^fgq5Pyxit3)lzEzxH5)V_8gK+3cpoyv$}BJ=KYaCGeu$|T zTmrH( zT_eh%f*`FJ-psq+WjK%hH*XW_%;2>vRcuFVY5e?1C}RzDXiHR^+S%^e$PqNEir>c3 z+HYu4Y*z8co&C6>~FyPd3AL-?=W!F=uu#I~+|pm8TuT*{U^s_8TX7=n}xZHv50 z;H_5>RfM$n)3~V%YDM(QFQ2dr9>bSRfK$kPd{cmqo60x0WFb;ny%!1d9Zkw>;&!x@ z0jk(L%f+gC`#}31cD~$09r~ekeZlO@^%!kzHwoi7gF|1?3_$C^=d1@pn50gf=xAK@R(AnRtKW{&+sO5HAO2Qh$GAbl)_)-IjXK z|Ibylp!0n_Yt+iaEC=93?4;8!TzKy#=b_Q?=qqf#n-DG^q+-J9{nF4Ea*x2@d_RNC zjdz@<-i#eCSO*x?;is(-p!>%tHQ=&H=r1{dc`yjLsi%N7ZvXDW?qv=Kmc2ke?kL5p zMR=nQ**Aal+{ggjdQ)r$SOKoO;V+j6zbEb#zoOSvzM4eG>kT?B!dB z#V>PXd};ibi+Xk5h1s{dk`kL%)(fq45_q(G$jpYmFy3mvyE+B!M+sWU{=ZWx-cuK>mP z4SI+g=c#paywq=SPdA+!EQ%4xYr=v@G$8%>L8wmBNSVpzYYCLXu^VVQ3FrG(h691# z7G#k;B@o}<$c7qQi6K{thohkis(lj!-j@LWcQ#-}wV?ZV)4sKZoAlnUvBevu|F&8J z|Gs~w=l@x%R@vO|LOkWJ!SmYn_q2>hynN9K-k9|L2nT=^-umTKX6c~2@Z2^s3#n^* zs&p59oyW;O+cu<1?F`JEk4Z!jl@%0g{<-H}$OF=qWza$@3Ey$@HqyH%W<{tfH!Uv97lD#EEUuuUsi6 zH60B&?sh-pdT0^gO6EmWHRXu)G~8nNr{B)WLu&UO_bL)Ws7z%xlDfrxj;q;BHdS$W za_CeWA;Z^RhwN+2FpRV6K9=9 z-f=eMg@xnR0rAUBi<4yuq5M^?61tNb69#v$KxIbf07$L(8YF}6R~mM!R`$YWEP`au zojSjxmph7>leTc_RG=#Qa|n85;Ocmel&zQc7=!Cx30aKx&7ZW0tiu!bL}xxlG$~{? zq+WC<4%K;;g)D8@PIjLugou6LO=3)$#3}eAH!v7LC%pyNM?a-_r41!R^WMuE< z5C%m(z_2QKunml+s&!1YX$fts>!k&I*_gA$(=;{Q1&dv`@;ft;rUFTLoqP(@s>Jh( z(TJntkAms3xs~<-?l2*1nDZAMI>}#;7`GhkNWK-^m^fICVCCG+BmRh@#fF-++7a=) znO!-!m`J_zqxA`>xV+Sr`8Z^l!k4jP*5`b;i4P*Mkew z6z28sw=??js4Z|(mIa+?qAbVz1DWCOPp?{Xj0%X;>~9Egd6f#H&QeRWZ|O}v^5ncG3S#Xm+!q4=1(k#`>CpN(jgwvFNC1pKozES>HWu z+EZV6=2v;K^C|sh+slD)%j&k{V}h<6^&@8#elp%(X@!+!?%-$Pa*xJMwA^4)izMzZ6SJVEh_t)DzTlh}m1fQ5%@uy=}_+%-Y8q+j} zsPO9Z6pmzc2dDTyvrTQ2%6?wetmf^$ZD0teLsZ?+uAgw*L-KvdrkiOvo+En=8(K3Fi731hFbh^ z$9dBPL9uI2wCc13ynd{+)+J2Lr#r<{>?0}GZ%GI^Av!)?mYRB=KPkU*+-_@I?cgu3 z8MjH0dwkER-Z!TDJ7$1*60Pb2wbn~+_y~ccl4ivw2d|8fAQAJApPHSZKqz_e9%xs? 
z?!$}IH1^DZ-SKC|=f*ZK7Sp90v>V#h3Anm6P+Zb#9>lj`@S<#;e9}8b@o9rs- zlhrYlvYIS`d}ZBX$AETQUiWvk=~LUaDcrKk+86)6d!C)vxnY3cf)j=lQ0Zg(_{Lss zo3*1@fx9zj3Q^l%UCe0Yd09*bc|BUj2TS*6x_>;5i?Nzl=g)thpmjOY4rP5vG>)=hrih!{oh02YdnpBJkU`vVKJmNR?;!T~v z0iTeO!Njw%YfH=RrrS}#oW9*uq>yIVo}=wc!_$UAeOBb94WNgXU}-aLQE=Uh_o}aj zGXr;)Tsfz6Hr$xbEz)qgo!wiP15xM_rf(lsh3ux_$?&sqy6Te&WwrmBIsJKnO7dH3rR6eq*JONdCpa?*}FCe!T9+czBkYlRJ|BSEXIL z(!zAfiKXe+_fGEcPg+TMzNbyAc^i^ubwphpLfd8DDT}YUeszkmA2m`PFgk1|+Ybx0 zZfVs-44xgWeB%eycb!w z>g`KtQ;6?V7iX29qF*{y&!@Uk1)jI!$^+mp+PlQD8^H9A;(g?##{;sJ4?c#@w!-+OHcfQAzGdQpfnv->VbL-R3Oxamo z6Q-&g_&Yl#dpTi%1bL3D%TGmtqO|?UFksRWf6AtgD2O-RIqaycpmdckc1rhcku~Y= zX)XQ`0Q|jemKPaUpchVUE`p{dO=^R&GS(=6TKD@!XY_CEr$$@h7MI9d))41JdDx;Y zL^2EJAFcE15>m!cPCVZ8ypkaM%P&&iIzi!3HaPxDxX{*KWuvqYJq#J=7d@)5UDg0P z=?045an1RbCiIPvG>siDKTtE}{QS>&chqK6+hbYcMoR4AUmxMVi(U-&-c*>5Q@tL! z+7D)yBC>a}V+|(=z;XTh8y%OOks)`#ke5%>FEQDuKw@*Ezlk4;B8s%hCuE63n8a(h zT;)>)j{QpYj$O55k75S#O-7DOJ)iT4Ux%2T`b+SHT-=sMoW&?+05moEyfXmM;i|J| zzY|@xLD)48zFKOZQwHvPk05OF;3`>TdZ%X0R0EC3>jv6*;Go&}G;S-p>P5;rPBQE{ z$3JLmd(H));5pH0__+bfJ&q5kgVZePq6kC)Fyc!3Hw^5&FPQm7*vi}h#e}@hgs2MK zi4!|OFlpS+ijts7wC-5|xH5Xi$xc*6<9;u!Hf9+UR!oO_C8RBPuiyW7W?w`Uyax11 zg!}&Q9ZLfjuYFJv=tw5XvkcGe#Cd{^00EuQ4l=%V(EZS|u5c>^5EWHIOL6!~4=Wyn zD2g1{S>zpV)wY0_QVL75g$3p@OQqjM-bPX(ph_o+m1#*!g$;x*0*vz4T?r=TfH#Zb zG|X&CpNqdr85OUJn@wnroq#DAPKbq)5uhW;_E5LP`taE1O~C!`b-|!ULGy4{2Z=4Y zkVSqrVixmvH0Bp2+W|bR+1ye~O;)w@SoM>S;8d|^R4#kaa;Q>AfT*Gu-$+Mo>9K;ux}a(#d9P^g_vx_BaxX=?PPJy zD1DhSQ3izbYj4mY~isCbk*Zc)XXKn*|q~#65W%Y~zct-P+Zn9%KK`(_HS^8av=J>)9S}yVoxX~D2HvflCs5~SO&RbH@ElZ#a_(b~b_69O zD}ExsRfRZ4dVx%0CVaRof6bdXMHWs!Dn;qUGhJg@4P9|vTI0j`)3n|+p*0=+va4B= z60{)9&n&rp9VV2f(4ktnU%z<_c;y2J>Y~Vs&%M@XhQ4@ezf#NF(H^CS=9AJba3U$P z+fCn1TMdESf1f}bLyF!;IW4+E2}C%%V{_! zsu?zAF#PC8D*+G?aJ^|}TKQSe{@<83sqgks+Lj8Y!QoFq53h>V1g~v^;DoU2d-tdu zEE`l7F$T@0RLfw6s4LSH`{-Rd)rm3-5}XmkjN;r!DS3>7B_wXiOWZl*Y>TbIpC<*u`az1@&JklUDVJhgcoih)1TX5levC}!h$L^ z9p&$3it7YfBfqf)qu`C50FjogRstPlI-k+fx_I4A=EeEE>4O2-XkE`22mTmWpPMy0}-mJlvUuYfY?n!&;lE z=%|foFpkK)6{12vpP((as-3gH=R-ne`9D;K+>q!fU;(Vr;S22QsBv8D5mc zo}^o2Dhhb|UmFjV6n^U`JR56sV7evX3<+`}=Iw;c8WIVj6NpB_cP+r75%&)W&;Kba5 zGAwBZ$CA@Xr+wnjXh~%f_RCVVsL-Nh%0K+h4>UgD=SIC+scc~^kdb>SoE3zpv~M2_ zcZmMI0)`a`>p#!Ka|;iGJ#<_ouPh`&FBa7KUR)!WwP5uzZY1*vBf!Y=OK9h7MMaX} z^6LLLPbu5ueVHTN(MD338P43BsM+mHa=l=KGTrW9u6nb&-DRG-ufZX9fg z)0&3m{#sLt&0wtHU0jC@6tO`A7lW6hu$Yppi+6(&>vnYIXC@^Q@PKrIxxl0=BO*-9 zps!3aE;W#DCxA0#wjWIK38ObH%3s#4j)9ejLxlUb|wEVH?FFY(ak? 
z^&L=<74a3Q4g*U3vo_&!ArjN!h9&@G`xTy8Qf@k9#4vZ;cTo)4YISf51pJ&URf)CA zyG5(l$3nG`6tddFqt?1UI`;KX8&%wzdL33_Fvm zu&(NykSDoYj6_NI-3H(R1nAcoQ%6AL9Md16x<0YvTxB9*?#@fO2r@|Zcc1&Y61KL zXK;}{1?vysIdaS%+dRrR!#51ZW=Ihky!fW3TwieuG70c%^^!RZ8!vQL5UR!7AL7SO z0VnlYxiY%B(3zFK#?EK`80@nyAjw;jg%?{DQ)nlFX-!tU^z5%UZ%LXlL^{N7;g84A z2=K%8Kefq!Z@Ux1CQxkc;c#QhQX*@BHqvBqEvbk#~o|z#7L;b5-h1O>iIQPid9uSA*5Qup9B3XX`0ngCo+^Tw}FrbJU5aS ze2t*Slj58#d^Pt@VVoP_vZBciL;(ygW$?`Jh##Er*0p9HDdRDf4+p5FB-|wuhT0IK zzo1>sGQtn8H%y6!R9Zk7^v}RH0xkKaure*{V2Z$cPd?0 z2+Bc6yFof1BZH=6mUrZR2R^9`6|a#0aubvJ$6tor9Z|-9t2mN-ii$l(#(1cxrGT68 z2e%C!Oud!&N(SiT!X??!A`TCLMOcg=iMKLLdMA}UwdMdzTk~42Dz0Q z69%*0<)I}%>n{^OQA*#AGt%;vVcEl1r6V~giUh7U^?~h_Jx7Y zTfcwP=@ev9*mDp@%5K?Z(Gi*u_gs}PF2OKG@uhZqlinCh%F(}BWIePMe;muWj(n+( z8Cc@t^a#-T0Z1F<_UO(c5w z?AfQymbcyG4TKTCQ{CU*@sf}B*D{~`F|xhVZE!@ZJVy%_1-gEh;Edbger4tKQDmN- zrdtT-s^JG)-W5uD6t($eE4b`^Nv5o_2Jyl;}Sng^I$>BS|2O=qnc{9Nzh3` z^wYdCd_&}wkJPuzoG3mN#5saWwJJ{3gI?)tna>#aAL8BkozYMj;SS9NS7bNMOW#|( z6RV9A2=&{Zyem2Jd)oPKxk0kKVtK|dZ|Ca?0-IfgfHiX zL(CW?dmLb{j8;8qo<4?=tayP}2RDxv9D9UeZay?aw?j*(YRtm7&C;+;wk~Z4?gjP- z#n_WTwubmd8?dS2l#eF1{#~roco8~eYi$a>ZIY)qn>KvrVN|m9{(&g2WiaYZ2w=8 z1i@qtlWhANK~>^S)EbU&S1YyN{G<*k>r?|WM&ZB+Sjiek?&U5{Vxa7s%yc#Zjc>+nwp#5n*JAm7;f| zDKg0QA^7O>dxe>?RF6_#4BQ&Ml*n5Ia>%bM?TRn6TsE=yo3oxr#XD*m!zAH!7(eAh?#y5=GSYm)9XCkA zT|Rop5pzZcXuQ)3vS~^`OeZd6y3yT+mWEZX#-&SdKFvweq3BF}>|Rkd!?-?;^P2kJ zn72&EQW{TY6r^!bIl}2~=LR<&vQzSJO@0BZLOfaQ*4fyO)bsp!W{=3*&@PvIGV6e-eR?6v!@#df;E>WAe4-!65?91=NYQD+f74p_ zZaY!hoSaQHAnR`(AY;I zCe;2|qJr~I`SPYkViJ!7?Q(T3=5u8)WG&|4T5m-xs=<l6RcW6k#0$#K!KUIc6 zxoDQ(W9v>|pur&7-`8E=U;dq#8qr;)_kmx;xVg5K42ZgX>UB z-_;YN0LvojM2NkuUI&M1-o1#m$ST8VW=94bhW_l}WJs5+fw zdR$wqu+s}C$$?+A8U{lpX?@^qMVGxU;$HkYaZz2!3%#x}r56u?7^Ds=d>3ebLw$`6 zAKYrQxOMee^|uck5jOLM{I2rBKB9ETj)k7YxDxi`m#eQW`*+i@d#TrBS#^U5zpbDd zeBX8b5LYFm$bjj?>hU<@O)s4e1%iTjDEOgIxNq)5r~Px0{Ou-PfH*de!RF z9Yn3|WqYlRZ{wj;5oE z0r$`TPGjhWg2=C7rp4j6sh*PQC9>Uj@#`o7<$njbFeposN-saYMf(h_D`#!w^oxxB z1wUUrMlxEr!cb-DDbO4{NhqimZ>xtIkbYEd5u)h?{`g#Vt93SCeKi~0_(8aELEZSd ztwU8jPPHE1rRAy5tU%*o%?;@1@?`7i6HE>4x?}B4uTQVu?!UPL#!97Fw?p;eKyk!R zh^#(V7l9!$2aIzaJ_VH3v;4Z~qcA%Td>8j{`SUC;6kA-jRoBje;i! z^cu}gscQ7^#`lJ|4RZ4iy3`P{bNK5?5Up^Eq6jT7B^p8d`Q*q~;?vI(a&ejAM8;GGN$z3`3UXSdyGN9KE_$l55kd`p|Hb=N z_QVRjXJ^&K+vOxz^~%TrgP~#n3Vscn;asuGDogQYf*hNF7>iEftu$y{u>*;Q3jvU!M zkYAX6k}_urM6MiKaA#=?*?uBYeFz}|0}9SnGb8#5i%R+|k!vc)64W<*VOWB#@*fsU z1>t|_2LY7qLSVsB8qVV4WJ@L4q|;U7MT0xDEl#uz%W@6lfr*86Gk3uq&NSlI<_GTx z`^(a`S$gwGOc;E3V2eDExSqB~E};9dBx$X`)rP+J{~!d|(buWNFPm^R<08lY@zEnt zE1N9rTxm+(FQS`EW=ha+ML^Yxl2S4LP~Q5_Lm+!jdHHbf?=p)YsMvVOk+ioS zSE2;!S-9CddPW!yRc1QLsemx&P-sb*`cB-abq;rk|XBI z5h|hFtF5$=3gW7}ffou!nq)8~GWJb`NBN}Q)}o?ijsX$Q#aMJ2>i)r`2!rZKi4jl+ zcBN=vF)@bCFgIGgxTMa5;jFQxzWbTcqQB_JFKLIgW#n%1+b-nXtcpW9tXbIXl)2P? 
zgqorXEG7Yy@!dZ((rBwns|wF(J(NXLm;tT% zd}=t{ezKFkGCCcu)qBvCrkw<5>?masiq}q3#?0?VNIl)C+X(ggU7~2S80K=+XET7K zM&;FQ^!H7aRRLy7zsxHNXIk`}GA{vnUq?bAznIU^)U#71wfaNp!8AQxJISiuw2mNzeY)XX)vQ!rJzMMMW)r}Y!9JH=2p*{ z@8AZsS)Q4|0#nj0lhj@bgXvcAD7 zbEE%mF2!s+^lQQ7)=B)Z4q#Wf4^$ZYkdOfq5^z4kTMZp2|2wT$bIOprp|?!e8dAwL z?t*E-dIGPqEC5t^cirE7cMy{&?O+rqKUNwb2ltszbv@Bk;vrFqTFd=bK`S zFCxJ#2$QF>Lz-Zp1~HUBwMn#0>dP3um@AJvqqDquDpfi}suUgoqg1InenJ-yN*uUW|%eU7_ci{&@;D;7+>#+O{>VM#EZ zM`?<0TA?c&TgcQcm*`%ihfIU&^y6aRsoa;#dmVjhr%0?ZyBesrNZS{oId z$M)V>k>ycFJlI=hi`V(u@%8>Po#k_O^nFlZImMZq&vYzCEuLJ4F^`)USWELK=$k^l z=2fnr=Ft~aO4O$Dp{aH}PPq*v?4th%>Od90dIpMAfZ^)&u0R@u$$+T;`T4A6W_Cna z@2v-+b1*nKLgM#8V+@Rs?1@=1LqS3ut-Ja;uwr3wS<8PR>f~gA`B)KrKUb0_)YP|R zJ|U(bxP?+buxyC~(CNgJF?ZDZmrjjJ3nR7vNf_Bci!mk+h`BN^&KtHCnwMC|Z=TFr zrfTU?$!rM57M_}L_!`l`J82}X=hxkCmU6t*w`~4@q0r!e)5{0ag|Wb!J+2bNclR9LAWmhQ zwNRP}Ob}3EwT^~bs!J>B>JKIld^(cW2_Q&+i~Zf+Q}c)T6Yhrws8eHrq;m_@`eaN* zqw;W5seEMzAc4}_ZpAua!jQDv$WOq&#p8W)Ph*9~Wp<1@4fwAFFtu<9K-})F2Ie{* zS#n`i1si-_CS?w=5f*z|mZ(oe+kvOyJO$I3nA|6P=NalF$gD}ZF_xt-ur&aU2 zFE?$JKuD^>*G6}Pwvmq?r?%A{V-wq;5kO7*iW0Y=dcQQ6)~lQZ1@N>0&X_vYAdWP& z8q~9=F~?B)w_oTF7(K%F#bKTwZU*{GV5=@t;;x{_5&ZV!7U#2mRJI1E9|Q zpMC!EnqJo=b)VP6Wo3(lZql3YJSjPSRPI!7yNL?EH}`$^F7KUxBeNY#q@_t7m$cZI zF+4k>`kz3G5P8)8NK6n3-iH!t{i}!PSEOFZBc+B}Hl$-^`Ij?fRT44Y^yCc)ywXb| zSkNpy`XTW38b5PRNaIS17O+h)U)!aQ0P*fsE2#o=)VHh$M<*OFVd7}Krb5=Di@TD! z0vF~BoRIv9=U*2ywW$M7f08HhJCeU%nre0$;aqeD5Dzd5xzoZskg~ za$Z|nn&@V)JoGqW`e$le@&)tv0033bvL)HVH|H}OGdd@em>oJ?tGuB6q zNMqpIeX2hYxJe<<^EOOsaAW=(&$#g!zy#ugy?!j?oAZ-+(2F68jaUhGQ^f4%s_M^+ zuod)vr%l^6CV{)_MOAT=Ux2g&L?RHP_LZ5G%~S_r%c)cp$fOgDK;xMo{S1gdW)oX? zLG!z{TCs$GS!(}>a{2>iJ2)TeJkD*Y2@a&ZO(~}V=Bm?^l5hp1)5=wGLf)K2bj*n7 z$Bd)pvv6yv{~-)vx&%WAqK-@{xwt(qKI`qcp8)5- z4inIkj(gCFV*ok`KP_ZI{5tu?`Tn>z zTU1B@h>Mua=RK*b<5Ht4uf7P_^0q?Oq(euYEixv8)*h5da6|c4)YNNAcZ;|EEFo)> z*2Xfdi*FM!g<+WO-KBR|K}@ju10#y};MDhr2SwTZ%VGkmHncJbK@8OXjI0#`#C}YQ zQ_Uz-6}bs6C|MD?3LtQ^WAgGB{BZn3%B3CW{Tt4os0hM9Cr1=oDXIkQ#BWMft2K}& zprr+{P<4L}f`C}~Q@)pcN4svp^J;m)RSdvj>_Eo=9JHcseuvj;-18^2^k=mR1|si2 zYG>-6lOG-FDJX<87@N1vWomd?sKP^%%!;5Ou~w)sC<<3r>>Rb?jkvsWIcByKL6*8_ z@&z=3TM5A{=#S{Xsa52ytu7N<2h`+9{A6FuspK&0qh$%uN%-1u2E94K5^(0K16x0~ zd7yxS2qw>cDuq`bIaBQ||smfTQ?b^b~G51j?MMimLfsty28i%}-V5Fj_6< zC)3iZpzRgj4|i^~#(3Ch90PC|WRAi2sJc&&r>rWKqCX_5!GS7t20I2IXljZeQsAQM zx@J~3=2uUq72@YBmc42oj#aHQ+mfmalEWS;*CN*@i5;hLjr`@TSNzrcyEYboS_z7% zz(^&=`FzSEZesh<&O}AHAi4o(NeKL1X)`cuka~7m>V6XZJe0PC-%8SUGzkv79-vST z{*c7yWL}mv!QZCv714)DU6@qyCBX*M3Gwf&k!vqP;Hf!F=A~(AP2|(^Kx_0+>xEWe z0NT7lSbv<%CLS0s@tMr5|KOs#I=5<$0q7ho2d*d46WLJ7`%cEVl&y}2#OVa7XO|M% zVhOKavoR#jj3wHSRI>K|rkDT`Je=G-NxcQ-1#U&U0tJ^*p6g9n*Ka(aQlZKjivfTM zm{Vj1+4+c27?)`vEj4ZwXk94vZ;R=9q(n2ne^|0+5zHD7HcF-{#sKC3vp`GKpYzqw zg0K&BisL&!m9_p6m}BNANN_~Rro8<18lD$i<^Dbofh3%f{^#Yy`jQxcY3)U+F+RkpV1Vn!b zJHvU(ss}AH1@X4iwRwNce-3=qqjF1(|VZzHc zOe?OG#0AU?(lX#=NJ1g}64FX7Ro0C|mAD;KkJpQ_hVv>ao*zvJ2de+=eGY(VHCk=V z2ZGLPN`LT+0Ioq@3c*;?enwoYHH%0i!2Wfu0R;9yB)-+ct}KjCjsUcLG5Ag`Q|l|$ z;GUiQnUEagE0e178p(~JlU=*MhBOL58+sv;l-ekncu;s{^M5yAn48^P?D#923(1uC zR=g@NP|y6i9;(Q0RPX_a(-U&V?tf0+)Kp3%4jrQAP-^|15=fi@{6uTkpAc7tof>ujtTZ6l`p-?G_E$)zG*y@a&k$#~a8te$f-`$_vsh`0Np1d4?#w}gz))%cMBnE9TA(&c#rp{;*)K-cxcJjhCnfg#IwZtT{+NdlqxVvmbNE#o3g(cp`1z zPl@nbiB)?R;%{5{`fL~O(;*N@X}`>A!?GWj#8~C^%*TtKn72wx&Y6dF@4g;T2n;0y zAR_!kZRA%;=Gv9!DHCG8R4nE71fB6?z9Po){Ev16Zz5q~SI!Bsl6j60Di{L&cdZ2o zSYosXw~V3$a91Xbl7uv<@h73ulHOqqO0o|?*ddm@)@%F>Aw>m%Q!y&{ir#DyPDc_Q zbwUy$(iRgbAnw4foz45+!8K$E)Tz$M_mXEJ9ps1ROEejsSnD4EnVw-I*;BYbkJ=o0eUV0;Es)e_dZR@?D7DDu6 z{s9X};Ea-|oR>oo`QOJyq(BI01zc+n0%%Uc>U-y=zeg|uZo!I^Qt8G%w~{Jpk-pVhnpd? 
zINgD)iQbUL-8#da7!x5DQ3jzSiFdNmA2xqV39l%g|B=ETNr$|mB0kIyudI;xw{z=2 zA|<9Dd%slwvQHCc34?=&Kl600a}aFP6M|y;!P#PHu!N=5O0h!`~4Gpyri}2`{-MWFS$}WMw4rDxF$g(GMYt1|J zgT?QkOQloms!2m0e70{d+Y=LXGH6=>06+jqL_t)rvUXB~rdy`w1Lh>1v+r$Px$$>x z<^SK`VMiE%Qqf#Xrp-@eot&0;`8#@Ul@`$)V-9UUh@{f6$omDo`R$cBFYfR zYjVQ7y;87t^z$=vn$x|IsJ|xD07!mJ+MY&|wCztsAC*5&et%Lff(;S;4S9%PTi%j` zALWp%mN9?{(Z{A+2oGNIL_aTc1vL*K{;#MCB24IvoYDyI!_kkdl#cK>4c?Ydgh7Sb zvO<7fzkV`nO#vha;uwtNKvaJ9-cNY7b(vv*CUfjZs!qGrGBA#C!JwLepPzC^7=SA` zY$c-4NY|JhN&GAAv7?muetwMZa|a**QpL*}35!Z;q_gEc2*I`PKmd57xD-v6jaw3z z)alZmn~{P-`$X)LD@CdGb#K<71)|ESVVpgc^*FGbC#2pP&tiTsvZZ4xkZjWA6AoeL zx}v zBpdejNvkS~nd0`<-11SA0#WgAYQ)T%k~iu5RplldjD^R`AC)u*glSO5JxKel(OB6j zmI>2SN10Ys->eG>f2_}d@W?YY1sZj@?{`oTec*hEPXNw`^d$jX1u6qZ&t}fkdEt)b znv%{(qNmE?hC=&xG!~oCXV2e&bJ}U#1A=eW{d#5ISq%m|za{$Ghx#fQOE{o*Zse|M z8v+n~Gy!Oeh_v7{hh6h_7ryP$xC{FMfuUvqgk5s7aR;g*<9%)}Eq`FHn)^oqw7gFj zleNVAwRr7}_=|<3^wrC2$E9+;ypXl(;rWS#3WK0>b(psoYk-3woG6(C&nrp*`}vMi zvYpa85Kn(iRrB!3ChOlldHO@hTWt{gdrHcKkH=l<(wCNH77$YZug;BH{{t%MQ93tZ z{wP8uTfVNjv4Us6O7}i4=?Kak6KaUspFZ1F45Xhe(v z66lzi0JflYplo%*?)$)|&V>(B-&e7T0r0TT$kQ5?A2z@s?thSG(8MI8dI}7H z#)ia;1Wd!>Eh*JE48TKq*NZ2xtzDiBEj?j*w3tV}fS~f*+3AWUu=Fo_Zto+dfXy?2 zXn#I^oIfc-cw zEfks|(k%1Y?mw2Nbsn_;6LKSH83M!20JIE_;kp2IUPjU&&Q&?ZRHJ$&OwBs}@uO>f zg8<-{1Ge?UAmuIZk%&v>lmSFgd?2}e7T3skQQN=|c12j15RVC_np4Q}avZW00#{#8{_X?n^nDVcwfN}iekJ1ypG7h*D1QE@T`{`Siq%K(!5VeVnlItGSF z3rE5`lKL?@;ONI^u7w9^r9u2h#91KOe`Pvlz9LV5G#vBF;ZN@QwCob3oemg; z7GnfYIW^V=n>inmn6*@vQEO+ttRSyHVKE}q{h3{98iHHd`$^Ft>Ax!CZ$GyEhw(8N z#*0%K38r8GChRMYfPt;Q?HjlYy9NQ)%{ppo@zC6qr4_7LhZRQF|o;B*LFjt>Li`%*~yum9-M_ZBL(*q!pV+)}d<0eJq;rM*SfIM2(O z>#{an*EiZB>($cN;Cfr>Pik0RLC~&67Xnt2mKh)>&(#xSafJ*f6@M?4>`u(u<_q#3 zq(G2pc_5{?AgW{kCy9@02|a06<^SYUlW(+#*hUZzV)oH25KekRMSRFGHzzf+jWdUV z(MdQzx)yB+utI2W#-trYbClEC!GK}~DD7#5qCJCSFgkLI`DRILbw^IKcT4fgk&^%c zwd2D87_+EcO&3pL!Q>|I}PJ8_jH;DYH9kyzKj)od2`{#GuXwXt0^9`*#ku z|Ci@d=B(navCSf#?Zf~58<@NEGY|;ZWJK$;ptV8`e>xvGmrti{mcKtP%S&74fm^Eu zdFt;zi4tgm{@ZIF@06a={&0P(re25z2U6lm*)Pt@WI>k0KRX%hu@U*2z!X6DNj~N1 z|1(P+$qz7URAC%Z5p%*v|2dx);rA@V_-MP-vPmeClN|IxDLPkd|Ie${_)n`Te|2>y zvD}F%b({gvUMl#$2X~^U-&X;LUZzN_2NEwW%sQ`8jZwU(R8w*6CYzrei7RO<=^oDo z?pRG<+TKgGqW`Y~sC+tVk$)bfhx?uZ|RF@BdB%{=Hs%nx3&UlNkJ7c$i7L5=#9RDGO;oBmNEk zG>3Wy0kMUklDbtDP;|_wP|n`6&X5^yGwgXX|t=~JPqZ$CX zuJB{=M>$El5rHp(aSoylXv#0A*GerZU!{Y;HVl>G~&| zdVe7HeAm1iHy2h%^<0eW49Q~AZXxnTDzx#~;)#=5VGy`$g9QP*mihx?#zrf%S`enE zjVLXxEvQ4%p6#+E^9sKy^iQ6Cm<4DDZs_2rhJV9*vu!XvV2a3z_>V~X$59N2Qi}MD z>od6}TpJDuu##wT&>s4FrMe_$#EKk6YBB>v+Y%rTt)MC&W4&V%Ba<=*Y?>RBTg^Iq z*I7l+7XMNHf8VQ(3QocQIu{JePn~7}B0q0!VoY7HXERoEd7w@5zaEx6BDuIdpNb{^ zTWzd4Ayh&?BHMU)?V&%UT)i-rvPoEQ$Z~BsAb^xTF5du(=e%-KIsc0ZbAKygmX+*h zS2_G|D{lbKf-nJ8b!&$SU=IDrrmtPNg&)8*ASz zSW^5tvPl)LQo8Cn0UF5ajJ_*8Cs(d|*h0>b5V46aGu#(g{&l92z6t z;NDTk5`%qS-sF?A(c;XbuUs2&2tbe_{ABVY2o>oYsqEfHRSbX%`N(^FbEjs?C6#p5 zT&sSYMgiR?u7`uz@bPrgd~I&TO8YO*Dyv^c`%4=vtwjHf$L*^D-&6+-AN)b3P*%54 zj++xQ1#HUXw4;0xMao4ELQ^n}9*rMsldcJe|58J~b(CR?M&zY{dq;cz+v*ohaPBDm zMd{k($N)gcGx{S&z#p&F4%cD&RWI%(d?lK&CV(id+Y4g>Arp3mx}U6TDG`38H0SyM z0;mS}3$1a0nu9dLd?H>Oa}R}kU=CPwPbvUq_if7#VEfk}2myO3E2aaD5+S74&aaKO zm$zte;pj$Q7Xo2v=7iRq^_L>R?gelR$TQ+{5CasEwo(+XF?D_r!UHaKpEZI>ez%c-=E=WYr2VtL5 zsnwa$q{6f0tR;J`RsV-ib417OhSeSfo-zP9d@ARDIc8p(O`C~a%{#8iTxUTW9Qv2%c{B?6;XHpiQ_r)0erFPGEgO03KE+utjstIyovT_?3i|H{_`GpZ~ONet*AU?r)UM zrku7Q;_bAWE-b510O5z@>6E;{#lb854?^JD@qqwHkF1X=IeoCtCWT{gFs8&b)gvR{ zY$^@!Yj)&VF@5eY~ytp6~gbJ#(w(dmsPq(H{?VR6pu? 
zx<=SOx&BFM+w5E~7OTJ0R=KYQh$7RN5|{XO$xp8ekSC4eJhLW|lBMgSP~Bx-B}#RT zNjmD{alPPb5jCkA++H91B22Lso6j=fX2Db=z@5klf_T#mJjc40U#h$OsfLn^8 zgn)L`aiQ_FnmvOp?vDp5Pk%0zN}ZL<<#^oq)55EJ9-qzj)t~cF(Tx(Htpf=SM!dD6c06ADmel?UagB@Ym_casa{POFHlrBrn4JGql9eY@ zZ7@bZ2oR3;7|8)}cfD#owb!HpC{@M6g77+km^VfIf3vh{Gyk0$1rAAD>?bc(Fg_-( z9S;Z~RLoDrUni{E{0vK^`XcMw1w0{~Q{DiS_Br+4S5tTssGla#C{p44D>p4u$uacYIKZ`KOF zS=Uik1|X#WPBrYjD?4RLc{QaRu31fk?R&5ukxqf**!F|Ue^jBMpvJshqpAA1tMt>N z|3ls4)kNMTd?o)O7fkOsUTIybg7r$1($-NE)1lWrBV@WG# z?Q>xXR@8N_9S;bE>Q@{9cNMa-D*{bsz-AiTBs?0OPmf?DObn!i<+a)SH0s;^K6nry z6x1f4Qivd$fdOjiy@;=~GD~CL9jL)~3(igh&@ISzv0rR*c@^y@>)R>i>Ry=Ki`9lJ zaBb)z0P)`v!6)4BPpa1T`JG*}qtB@98?}Q7|6y!H_wQn@2J?Pzrc@gfLSA zd#D-U_FN;F55w<)Uy&!g6F1*HowZRXS0C(}hnpmx9)#vEs=B7C)0S5<*~CXG?(-2o zYJ)KHZjRA00NsRcm- zb(IF<0AK=eG+dV-lH3byT)2w}XaUeea%#xvrR8Hp3Fu^^W^$_oq(Jt)yzKa|Yu~TS zr}bB+kcf12SFg5B_ZfgJS;`Z)Bq#h*n@G+SwMFGN9taQnUM)ha_dyLek&UZr7xDfI z%aKaJNn@=-ad6Mq17)55wq^g{-mCssmym0!Fo$IK<25JS5eNM|*i!MJ27UuD0hk$R zTLn*{fi>$HPR$7zPuGqQ1VR&lV$&_|85bUcK`e+H9e{`c{1`A?oKL6Y-+piL&6Njb z^J|5yKgq6#=l@*VTU2T7ym0De+5d-{QRq4YAT`UjY%V{~X0v~E{J0PVYy`POZ|FxBS^}^_Z){cYDG*a zsSkVS{8Um$%G&ck`C`qM5I*cWzK4aszge=(L#389w;pE{8DPw;YNDOM5yiD*0|81v z&&do#=;x_yZSI{)j%%>x`s!a!16aL1>0=q;GNp&`1T#J#9+Z2ATtV zf%`L55MUpHbJ%kT9x#-}fxf?+7dpmMx^W1TT6oz5qe6Ch@8YaiZNE}DwyRaH47U=Ew&N{<)VnwYJ3O_r6P z#7*)}VRv`^%8gIAo4Dvmr#J@SNZ{+u#XN8jdDLwb<3RG~{eZ>c_6Ar(dKVH@p#j*H z+Mc5z$sapxy~e%?oI_FDN^>Nl4dEuOE8g7H4;a<^vZ`O;+=n$Et$w{;LGC-M3-9R_ zGX!HIHL#4O!>%pW1YHXuz}`Zu#C`%N;S8E8lN?A{Y+wKeqWRbZ#`p39*b(zt55O~K zfgVn!=-0fZ|8VX9yxpsNItHLukZ$)rJZWmE+;G4FiH}MBFK5c&2lsdAApr3w`OdDK z|JJ1eAU}U-ldn5@_KbB9r9c#ET!YfC`pPyW_q?k7fv(pJ75PT#O^Cb0*WlDAe^C&S zArN&hjL4=udSpdV|ACLelM~&Wp@#rM33~})g!};PjSW?ODT^@;?B`JjfhQiu|9U2s zoC-%OjzIQ*Ux}}e2Yz)7K)CV|ePtVR=ovGoWW#-RE@ipyF{R@?9$Pe`3)7=vLHt!x z+j>~vT`QS?S}KU>Qu(j32Ob1|ZY*VMxz9^odwMJg9YuW`m)M@w2KwfF)`k+Iy8Uv! 
zWIg_w+9CAcR5YH=a@iDXo>|&Rm}?IzGAjhy1s93ZqfHTk+aP~Xd<>zas;Ge1=10Ut z*32K5cg^*O)d5Kp?9WP)`q1Z$n~BlxtmhL~0fr zV&UjmouvjisQRtPt)#dSNpU3qamcm~(1z$I2r(+vva;`>a^EVEk~b*VTW^}|8w7fr zC~D7agcGTa8rjnxA;IOOQQ{-}3S?drVYiV5hjfmV2e!oeKpg@Y5i|g@1Xu}>`#R3_ z5a12j*Q_;r2U7rLf|cLJWTg@yQ>nMdpR!+~+aUHnmHwVKJ7BwupME#Q?|RArY;0FH zCdLvU37`F=KzvSn<88s#B?YRp!7?KHbE8RXXQSxPgetMYd1H5VZTKNjQl{01TS3Sl zsd&lA2P04~dCwkww}|-6M8Xz}!ugK~66+JN2R6>jYA<2^M^kWNx@rmf-SwgkF{}q& zJ;QQFte6{*b`^<`R7FbJ0Mt)Pbinzh)Pvmc{bq9mjLIKCTFLtxiZDqfq_L!KgiKYD zW&nn7En1yXPZ@xZZ+^N?4ra5Ph?9v)oZ@#hV8;N|SvBlyr}Jr>^$#RHAzND@#nrjU z{WLTXAP@ijO-b6S_P4yHvcCnX->r?c{uGg6U=mL&QsnGp()s`-A@(76px-%XMBhpH zSC0Rrd1*$fIr$ho-mNsj0Qh16$}%5RBt$7Bv79w=I#U1Gh4xvBdwR1jdbF-ayR z(FOZ0s8bOu@ymg`XQ<|Xw8`+4dE0spd7QQ|lOL_W)~@a~1JK&&j_KJvHaKv3dZE#2?6nW}{Kr{-<-=JJK4}%NN03 z8_#&!XMq}bER(RN0GN#u6@U&wN!CG8_ah_W&DFA5l@r)}K5qRERC&#@0mHvd?F`W> zv4<$HGb2sX#aZoJ?S-v-6{{6ECSYiHniMDcSZu&e;r3fQyJo%Jj@r4-F#zpAt=rd# z$n#Jk{S&i&N_JdqxFB>MG}ne70-@vImQ?aLS4v9MBip|yGU0%Y+J3VUS^vhx0MHKG zIxv-aF#tBbbW(xPdcU%xJOScD#D5&v!cj0BN)g|9T(V}WY%;A9)j@VRw^I)U-B5l> zL9&E6#Ti8eD7xMztGd3jYaYvI#kIN+*iiDVkBY_WFUkDZi{wB1N8=cP{m_WWdk`lc z+emUF(y8KPgQqR3S=WXi0uWVfr`Y-nI(S9Z4pUH%{@d-LKS@z^Qbms`-+xp7BDzNS z+a%@1&0b^AaV=i{+luoKiT(4C55LDDaYtGJd=AJBfGL2nU{*Mc>3F^`_j&LkfY1V- zBBG?YXSM{7$rT{jyMsT)F3iC5^>6FSjlb)uO3R@Hwt(T@3f*ThVXMc>y5R2JpLef9lfM%La zEF}UC(|Jbw4i$coBDgF+gpkx9+!G;+kGJKVSdN*;lK$6rl_FOCOzbfr2VdpB9X1HC z(}+;QR0-RN`AOTtY%6iqmUR{#wiyrpd>jK1!8JrBJEI~SU!Bb;JPWVdAd8zbszz|J zx-cZv{Oj`izq=kp0PHB7s)QJ&b|@*hk|$O4*{Qfe{ZID4W}3ZrPkN(N@RY($ zzg#aX(a)|?Bx2o$#Q$isY_>%7ABh2Yyq%N=LdAkKj$~kcsC&KpaX26V^9kbz(@Cu^%rTc2wAQ0d% znJcw##-xeLrCc@bL4x`%B*#fKt(1lfTeU zv?0zZX!Lze^Ky5m8vWiu*kh?=;P@vs?D=WRw#IGvU-U=sMtD>RU`yYULmeT1e|lUn zpWoS$H@-^#*30^{cT;lyzcQDR+BIQjl`?uo2yQ++U%9Cyu#?#6^ zSToCt`Da}98c_pFiUuGg5c9*h7?EUL<^&M~_O@%o4*^!01Ht8MvniDemNX;n)$&GJ za-PgsVgXThPLO!J}^sz?B=n!0F+$A1r?Vp6re5jx`Ltw2`<#+#{0a zDEh-I(w%F=4*?KkSx(br|J#!EPkG*5GyqLZ9m#z%OV&Tx{$+zrK>XVvx`7%uK;vX~ zBl3iCJ?5(h4HYjM3lbJl$s7kpzVqc=XrJ#U`e<+q_w#^2Kxj<9070o}OhJ5RJ2s=u z8jvX-ZJ45YD!=vFUtjy1TSx2YXjk`*0XW*Qhx2M2u~6e?MfhO^xX|O-3);Mf(wi(`w?>zZCf1fVWuQ|g8S1`2sj3yr{J!Cvr&0X|Jmt`<(wBL zg8ctB`(ORb+^+);fsmkXsIbm&zbx3QYImhFB@~d~!ru>?W>yX(9OQ|3X^OPJ%EK;% zi!VUCjhJrQxHJlv7c%ndugU~aQaWN3xIqhF_dePeGzGg7{fkOpSXFJ|J8N?5(d&sZ zqk0xWUW@(byZ6Hj0T@%XN0@%jPiJJtirF&O*BF~lOYBO^TK!jEVP8siO zoAgJX|2WdLqsEQUl-y8SfIwN&@?#O(RUOv(Pm=UOgm! 
zdt$h!G$yT*+(Ae9R@70#H?9)3-GBXd^`0tC=XQS+5B=1GMF z!dNUW(5}i4DglA^=V~sV4yPre*`aQwEGXUo5?6SGE{XwPXsfLvQx(tRN3Mlja228K+58)?$ zCU*Zbikm$*8KiE-<_dG*+HgYv&v~2dM-nq0^fw^!t!xSYlr#y%h?CQz z$hFd!kHgbOzbl$ zMT(a8HFsgx&gwe@aK&7ynycnMGgO7Z^P+jjEO~TLM8j5Z6K9+hM_`?}oS4+LO&_I> zDX4?f9FjG8_lwd7kXWbLW*w}AO>ovbg4zGj+rgmXQ^0-55}1;MVxXC-KSoW-ngL>} z3(tJe7$Ef>Gm-C5xE_}ukYS!UsEYH$7m&{|z81yR-$ug*Cj5Nw|MeIB_59xt)V?zS z`v-Fz--BQ$rCud&npZx%c03>u5PxO;lgb}QJ6<7_jT)Mvv^ENRg%B%DMp+@(ZuyJA zSmfoPTC-Y-Ak$k}c>)OWJ5)=tNAQ^sc?2G9Rjj#UB$+&tgJL7txZfiN0qvd8Cxvps zlgd*;c<81aUe~1%uQ-mduM6fFfWCqsMDCeaW-{{LRstX)^Q>~Gb_56(1=YQ?~u31u4@V1a*Gtxidq9>%9C$wr_38PL+y^gJ&TI}6u_@i7@QWvxow`8XH zXH_TkJ^U1^rl9d3)Zk+(DkV%<pT+>oAdUBX31P#IchK-6D66e;P$kl?W4 z;vCaz=C;1=9Cza*K>z|7mi*n8{U3=4&-q6A(jl5iFqBxEL93nWyNHG8vQ{76L#P5Y4R0hQ?XrIhWbm)>?)8EyAE5_?Tr&{eGb8M_O zM8@I7{w_?9?vCnQiT-T0G*agx`PDm0760S|dvC`)R5iUY9Pl6rfIN}JdByJk`b#tu2(Z>LtY|yg$SW#$oe;v?Rdzu3!!c+|dGCGv+eg=LMP;s$(g1vN{S)FMm)^bb z-OcPsVqJR#L1U(zm^Wl@(+s`tOEAC&F(sW2WZbTa!HdOzp z%_;mdB1B3KfL`T`fccl0!Gx~aXW zHy-y}?;#LkI)tAKGb!!qsaBcV@5Uw&+PG1rOFfIn*w_i$=v?bpFO`kydVQNVJn$kb)?pFSnR4lnxy*{=b zrTImc0T99y;8Z5qCJ^;=LUZBXi4y!;8VfVP(GPWhN`I1){B5zH!Jk@7a5%On3%8*P z54`_-BP#3|~~rai;Pw|hs@H)?knMpW(Zw z?yG1cq1Eza3L+22(t5$B3GUapj@x}B$pGLC_}=+9KHH8Zew|Lozb(4?9WBh@GXUD~ zr}8N?BTwiV<;NaVb~O;W@TqIZ00LCXd$^^Fe>*kVJ*75R_)`g=fI4$$Z{UGX7B$rV z=cnT~0Z>29M%e5VD&9q%02MVcSxIoexw~V-8tq&TnGpQ^wnTsC^f*~D_XUHpU@|eH zq#u1^XzGC8{nSMW2&Y@zr+`p%W+Ej`KtkCf%jUk415qcywcbD=k_-S-|Jlt?i7+nw zaPhk^3a|cm%=n3C`5Y$prhVJW3~h56<W$K z`6VRImiyJ7ZiMO}{v+BiI89MWEhWr}83_d1&xXPuEdUH`R?>S`;Q>Wq69_uTK@(%H zcE0*k(m5Mq22#4FR82fFgYhZfbIBKK4_R?C(PzHT;@906GuUgf7mz?{$UOdYU~+^AO~T0)ePmNHz2Ohr4F$f%XsD0N1((0mlGz4YWbWAafl4 zrewbzmmN1Pm98uI+hw3fsCx(alyjYfIbYK;$JF)p(QI^g()85c|R7wG5DcA|)~=1I!v!8*IWYqI(l@J@*9yAwI;1 zWM)ieu2jv8t_x50Wi-d@rVhqkT5nJvj4d`UI9$}5AoM6oa9skyvF{ceKLnqoH<$r9 znjvtw!J$@%_!Ax%{EIL+VN5hU_BP3_Fm+{c>T3eKkQ|7b@c0B2^h&y5n-Jz;z9GUu za6>yP1jsRA@ukI?GI@z1;5lG#4MxL_(Oc8ga9eX(s(Ix|+Wq&#(rEzpL!*Q5L55?g zq`ah+zEvL5U!9doR~s5Nu4}^&0h0c#DA~`tq}w}6`TM8kLO|q{0=H3vaFQnaLLRg8{>_(4bhKSxhf5~J8!H8i=+_E*PNov3 zC{sXCD2t*)y`GJ`U%w!LkIS6)dm*cQ89P<;Sf+qgrOLjyUL25Wf!?-kdmqcc<)eg& zt(1!9TEAe~?0p>r&@7}{JP*WWC4Uk89BN!SNd+lfTkOc4K9CSVvZd}lS^u_#$ToK? z_H#3nVeeozBGVoPfKahFAxt;Zb=S^8jVlelA^}vWXw)}Ez979dYNG!#jRZ_lUS^IR z!7Rh!FjFJY4c+f-7aI;LW_MKfq%rONqV~d|XifFp68(C!Sgii?KStd7FWfwlf6V0l zpUvxDM^N6Kh$gg|Ji%D-pI1+K=TBH!C>%CCYkqJRb@$4 zuC`R}*376e-55Xkq1C_uHXuDr8*T{x1^wuspa(znFA)4PeklX?Q>zDP^k^^!Ec8gN zYL`n@D%CA7m3Q9nK{8^#ZylaYCXx=++K!?Aqx6n>D{)2wgO`kbSGVU=Vxzn6(GGDL^2#b1wkB*~^+2k4U(hcA| zD>xfO+f8%L)G{7A%UNpg*!)bxj&tw({k0vX19N6 zP3<56+y`h;9Z&$%c2DUaUG&P_JH5B9Ui;&Be=xhcx%J5hU;dX~7Qj*Fs8GtiffMLI z^i^}$xX*Rom%0`M_6$^C=xuhpx;{}zE{*j^kke@)tEv9~VbZPI=8iEk1Kmim41 z|KbbYyTFB69%3NDgeOu7FC(OU(1lISKm6KoiLd9%$-Vh6D&P|0GTA8J?>*SofLasLiVJ3SmU|!rX=}8*1;Vn z@*8;9Px$-88oxO4ue#m>kl*PaujH#%|GO27DqC464!YVMkZ5ZGn9+x;1%O4~O`1&Y z-go|6r!hO=`4J7SFd#8ii^^JPO285W3Z|bb#4`OxCflv>v~Mzjc_ROx8JZ2DaGWVQa4dz`%zGs>v|d$n33^Fpwu0lzBxyg7I^KH}%|W z0RbTqE-+wX>MSS}0#XQJ^_S7fK@kf8-MdW(Zez*3EAKkF&NTMqt``6r8%B3ruAb#hTqqpc4J z=hkC^&77Q5(>qsT{DI5#+v{ZhZgZS9m;G9~FIn!dgd1RSBh~t?c(|~h3V7qFx#F() zoYi_zSUAt6!d0M53)KZ?scA3(LE5?zR0phsM;Yu-_*`Tiy#Mdt`!_p%JIRC9Pe<=Z zQM(iF)c(i!1c3bzIl|vc^OF$0X$t>L!l|xL4p1Y#KNj*y5U?W*r7b@Vh7K{DU#}*! 
zYcj)1a@RWe5Kc{}j~2_j>2Yj{%c^!=o94cEeajrlt$_g;M2RUCf?&k?4R#PyLpD%u zYA6-DG#l!JmI0|vA9r&Mi;VU8Y9*M&ljU+UySfzw>oB5aFSEl=5h zfm78&qXqUu2r%zlE+iM(_o8}#hn@d+edjesqxENQtqh^hCHLp^n8?{Rb0kUbOs9;p z#!uV&RmQ$8-~4E4J1jV#Jl_D1w-I33$#G#DMxP3@@_O+#bMZLOd-2zfx%M-3jQ8;w zcnp^_-s7zqi-e_?pW9?XX!S8}$P6NXCw`o#=9nXG8u(nsCHBv*4YRIvscmqqClz4d z{jEBa>#P0+6Zj0%FYf$m-g_RhtvMylyk z6P63Pkx1xnLyDvHCkP-fp36{2HKJ8Cxs?qxeKdY6;Vr}bmFDL#kE<7eH4 zfGCz|#r8H!yKZ!J#b-VINe$>Ly6eXo0Biz66M?IFDEqRgJmjyy2P zGpWVmAm>!GXKS-CbMHVeHZgavaO0=cO(+7V5FGif0hMX20Jr4t4h{chf4l6R>%ck~ zDhOKl6qcPM1<77JIt02>0Ki-_oaCqe7<~V8)Vvv%+)RkaM;~uT?%<;IP>8>b8T$7x zVV+h9Kzlha3)bc6nq1n|meiU)dbB|V{8li-*dmq-GGeY$x5G;IceR$X%>VlDy^#zK zmy(Hz@nn2*EXhkyi3*f?F3!&-4;Pk_oz0D;^6-9AU0aQ9bKk!_7z}lKxDZT;$8@a8 z{5PT4R#-)$>Jw! z$ohlDBl~+6nwcBOYGJw^C96TksX5zm+YUnq;c3}qe;_W&e1&fxX7PSZ!R-``d{)dK z&(P|ib;|0ET5&8;q%}a?ZnTBl#<``FdG@yN=NK9tN`@5xO2uRh93CDSesR04En*C| z%E@+rKMW1aM}8p5VKZLAwYAJ{cqIg)$NLuG^0E)lGv3HRctL0s zzlTx$kC#&u|HGwn@(qE0YEn{OJ{38g3SKR+xV_KTg#owE+xnTUg5_ zUp~XyN7#K*VE==MEc(lV=OF$vT>D^~qLbzg!;8Lua&pAs~Y&?G%I>9piCd zr?}`?i<~bJT>b57m5EC7$YN=Yc*l|L>RZUodS?s_(A;Ed_$_?!WG& z{_Y6?ZGgNI>eX?idkFYNn9KxaEY;-B`T~_SN+3=3y1&T6MCl6SuQZZd|(KYimenp7c?@$V0etD>FBXt%evlDP*@lFhZX z!b@L@fn|_V!M!|JyeJkeA?>!oB&M6vyv4k53 zSJk)v`FqKgH*O{u5M(c0o(>!yC=6i1dMSkl?&1azk2i1u%!0dX2m-(T^*<$_eaPPk zv`e@{N_|+lsslk|j0?yUV%C04Zg`}}cA@J;tA}&0@KdQ2!-aI0ywU}*{?ihWG5&7v z`@th>?o&~Zd31n#QTVZn=%+MT~kqNA3zRcK&C)c`V-S2`o@M6@q zDUOa^Hvs?Zj@##}{hR-7(4Tmtb>ir~s@v^jy+AO_?=;;zMwypm&H*F>-VYTH1Gapv z5Mc1=023EDwF}#$Okdoa7hMur?1mXCfxDcY!)DC>PMY)Poc1$7@z9tR1zGV}93C7R zir~KjLhgPcHt(LuyPeY2<=Y~^$z$=VAWUwquO`S$xB}=e>&meY07v-Z`Dl)lL-$dn z0)r2}SD2T+C`1MLx)&Nnx`-v9G7ip-Tizbu&dvuP|9q7$pJ>8WJy$+P6y64n6v zqbROfS85VWh`*i3RvW{NEo`k{|LX^9$zx2&rieLy2~(|R#XkeRu2J&$+cU*viXcFv z=0kw2^}|#6U91)QEl$i)55kl{<7E~xvnxE+YOW*kP3CN2tqH6NTAd{Rt*)_b-j<0k z`1ONzaCj@ZH@6i5t5zeF`PZAjy?qw%pPTu~5f0kVT)*H!NzyEG>;mS-h zGCqduYA`f^1NrthHF@#dL+gOC0$RNModUt^ljj6c?r;@C6N0ueH3k`Lc~lxW3J&k@ zOSk@=j#9=itjU&G26uMXn1X$s<<>RB0%!aB2m0PE_Vr)AJ#_u+PZz$nMAnJ5u41lO z2d|UO_2=O9O=c8zF8n7a2ArA?jlz6tS^)>JQeC}$ zJ^B6r^?Sj*FW(03WCjRh>th%Q2%m^}9^}WEFX>KG5?j+V+_oQ#fnGmkC>pl@er&k^ zzjEh=w%xsj8+DZcP~FMl;H!=?j;^nvETPtPYXrB4RlU;uZNE9YElqh#%iT&ejqbN# zi>6c=VzW&GRw6B`I6~TobfMpv70wHnET4|0{Kfgha3IRrH`*$q}_@p>^fuAKKytwO`{b*vCt#tQf zf3pi>Yv5QQ3sfOsQg@kVU8&*W^jmm}S9xkZC=6Jb$8wRA>=~azv$>)9D`Pg&%#<|h zUk0KZOXlB<=AS)T_tPMFJA~O@hCe(?)(YnTfq`O}{j=*DIS0fZiJC! 
zyS$ZDaA&PhDX|Y&*k|`l80g2gEDrC&xRGa^)@xJ-bSG&gGizpSkh09=m+}!iFrKh2 zy&<5+Djx|PV+h+7B$^X$T_pgVFf5vSyvR68u%CI`MsZ8bOCY9%{VWr6vhK){miSbY zhl|@p{Ffu@T_WB}Gid4xk^JKX|6RIqHMIT#BB;aX-y$G$PzmoLJHGnHbp(RGWM^?P z`R0qyl73RI2Kok>I|K0205yydO*MzA2ybAT0cuYu7So6s(%}?f780=`TohBoZW*1L z{hR8*DZN;;z2jAz-y%(O469FtKtSue*UITm;hnIb04^u+&0t#t3rz_N3sHmcPwWd?=@lS^-2N9!MnfYtHoi4bBf{?k$eQ50gu;W4seZsSf^ zo}EbMzIu?%&XX~K;Qeq(YXzmS$%>#{075}aPdZgUiXj3UvjQypt?+2T=9Aq5yMEsy z89G&PXlG}80ziw*0jQW@)oXy34%JD>qc-Tja$J=#RH$>e)trZ)e1Q5 zR>e_sfakQ@bT?i9_U&Z+!bGCkf8yeWNcjtGzQvm=0DDZZCfD3UV&Bh2F|f<)W8 zdg20rk^jT*t1B=H3sTE#8vmF(Esm!RjmgRiqVX6<&(@9pIM_+#@coB|-Km zeLSh(Dh8NMA|)Pz1`p{&BY13+xpf_$l2&B1pzDA7ogeJXttHQX_W94KbaDLc#07xl zy=OB?uJjlE`Re<*YHpn6*wtl!C192}mFZtY3Vrudx_g&?sk7Q-{wpv!eYX$Mo_=+2 zHF9y65derYj2XFX^%8^7lm zD-zE*?z8V+Ek!2D68UY`+OO5b!DU=7>lAldwM6f;BIf&C1j98_9LQ9{tXR_nBK{xF z;<`c*(6VsU76E&=ZYGmx{$o>95y^klduG2MAO-dU!GI;=07fRpLa=aytuLOgCHG~1 z1Hw*C6NK9cSKrj(!R*Czpr#-1sQ2xbApaHlc@%49SR@Q%hF(4|+& zIC0g6@82zMjd<4&e$=L>BjZ=gF$y}JFDEq6>1B+RJMCgCzuBw|Le09Xe=+&NDzn_qOD4n)?FjRoh^5f2wo~>J9Fkm~Hf~5hhBV?gi#j>3yooq9#3IVOI57EW3 zED^hqLuXWQGA>8PUp;YZ-VSIsc8?5a+<0vQVW5vf38$)vR;uiNpQ*f(-|0WGp!X9) znIGKwL2+s*nSqO3XUZ*?cq*Di+(_m$6)82}1_?jIbXrm&YIcyf3ya?1X(Zy zs9Hqu!=T$P0CLZ`x6knY{8#C7Ope1b$p~^pmTLj92eq2NvDB|eBkm9Hr!`p3SE7|= zi!}1ATKzcf-#HgDE#u^@hFB`3TSAv%boJy)zqg))bFj_axm~dGo6~z(Q`6T~Bjaym z-VVFZfuHl>Py(3fOu#wtx>i2q-rh3~xxbc1nLRMD#r_k(!rb^B1R}}|rC*$rSBxGJ z)3e3C33%25Ji#RhwG91{}^{jy(Z%y zBV}ov!{D`>ll{r{iQRhc67+N0jGwuTE5!XX-nry%i2Y%{{fc@D^)z#10e*+M_nT$o zDUxJnlrx0HxPtv4&E{>9Xz_n~xIxVda2N|#J52xC)L8^*kW7G*1+6Su-9~Dl^-9g( z7?yta%IG5k&SMAjmF0tk4nD!eMWNYG_g;$xM@t6QS=AII}avYwPmBFEqG(zzr^#% ziwoW-u@3DRqNN!%cKK^3PxStV2-Dn?-Dk<)bzPXXU?ATDrsOKx{}dav)fCRg-^U6$!JmYr<)+`o_O+kCD0SKI!0MJ0+y5fVL~6%Lr7h-<_xLTw9~8 zDPgJRro3j|d!{4~Py3snWkHt~p?Xe5%3)mZGo<70%2qFa;(eWGU04QiYF%ij_MbgS zA;Tl>c=qYh&ocH(McnmBixtV|<58R~jR50F>#z)41+#HoK4HN zD3@^&8%UHp%=am1?0J)W|Igm}0l1%Ae6m^n*ioG!r|$PT4XuhCqARX z2LM9mp9ugn6fJq@B0I!^Gb3&bDnj*Iz(;}TGG(;KsLOzdCMiAYhgoM@{lNrWuv@cV z=Kf@9n~3;wWO7(lvAD+s?scZBYUz8%E(nw7J;lh(B`HL_h@XvZ)*br098R-+0+_MHpv zsnJiJhwbD!sR7ce?r+se(gA-=b<0Ki`r$}%-Hd?8$JOti0MG!WT}m>4bGL^j;uND+ zt81sfx`A#S|0NP1i5moSg_4_E0C*e6Q9HwJQw@(B?nvMu`l-3Ty^Ge;zuwzt#MZ%k z%%ovT#-gE1U>yQ$q;)l{00srVB%k&ui~Ki=T(hgegY}wsFm2R;(F+Ixp3j5Ts2*JQ zejgSR;9_3)!aDx$An4$j!1Fi&%zh+J+LjT@v;a6AW_Ijv3JJOw2$~N;Kw+R0?WF~v zy?2lM9$*1L4v3}&U>hO2pATes;&iCQe*Y4M+uNxwx|{3NrAp1DaLX!C>ZVgLI2|ol zL>x5>4xA^5ldQPo?Dw$>07m{_D(2qBG&8Q*gVlYkgKZVCB9j4JS^+$t9^`6u#Od!8 zpk;g>@XuiU!MKlh+gE-qf!kc)NY>d4T?U*M%6{dN*8kYOxA*t(S$ms*Y0dDEcp?kL z+(xXx?u8Om)6WbZ(|_!_5fgG|+O}r776yy|XffI&SM2S|xqfdpfX&((7l^M>Xo@{1 z&~CNj)M~~7SPvdAd{}5U+2cVN8+YM+6Z^;K>A5Ygs{=Z-gPUn%bv@ZwS&I_D_PJg9 zw`-!jz8S|3maM7^(S^AS7g1WC??>(`tPmto_lk4C|6k08V>JfAbY5Mw#+L{SEk%v2 z*B3WBXK#*G0GLYFqv%hL;6Elz$)~G}{$#iy;eKa&kO=NVqyU!4T{;gwHnpY4OBI-& z)ze-^{!3ii)zr@Co;^=W_lJ^E>Yh(roQ@K{?YO_p+j}q1FUHwt?QIfIYr$9Xo0`ur zb1#FHQ%C@6q%w>R#w|1Y=t;V5tiFFYKC0K$L~c{0#G*g9F-=|^F$)1Ub!s$|dLwMl z_XNhFH&!7sw27FYsPF5CX?408|S@ zdBRjI_j@aq?7Le=Rg=)aRDfz5^k>lt7grjod>}l})Q7S;&1oKzoK7qwckJ6(+f3G$ z*C3vQLCn*V=SO!y5eBHX zueD0fm(mx_|Easi^I`XhhacfZ3&8r?T2fpsCS_t-)Y|*nfWvl4=DxAM&i$!4i7*hz zw|5}C_s`T-m*Q(4_?-s&dIXTa_ES+vq1!d#@o-u$ESmA=BpaRdXlM@;Po;B}x65^Rh9m}lzFaT2H~7n@P}4m`7)Zs}rD1Jjh| z`%W~8<;v1hvM@WBEVE+r@%(|Ip%fP+IHi3WM(|h}jL+{Dh)DB~;j5lNO zD*7u{TPy2BR)b8B!}{_9EqLSBx8}HO{#R&x@!erR?tLB^6DaP)N^W4q)F$Y$p9BI zpDl4TVD)YZB%7E|KmG3?CtrQ`MRN1b8*F|0Rs`!=7|5d)r@1MA>&vSVSMZzv_K(TE z&%a7GSBL}PSqC*41@(QBS1p+JlR#3CGVmKcFWq`L@^YqT>>T(<;+{?$MHDj8xkAoCcfFJGi~{}8qN$JxLK 
ziKd070K0==G5d5j`S09&{@GkId-pNf25|QSnnLU1dNp}YHNbITacT5`i_Z8*7G$zb zk_qtL%cUs!{or{y+~414nI0dD0ATRn zNM&nK^9Izj<5JC0L6z}$Xn1IGS^f#!)LL{{1sR?SP#qi z|2;N0UJqo=Fh&^Y3USG(ege7R z#s##fdIZ$#)|OTx5_|T^^W?Yx_;GUoi@V9^|NS?~^0S3xhaB)7?V|@QBmMPL!v*HM zU@?xU@gNa-iq`+Dd+gOoPVpQj0*Py_=5H==kE`PY$z_grFBc;9X!OXfP=-sBF~^= zCRTycNC`nUo%nu>9MmBk3d05znz8cu{=?+%r(Yy@fBQx9#jih2R&X(FE)xfUOW+_J z7RRoP7ZI8)uboRSk#>5jHpi(yxlLUm-dG{<`6~1SWkPGNH6iLO;ijI^$%|bE{r!Dc zin;zvZ;#&o>NoRW9&ElmVkbIo0hmhWib;Or530Goe+T)TM8fI%$p6fYYg*C`ij)>8|HYQP~)!XT5;+Of_b1#Hen|9>-2fFfUP zHo$=t8pkj{UY!_Ze}TT(1Hk>t9b2z827w^l@Jxm8n*`I#>^_FMB@x_r_&uE3B?!E8 zP};~*u9?!Xn8P=W1%V-FdjcGOht|Id%M${on)kDB9w$}Qs<8_bxB_O ziG){oHuom2Y9*h5?tW$VT&MckaRH!H9q-hAl^!*vNqnx27m^Wdw}TLJm5_7STqJNr zkKZh+;b z<>dL@M-c}QsZ-g-hnyt@5d&cGV6H-W;0orq9A#!HCyH6tT8y6I%=x3g|7CK2`io@h(hSFR#I%e~j^WZ`q;J=+7DX{Bbx_g_N+zZKZ-}PtuPZo&jRjij|5dielFQfIS0WLj!o_viM zKYC$2x$*dI{Ha69IKsgA;6Cf4vlFk;s&fI9~$4Tb)nuo z$>g`t;-m?4FdF|hr7X;;d{~QCsBH}4V>IQ@y8pV3tN~y#pPXE(!&mzl$@=2-hz&G+-QU&zdHH*KzMBrDL2Rbp9+Y~y?B z*0to~t*c3CXeizfjgT#{+YDG;(NabIfW_yyz<&Kt5fr<&MDG96M$`dtBcNVu9s=qK zGZY7!VqUf@X(?{AMXKOJW-V&|u4n5P9V1cZFb==BI)e#r27|@~#Ml+wy^Y@4K{b8N zju!nf`d=hEm(1Up5+EM7?4179k=M}BWZc}HP^V~p?18?rKombw(L~0EjXvAr*9(@& zk2XTuQmm^80qdMwX_|@4aEoo8p$5tS7pDJuj3cxQcZlTBZ8U&Hpdi_YA+Oh3gMe#J zL9l=jFh+48h0tZJ0M=I!tO40Wz{^xdFdzltUbL{{9CH1Oi;nmvZf{5Z56|An;eJ8@ zH4CK_ru;Icf``Idl|3rHU%dBZ=}~DMvL_0uBAW$W|M&0x@72CSe(A41{5xX-+Sc`s2>@HSpW*x( z{5M$PzDU2{ogh=dlJ9FyE~Z z%|ADU3t-QjgbM(l=@t?G5h2||KNUREC~?xr80sPx08Rg!1fDhGH!h-`Ud3HB27#Hx zY+WGuNI|mKT8BUeJ}m*)$m_S3f(3q_EV*T~X?>}+=Ab876|WpwB`?2~FSm_qzPxSC zcLuT=&!QjSAOvdyEYB{5Dbr-5x3GrhlQ7xD8Lox41U>j&gT<9~$5HAYET&rqS9EQ5c>|TNFoqwJ*!l!5=of!@`wxHd^RIut(XMWHOaK@g z?jNJ`KZKzED^{M&ziR~mwOe_dnw)va7e|f$PXqr>koUD~j$6Nva6y9eI1UTIK^-k` zl#|)}Pg2c>`Pu9L_j;K@8VIPXqBT>gO4d3lkMl$dbGu}Ybg@l{x9>gbHJOwJeEXiQH4s0XwLzqdHh?a6@%#(Yj%=9ffr?GUHoZh?o*jh`Fz#U(G=Vb{L6pv`aM zd65%iTv;G=6W=b)}j$ZXmLeE=`@0yZ}u(L?jA{dd*{+IBO=Ub?Y(uw zcaypAR64y>Q=gzw|I%Db$JX-^iWXXj)VeJ7Yo>@UBk|}QqfEo|+USMC*o=T3Tr&y- z2IuMqSb>lqW&_BWeKY*C(f%}ZozI7O(|AD;NLy!cIq(ssENzWcOuf7)G)Bws{a ziPK=PuIp66-{M~BlXi&Gl-x!{i{g2DDjF!}n*kx%|}(3i@5%6XNLBI?E z-$#7H*#%X@;Az)|uFWW=%-J{q*JuG1R|{RQy@Y_!P-oEv;JF8#m$31jN}e>{?V_`_ zs^EX#1*LPX=4OjVpP9s5Q?BMlaHZFPfQv(75(K3OlUHy;aL9xtDry%!oOuO2ZT~}f z8>18J6u7%)#Y^FSu2IwRbEvRr{?}cnY_vB}w(XN~if<^cxNfzQ7`qd&;xj+*=7zA6 zQ>h*p7`XYT-}%9HOYFC!zwNaEw4+DIcn^JLQ$quR4A*f1yfrfrNTIK>*HX>PS_p1Z ztrpg6FCieYy-L3SzqmOVRsXCt-m@CLgipKQ=~jsh3ne0al6j*L{=u z{oDQAZoi*Gk!7-rWG_wPf^xm4DX87v^W*PdpO&NkYI1F|5HMO`m(MTfNV%jgK-ONq z%RcFnGr9(w8~sT>U-@2NCHKu_ar-Cq?8Bq>?tBXXK+hB|cWg7bNb|pefO*VBy_QC@ zyQ7_ll5(%Tgn$f5En;T0KP*S*u~hU+XmtLa=!d5z$x&;eLJEL;ENHRY+&SNf8QMP} zaD8gM3~Lltfb$KN!|^d&w-+ebr2?q1Bn-P=fJ`%sZ!xl4q`BVB@AS7NxV|j{z>nVi zqw(Ql;oa?|asfcv$y1>(spFRZ z%&+;91*W#xZh7mJ2$I!FsssL*sDMQ#`md7rp8ZYoakA3NoU}y%&>T_8SHDjOZUy3< z5@Fo6GKD5Fi4`CsL>T}yrq^~MAX8IP)=d%tR{}>u>m;9&Io=&g+fJGcC%~66wEqz^ zMu3kkeQn!gdN(=%0$Knh;Krl4W`(`$_B^lI0W1It0hYNf@V*y^ascA&`$$@*6=1v4 zhk}C+&FWnw9zfhW9W7TR#r}P1Fy#2>X5Y2i@0JCCsP+HO@(sXtOLGJqp)FB~tq!Eo z-vt3N7z9E-j49W0zwSKd-=_!z8XfM@t^-(VTrL)g%tVPA+L&Dpo;d|oG>|Q%P!LFAFZ06{LoJ4WOt>2a1QHK;E?Qt7FUCSl0)ug^BCgY3 zYa9ZSd;=$4#d8&2|5e~dxyKL9@^l9|(>2g%ST zAQ+kEpVe>=A9rv4yn-gu6AL1ZAlls`a#oFNfgSQztySJk&ipHPJ)M6TA0j+P5gOmR zSPTtxjDWNr${hcEs=*n=C6T_N33s!aEK?cq{v5mTodfrjIF}ul*tg{OrqVhTlt}2c zCLmzYqh*%rR}>##Wa?z2KlkX-5CB})Y07s>-v+S+?~q}# zagM;)vH;K!guCgY-AT2McoE`l%Ae8yJ*!#$Nphh?)H`;bjV-F(DMj(wi)~r!-{MqX zA&v|gV1=+~pcVgv+4OMd_;29O%5b@|HO?Bt3wk=5{dD0Bgvm@z&O$C} 
zklX1P!O8Vfjf34ZBf?mdnWnNLxz`q9Im-fo+N90;2i6Za@?Y0cnvRx!*Lyu#2|P*;+TyA?=k<@@X$5xp(V+{UPGJY6UKxo zL&jFkt=;-qekBj)LF0dV=LgmD=Oe4l)&po-04Th>dL8T2pX96kmw=C{f2X2Z;qTH& zAr{WX(KLVCNzaJwa89R*#sVTjkL&(Mc|Y6b1>!Qw^;1*d5&wfY)skTDJhArCk?(W^s0>a)-xQ>*FR_LcxrG~GnT)3{Hzs|*M1~f$>Phw8)xS+X zN$xf>=IolL1pvGwSE{+f50RU0Jw-8>guza!6gc2==;Aw@s?42xd;Ff`Ch9H z0YQ`%7%>Iz&8dN8iKy!hDrcQ#0l;8K3n1zD*Ytm7Ol>O-2CNOS-nYjE0fRG9f{EDS;gMw{Y5dtJSkwpPR)NB^Hv(4SI>#ugqmJ9XJS|r{$?jF7E zo~d&#L`^j}*Z1aNLZ(V3+&0-fb-4yt6PVff#5?1aM$uG9I1I{FYt>wxjkD&BdSBLr z0ANf=YYP`K3Uvqsl}H5?yC87g+NwGB1gPqcugaU7?Pk*gK(h}x$?H?X;2!PzaPyJN zZWTAHh-D1zB>Q(7pRFb30y~aLd~OgpGrvwP_Hre8bjD1s&W)*$Z(hV*gm2Kj=(9*` z@v7Il3jzxG7e`VdU(3x{0n$cIfc47hE?T88dKed!k^VQ?vD6aCqu`>&f5gWg9cvFB z8e0*DBa)H7bx#aoER`P7+Y?)kzObMKMi@hS!l+k{)ElEVLbt43H1I&!{ zB~zHADis7aOjX<4Nm8alAYR;V>~ceWa$F<3!Kb%yy?uwmHCauLh(+mj6Qm|UkbFJPT*0f1T8wBrHaiUKwv5b&P=z19K*gj+<&a@GtvjwZ6TZM8rw zEqt~O0aiHB?0S-9H=EyP`;d)K`7q75unJz}A=*clMPvFz@Ejo#>#UU%jN043;*Fl;iZn`o@){}BV=Lf^aCPw5;Ud$)yI zm|0=|HzwQ~x0)<&lpWhtLOryR>RlYv$Y;O zo;CA8=h?MhB^pXbd1De&`QT2nKv`Ee3v9GyxRc>cc5d|d?YItIl+AqzRL1q>E`Q&i zw$?!(L8yg@1CXBiuh)(afh{dY5UJHN7EQ88)^KNjIa`jp5^fMwIpgI3&a?pF5*Z*Z zNM9L=5+U8KZ?WYs+Fa@AxO1-WAU`w(X$(!vGqP3GW6wQJSUlUYsDk;QkOYvC$7cKB zxJDqlLsYn!B=TRgauf^E0?=z+4FRLLZKE*HH>T4}mETbuXdXh|OcQt1!j)c60(~8Z z8ysCCtHwGObY-GQ0t$^2Bw`wQrF!2QK8Q4MJ4GW)CLz+9R8r!8is((GL4)iI(gRx z;P#qoEt=BSF`+3KP%sdIV0Z6;hVS0oh0;S!rqL(oHmD@?&H``70<4QjbE{>dEwmD; zLEKdTV6SyK1a_xLca3fvO}bGg7n8YKUZa^RK?}H-As+>}aWD#0LGPi-IL>VVh28aV z3tyk9t5P4dk^Q>XCKRwS|2r1PVG3%L;cmmT&+{#UCoqZPhYUYv|S$!%>mJFjP20C2Ic5(%UNu)esREPc8ix&PDT&(2Up!?dCA zUMsP{(i9uhYB2syktyw>Lx|z&Un$=<&E;I#!PBn?+ zB}$?ixMyzoFan7}$M2p|9@bpm2AhQTo#`S)WeOqG6c7-5d#pDIye_%Sl+9}tZQ9t* zMTysbY6Gle+I_N8j`)jM1nD6UHlFlXf7PpdROaOT{*O1_K zvU&fs%yYU)iuE280GbWl2Q5PL*NP$W?FodPoVCu}+c}IXpWNOGpqVwGQ5^I-59z`f z?gRyZxxx-iA+-VoP8?AH$QFe6DxiQAITTu_*>A#>WixH2(Eja%)~x7zymSU;XZ@~G z(ieK*T*}-x$PigW(|^32*8N{cVOitg>%{#3m|k9vI?cxb*Th^@x}QaIFl6;!%Y zssL-Sp9MDH0@8$;Ps*wNKdEk$mc>N4UAu+==VY7t@Ep_Qi=U^bS6;n;9J_CvlYVTW zac0_lwx&}}rZWC?&J6B3=x(pvI{xssYJ9p7vh7v13c3G5=S7#-`_ud6Xb{j{<6IgP zC^H=!OtVs4+s<`8tnmA|hKsEhs=shtW{hQE6CognfU9Dme^`bQ*MroeTi~{z@IUY{ zo(UH1{>?2|8R>Ugs$}p};H{P`g-%^*mYcTtl(@(MkU($0B`Nbi8c5FVubn67MfcA- zn!Z7Qn)>HgQ*-|+3W~=jJU~}KgCwJC;0cat_Fw!zKmXZME1f#i0>D84ktxhmDs&a~ zU$gP-!e;o_u2IzH`s83}vbQGEv^E!?i?G)kgMb7t-8R?MrD~L0UT1O5;a{7@4d4QM ziWO#q#imL2>C$$5wuYNV;ed4-Ej#;%_k^VfAsocH>wFlYKTUw1+Fk6ixd=efL424~edTF#0yGsmqws zA1-Vq_vfLO;9%BXzRN!0%!gYP50YW;RH?ns@-M-V-<~cZ+@@(_?mURHNF5AoVWehb zBghp9WF(fz_TGKa7iow=$l==;&jLqb{NStcG710+u72+^3QkAt0q?n_*II-?W!K#Q zY}JzGFn#9i)2AEZ#z|cO&LO;D9onPtmP&3dKJE1W?A!?eN_dtE#my6v^(u_Xh1Z8@ zK8Tr53H&mCY*X7}^}3RK3$bAgS6)&hF9I|InK8L3OjSs0$_L|@GDR^qz$&>av z2K4N?)oa}iUcie)-hj{Y7qdrUs$VRS)_>3x- zn>$nyrQU@&q6IM&25!4U<#kwV~KZKmc*^F0PL;ZM)jn?(L-C(A^-W z5?(hWlCZitZRQzaK1aq780Fu-D70Uw^lPY3etR)?+V- zo0Amb0XHmwWt{=X=?*NxOgvW>rkFj1~&2EPE$Iso_Pdc;L-Ppq8&eTtu763l@^1mzsfnWUS z%|9A14fX#|r&*K=g2{`>qm=03f}CGV(||6*6mCr6D`Mf#jHUrTGBkh3WxduQ1k|WA zt$KLRZ*&i]E?K~-o^`=oQX=Y)o*gc6Q#;FQ339`-ipWCiKo&54%wSOE53bI^Nl zr;uSPqY>Ed8KK73Yi9!jdo2Kyn4$Xmj7g!!djrGEEC8mDX-eKkh+vPH(A-lL>ETHO zU_72nAOJFXV>~qTKSuL6{r5g*e+9W-I~W2@3jhb-<_vubBB_ZQ&g-IovIGJl07yuT z5AZw{scx92p#o~8wqBu_6b?Bt zELk0z{}k%#H7y7eB`iBIeXTqCX!XU$bL#!Ljo)j5vU$QX9cKf|)K;{VNYuQd%6qTz zy{72&oZ_p0RS=-kPraMX{BcWP3Apw&VWYsbxA|w4`Sa{|&e`5(8k=r3Kh5AP>=m@U z^f>A)7(b%N-mRY|h$`=m+{An@?(q_o|#gwqoM_TjLLe%o9%7WnMSPVx_5 zZ(*uq2RtI$+(qyLfji&1HiT)B_tc4>Mi_{+qkTkg-)(z;9smJd1h&1z^q>;WFK8`q zUcBGE-v$JH4F+js_NPzRao=nP<8&jBdToID{JpEgFot|`d#aF3an@3?w?Fmn(T?wf 
znM2JCFf&+5&2yRL9=uXfQ0XyyEBIN3aaI^t_?o$1;uczuOpN!MqqU@DO}v!hwv)li z;G;f5_7KiaV?P#f&jnQE%(U$#E{hSi<{hCU`nxS1tmexYH{?c4HJ z#(ifxAG=pBZ1m$BE+lyv#|j$-0J;O4pX(wtKm60D<>WE+;PPmHs7r_;}!r zdLOnG0;2c|)>;L#-#y<+QYg8wCEQ_4};=>UE6-0k^et$8xjWGh(NIvYqt( zVxTy%_`yg2x#|4>teZy?03ehbz<7~`!ji6H-8~yzkTHdt1-(tSK{Sb89&`0To`Et% z%SGDsKab+ymhce|6r&G<(4z_gpGIHyVokH4bAihQ)c~y2rW?nFe;84Jaev-8Z(}fh zqyHn2(9d36&}i>lzfNXtKg$v;-fM9(P|5Kuv$70kl+dYtmifK{#j6nLDC2kGdEVR| zyW)=ZEEFd8HVe$Olex;ot@W@MPA>!iCbGa9F-SABIykr61jg-fqycZK0H8U2cpx>W zyKu5#xU7V8jG>Kl5o=VfA1lcq!iw{~LmyUs#;YLP3%>YDd|CmwEW^;dxo>rNwRMLIK$H-gKR?K4AI=al7Y(0>C+FF8#-U{po+Yd(^SEE&zntq5m=S z@ga+S98&f!oskco$!6e4PQ*h4o5d99S-B?&3j}YxBoRvqnL9)CKnmH@17Z3od?+1g5; zu9bt?Pf=iGhWy|QM7FzB;_F@0Zx`G53iBZNu|$UNt>VCsw` zm^wd@uw}F@JNtq6ih~!=w(RVChvQDT?H*QW%|2jn+ZB3W!+RdCJs@Nd-}A=)D1^E9 zhVF#7W(K3KgS$$;cc~cKxx#=)yP9)r`dvR6Ja4d7u$%m7S$VQp4u(?eISOBo$Q%d( z2MOmJ_PSUDcgzy9WN)u^4gxHbWC`J37uZ8ei$7vd7mK)kMo}OA{2>sqP+Tj{Q?E4# zftCdT9#j3R5C3j~^TMCK^Ml*0FO`5Z*+i=toJhv8iAILfpY)-%UN&V!ntnx|T*rr^lOc8)Cn*5Rhh z!oI8VTerZ7W#bprWYl7^%4F^6MyEz|FrBUSwAa6rnXa-pthsT4v@JswwVYX#J!hF` zRu$9pV*RuO96r@KH#-ssqkU!gmKMG@Z>}#U zbck}jCb6Gkey&dpM1Hq(CZmxdb+XwrO_`5KPm+O1ync%oW2%ywCaDV})nq!^k8ZiQ z@Ngc>WN$oPE(4D~1R)FPP&JZ_jO$p|u8(W2VSbdh+tjo?xN+5mVejjDWgj)FQo5Em zb_+?>dXz4iuja}yAZuI{B9PJRvw+rGzwLcj8v@eQ?cLzLC4yXZjTsc{reaq1w4AR3 zvhhByp^W+0zU!I!c9ZaxU$8*_ftpiKHp|I_c8shi0JI3jQ~)se4AXPpPO^aSRz^QF zw!`68viZm?H(*y_%w0!E>rNK9WE{n9Jq_)@e?MrrTSXfuo8b z7F?}1oowFS!;MIutr)CC90?WXe)E{ENYQGv2B=^LLF;T+Iyuay?(JCB`wec?jiYNo z0l=6JYcd!NXthUo?X;Y3yW}%10G8sl_)|agK0#IwCI4fBHVv$(1AkpN4;ISFB0`E8 zKx(PIK)|M^{?LoH-zn6)PUcu6kCs^9`1(J7vL5SO>x*;5*IXm^q%AuidgHV4#U=#e z*E>7;zuw7L=NGqg&)PApwg><@%n>XCb|b2q5im%{&TKGX26T~5->eyf25oKd5RMk|aiPPE$8UO%507*naRI?A!t)N*G;uBkYX4EV@XE*Jg8-jp@!o3{S0^&dj zlQ7q6!d^nH1;Db-?j>N=KQ;eOPlKjq8GNMop~&Z+TVB238^LpmMTAYyi1JvC)SW=G*(acM+U2wxrh@gMjeW+%G)colnb;(foJkw<69$E1AbK>&E>;(m+^##Q#>> zUI8)$f!1%;%Hv!94RCsZaQU|grcW6AFP>Gl-Tt?2tt|q;&ZndIx9+U`v|QPq7^Gkh zz9v;- z5FX2zcGlRC>+U>p9$0p6Ocava6aB%gBYpGb>i6{d{`OYxr{(h2y1SAHU(e;^Y`d5oK zC%BQ>(Ud%lsn?u;12nBx=V!;!3|dmNRXCL3rF}k*C!eloWa}))n~3_x)V{_%JKtrj z0R#1x}xUH zUaJiO=hJyj)(8y5Wn(~?RM4uMGTU}BSQ*7I={XAg7jV&C8DkeogvuubXx59(kdY|N ze?7NJtU`bC#x&vyaXxf6tbVmec)q?3*BwKSwZL3v75wg+y@PV31wh}wFxTo+1_3nr zx7t*wCRpQfCe}wQtjfZJ`Hi0Ny$`5u6##zr`Oh|geESc7vD(-F>w$rRoB4e8dpz&< zzGrsbpE6z<@F+fF_nW^#ir5JIcFqxD{&dOUOC&~*CVK9*-q*@)5(=3Bc$j>%z?Q>M z9wVG9gda~(P-IXd#Fx;Humt$KUV9Y+n$p+M2Z$UuLd5 zwf_0FwEl&a{QlcLYxk@DZ-qs`wIaQgc6zkh(hb}ClM-`rlNb+$yr&3t((<15?Nz9@ zRRHkTf0gVr9nhm2f~EK=f`EDt~ZX8{$5yn$UK)| zrsmE|OzQd20pSbx${>t{@RT@Gv!LI0zB%9a(@YT$1({x}4FMq}+*~}uF5B;*-h9`- z>%rf;{#n-36)rz0o<` zw@6=?z{AJ6ErN$S)Y>ipbf}*ly{$;$Mlxm0Q09gY50r>LCRhxU@f$M}Aa9l6tulVx zN-&?V<~9Opo9+}3+9Fi~7fJG*h49O}5Dk#@HxDTs1oDcs-@aVr`B(L281WwD)pLX{ww3fWQf4O>*EEA{}f_=CDg+8xSGC9tC^YB=6HIo zaR}%!St3A8p?QMT&rZls3fh+`JX8Wt4Ai{OT3=t+42|2#O$-ide8&z4vB$w2I|}|I zcxiRPw{Zmw4^V1Y#&gi`&iH2vP#(_PtjwOZ`e0!zd4LO4w~iSwS*o9cyw@E?up2_V zSHm~6>f4l&o?s57pI67ya_k>H*-E}y-U@+Z%f12s?q7nrRe<<+C<}kn*O&ihE-AGo z=l>wo>j?lt#l;LF>gQLpgcoB4KzoERln51815t9-pdrGx$~;6onZM_47}|VwJG2c> zvDL6r@Xe`Wm^$r81@XC0yXU!Xz=4Hp?1U}=Q-idg2%maQ0$jwRq5x3Bf9w{tY6Y#% zU%lrY_C4JX{Ibiw@3qDuz;X-A3G#Wsu<>6uccoUpan!`h?q)^08B%s}@O_-d7i*b` zT?8jl+Ys{D8Rg)goU2cTf#!BM?9tNsR6jpflBbydBW{Q9rFTb#mE=(+YLIn4MV5~# z*H(HoEz`AD%@LykagShbk?E)pJcd~W_4F`3-rU^zXtT6A_p{G`UdJKTdZ#)j0BE-8 z%afZ0f1Q1YU5Q~C{(ydHO=p8rnsyCia zf3FEq6QLFWHE1KR707C8(!z?40m3i`juYE4!aQbm-~H}5n{#e|Yxem#J9L_!1>Ay= zd+t!bT~{)a0vz1nUu#8Jj#xZf!vb)a&=&pSMWw(1Ht!*RYB3bYvIz7Jb-Sx>s{$pQ 
zq=zzFVPNcwwfhZz)e>M3uWQ)#+-!^W%u7ZCGjqQhzm`(X{$tG9D90c)Q>%_N*)6hl z-|D8JiygzEX2;XO^``qRvgsZXFr8~1696<@P!;GWmCDXFr0E|rb3bGlBb^&cd+sc9 ze)@Ei0v}tL1q;b7a*)T=9Ip%pLh0*!`@%pVm8Pw`b7>jcXKMtqv_Rqg$?7&{F(iVu z9fAeF+zN?yiahlxq}U5cwC_z1B8cdkNDJAx2wV{7p{=rTKY#8%-)3YGbhWV+1f-VP zP*JU)`mqXd@3M2-HR>=}@l7|&al=hg=e8YHh)^8O)4uaC%_lxat$k|!mBActxDsn` zAa-c{7N}GBdrgbsrE0eYlTJ%KWScxG))f$w6`y&89f)>krjBmXM}>jf`R>+MetB4`xX-mvMgaH!@JDa{(O0Fx{_in?N6G9swmGoqgaIn1YiTle5AXt_ z{S=GaUCK7MP4z*lgfS`Y&SKiKiJ+}C7g4q-TtKEFDZQ?3=aW_bHvQ@Lc~t7ZP0Hc>slOLp(gk?G{ZO{O9l5~*^`!>2T@(LmUc-KP0xLZVk8~Aj}qm% zT}=y%t{Um`#nJu{ur5(qbp~ZgnrW<5r1tVmEiul+3K<;+4)@x@5a^fyaPZ^0|5MRb z^Oa50(W3SIS;!-lGqO&I1VJ;QZ4#f;^~pjYd6g}IT?FnbW{`_(Ul)NU>}H$J1R32r z&vrrGpD&}qV(J>E>evvj0+4+SX*Z&kSrjM2*qR+wlh*^vfL-_y39_I%5Tn!bQpnK~xPMcjb8{oK8Ahjm`7iJRuPf66w` zGN>!#))Uc+sE}U!x3%uuP2b7XSCIf*pAQzdaex>0xsR1t; zzUF*4?RtBfOdgOxfog_B1?&RKm&_koAU0T2!pc%?SJ-3vYl{A8$z3z|_OvvT%_xBQ zAIT*BW~b73Klx;4z0+Oq2>|;bEb~#5H%d?c^CIa$GJB<7n*>QSl$6M`0VoEzXsQ?a zwH_GtDnjO4+_u3!=w9$x$LzgGYLbLWZP-nr^>y8ttfHwXAxF2)+>2A-+BJR7LP~SM zAPuzSi#koq$V90S#6tl|3&2S=F-7fFA)p@#Kn=6k8izmzKVf(rLay^O5*7f{579P> z38?VkzEyit8~3CW3JcG%+KIn1Ny!FXKQk?*O^l7qB?J5(qU@-oxXD#3!F>k-CY|%t zVy3_UB2^kK*rd7t(in;ulVm)PX}+&CI_DfnH{3-~E5K>u{~$ObRqhvm{qfK4zA~;3 z{?T~>05b-Ob$6MnzsEmwfhF-S4mL6Zy1&VQpHU64T zK0O^Tj#U$)61+ZD&3GD80&a)E&QUuLG6}2xO+dKEIdt2cXrn{@a4!SI6e81kIb;wg z+R^TOxHMh?&Lf0?W9p{9WX;bPi61fJXBN`DE|Q6dhxEbEEq){JY7SpQF)(=5qGEX{ zt+;ubn(IxRb?r&V9GgmrRu0dPm&(EX*BGbQw9+z=2%Df`gz%?l-JAqfVr@0OPeDM- zo`IUW2lfNgz8Zs|l3-ugh#S&{npp)}>DzhuO~(ZQ(im0=1pM1lsq|k7lexhJ{w08Q zyWp;G7OMEw{k162a}`sdTAWPXbQ1|v8aO2LPteG%B9jn!QN7Nv{(lJ_WtF^TEY1yYn59)7#7 z{>DZ893u0@^D^!f>#~~~2_*u)htU~nf+dwT9hy1u_Dot0%;^7)4P003`wcPFDo$^$cl)fNro$3Ib~Eojy^mB%VlB06m7wI(vEE!YaApH^EIK6?8h7JN?(95mEI9CgYCNs_B^g{-{m3ddT3l zlo`CM_OE-@H~`o3eulsQnRdf$D%~y~(vP~?Li=xW4*xz50pJB#xBAvL2a?i<7QZ1H zn&dXz0o}Y^@a_U|F>8vpu!ZKlQ4q8iW0A-!sk*sP#n^yx&O1~Tgc!`>S2VayiFKJ2 z72Zax$KYN7araI9j29(wiwCK37?88Do<_!+`ldFl*(`He97O=x|NKt;Eev7-1E*C2 zi_|uj4g6~WI1+2!A!>`01pb-2d622F6aB7rPYIiXhy+dlWHuMIeskWx6K2kxg`9!` zu;yY5=a2&Y_IR31$GJ0(#4^9)m5fE4%+~E;Jux6~9{hC&(1{X)!4CMy#og+f6E|IE zGIV2~v|P9??<1o6ql7O(kkUZijw&Hu%m1cZ%pNcL@k{vijbWPL#q&58fQ$I~HTMr0 zJjpddXKPG()^D?>6ikd2^4x0tVbcRT@(-$;jPjEz835!md?=4R@-3UV!LbSe$;UI# zlJ{1BvR%nx)Tw@-+0`9zy@~!F>!qw=7Ce!pNWYr%TmaMfqb`sqyjLUG+o_D{K<|@V zxCM5Ic6f8DABlM|d{n=CUJ28b8vuR$XeA_REe*{qrtDhJk6D<{*p*Di@+ocs7nUaQ zKe#>=rnEPw2V#>G*O2uKy1B{x=P0P88Sg8$S^ebEdc2qD)@u^#_bv^xrK`bzY7j;V z*Vi2C-S?>9Yr34mEd$A8Y<i22zu0+at1Ftw83dzaGr72Yq* z{VW-2tSx+H08J97Jz7p$VH-< z20_e;q4Y=Aw)b8>z}*GqaZ1%8U3=p?&3@Tjv|HT7ew2+e=avQ|E+DLEeb7{AAe;h% znMkfFk8U(YtLc+DiMi2(KpxZ(2~sxJbi}W|I&S`l#bpGhucoFIKsX#WH?Q8Ivwvwg zcK+cVFaR~5G+@)YcD%L5*4dtQ@7{pI}}1+GuW(R#ny`AZq7xad0DL~S;ZPT#o#Kt$4$(GWC- z^i`REvrQPr8s)`ttDtx`m*3qNEwMg7w`(qJ}lWv;vY-;{rvg4lxJf3OBLp-dvj@9x_2~v}oLb#e9*7vW^xEV6i zSqRw2v0S=2$ut$r)*(>FHY{N6I5mn&XF>GJ-CZx5+pn;L<25Wg*0}(LC&oAxps%38 zk7Aa#8s{fZbGY#=7F2`L?5@WxIFx4TuV<+zLQIhLM63;Pl1)A?gB16kW8#bIpDf7@b8e$bkWD4&I3sA-!kUAn^Y@4sjl?|fCeD1Rz8cFeSxwJY9>qg?)xC< zIgnb-u3pQcT>b53d|WQR4Se(?b=*@ zp9%q~Cea0QP4r!&W*=oD){23k> zGH6&cp01=D^?Zqo^Q+xlelr=g40w&%H_GRZr3dfJ4B9Hqal^r4mC0G3AxV1JY3le>%CxsGl3Td!{?CIIB} z)g6#yiOE{RHa!f6vCO-(iBS5KB<3ayg038LmZc$ z*s}4ae|7#f3*IxM3GxuyzUF={Z`RzHg$_jo{gKT5DN;MO0x!}z8NcR!OZuwx7_2Ki zwE`F{-Ac<~Xh5|Jj|aw!5L#Nf-wFI?vs%eL`0J1V(}NRu_KQbCHczG@WIh-iNPY_A zyM^NVC*aGilj%ugPmMLpixIM#smC_*$r}@eKxpsKtSMclV01D62^K6M4d-EgXtG!o z^xM0e)f}cRec_8Z##=FK*=P5{c3~uI)#PKMFZ8i#4KPs9O*P({N`0-C)r>t3=nwyo ztvN31{F^TI3~gj?Io1C4pJt1^R%a{;`UbTSEEy`ZXm%cI-Mii284GY7v$p2YCs;b} z&E~@V>T?#3m~G|w$I 
zM;Iba@zT&%SJnhF@6-iZeoAeEA*F!GoNo`B$PypOOy~GnI#IlHn#B6?kQC8ElrlcP zW4_uZW+3yEO$&V$OT&LvmxDU~XuJ3*#WPQYB#QSRm}iDuHly(yu<=eX>>wr>O+g+{ zk@ebW3xu5SVBwY-n9A_9S+_4%%t)g9!r+^m_n5&O#8Ta)fHv@1wlN6GbBvme{e|O} z8m$iPpljyS3;}k13d#}*?XG89Vcy6+d8~>oJMa!9zC9S1WeWfQvjE;MJM}4j<0bu> za*C{*Q6h^W@Zp%`kn*%+Wf%!g7%#2D-A*6vd*`qv8FyX$$0)d*Lw;QiTJ}MTmEhtZ zG**HaN?1v*NQ2Bs(!o6v)_jGGO8lq9tsl~kQ8r45$NyKavK^*YJpU3ObJQOgnz zG`zAu@B1$T!4158-_hQ@7!8P|ELWE#Y@xxW7a7@*LQVZF=)s94gb6k&h%(2&TSXR)xY7Ba=x0v`*L zZjDc490w+X1A%NN_-V{8_A_aL@s~Y#??aUqT5UhM@5;ke0`WBrt_;4W^_iAb6UGQU z*+p!Eu=TTpGc-Mmt6b$b#r`LqbVGvN?iBSI@;Mla4F|k11pW*O?(yJY%T1JVf3hRK zBKi3=1#mZ>`B*06Y(98V&UY{8)4AgQQKs#^H59p_TYsiuU^?elMGUS3s`On0Ea+%K zb_RA`W$AA)K3Lmpvs}{0_OJ5y}H;{=fVR+jBVrdS}`g<_|ZB!PpPzqA>6gI6XLUhG?`Si=zcRoSFZ?4<|6wXLq^>Y8aC2G!_=yySH&}~9uCk@ z2oYp|_J3G>03%_S>(KRi%6^kESATiuT~0JXqIxkkhlb}{$%#GAkCTKY)-x4R35+^w zW2prWrbp$H5$d48%9#If*B-?iN=?lp>oY1H6Iwn9T_zMzfs6quU!2R6SE!HXS$}0n zdnYO|Vd3jUPyZ;DnD-*;qwCgBA+9Lp=XWT2O%syK0C-a|8-_af5B==2(0s>=>_v4e zun~{C*(iQkJFt8;1)tcw=X4CRp_6Uis+ntcXf2YCe+L^(A|>KvbjuoQ`ej7nOzbUf zl=JxWcjP4q9j-%S@{9!Zl#Dgtv0oB>oxW%dotOV{q`1q69{YEB7Ag{xsng#-8I?91 zyKc&5m=|=jl0rTrF8m@LA1Hjj9H3__lk$bV1N%@#^*D?DciBotc{)i&g;d7Ow02+h zZ8U`Zp#Jz~HWzy8Fu6ine#iTn6y{>k^a`>sP^QJla?5`ASm?5tRta^4A20b?p8i$< zQdR9g@iKY4jcph$(y#~od+$t$aqbE(=&Y?h<(*hCK=e}L+m$A|pOezPF4tEmbHWn+ zh5u}x2>WDl5p-PIN0YHf>Q7fraQ-nB-3%VZm#`@kmJE7?PU|XzXVZL*{bgT>rXfq5D67=s!x6orC%LLAc@orE62r zcM`z1ettl&yBVP7iZBI7*C}Y<3vOPTzfa&%IZM%cL@mBb+@be@!XeDEC%SJ}F_$dv z8XsSh2-fi8|x1U(=wiHOcC#cL)pwuEW zg@K`ZhdK$7BzxCq6n<4&DwGN}0hk^F3|t=0DRw+=mZt#QAOy-t@eILn*hPk(xJ>nPIE zK%sn$1wDKhykO~vFEaD5$wv;7@$5WV#nlN+gKlbYr#5h5VS_3Ri4u5rFeQh-fpe<( z#Vv)ES3?L<%e9`|C9P$ljY(@uXO8>Z|C!;?wK}fZDswkyEI0)#o&#zoK#kcU#Opa-<4xT%@0qYzxIJpj6>GJDVtGmN04@jkw?ePX{!iweq)m9z zpwuiy7?=S%&MjO^n8zhxtE4bAKY^=hMBOvVQ875p!LmXwo-j^8oixHc`v_V*cwpI%pIsHA`B zgA$=m#;B#!3_=@8uI;qa6w};Vh#e3?@E7V6NoUm)Q!{gg2T(UOaEl>;`?X`&l)FP^ za{aMxERtzql5{N@TolNbxGo$LT=L5+bPXHf?gx9-f(yM9SbpPQ$HYoEvX4x8>q)z3 zqP-YD+|$3%43r-(JpfAU3m6nEH1Y!-M7c)#0_oS*m2rn8WHg3V%<;Nhigm*jH4o?Tl@L}=<_E-9iJg!y^p#}jVn!SEv1 z6V^a-0li@1{G1H=&I%&L=O*l49jY`9f_@vODKfT_th9Eb4kYcX5Pq@Jc+Y+sU+#d_dcbQ6eeN;!p3riF*d$}-@s+~wZ#C})df2)k;F`2fm+YIfI z47dwEabCh?g+FCQoR|O_tl_ecpR+6TSaBrXX75=g9dA^cb4sK)wy#MtYNY{r=4;@) zGMY-Rlc6X2*)fS~#kzwlB)ziRW1(sUtc4FYu_}775~jlUZwlWJ3Ba{(BZ4u5CSGMB zB)4fQNoEoR{dy`QzS4RIvVlzIPOCWrwnps0rY{lK71qZh>op&-#) zw^*{#T`j_7&+Lf9q=tCIRIHH;zgb(d)?X@5+pL`UNzj(O>)y=6g+p=lvA@ijo9c?p zcmBaEi1S2CIx&ABQ=-f|DdhSP9j)KYx75F@ogggG&&6MyA}7S55f5uL!3vS&22!_& zPNc5BVOX#nvJas=Yf9d4A{KyWTEZ5gRPidrNC+xR+Vl`spuUe)WRR zi<*(8=NLt(puN9d`p;}w#gc~|qHVpAq17x)W|HfitPyTg@pSs*v^OV?&-B04MjUeK zOa0vdmc}NNuCQniFeq1+d?yOLSejI*Vtq5!a%5b8fDLI^WS>U{57{&Bf<-sY`H7kY1c6na5d2rLlbmoukT^ zIN3T71L~_ej+Zn;&*)DY``fu?iK$BCN*hytS478|QpId#XQb#s{Ft_l@t%&{l`w&3 z2hTr!{KmWdn%Va~_ON%fz@>Gz>Jgy1N$IsfJ^F^6-qK<_2GvBJMx|(f8o>6aLOf(p z^~syXTTfS|dS;8%@Hcq)1TA8^a{=Xcb3)qGoBcjtW4)5hD+XTYaszY(uECRu%mROX z=Buum!ULt8UwvV1q9IR75m(xob@=*$+Ss+O?B*uj<9+wnr0_)_c&Ady)_nL1zJ(ZKc(xxB%s$t zU^nY3fc4Yzpy_hP+h2FzWs`V2=!WA_6K^PH%<~t&O!|u+^G9B2XdTPQq(=qW#g;pf zxL?Jky*xhB<-UTy2LygcTgMBZI;&3hc?-0>VXF+~cvQs=_OGwKy3XDAxNqdNk*ael zIb(dhtlJK7( znac>^#TGLw?WsI2d^6q3X~^u|V3w^v^M1%+x^%p`m=#OA{ZnNk09CQ7FCZ#L7H*RM zp|0gUl2iM`U5)-s@|8?}I{DB?dTwp$Fm7P)N1f&SD0q?WyWJ9=JxnDM%C51&E=eAx zozoYko_f~iolzXID^m~9jXuN)MA$JvcC0iu=?5K_AF~iEwvwtI^UNZoJ*hR<+rdk` zqPU9cT-_|3@Z7;4puA%z(vYAObq~%dgpVk6vD}t!tXf`@ubO(}0JKrRzh2w?5a^Pl zHc}|3`DNL|UYR@2V+l-s5PA!{v6ihCUq_8|A!2C!5hU4wJ0wH1l@ie_P z=lr-0{T3_`+8G)@ns0>@vaHgbMfmNM3G#;KZ0+{@)>ki$#qknf@U6%xwvCn1#mp(X 
zwEB&ddW_;;rSsJjEHmlxLzCmRxH$^DdNhsFO2&;wl` zT+Qa_l7X)1t{jI}Ft!K0Ae-YmrhlkEm*T=t zzDCk6e_K$IL-Rno#J}7lJ~IG)s+bUF8qIMgeV%*2KdgCy3ja zRDR&Sr1UjIIWL6sr_M}5!45kIpd)FdtAL4FCNPj!CKh*L`&i(op48wZAd5Dg0q>Bo z`n~YMFqr9Ct}f#ftBctLH*@O7@99hoQb{*?a-le6IKx=~{tq?evOR|F))h_i%gZ8V z4*8Z^r)GKCaaO0shXU6|k0Quz^jZZYu!f0yeTj9+%&D$|w|at9bARp0v^Q>(+$#W{ zmu3R*{|}_WcY-DOh-r{`tSL-ImPxT2`-P=Ymf?eQ( zZjGAiBx^u34+FRhfywMf8Y+t_Iu==)h|ImFX)V>jx6Tf>yVnnJZmODc9JB@=fR9fHg&SLV}Ln2faB;DnP4wu%Z~EU+_Pa&b^j z-{t}GQO|6thmwCEze_4xy4Csql^Lnf8zelU`2(a`GYc|l8#&GsIPy6Dm?q`yG+5}> zuv#eT)-49m7|w-Ud!eiegMx z3=!w6z+oHpj%Kq+@72!dZ^&i0d_@QF#q<4bIQYW3o}`wz$$gf<;n?eV`0r5Og~?0$ znuZ2=jNDzd_vx$dJ#2Sp)&(L+nWLsX)VeeX`fzTPiwd9$pB=SS-*aj8yN=o8P49DL zRO(e=Y)qK_O2;hlEwv()%%p02Ye%!R&?GV9%`sVRG^`3-asz zAE%bw$^c~azpgSvZyolKLXu+oO#r-G5w5qRQpO5Jei68)-N6%4j&j?uvS;7JZEgwa zPOw}35de=|0Z9vEuUx_fF~j5he?8(Zi_8n6n2KZAnuKoH?&;zR^mWsKut^$PhdRsh zYOk>E{FV)5Zg(GVxy7U!G$U#KmUP3m6e1SXYo;&kHe zC4G4PL}v8Z@>qrWMp|p)&1_}9?D@?*LbjxMsReS>ew2kdJB>)vNirx3YZ?ywm;Bop zEb#TY=GU5cVXMx}TB%AonBTn&$`p=yw=6J2tnc|l%;Wh)BCe2{iuyv!zQ2?iWYUMi zgA6B0U|e|;a-_N<=pZvF`m8}Lye1BFj(Y1F)Xe`z6ahP z39QHnVk2>T;rH`Ni8()#Y%Hiq{{C{-ButhuWh!(Vo{X`V?V#JuSgv$+Pr@FGd{n*J z@&1DqHe#8lkaf2xU*wx2%YEK_Yo}!haR-pAlP*Op=E=4E&T_{rF~=~Ex`${+@1eQ~5b1FiGV0H#b} z!%D3CTkojQ40PLgoMG#y-F@)5%MQ&qB*ky+_!mVI1j>Fo37GFI^&Gmvv5r|vXgQG{ zMP8!QHqRUmAqaVRg8;|u7c{9)bLuvD@4r5^x}|U6yNvQu%ErP;hY2DS;jDB=)2R2b z7Y_IkTt^e@RM9PRSTmxfQ4XtIwiurd?AR$r&9W%_hI-2GGp$((EyFv$L@dX7pK%Iz za2p5M`S3^-vR*Ny#J6d$9%O*61^#G$H``TAC0mJ+zv5?Nupj|kJgy|PI&0cReVc!y zT)o%W%Qf!>Q+@bs1r#|;s$I4JeDJ(XY|H%A@7=wh@$2xvD^&cm#V+NWAsZMbv zjnTeuZPF(p$@@%?e_22CUeg5&NJ?jHP|sCcRsLDG;@&PXJq|IVd2%U=j&Q>D86ZiN zWRsW&$N!brqy1B>oR^f=NmF2(m?YnuLUAb5tfLl_N%5SgwA+p29_vXyVUW9bd0~SP zyxI`(>I>k&w6gYOkcE6tET`$=WQfp5Obig1wEQ=Da?A99CS)MJA<4w&*~5s$DG=xGU0yVUo@OWs5by0CM_{;dVb~FnEQ-g*I+KeYIkHIZZ5EIX`m6V!b zhk`fT^KkG^;fso)E(`5cSqn`~MnsMJTNRQ*7LxG@s(oUGdVPL85V9^uzeWSS zkxFIWCj3yPB!`d)J!`jba#QwII1I8%p=6T-T=lY!uCVMXZ}BX!^6-y!* zeq2fat(awA0UjOE%5d5{PxL`#J^P+Me^y}uBcE9ZKW;(!J~72x3Fm#FH~w28^;(49{j4(_4*Mq z?E1Bz7`UCpM<8y31SnzIiZJ4&#Xqqh`qx2l2}eM*6ttV5*5)u%pU5B8q9tI2#-6+= zaeM*X8?29Gr>+3%=4f{YO318*wQqm?2VbGM8K4%k;k$J$tk!L<)U!^F zz;NZ4wvW{&BF~%~sluwQ#9{Q#Ib`QzhJ;=JZk?fn1NSNzW*UnGtTb+H$_X(XLa&hA zUhoh`EYxBpz=M85cgDd9#Fy_M5H4%?a%6`-aNw@~y8mwiur5W5$Lv9y^J^V}BMa0> zyV8A1V5|!d-W0x1qhD;hu#+l%un;kV1rneITN~QFGMqQ^Ik}OXpao;PW0)~s%5#d{ zDyOI`k`zRe^JDzK6n9bg9&erejo++B)|KKy@5cuUwwOg$dr1kWALr5`Qb`=JDEb?) zwRDecF@Pnwn7w2q&zsFdZjDD2`z(@}1w#b0P`7_w@?54O0vcK;gU_}JG1??=MdXvd zx>sW;G8a)#F^#*q*D2SyVwW4ccELQ*yMsP<&E4ZetT`wTLa)A3i!Ik3p~1I%rRK%i zMH6q+R(^bYj5w7~>`r6?`l54?CVIbhdY>5ps~#?V$yCOsh8~`;V!3|nslDNY{ddua4>fTilN4a~5_qe| zlUz9F^6JNcf6zlN1Vfm@P#;i++MFwC{o#v>c#HgC#U}QP>C?oaun{m1V>(@UbbEL- zK)v*l?=!|4{nSjRxY_fGjL;*vhMSc-P;d&aeFBQ{i1UUY%fFzLK3v)>7Fcn2X2yRs zdEWDcO)rmXI+)Bp;}0VT=Ft2kFrpg|!5tdc|l$5I{T|^L^#biSv9R|ykz-fhc&BDEvq^8M?EqwUPbPu9%EihQ2iH-hZx4_IW?cb0V#d9 zX0vIx`$7)fmisE4y{nu1ih>7Zgx#U(y6)jf3PEKny+n()UyFGHO(~gh<06SN=#Pov z=pOfDk0sIN&aq!~#Zs~2tl=_3lCt`_??)?&<~<~I*$CTs7gMjcITs;`^(En0#y9n- zlYKx09uK9M$Imnv*YIoc!01OvXjzQX@E<=w5+Y+@ht=s>*gZbvrmmuF7}dv!7? 
z$TQs&o;H6uGGysr$^OhJ@Rz#TMyXD1SA=TaKIiWWjQqJbvOi+c>#3T}WCC@*)l)Hp zc9d@=h_U+`t~<`da=0BkdO@$=OEVlUBAIVDO<7EAqmOh&-$@XxD`6i!A^-8g00Zcm zi-u~NfEWmKt+3ImGUsajI7t_lh@)Z+#rs)VS~-Wg!Xe?rdNGa+tti?B}ii zNK;5J4_->0i*GGMd~BePKKYSH7+0clY8D^0Ez{^Al4L=+m~OUcf1V95dU=Rwf@6uI zIP zNB)t(CGJKoyL8qc@E1cF;la`29I6XhzX>Ab!#CHU!o$Kx=PL=%c9Rw^*3 zV%bHyngc@#-Pqo6a}vRR7tjhuBq_G9VPqSh$5I$&$0&C6EM@be1~w2VW~Py?#r4)9XNf=^pgp$fJ)XZZ$r{F0k5j=6 zv`*las${$9mlgi@uj$S$K?1v{@xeQ6yXDIf-YHR?lf)eyy%m8KS<3eEi_g(@zH3L! z8jfWv4@2_=*Qol1>Ko&)_j#c(@44~n0%x<8J2Cp6NfKSNmpEWYTYvc9R|5gwDRh(G+B;gaCz~Nsyu$DVC*Zv zElEmM_O-X(fIlvy|MMV7IyRe;3fQ9CNfebFa!Ezz&;DCW{A07Il0+rs zzYfO$tRW>>FwF0k-iYo#Dx*6%2xW<%n-RIPA(T?h^Mc1Bo=);C?qev85h%Wrk#J!{ zE-L4LVnP@yYqeZMr;>Ceb9+;wo*-HobZ{Z}8u-|-POLPtDONR!wMjKBVywG`PSOe! zty_Nq&AQE&!aLs#_Dfq1pTfUEZz=4O2*6Hf1IPCXsaqzA<|iWKF0o1ykGTZOmxwNL z`H>HD_!tB5Mx@6|B<%`0P9uft!~0?!sq?qRO|o-htHr<-~UALL^2}k04u_7S)l*J5S7(^L58-v|2-?KpG zelv@bV6i%J1*mW|qw(<&3`#w1KNB8i+lj9;`oZ&8Qq}}EZgdF@+Iu0AWvBXXnp2p_ zsNPlvDww~q&d0oNc?haAC>*mn;2kHpNPe7}M{Moq>OSf({lV7LaJs!Dcrm_pndPe9 zlx$~~>3qvFfO-LGyM96k!LLQ-&)>`9{;gkI=0@F#uO?^I?(CQPVtYKZ-6i4dyHJ@c z8Va`NQ37}N$@9W=w2Q5`YWCpc48#r*u5z->D&j4ns_c-jLT-zoe_QiY5gtyv@NG~; zqhupcBJI6i0>QipVwlN#DkD4=ZbYG|e3*odK;gI@yzP%Uli4~}q=Z}MG+8qd0Y%Ml z!WYrhA2Yz|lnbApkMq_FKTCK~LFu8b4;@V6H^LOSfC8xDJXp&w>f_q1P$HukIBQqT4uLCEnuCy8U#m-|8octZf#V4(E41LZ1TffB#K6 zlHz&j1EC89ddtF!gavSbGhBgX1qiZjVGD+ZG1ylNc-6)hyU_8EsstM?(>LS~owdN_3cQP~cdNg3zUvNxT67{06fW~p8} zG_9v-wNBA|;j%*V(Wu|t#Gs5z=S@F5W=X-Pv}D0<&lSGMFPSU zFz79n08NXkg=jWLjpr~dDd!T0-wS+~byH@Am#O~;{+wYf%OPAv&}Kslry`}1bZZu! zb=IHvDy)K7=P2DmD85N9r&9vky}dF=Lj@>s2>(fFQkLT;|6(1Ro5O2uSB2UM?ctH( z>!_<7Sm#tA3Pq)=ZyQqB%6cfrw(vlI=|RrYAf1y89OXaFDnTgyyF+=v1KvuYtJYym zq_A50mP-FTcEgL}?TjEP;&+OuHPswmGTzQ;v725oJM@}Xb$C3Vcp@i_&wIO1a(2V+ z&%4w)$d>QOVe*{5EJ?-mFa8Z_WudmWT#+-x7->se+ezY8Fn{nYS^UDHrMOSmIw${+ zFy%;(|Eg((m-b9$e{KzRS!$(?XgxQ%SPZHArO(J=YM}D3W%?L`GPs>q>V9&JY&?DK zaJ*KNbGsL2O&U%-xu3EVzSQ?q%RBE6s(_%_${R%#0KQo`wtOMaAXMjfFZ)|0H7P(- zALuUXtNAtUVxF~r%;tm2O`9+{B3_8SSNh~DrY%0~5*a9*_qsmH<_lHPBq9T*84^Xp>SN=XKp6fr`1L5Rv490c>1|Mm0m^6 z9yk2ds>bs#?TK^-S)10Z$~izrST$eZ0`VflvXj@rV!wE9+`OI&X^e6nbKAr z`H42)IAyBt65aweGeJQg9vI*wu#zx$Mzu`v1Yl&4Vo0PO7OMmg%VzRU;8flQQQQ!4^rRU>z7}(qiu&Bn$q>1R|jf(purcM=Wq#R$O6bck89iH_;4c}=Y z5E6V7Quq7W*HKqH)cL3jR;Zpe0eCRfbGp!7DV&fbCZ32hNBP@88#Xv@1I-J~Jtk4f zy&N`?->{&9M={wy%okeztKw;S9aV^{q1M$i$xhYl*kt;2@xAPF!aTI>Z@*oc~4~Umxj6WTL z7;oJzWW;38u4JZF6>P1Z+k+95e4`S$kJUhRNB<*L2&@<)@FP+Y$`KLT3PNIE)B6#d z{6tr(ZWGTE|NRXI7XUjX1MCy#%6$!@6=X+9OWY9W_Z-lNT#mm*&J3_*1lXd(XKb3y zsRAAox;q4r-UI2`CFqDNc{qXmshTma6pl^syPKrxHvhU^^jP^dsRJ^X#qmTb?6ty| z))AraoQ(qCS#6;R-eNrb7Uilh`bE{sh zmWQH@zW5B7zm}V=G@?`=-<6W#Md*r?yYD#tGWgNe{3t>sUw;v$KcRKGONVpHj?&8^ zQ?{AWAI=FRO(z?wfnj53 znxQV&5TQI9L&HnC$K?To%rj?bV||;4wPe4(*7s`kN-p2%V%Ja~ZX=j%N~j5@6W4#I z#RMs=r-dALr07|*5hL2aRN`P;I{#t3$0l9cC+n$yg-U_Ea)|moSnvm*_1#c8qW+Cy zCwD7Y&IaP-{L&e!d4b~u|xWL5F&XL&t8s+orrdAs}@+K{@<&of1+ARK-tp7=lzsH0ZXDvFr~LJWYTr* zhJgkbv!Qq#V{+Fz@yBK*OCHprXyXS%V{2f+r9UiWmu=7aziKbi{MVL&_GxXD2BcI> z@>R#wh#J=TTy8Fv%VwD~ey5UM)BK#Er_djX8rDn*mjL>xNCByrS%D#oupFf>+yM?T_^ignC~wjPgIvi}bGN|LWlNa`68#b=E;qcyYVmU1|a85TrY$JER*V z1*Jo}1?gCn?(S}BB&EAMq`SL2_O8G8y>l;rG7K{d=X~>dK4&W5^z#Km2ozttoiL2r zGa1SU5`;jS%rRWaRX!+ZzJ(!%n&j+|x;M_P1TuApzF*WO55(;ED8l#Q1-JqlMuYZo zgDrlrpZPK!T0@Ma_UU5h{?oZ20fTvVv%%0XRgPaNg-$He@WT-Sp_v`=*jV=|&~X>c z=bQX@6GSEFW?v;UA9FuP#9-YkO=+cux^!c&k6QHe7Y)?EwqD^Q!m^+?6}h-S1kM}w zGs;frjSkFH2K$LHQAQS~11iL|!G%EqQvDI<{qxJ(^GY3$^rpk*VKkj!Ab`qdQ@A%8 z5v`hibFom}@EWDiIWGJlvU98VJG@mG1lcD6bKuzrzV{EDD7;KYnD{0!Jc~BFWMC|A 
z6cs)3PquJNt8DJ$G`6xUc4%nSQs%I+RQ#sREN*#jxlup@8`<2x?MQ>!V0p;3Nni6P zPwv^qKZOLz9HRZk&8Z;3=*O8;zG?#;@AhS^`HwIY>wXZB48%n@i*e1)?51_xV@0bL)(hqUmXwH$?aT4*-fXP8vT;xPv=j|oXa0jNQk3{Kt_?}g* z34AgBHyqc1zkkh0k&`c5+0AHn-i=BFYQEI7QARcvF>?Ym!U@bCx#kjcOwQosbJbVjm$ zr8M6~ZqvN|h%qtyY?<D-kKw_-B=I~|ZKZ6sWH3QEVau0{3znPDo%E4^`TyPd@O zn=&CqV0Qi37bF#mI5}~d@paXrlSZ@_zzf+=?|JGX)%$B`iOGE$kE&-@`GOu)wEFb0zwl>7;I@y4 zH@qi^>Xqtbc|iSnXH-!?i-}G};6wU0Ewg6R4~Ndsc9@bg#hV|OhQUbdE2O(^9i4+7bZup>IAMgoy zH&=N!Leg9$?_t6uGJB`)d&AU_g(9UN-vBg9H&!#~PvxG3EuxVfI24!%jG?m9&@Tw< zKf4Pfo=r&n)u))s5j;7^Vs!>%9er49JLllta;0^4tEa9??ph{j)_!%iskY>&sUBqv z+b@yTQIDv+F30zGr~S)DN|IjfJYdm#DR37!t$y;nD}3_+x|O$2C{Wh9(l84cAZrd= z>{!Qx)r{D{WL_mW-m`NyCuSU1!J#-*Wayf~$GGahLFOos-~OiA@q|o%-+#I(ns9yj zlFxLIc_2+sB*JHrj17P}I^n`CNd`aWx0*eW=jdE|b0p?|FS|&tkyx;jzvrbBteGie z|NPE@SqWfUSg6BvUKVre%HQ+O*;tbx`30R$jqhvu_FEZd1%hXe^XzcsE|iZ@G(=dN zY30{J$s6?hCR?iRT_37Al?s3$qcOt@Az8?s|E~s3xxP5$4c1?3sSVoKr)aREW_N&tuvo+r7Kz#2}y1c=!$zGm3_u3y<$bJKwJ?vpMqSdhbpCoC>7{H83e_T4R zC*C*}<*1+Cq$jDZ)ZkwMvJ?s!Su*xNn1(2GOk+^>NRBz#!2NFTgsdz0Z%`vDLygk- zizO|T-k_#ksMx9bbq%UMYhk+FFu?)IpqTtMR6fBPbjTlJ`8wF%ip`k z`pVQa!Ac92)$heAmXrJW3?w^GBHboSh=un7VoyyUMvP`3XE4=DZl7Q6##?~7w3{{}sh zrW#08uo2l`l7fYEangb}Z!P(0i3>1EOrSJ&rp{{Q{8_|sDY~$3!Emg6cM}z((1e|} zodTeWo6QF7)9QODMLu%yucbq8)G55v{-_kmk} z{4IRkxv+o#P$F?aA(|f!1zY5OEXQIuyR>ic;xeB-9VsU`J=UU{OZ)oSz7{FBrifxn z2i{u~5kfot5r3B4uz|s?%+}JH2RB*UP+h#1l|+S7*uOxN)^H{FY3;f^7S&;^GV(fj?f-w)KhnE#Q z9q+H>gu0;Fe9>lrv+*q}Hv%K94f})+(+EX9Kc4vym^kjtQBc@cTmED1;=nIT7-vVB z>iel`c75_HwoNabtuG}~h%oy~7O_9ggMK86Q`gPbEHmLoa?EWN)3v(o$lcb(UH*2j zf5S{P6n7dUU=r7jTDWs{nk?m3m@*{NI73 z&RCeviO}dY7n3|(wE2`Ojp4K^U7)9!wxeSNimMZc0JbX^RU=3u$0LIZbjavb&$ zPw)Gq#;GTNlirkEifHTx{BQVxh?;*~|B4KuG%R*zhE+7^DVXk2jUfONLK9rZyg0!^ z0C|si)NMx2*LQ$7jY>e3N@UV+`8p}PK@2+UxMmxHiC+pLj^Dj}4$@sASI@I{7%zkv zl5{=ASZa&5!LUKU?KTt6Jz=Fu2RkNxG#z~bb?BKK9oQdF7=RZodoXvVwb#I z46z^DzwsisSeYi`fRqekF5{fB`8F}8=7bjFXPiY(19Y*hC~V6hZ*71yvDjL=k|iBW zKZ5H27#zhr{2SzdNHo^aOGmn|(Sw+0>tA_kpC#lJtKMc9Eyf^2!Sx(W8JtmJ+kq#& zFCN^TYIS~ZWYmO;=8T&h`)0!<8!ec)zs{B^tU$|0mK9kf)A>~!qw7POI_}AeZ-bJIjp(0%icJY&t0hL{;Yg+j^09g z>G1WhY)I~!bO*&Yrpsn}vB+1k=hc4}8)|URY8I}TGF&U|wh}ik@n1iTuGPZE23c#H zTG{2?uUbU>zr7QH?<7gZ6}wk`pwfj+n{_{>@UIDJmkBP`gTq`#Y$K%Dm#ZD;An0S0 zg+$p1YKHkyPr2mABn-rm-hMaFa$v2&A@~>!^4gook@}$eD4Mr({{|9>lY`1`HQhZ; zPL8c_l^dvO9Ji5d(XeYoH}{*6XuxTtSd{e2!T#SzvoAg!bK_J%Trs<9Nl1iJU-hX+ zkf$F82u&(@Fq&C7s~F%R)x+2cKEf^Xl_`Vo&;Upk z1f4`<(~-#v@(>oI<+ii0NW^S^Mz)=%ZO6`>yNB-Sd z!^p0{`)LQWgQmzvHyH~k>I!RfHb!)}bRC9ijPmD`2E~VozdW{I6Tw4UFaE7<|2B{4 zeip5>XC1sl1`0arz1z&VWE4paUpQc~7P++FVuJsgo$qmqM|NV;&CLd$Ya&%&zT(lm zZmuA8U0xGFg}15O-!4Jwc?>$ayn=z}%#I@du`?+vawlJ=cA`1)syBpm~RRk-f<2Z(wZCk|EK;-OStrY%)$ibsvr2PYBiR}~*^8^+}$+mp(R-wU+KwtE4&A72dDHY+kPZ_Z-qjQ!*Ln#)WC zG1>G!OFmn4*hj$6f{s{>^W%TpuN81Gzr<&g{66CXa7Q)K?km`3_grbu){>9Io>HS0 zQEY8LoIqObmOGoeLJezkd*eHNhwVmiIXd|W<}G_`k$b9DO`sRA1IxC4`()L#XiGl@ zHIN4GjmQ}?KJ~@RM*7aWd3ARBe>#vlanXF4TA6sUHP7swdjJftI))`h8Q$(3`O1){ zzhBfCFd5vw`zivxy}QC&$o#F*{AlyPy@i1UIm#=hy$L0E?hGbXi1}#)0UwFS(Y&?m z6$z~UoTq`uJXD%bG7Pc_`sK>9fyFAuW;fu-UZKr85IBiurxBAdKrHTR%w<4LDLX;k zY&MH@I{AHF^36y?y+t?5bn`VmS7*rvH-!Oj*E@d(L_Q?RFOu6SlQVNVj-T>h1Y`_{ zv4Y`KonwkE$UemWlDEBxodD@b*XADS=rgfqQaP~x3wx}|Leo4{9bl|*6B!A9zV#n4 z4HYysiHWnqEHTA}H}aJewEe$@rx#J-N}g3)BXdh!wU7SCZ=5eORT2Xd=RXH=SR1~v z`5B0)HU{zg|8?d*YcFymH&?s&W7DVpzbpXw^+xRBhRg2TeQ^go=&T<`jpx3;@yEudb2XcV`e^C|5e01Ttlo7PQg5k|WP}U;y2^Mgni8%zIdk3H_Cms2*H8DGomS?8#l_6k;ezMv)W$%{kWBYwd{?m4vNx zu#lBq@qrgsW;~&A6`cGrKPZBLBBuHkk?cw89ss94ge~-%LGJ%H;=EYFAmRevJzwK7 zyIS~j<%R%UAjh3n3vP`I6-|dmArNL7)2(wm;iwC8$yWTDBH*uj>xsrdNb`p@>@V@) 
z!hW?7lxiT?zX!Wx54wh7)M=GW61HK7EGoUdgx!~x3I}LQK30b3vz19dbWL#CPF)9Z z=-TGJTnak~CU(h=TU#e%V64{~$=J@Tb#5oZUj8oWcdPr}-sH~m@8Iae-)C+hBR?L; z#gchn^}kbmN9O-Yu8N*$C~D-hGcr;Q{hNZkYgnVn1-obS2fJ5YKLIU-S9V{f4vw>p zG8>ILzJRJ&PtjN_l(3u$E|b@Ip8NkB_4`R}L``~2n>%dZ=KTdW;pGK(xRsj%kOT@x z4Pnt4}Ic~|KIH@JRq88?}#f=TjUZI_9pKyZMY}hP}P#`0XG}n26My&2YZqXd4 z$!D_k-17!zvp#BhHvi0QL&*uc-bMOUC?^7dv72AIE@OKc!_-0Ca_a4{Lw#akNo@x2rnLEs0ue#1zO z(0>9abdXA-Y^l`LB;q^-JMUR((v>;(oS|2P)dK4aY{}Xj`43jniqQI8SjWAqD}rh6 z?`uL(hlXS4qL=@%?#0#-=H>u&72f=wm3p7U@ZfT?S$=k){Q=J zWkgOh#4mIUz~*L#B-hW2bYdOBoIhH5=B4M&XULS^%R?ept!f(z|9q1F&xZs1upJKU z80nMR{vx+ntC%pZXMa96y3v3GJON>i(hTtiMnUfAQ_@7#w-^TLW2$1xDrq&GX(b_7 z-&3HHdCDF{pH{AA68r&>943-LX;|VCaMB#J^d_aJkyak$itK$9JYV%$OQ2!Sn{3J-CxFU>n>z5LI zD!ZN;b{m-wPrxKRH#$DdOrj*HhT`@a71n{3A!Hdm&x6TF@=|QW_^AUJ&Vtp*lBp`B zzfCEO#>BH9QOcE^q)CR7br4Pq>1{2Ew+5S@7 zNxS{ATj>~JQ;^75*R9pIyH2Q!%4771V9@nNN0k?B;SCqjW4RT5X5W-vgdf5z6PVhL`%o%4(btLM zhb1On@+w^_@?SgrjuLNK8^7L*M!FgmD(8E6q?uoAc}a>X#&+J^W_>-s@rF%Ei$*!@cNUI~we_#tg`H4g1-lEZW(7R%M1?`H_CiG^ z@}+`o$7p}0yoAY__y-#94M@Ur3v68#1!y6_`Vdy!nKYLhF~CM3p>$)yutbD1Py~Ak zog_5I+ae+^^+rsedN%rk4I@dFZ=>7=4P8H=PTLv60m5CgfL|o;ih1OiC2$ zRqk{g%>^x=bc21^?{D@4v=oLnae+yOnDiit$aqSzmr*_dpSXD0M8+jM4Ldq{h6wqT z5(QR7xz)zZkF-huA3HdcZd@uw7&ZuD0{j(Y)lKtB@JgWZeUhjYJ~fRq9^&fT;&}h4 zT>{>sm_(pMJL=Z;d(R95%}4igmC}r*@k2QP6VJLMoZY{Dw<8sE^V%7%t-Z343{Q>7 zg>3MFBM&K{j{WwCHa6EKX`?RL5k)nkRp=b@mDzM)NJ?VpMjR$e#DPU^ma1_Y&Jep} zKp`O6pmA*a?s*a6je4^6@qwTdsI2iVLVKT=p?-M`4~57+vP!0vm3 zo5-xcZ&^RBM-uIC0Rko~Y>2QEtu;hk4LD~ji=YZcEV5lKrUCcG%({hoTkPuen*`n! zc;)4&?*7><#hrHhVcs(9Vs93jBD|slzfN|Odu|VWjFJ-7@)lKg&dzy@e%S44J_HG9 z(LLW4Bs<+~hY{WWn`112HvYqJT7H9_-uThK8g~1?FZq~$x6wxRq|z{t^nAH?ae zA>?6LU#TO#|3P$o@co(vcAFy|^LSGxT~~$0XxMpNI&R*oL~q>b#x z*1rMU&|Qo`Qs;zd{n&;-{W!uk2f7oew%H+!5aLo$ZRy@owFbq06^D*@gS{xr}?(O3HLWE^Irc zDcxM|%f8zoL1oCr=8p3+8^jSl(!C+PgGBaBe5g3u(ylZNzv1oK*3wf%N170gqHsSx z!fUhiuhoEenhar`F5hc-(c5qBSa$DDT&X&Wecymh!!M(_4n?MxebtzO%-l4hBY&hz z3iG+U!JK2#b(QmpgIFKewR89RUVg5UCmV<05=VSX2vVW|>>7?+v0UIKhpwd2_Z_^= zPnVE|_g;-kcAfnuDbT?I*>x2~Z!=5vw|WWRr5Mr36wio`;M2mm@Y!9l(0SUgtqbcw zSj(1;nTi}PG0A_-H#k|MJsIR<(rOiih@kUTFOx@CNo|u*_S_9x)Frx>q6ax0zbCEA zgUuCM#36Tx9&qEx&_QfJhC&Gskpm_Usj~QErP-*~Q290JZd+5keQN*&QAJ@;WuYc; z^5*wKGOdZ_vP|rsz}QylLDcu{>qe&Vx_o+sO%6uwoI=(#<>tBY(CnNX^TQI{X>`_# z{jSPx)MMe{RJL)}et6W%C73myC#uZNw3j&kT4VJO`?wx%AMpdfa~lN=Du1z$1_yiY z!W5_GLN|l|hoq3E>;%`e;g;_ECmoR%gFdgrR%%m&$~>vo*sBvaqpvDkZACD<;^sh` z`*RXp$C*zmd=wOHZdI#O4ssCAO!?F?zIO4#pG{V4WN;PwS&y0#KWa9+ozIS@ozIq* z>~05MC7fK5)SVEE@&?8x8)^8#7q1-kj_2g3{gsCXz1d*l2@E%28`7HT3rK6WLX}Z<(NcQ53o8QvdqecTteGJhh}iD1uaW zrI>2>MGP^2Ze#bipl5nJi?tlhm%gZ=@@1ntL;i6q8VoxRnQTJLj&-zTmM&NDd!At| z+q|+ccekSXGX{famlI-np&gGaqTTeM7fDFHfsOC}3tjG0*)zvXv-Pzsy165{hYv%j z>SV2y)^07M1;_4DG_0LWb5{v1vbB~ znOxeET!z$YV87mMK-F3oSXIHH4zivJvYrJq-F=wZKMQNlHr#dm2|7hiw_?(41$<$E zpk~f}qNG_Bs+&{`KV~-q5D@*J~DO>BM1o)`h#f zD-y$1M}ihGd%=5)yd3ye9kjQbSfm?9`R{Rj=5Z|LJ!-g}t#;o#3dYCYvI?l-mCGrZ@~A(1PKW8-bRxd8p|k0zZ=@{ZMQE z5~`1+^Nj;sTDi&%tyr557LaO|S{%Al&ilM9XQA@E-qGY?lbs?ROQK?9D%3 z!@GaQJl3)DEkm#<&SNG=N2 zmzzhzNH~@p$#$`IIFcpnLu}r%<;kz%Dc(=Z33+#jjID|AG}`&B>$#q94U^Z--c=dFT#F!GBDKqmuvnJnDg;DGyKla#9kFeENUmAv(#@Dzpb_tg%YR=xD%8qA-Anl4Q0 z@I$inOgb4Y0ao+>uhK`7>UcDIF-+BZ8OGtIdhN-EZOnt%aBRQU9Zrzd8v0z7OOl`O z^bLJ^$eq9>>>mg=PDP@UA9Fw(X^X`A-XTDkVM4Y$QxV1MnYZirCtmB@O1M^(Hv!O- z&J;&>=))Q78T(n~$z``uK_Or9ZTIA1WBV|oj17(p8x8Fgt>hxRUEHDX4rUsl6XH_y z@Um!FyC6+a7oi4mVm}+gxI)zuhbaGGcvlY!UqoPf<1h2DnxeB&=DZaU<`xtjc7|8v zJ{R2Lt1h)$a7n6CIP>B1#e=BSlpZZqVNc#2}bYx-NTc5K->>@bJ!E)2@^Z1>7mwN{ew1# zegAi>ZX?)-Uu5kfqE#Z>3VI(|1y6pa`Z<&98gD}n?Tq`L*^AN>X>ON^IU-Y23$?V2 
zJF@-pnV0T1e3h9j+;bZC{E{&6a#W-t)72RP24t+`smrYi7!Y?@2xQ1Yg$jrUp#dWm z`Q3$%$v}NJC5thgd8nAm<|75)2Nr2Dme^9+60WhgH~6j;jfC>tE79B45ci)c^M?Nr z9Jg)@&qVAkU3kw#L=4V+*b7&kXg$`O{ovP=9Bj1|8Kd>kXqNSpJQ>xv+PnP4*_ho_ zFmnx8Uo;9}_@h1e4TGY7zEqk=#tm#E|cKFVhXFJHtUS;+~v&HK~PJBv$ALG8QS7tgG}&R7Q?=Ccy~G#x|9w3jd>55 z6q@@^1r_+o%qUpCYu+>4B>dG0%~ldW zGU!q~=_e)Yt`x&^*OZCh9DV6YuaWd=_lwXm9q8|CiH8@0>`m(DHOwNK-aDTX*XL7S z7m?~E#qu?M!t508>tDNZ`N?nU7}F}>RKZWcVl;?({IL-{L70~yGSxE<&1&VfjAiD> z=FB=ft(MWmi?+z_1m)9rXW~cBSmsya9&|&@{|5MCi9lM$17rqNWGVPDxeDKAt9<#> zbGY&3m9@yJJU&uf26i0>yGEH~?V8#1s>8lMsjG8-x)(X*LCJ8;IOV0fnFoGB)ccuj zh)7zWJUd4U1gZbI;23y2q-%^uYTao{oAF6)z*Tzi`zCr1yVIU`y3>mrX|XA`sfwxS zw2hNT^&aQ6-0k1!l+LDDicXUh1;+-Y@7$v!g>SkX8+!lv4{M`ZG?k?M`S89+@4fbv zP0dlpT85weU&P0ot9;Rr}H&`4yzAnw%QE`NHwhMagPH>qJ z$q-Qoo)Q%0igW^3EGw}z0eXx3`A4J^JBk}CBI+xDsNq3-wu!&<>TX_sn2wHr)b52D zTTIEd&suG!u?fY4LJ3`%ZMeb)Y=mwL%NICGbcWzJzv5mv0@UC%&do3l_6T=S#H#ftVK zHNde4s=eb2buIcT`iRHp`&cTTsR?rz)RRU zN*00IT%116I3_m9&rjHm`pOtHd&_Upf2|96C#@E@%FuG5z5u2EsE9vC91Ds#IWu96 zZ?!O++KUxyiDd5`jNeX`6*QI^i{h9CK-mS3{k!#xh*91;Vw~yRZ$5jX?zwQ6o@Zm` zL?_Lm8f=~z1dMUNOZ#vUc>6}o4^d6=KF8%zmEgj#BT>T*?y>7uFtATZ0%$1s=haCW zm}8SvG=ndK(yr{*O_^>PXb}{rzZl9?-dQ*62p;tCUa}?WgicdI%xVQ<(7DhX@t7JlfK8|=ZA^W~E7u)}-p{5ILGr%gtwl~kdr=x!2{t)F$Ln0(31 zKDS;#^BodE>yhs@7yzf>;=}r&Ac+58zlLt|vRyq}uP9*C9L3J(PB|qXboY+&{!dM5 zm;)e>kKLR?TQ=C|!#B_2C>*f2kr}+l%k)MtyYmmX+u;YgV*UbL#eF8@k7pq!s8ir;+0BYdQNpSg;1NE zJL`|dj>;kHZD$fA3`$5;1F`B~)=AVZovSy$G$fQe;b zRh*`ipuB&4s=;y1+3=(OxZbpmFMW@&so?w7Eq4=m4rgm)?0w-%P=`->o=iE$1l~`r zDQind>{~D3v2?n%dWRWi>h;j!ouwE+L++mX5>w-ERA-#`zG{SD-q>QRnckX6TnE!x zWtTR^L?J@6QhP`XHGAJYUlMx`@TWOmnnM!fc>y>4TJ z=f=?&(80V zTX3}O(w$~vbrBAsfGMaa3^`uyT|v?Kll8SSsUj^fwzS|J9B{)9*5IxpmgWgL1M#J%H4Lc!Vr*JhzKZQ0m44I3Ihr#4i zaS5?3XKoJjIXs2FQP37!M#yy5b^o}WyaX~i7H#9HK_+FIZG-A%qfN)9oNFwZ-|v>h zn;aCPf>IwIbkKT~Tl~*j`bnyD*`0hYKe4WT@36aF^Acw~@Wju9+?EZ|AtPUB1!?R= zMBI#vGA?BO9USHT1$m%Ddtb%NZs}SrhQN#FBnG~u*(<$!zVk53r|wTl?PRiRt)5sO zPMh+5V%ia2n(%zlM5R?vm=1sGgA9ou($XgEuv$)_{_ zKKKqyJgl0Euak>>FN%;8MpxHBPa~OoY8SR6m-}1&w!-mu-|@E*8-br-{iu|0A7|hY z-Xe4k37fU#VW{w_w#qTSA}yLP(~T~e4z)XuGuOgBCMS!O)orle!vJu>Zs8|%V1 zG(%*;4ez>ZZFFSw{DGit6F1{hkel4L&4*>tqddX|^HL$q$kUxZ4Wq_Jr#Vde^cU^N zwJg~aycO9*TOzF7aV{dHT+b)iSCQ7#2nej<}rnkp`bY*RBZnZ87t^9^`kwv`u}!VYvA?Sj!RbZE?>``+YZv^Hp^4ENfS?hE6{` zbi8(kmEbf3fJkx(P6!_AQKlavl4J+?D^o1Vq-Sn61PozOi_CL>f{uJA3 z%g4NrHi*KwkMZ>Xj1eljc2La~!DaR=$Gx$(W-W90*OvpR*Z32EjLDSRR6v-w_p8e8 zWokfQr>sga58jm1Edc3jF~X0H>s8YFap^0Ldc-xHH7q8m7_67$T41<+XDRXnFdbNWGDV~*|4iwqffq;YC;nCA@? 
z-#(A*ISuENOGfzUB#q^MMN3tDn!(y90^g3BS^Ka&3f>z(5xRwcQ1EP8``CC&}x(M!IS#7Ptq?3uww zj}e68WW?&A*$34S%>Wco8pscJ>7o+%!=G9`8lIRu?vt*s_NEuhP-8>_;rRT#MGDgH z)b85*6-ssBKgUMiNLTl@WZ*0G9J1HGMGQ(DNV#$;sfbxn$ zj6y2#8HFjz%cRNsm%)-0?Z?zy%wJWNc_PyeJVT3Pq-b+~1Pns{mw_6O8Ue7>?9kAv zF;8E-=!;i7StsS0kqhYPqb0C4|MiJDLILRfhxD~RlH-gKH?1*^_~N7a13F=xG#{V^ zsGNpdSWbu`rYlz2@ZEcgrI-4(1w)?mQ^ld~4c^#u?%FR0so@gJ)7e>zk6J)6KHXLb zV%pu8HD2mjMu^SjGzCNmQ=_mH(TK#k@e^0`Nj}&?2C)APm8q;%IKHO#)9)#e!F}P0 zNi-snt@=HGOQ@<$uCpr}aaB3_HGx#wopT5O`%(0q&ZqS>E?sTX#1KB$n}UbdgOyiJ zqa+IQ6D%Zj74m~O8UcoI@K;_)q@ruwEEF|l6KGYqk1-_gT?jvaT7d;~SfxU$G_}au z6pm#ee$Z}a@f0d!vVz=C4=SV!$6E1hsc5>v2gh%HJ4a*$q$f-xCoNq!!iiqi9^P7p zIJWQL&Hf_sgTNJr=Yz61-8o_gyHCsRr+bsVuo3a#bqx;RT=46sw|z6C==Px- zH=p<@(j(YKGv0&}W}aGURc*4pQ6kh3_-T4F;2fKD;C}{|mN4sX`#sue#@09X#x#w- zk$>wE8!dW_7>`*T*z=%r3Bf2u>@;iU%PZ=reCv=0f*RSNCE~)3?Gk)_Y!v}(r2q0E z#Rg5f0@_CEIib1o8Dj2a6w9{)Qhe6cd~2`eRd$VCEW(P{3N4{=3eUwB8v6wbea&@< zgyE_HMIvz6bS~OXJ!r(~qTz-_S(^ed5T_(;}@&w}+E5`p<2)lz+Un*k3cbbtI&5t0)y@^{gGQ z{*ZcXy%Mf+QiayhI8!-k)0p2qX?&UC6HvuGupKUM;q*JDz}1XRL+DTQq8JYw%<_y7 z#>upARK?3xS`Ou3baNV$yPN|j+y5ei@o^4|-6g^Z8SwO<6lv0L&G;(hzhgmfJI%j?B_I9r0UGKv8rH{ zsr6u8vK!T&Z+Nj13tzr*_4ua=L{*EX8Y5sP22FiSw3aKHL~>veOhLdX_9C^eWyTIH z{4}ozmW9&*1S4&>o!I+25ax0*T$IQGF;ym73Q_RjdEozx18D4-^%pDLCv1IW@mvgT)C^AS+mXa)h%Ix2F#a=P4b!QGOwx< zz$ZEH>M~E?osyyEo(bI%zutHfW<(s?w!CEBI zgs+k#!E!KS!TM567fz3thSeQI*iw2EXn@AK2tZ@yUI{T2#y?Uc)e+gq7ESW^aRBm3 zw!S?xKn8=I9KMB&QKO{9*o>Y|3RP2|i%@Z)fs-R^A-zNROt{ z&Tx9h-h0sd-z(pr!ZY#v{pz8cI;GF*KOifD%-tQp3sKXTr;h+rIgpG}7Pdo4lO5N; zx=fmPe40V`T}R5KKdNi?%{;ej#ZCOLIEo~(@rwh|UCSAb*TK%|l-d!6AsY{`kcDzf z{b|qgb&Toqe#-#wv#1m!wY3^6%)2)m<-VQG?IWh(Yqqd4`jCA0ztH`KL7~bASh#KN ze#r-~-`B5?s2{%}zmnTAp$(-0;v`}#es=xH;swtIqgAG;tUKoK#|cOOHCift>Yv@(g%_w zML4@xc(jiW;dRf>^DYe%?WUms0_(-K!|&ZF)=>*ScoitmB&Ym*^GvsTpbp^$kYRN= zBj7iJXwB05rfwtlKt$zV>70)wtK2P3$O7Sxa+AwHHJfoEXNc$)7h=1hdoacvWa*VL z--GS_6n>P`fQiC~imCTOq6_s`wDl(;jAYH)aUnSCF)z&bgny|Q#}kQK>WMY8itjtr zGi-uFl5r9Zo;O8aN86EJ(MGE2x=UZH44yZxFB^L8PbyGnPI2(ehV?xK&6`XUibI=U zFry)~{S0G01!5YY#Q+9-_Rx9)eohJQyB^`9rX>R?ofcg2EhV=LBz_~l^G#o@$<+dC z%dh_GJ}v@Pv&YuA(p?9E~MSW7aPt z{7+Yaj>Z>jJn%Vjr&r^L#Wv%+dHle*&7?$Px$3Qp2zgTDk>{l`8Yu1@a#NVwJeZJi4F@vqEI5>(<6u37&b#7mwYVl^jM?C6 z1^|vH%TQ&N-_y3fab?S^j-K)y2cJHy9fo4QyT5>^Q9Re+~+~8T>zhJ*u`_y)Eu^;3X0GBI1}L>xuI@|Y0ugw|&iqR_Bj zR=V5m`dFb?{v{nTd25W;$lj2jB3cM=BYPZf3&p`eY{H*!%4S=3+S>mAPQV=L%f9_OXk3E zJwti$AAdY*eh()Y^&1YbdUl~qF;)^i5@~fqhb4Z5Xa!Jvk3Cv<=5Dia;_KVBVBk_; zV%d(bwSW|423@U$9VnD3=BKS94lSoa0(w_nU(BMhEz#&;jiJAVgti~M~eGY%=B z`nyi2GF7h%I?k&trwH&oeZh40Mq6zux8_ztea%i|dtEOO#ROKZgFp## z0+}HTt7s$FzPsit^BM3U3OF{~reg~c2%jzGAD}-J?ag>hn1!SlE z@xN1$O2L6s(u*erb7bbdW)tY%g&obA6Pr8$oCNx~ZnB`~{hJ1_08Sv4fKwRb=g47W z72=wRb<(>2Fj2Oww*L2(=~yfHcIt()#BT28-{`)$=u+rONp>C9tajft{3*}q&2ZAy zyl0_U?A>+LM)_M1K?4WT<({ zy~Q;olGWh_*J^*KFX*#jojboEt)+4}$fQ9}rP{n;MdZISFrV{l*R4`OaBFtW%N1at z`mofGEi3uxu=$W_pa(sP)jP`lhPFU`$*69yd4B5a#rvi=O$Ta;m!11*7-IdH{o_po z>c>p1!Xs84pFGaJvDYI78ir4PM;k!F3S!*$N0Ha>`JO?8*x6xs(3hXxn*=#}P3ilr zW>9(zZEeHJwV(KVNlBKZzO)l7a=#{$vsDt%4ch)d(5a5Mia!Z>MX23oz0RG5CHX$q z^LaAp1~xhvx#N%NusY6LElM(A>fqNfL3AWf8|r4{t>%*(!I3&B8UcEK5Q&hQSAJ@y ze2XZLd>tz{iDlP_1aA%h(^Newq>#_3QoJ z6T+$x#T=8bRlmLp1Hh$Btf5W-dy+i=c_5w62^z08&pV{>$%vPNT*C5edN}v#7@~>o z5b2a0fA+zzpBBqV;FhtShiW7W+zL3M%`fXz;qTx){|^AiKsdkNLdqoo&MtK@^Y7~Q zzuiu2{4qK0V1laYX;uJOjM#VD%*XK7a{E_SOh!{i(HYAzCaF^x%BYtNMziGt0A?Mv zc%zT-YGE`YA1eVkZU7ev!u`SE)}ulS~hrM+N#C%*thnai^6PIz<($I5^@=L-6X^p~PQg7#6#@?GXNR!@XB#X>Wr@bQhMe6hWw$4v(=2fD*@h zR0vEa<8bskpWr^@>Vg9Y+kW+KI5i6V z(gxez=&^LF3hRb|1j5~tT?T-ykewC^0QJIId#&5RnOMJnSV8_4uvZFzTYyrYs{Q7x 
z-?R@p{Z`SJEewo00?}M+!ie(24^~(100H2qIb4QgXvgWfZDvp0Z&zZeQ!e-4PCfxD z;&}(CZpAG&y91-ll1!%|%=uwD-O8_z1%SuS_-lUZEZYsorg1kvArza&)8)@Q2a6Ye zI6v>kF*1vkCF2Ow*v7oS3sc#_3P67VJyOj~UEA;WQeMxV|2sXF1h@kLag{J*N=;@p z6acP8g%M>;Ln_5r0NM=_OhRj&8nZR5L;7p$IlX1@=v7YuL(~Su4 zwOzso1&=gq676r>6RAqzi}%dKVt#(|$1{n}z>GmxgoX6Qn9Oz4a)4mexCRMIg2Ru$ zwz}gHAz;^~hlJux#j&U{p@THh=;#l__7!)=oYQ}X#A962oie#s=@(H?o0Qlp58C89 zU|qwh_;YK(Oy3r_=?pS+udl7|=Eb@?bDxBL1bcv^V6T4_m)~e@BOR9Z`i%FvC8vGz zoj$m}qNO+4-vq85lb`^v(zV@#&O(2UU9GZ+L4&2k`dQ(gE=#g{`}=JDw-*->$>pFo`cJ?A zPjH#337#_aR^wK87jm_4if`buUw0W|dedH`J)Y{`b#|mi39s8tkZYmK?=B!7tY$#d zK~b8S22E1(9>-u=LHog);*)-;9Oz^?;V(nDlMLf{I?POwWn+D*PN*_tn$~H4(o%tB z;+a4E!!*h`I-~Czf4xc95$tvF*W#Rx{gQ*gWF4TDz&dd>iouTzu)%LNCd_2r! zM)5c2{ibp8bD-Lzg~k3xv4ZwGv#(%pxfNJF4uu=ny+6mtD0I*)`zkb8<*HlyIpePn zfKawVhl3U);JZXidbtkzmhnhMYwQH$yLoY)?`Z*+EDun``aP5cHTQwMWlx=17L_Lb_c zk3j%ZtcG7Te;3ujk=B|j0^J#)m=62McRB@>3<~}y8!83BElscRfBDsA^R(4I;qoaW z$pPLa8~E9Jj*6Odn3{y1@0N$FQy+x@bnk8OfEWos$~zq>q-!>1*0M7liKq8eVf!Hv zgB{gSz?b1Pm4WWF-rH@ouHcxiNo7jYY_RK4>2dFq>UELr60t()57j-|`CvAi%qp?2 zl=Y_jc#&?ntsgT#I{%XaRS}@Me+cI=hAI#ITRrd=nN0>O%TcHD**}0g5FE0lRXD41 zoh!=&>L>uljA_m?x*Zj#n8l+X0b7-4_#X=9BNz}YYAoR!wxM1_;<#3Rr0 zf?={A60g`)Bb1Ga>0XVH__Xg4PYtm&$9_i)Px@R`5Hwh{y8ZMKkSh8c#V|EUUxmed zi`DYj?f%^NevIZqllFM7Ow*H}=64PfGenb^y@v)Ku~UEYrcT7+hX} z2f&ME0xldF(#}^dq2P6%mzH$kw!6A3uv@B7{?Y-$|t;Kn&FMDTafN5=VNl!eZ=T$ocEMNp^ScMND@V75| z2l#-(uDXD7|# z3!HIjF2#Q+0%YLy#$AQ&gg^{#j&=msk;*Io<$PScaeFBoO?fU%|6!PZ|NS7gor>9i z9+7#{`IM<$5(IR>*5kiSqyoKyzccM${>@j#Uw!%4#bJ!e)C!5HUMBb-zH#qmIx!P_jc|Jop1s5w*We8% z?c!f)B@+)V@gwbW?YKLY=w;qv>WJ?iv!lc>j@glb{Rqcc?}$U<+X2odL&@}j9gk{K zM(9GROtTMO0*g!N#sBj!gGfDA;8zLz;&K$q0X_Y5YjjwbI{4hD41z?ndSPY$U?1h{ z$x-od{?D%?Js>Y#R_OovzxwM)JvPMb9<*EE0E={ZF&{Vnz>oL&?Fb-`V^$qSLvN3< zM-4RL7~1cS0@q)CSq$760OYwmJ1?$q7WiNP@BdQVkX{16YjC11uwJB{0Xit3oX|J{ zFaGAIt)M~DogCza9uOV!XRGuRK|mUSH~QkUJ&cIK15sgs0kKNKpW$S7AAWWpClQWm zT8CU?s^@Mu_ED{es;~wKu=xUS`4#a5G2`E$cpcNtPyoz+->A=ichI@{&&A*NFN^<% zALS<;Dh0qTjo%*h>8d@XP-pvVc$cjGl3sj-&_Um>9gxQY08d;M*}esOKXR%Z>FA6B zJrB)097;G{H>JRj-;Okc3DcCAyB~RPb$M$b;1DPC?V5gP`;!77DFI~CU4!a%2Smri zJcz!da9ig%DK6Txkr3-hgKqn$5bnCt{Uf%PV#cS-Y?J1uJ%sRfw;L7!nt(SL`65^iG&*4FD6`V_U&l%*?SC6=rv+1p7Qr@fpP$?96K*k*(T#sydQ!E9M6$>vkZ8B$t_h@IC@=yS9F6a$Rn@pOD z0)x%Do)%5-tA0N*1fsoCi#cYvtfNie>9z3APTM;7>QbXt1NUul@9O2TOG%%!W=d;) zlfupIzhd1yOoPfyzh{}EFZvMqhYV59#r_5ya34}!Kx;1mO7{34{zIsX!bw;FSV$fY zh3ls*u<#(3NrD$0QB(hY1o6Y;qvG)NBxd1bSr&BI)LD5$&`jDoFPn;!_x)LDlXxFU zqe3?PMwH*s4~$A;mxU#NEx^Y1g%(fwzEVcK2AF99FhcmpP9SD{TX<-7NrLQQ$E9ATTwd! 
z_Q<)~N2&l&;Gdne2cA>Td)lE=0Nm34!|7p-!G&FfrpMFT>^YtDm)wR!iOh&r-Srl* z-TPE9VG>KCOm{GBV5IwRyN{|2ljohWG+%}7hkyei{7=lt>1FyF(|^rToq2K@z!hfS zM$bJ&kX`dn#!-{cIuc-30%!?vfXV&w$uTCU-MFV+diz%y@X44q@g3$NOQJCw1?}m< zC^&oQaT_k;#&vCYzaBH)(8oN&Sq51R9eJ@ES>fE+#D)I^`eczof3Q_eyc9s8KO6zd?31r(&7GjRaW{!3 znUi)kPlG<>vW}is9kPD}6-(g&(wpOfIKA|bcr2c3Ht+jYrNavU6Wd+|0E&T$h!;x+5HGks zA~rbg(D+?=8*dfPRoGn!NCV^zbOf3x0w_UN3!qqT25lHXo1_Q!w+6>mST_WuB23{H zGp)8=%56BK`@O2oYR9wIUSl*UXIRp;C;3|`0G?#CKW?S%nKI&8xVNh&@2H%Ih4OeF zEvgSxVQV1}=KBnQGEB~f85Qu~jfpw(o(B!$7BM;fOEdp%7)ztGPtE`-5r(iYQpWA7 z3?44zZ!MM~q*Tgfx%Ee2c{NFGG2>L!*{>UC07LRJ@>apV=KjO_2v88nL50FY2U|EC zr8x^p`D}6p63wx7Zs_$Kmn~b=${$};R?(^ghJZzrwloGE@;uSoj8)iv2t-@*lW;$I z+;)tK813#Dl&(i@!IQkKNR+v6SbRp+Rdxud5vQ*p`s;YL(!OZexZ*WA_UqAvCI351}8*C-HNQ zzsKff((`?$RoGq#IFQL;$^e<>SuvR*x8h2Sa!Sny1Ny+DUoe}@?lhZt{g;U&hJ!y% zN|Btsz8D{~H7tk0RED^TB|EaV|hlMXs{&Zl#lN=X!D(2NomP&*j_06+jqL_t)`Dr^@7 zWHgOe1I^O)F3nn5xF4C%KcA0^zg&!qSD2paY)WR?SrOOL%CJ6Eh?#$wx0BDG701N= zlf&3s+L@fso_|psV*c;G({B9~xU1>EQp7_dWce*;$6WCmRuc3F_ox2s`A>=wi+S}| zFdX2K8pnX+7k?>Uo}U*luKLA{X+Qgw&hL&`5jUQ)uQnafK4rf<;Jm5F4-uRJRr#aS zzt^<=CCfgv2+HWj~;^d3d;+VKcUMqk@7*4m|Q}}nisX+#GabSO_a{A5KbztFx zo;yTgF&I3HS+ntQRNP!%6)zC}FUJ?f)fGzt*sAJqDAQ5L13F6u=vyOZ*qjYD_NZ2@ z8u-K=EVromU|Z{T{7s(-9@zfj{1G9i7Az*up-};lG=LAl<4ch>Z{O90Z4*}DK0dCtDf-h@h zCgf$p#lO3TxR;NGRSl`fE@KgoU*lk}55ixe@v`4xixbMvHdDU1eVc#cZEk*Q<;-Fo z^1A+4?0ygmSIVaU6}Q=R+fDr!u$LrLKd0e#BcM+tCE{I>B=v_In>N}x6l06{s% zJ1w(RLM4AL`zQj$jKgkPs#S41580xh_FIeisj{a9K~f1Qe>-G5Nco^Ceen_+#i?ZZ zTJ}3>QWYu)d};_(3V=@y;p+YGK)@ke*

PdrZ+BVhNC(a_Ny9Cn-=uc=BN0@`DdN z&gaio=^7y5Kn5Qi4*s0&)zP080B(EZ9@K6{w;_;Uk1>gkOam`xwRmX~yp%3SWN1VrvDEltMTkvy7!cGC&{+1&}wD^^3`K5xGdoVNEh;UdcK)A+)VlZLeSB33|fMbSCx0C)fZJ-l}Y{8Y3 z0&SRX6-y!XgI@||n&>YTGJcjRh6I7f7bv#|2>X2&`)bjqLPGpb7B1;#X^D!`b<&hP zgP&8ssepe5EpWzBivX>zQomCmEz4x(E!HD+0xhweU02Xo@!IM@f0Od;a6t^n zLm}X+RKlusap0l&@>okpCxk}0I_$=R&(X|XAn2=h|j2D*4>-?`2qFH%^bR=S# zm%eCz-72Gw0KHd5foTNsd91O<-kakx5Lm=laoBz)PRHg`&^pj7 zjx&BQ^NnSniT+lWM`7QX{cl)K6&99j9v1WSlb<>xjZI2`@b0f47U-}_fd&G#+oC>} z(EonTGY$5Jrw+2k*Zk1ynxBzg)w;2C~Juy20bjgXdunM~k0Y?|D7-pQ+Lc0_%l9r2t6qto`Pz-?R@p{Z`S}P^&n`r0fiuYDwYatMWRylHo|05V21y32CEbN;)_{o?q zjCYS$XK@nz3nmkRUvpIRGNc>b_unhGGk&*~TQJLGhQJN%Wkd}2Lz(~BF}G~9bRmA% z_G7s+k88UdWqh*($SnM4Cy?vcmz;03=X& zbE$_GFgRn6RNRc^A0mnEAB1U@8m7)9j%!s=hnbi#0~`uCI-KX75jKdQS^1RCtn%IT zti?}#DVM`28(6+RKA84 zgXK4R?0K&*6x?wzkAutz&FJ=jmxht zo2RYX^LDX!I_*slU{H@S^g5ugK1WPBV4a{yQ-viVAi;AmbR?9}Y4Y#d%sousbcW-k zrRy`@Czr8vV+Z>stN)}PlZjENG-&=94zYALg{k)$Cb62KJNu+lKm|W;f7}3G%E0MO z-(k%Dul+bw!~_+4R*j?1z<_Iz%`$1djw-v_C5M%cnnj0}zz0qyGn z4h?Y{YvW3I_;%cOYQ9s*JX6S#bny za2#eSEWGf(zOdMm$4M|PMcl5_C2`S(H=%>>Kr=uak>ix>YR?`-1y!i4(>jWp-^=|i95b7`KoX;V_$I#_2i)FureK@t> z?g0Kjd=&s3|3TDrw&sA=c7SBylNmq48|pD0nmZ1lbj=tL9Z=(`hc9mR#nm9-I3?4m zuxUP&-MI#k7W`Nc4^LO55FU%(i>-i(St;gDO*F8 zS1EG!>?eT0Vw=YJQ;t2)_M2gmblp-RiPwJUl56#ABL9j(yqCs_>vcGN<-ie~Ru5!5 zLkSmK!!y$t<>8!O;IVX%A9X>91u;zE`OU!uX;66djLu=P}M zR_WSB_}@EZ-9OItI?!XI=e@p(Av=+6_j*_^Kxd%6gZ{9%xw;7&4A&F%)HWZ36z7bC zH_b$Y^$P(7VrLS(!qX;GD3FQ*NwKlDkt5G0w!-F@$(FWF+YCf1fb4TxiR_~{Iz;$C zMIoub5OE?DF>6fcj6gKO~SXW_DJ>s@>2x+SW2MdL6e6wtrFpOp1$80we{1 z_h~s23V`_fYCnn#Diq!3@Nwo2#0irOm)mlw0Ep!P-0KYECbsixP^g}^Q8OMZ zL&|__b@tg_Ba}`m0L07JSL~88SPu?J(iE=6c@+xwsj*5ga8Sp9N_p-NFYaN0!}VFQ z&qGnp6_HSSsx?FGxWcTz<+vK8tyopMUaN4UzV^3VPSeK@(l3>kU7VWk;rOq+*JW0{ z9Zpz1@AXX#v2={`btrFhocsXXT;FhfofH7HakMKy*4+OLC9Lf(zoR2$al?ra$4YV4 zddBJyE|es>qD!0D@b@hf?d)DOTDv;)Z?NC*wcP=tQJkQpwqNQfQf1_fKC6S|P3W8V zt?I_!ufa`$JU6R-3A3t4(J}BnqZuw~nm6^v9Y>3`(572t%^}xi?!8iNK~9A~*PN;Z$V&j6adKIJ z%Bzj!z}_o2rG#04fq-d#zudxEo%Xp)Fh*(SQGU~gP#7N}1Wik(B=tU9$TzMYQd=hl zgr5HuieiQXoXC5TXH!SZW3d83R{XRyM_S_&fD>l@v(=IN!mE4?pCizk`ErN#RCmQv zSC6k70@17@1HrD`Lh^D42$ulBZ^u|9KDs1LoLC1M*xkf?m$djo-|0DV|C%)hnWQKL zdd;P8USG$@eU0>2x;sDtU_ZZc`_*y@fPHXr*A?$yM}PA*tAw-!06`kl`5qqsHUHO{ z-dknnI$CCx68^xm(qYC5P5v7Idx{p*-3nS*Foe}YG&Ufn_<0F{H4!D9_|W{nKXQ9W z@B)0mI3Vx<7O4t51_9gLB>*{?OH1+ChL|0(S`Z|a1R{%8I^G(}p!`6Ut_A_+xLWOi zmvo@F&u9IWL`m-k`>s1N>uwguH*P69yhknQUkP!Zl9Thiaa9j=R zAMa@i+&q$z0Olz36UqRnTrgg7Z9NRwSG4; zhJ2ik?*CgNg5|QkjS5{6DM&abgK3sA{uIG_22)K&rK2$vkq{x8JJoO1I(~|RSMU5J z5U{BOCuo0{yfE_~sl;bV=Sg zCLRJ)DnwWf;_p3;5&(z-?OE5priB)%+3sxe^nATNH3dL;`~T@dr#G4o4jPkYeKMIG z7xmc@?7#_lpk^r3=l3wDI$Q<<5;;x&HFry<|2X>LI6QJg=mfU+MCu3TnPt9KJ@Z%) zaF~LR2{T7$)XeAE-sI_+bKK*7hMcCO2mBS+{J3~#;xRul6U<aYG2uV5r=oaizfX3g9~8YvOeZve*-cvazfv7O>BCf(#tB6XFbM%#;V3$`fLH<;pR~N?%ggdPPQ+M2 z+2?s1*$7R_VG)nSGHs9@AEk*=O9-^|s_uGlkU?_p`6}!l1Z-E)!*NMV0LLW>zzDw~ z9hLxS4J*FrFfamn{&Tj>RY?VbQpz-)WtslS{ zv(|scx{#O~S{|a+lXD0-5ITT4plR}-JpU&j06P9t+vz|b113*7mR7^v9DG}sad2{m zBb-cepkt#D_Ba;|t^?S@2$q@MhpK2EM-!)2e$|sidwpJ}qV`=uIvBc#D%_IavCmlE znGx7phr~3O^(yOU9cQ$IFgNOOPeEabX|U$Ot{L`S3PhSDFDgmyQ?zK_vXK>dG(pZv z2A6^uv-N?(Nk)hJeB*pnbMkwf9ipvkr={1^>OW`bwkDr7)9H1UlpDf1fsw2 zBhwA~fuE`0sG#n!G^C5qi0CVTW#}3v{@1ZoNjy(|0n=Xko8@TnZFj#J;e61UdCh(2 z^pQLi=*Js}2NwH~eJc8!%MB>MGt4RgRK`X<7H|IXw?FtDZxygAVN-nYLfrF%wo+JP z*?#=#7%&~DG31?b1boU(U|TybYMJGj87;05NV`kN@_nAE!fryqu}WVwJrt0v#ADpm z{9hh9?NXzXTt%fJS%_3vk)Toit^)!LKTjv<@&9?ddy0F%FVV_=O~+E;uRQ*LA`}Q_ z3*x4?@e|fu9^imWM_e-Hhve0IO8iJ7cz>DwL?x7xhX5I{7dcJnMIr$l9d$Z#= 
zJ^oJ^!IB4nPgRP$y%+Wj1}$Nl+b}g@G#hoz{)r7dP8+!{6Bp^-Ui+YTMd|G?!6aL5 znFlQZ^jqMTLPW~nAlj+CU*RC&&%qETLq@~x8D%MSOvI}~QD+~OG2xX9C4kNDlR}r86S{SjA<7 ze%oY=jutVN0dVYjuWxLiaCI7M2K|X;SkfTdHZLV}Y8SvqTTK(a^}zAaaZwy|%#<%u zAv=a%)o0QzlnFe#*_GeeR@wN1iPMfpu{#Y;0O_7Zzs(5E`D-jfR04=K;*_`}=7bX8 znt7rM6$Dm`t{{Jhz38Gd?fnl>)+6Aj!$0`l8teb3J&tw!w<1`p$Ay3l zjON6rc=`L;VXJrsk(1a-*W8-xfGIf{b7w7c*yw>i7qUt}fItX)3`%Y__2Oa_LS6`a zjB0wmc2>$6Fd5+-7W4VsCrv>!@r%A50;At*z+pwy380K8IBBZj-+*xollo0W%Cb!Q zEof5q+=Zh2bTG@F>PCfuxnLh7-30jg>2q54;`^&4Am|}ni=0E8a-Q^9D93e~llz9G z2nvZQ1P&dJy=MEzKc9_?7gtFEAfux)z@-P`&8ornbjD2K{}>@onxjegDW2Yq&xq0) z&nZj@D;ewi*KXC4R?j~s1ne?lAp!zq+#X_Tt&;K!#wXX{c3KHXRbV2Hh1tGB^r{TE z)-YN`eRqi$c>0xNU>AHHK@_#P8Q=&`&OdFJD}wqjW`8_n;lR|O)q?%)+w&3M!s9

?wO?Hr> zk?W2nq|rRI!;Jewh@-h24;>dBAHQSF%)+2MBSF23XZz4~?psz78wIO=HWmVoMOpwk z(V%sjJ-CmPUTK44;~u-!xJB=*mZnYY+u}?WDhR9#0#8Z-Fq|INdT!QQ$JBq8g#YC6 zUn)LBIS||Z{n(hhF4#Yr_ox7OaC64PD1l3+%83cD0KOxrP%<@)Q!t!mB>%qPznQfK z3nc^Z#LX<}yQNwvvlpWL@7|G*WH5{d`3R$JV=OIDQ(JL9-$!<(;Cs^hlMtdHqR_8E zRLU$Xy+0n#Am9Mf63Ppi=7?OA{zNSAQvtw#6`mjjqI-aUS-0g_BtCkqazINGqqA^% z=GkY~5yW?`07wG&!wDS6@cPjgrRh{A^?CdrgwVq z5pN3gu*_u{LMae+;DjgV7d|YcI3ay>%rrP|N{em7t!S_)b@j6>1f(sAKTgeU+XsKl z`HVUrL7!zmTRm4nARzFh6ady(Pmj}@3IH7c)nfL)^7y}o&M+rZQnd2GwaLzQ#Clw2 zJDj0&)+3lL6amS+jgRMt*HDh?6%P!7%vF*)10tj8tdp~9E(dTxlj+UtX!Gq4T*@c? zx!@&jAD|$^X_UdFiDK<(DB}=>T1Y)Z`lZ+SUn-<^|LN&5oUQnvnQLrIrn4)e?@vsw zN`U3Qv`8|t(65qF`qGi+P>(LNZ<^IA*oAPX!|;NoUQ&BRBV@`^#!`P36FSJBU~wcu#*YRp{eOu_d2Y01D~~0!=PDJ}ORnY$%JG z4sQ9)ecDU*za;p(%*Xx>P3BvEFO+WgE%+_p`;@1w_xpdJ@_XxfcQhEUN;s8Fj6{*N z%tU;#{Ue$;a^SJ!V)C2A7zr}U6AkhD7K);{seIUilC8q-K)`-&7gy2f_#{4Py_P1+ zT*j8pO+Xk?!R1Lo3rgJC9Y{QOO=hf~tl@KW>K1{=-~9UTSx(rRwZ{G7)gQkY{kHfm zkl)Slh{It6wQnG@0o(j&x;4+Skru?F2Blq!NrobEXGM znr5F^@6XJC@}({d?~U2W@|p4zcTX&g&z5asu`GX!HU2%Exw+$rG)Ir_DgwN%y!GzisP23c z5XiWp;wUWD7^7S#-(VYz*bb9%Di2c`T{AA_xrCaYwMMmZF}wNpU^Y#+7Yg>%!$$G^s8M`zpyz*G8=(l`yfZH4*tR=>cp^2C zpNtuP1v=`igDag(y~NSmKfW18UMGNY4M|WzQ=#v%gC)@WvNL6d;S{G_lW{Q`4T+;- zJQ(7IxfRa;>S%(EGqhDnj7-mL!mm)5;hc`g#h7wWMvPY+kB8KQbEEMn>NZ;NI=xtK znNHj6&*#H1byiTeuMWn|;;YXwoo8g|ITcxkSPz{KZb@Sn3*cT3@N|wxYZ;cb|M(0? zgJbYw6wb+XZe$St;nnz-cYW-Xv_qs?Or0@tr;(rFH0xh~(TW{LT-LCMH|;|lDRr3E z4TYmbsR~aX0*hD>4rn=X3hs2kp%d^^FZ!C!pRr!$3Yw;4F8AOPb3?&a=I;ea9##k+ zDj4!Q%it237G~cJlSL0CcDYKAGPpxda$K1l=DHN&{U9lo2D{v9ztAI33sgzV~cmuv|gE#_U`G{c1wb8k|gJ-`sKsKq==GS{e!f@)O!4qT>v4 z9DAw?b?RYL8mX8xQyM(OBkPbcNFGWj$y$TRAN^v~>StvLKym?ei&pq|wfgL9dfFvj z?2LxEq*?wme9`YlfBWM1#ed^?2ScR**uh49j4C-m8D00^Wbl%(HPv8}PZj_O7$uYA zeKKASc<~TP6}Alma%>J#iAj^0F}j{LEC6IoOXf3K0HpgKvz9WGhB-gxfhVnp0x1l# zQ6^v142I5oHaGzCB$NbmUzXpKxn8CQPg$ulST`Pl@l4Ew${a1>+>biilBp@T1~fQ3 z;r)4*qI|`z3C$l2tO@{l_<2;}kfRUgB1SoHebYxyM&oe3tdDd=0be?!0JVnzB^^?s z;W@9Dw|?ZM)upEj0dXT)0GKOvOXhXcjSrUc{kXY>j*IpT=e>U99kscx4f@9wWScC&9hoZ3lO&@?mz>a zK$9lOuMANXsvwlF4qCLORo%E^&tjGXz1r*q;>#XLwIO9Z1LWtmY3-yoopsxz{@a<5 zjjO>I}zt#ZGZWuftId6XKKpCPKWm$q><42*_g3c6t<)F1q;oO}0dhBxF`u)ss zmgTm*0qj&Ct7M#qg}mpjW-I}Kv1EB+1kF!9|ITys7yNL>R~uC%I2)-TqH@6)+mP{2 zXEREa_~94b@k&|T5TvpbrI8Le;q{ZXJG?mK6JHETR?dd>3Jc_Z3pD9}csyI^p zCoZ!CT_^^;o`oVO^BJ1PKXWkZQ; zL)HRFGkHba>LZ3o^MQz4g`I#vG-cVI(iiED%h|LBu^-1d=tEX!??k0HrV>HXG4sSB zd@;ize1tQ)3a_P>158Z~?E;Z!?58d*<|XnHiP zqHC=goKO38b|tq^QrLG!PUbuEoO58S!tOvoK17B`fzNy_2k_YQ3z2MMT+D26a1G}a zj(Li!T_bK`lQ6yErC%(!?P1wN3GgH3p0^Ph9!tz-&U;+ubCbm|nGY6I?u+34aICC2 z2!>XERz-l!tqKL-x2_;52|pub%!jvjXpJ<-axTVoeY1McjzgftrHng{S6T&VnyV#% z{xMRYkluN`9)ylQ%d435jgF|bR1)anx&OcR_(U!0jmLRNk$fQ1&`kMF0vvG%a zZ$l{SOGPpr*5asMFHP$=wJtV7z&|ODlOGxC5Dmh$MJe8UZwaZr(gpLGR+g{c)SBN? zlj_vEAfQq^mM4J((xC}so5w0BwGuE!;CfJkNZ0lC>OD^d0=ujLsNun}xqoodEvAEm z-lSKbOeV){y>q}Y*@aE4Lu?sRtFR6T2uNd?;+D*~j8m9VX2yISAFAH*=nz0}(kgKc1_99| zNBh2!;VZ^3$1vAoI~zCEBR3iEUBrJ5?;V2f_)an^6r3HND^lq{p$OnZj;V$ypFa!1 zci2~>Y&qpR+3gAzn93+P&y}!rBkm47Glg33<5l|jSp_eOoRGCL}YGazO z!frz#_i~xD||(CQLdwI_2D6IO39NFDX|}x2l#^ zVKW3|PH#rZ>6`pcf2hen5t~-8l{`?~9)ncb?D_dkNn@rArgLy}Q(RqK78h^MaNsl! 
z=YREP6F(M4oB{^>y(y)3;FsZiTkb)>A2VkcXXnMBe^X3wq~$#y3tk`5W~_fp3k#W>rMcTR zXF(Lmp7ok@;Y>@T&kq{KX9um~sM{n0eTsKw+|VJdV-wc*un9Wf zn-yOiwgP8+3}Py?bPV|Na#WlTq<09aKhnQ`44!re`^g24`)~sneam&fBi4Ugo-YPC zCS>_s2c?7Lv&~8U=$QA@bblNa7R3dQH?N_;D&Nk@OM|s2Sh09NR{XVISMVQ0v#!sf z!LQGX(O^)tI_;v}=@f^j$3c%V#@M(JN>_m0zq$^ZJ0*Y3IMTno!ihLeP;ew!b3KQR z9nqgMkWHsm0Ht$d8cnMy@j?fWpaFOR9VV4dOZyDzH?q~w4nZJj5RKkzp$zLKK8hht zG%8N_5McJ2vBOduoU(tr+)}5J6Yyo5`uh$w*;ZF&XB7Ykoj#r&Tc<^>@g*(vck~qm z{(?0gHPYEB`5siX85Ed_HhIt^P>6ETmi7!=c=b4 z5dsR1GMk!shrmzg>cgzaAiJh~j4ik`SqxE+SjxM7+SwboY#I$mAvpIhuLDOL2>*MB zLk8AmeAVCW+pFAlTe&Uk%~hYe{CK%#7Hl0Y9^@uvD4l5>A*iPv9csl*=|E4W6T?H8 zPi`tXPoL$c!*N5qd-I%^)YhN zYasYHV4Tc1Od@Q@9KeD;AsO zwidx8$2gIGqV*-n1nK1%n(QtD?h-Mg%~b}8n^W4}+c;H)9fg2RYkSWa(`fZTm+%Py zUD#)*dX?KP@_`TFLMWg>3ejwVz%W~O8PdC!`yH?3G^mOl+toHT z+NW_y9Ky!CAmF6`G%QJc(`Vh1Ov?cU8B^coY?jx&chz8dWPTL>y9oN-QUPGz*-?Ft zs36E;A)8NCX$64~AfQmCU@gLnrebQ0VQ9TZ2;r{H_o9w$U4Q!)!+dDD;2X6fP>7%L z<2#L7;S7=HM_gkGe9@%uh{s`ZkcyECNM6QY#hbKOc{$VI=xW}(B_Uu=J+%T*8Cw_5 zIACa?4Uv{beKxQzQa|aMw0A;7jM@6c)MJJJa41XpBjmO#>yewsr62P^-n!D1DAH3D z_4vPq2G5_WGP?o+`nfbfepx+sw7V%$huZF}g`Zw5m6A`AG^w^8dbBGbtm<)h2<)r^ zV2J0&E*>1GvAsj_3kLWTz=Eeo0-dJ{>xFo#oeDQ3k~{=hXoiqpy)}b|@J*m^GQV5qoDFWx~?TleO5yntWJp&Hk;!UCJFb z;wPuJk$kG{rXb;LqQZrOkqn3Mmj*(-AS%)n z1vCYJ6^tHh2@nneVV0^;LEt_R5YO}Iq=2uniwdPKW7wE==wq~C_JO3#Uby)_A9?>? z-eu3#p)&6fpbe^xMFZHOK!*NKQ~;1_y%0*+XyAqNzTYlB@8$A*RMkVC zvkh~MTgrRPS7CP{Kvz%va!HY#r&RgZLMf&aK%CGT&s3|uv?xe;R0y)WSn+9UwzCR= zPXn&%twKNo<&T6%hQ*+OpJJM!Q}7R_s!Y>e)$guAAch)#GH*6dx2=L!gu=_fjhVS| z_Q_~{CNSb5Of%9h&R##}GL~G9m=)7hG=%M74EbD@HV z)$}2x$bk4)_WsCuPT|9|X{)~^=nK1fubFD)I?!flH1KZY=UcD*%gjr+r^KYlU#ZSh;E^7e;H0kHjT`0zC-iIK!7 z_9mqSuDk6x^Iw|cIveM#Trlh0CjqO%?m$3ZOomkRRcDJeJvHy3w7{ore1$XPd2`(_ zF3-=42J6U2%s{o;?c(UOlVa~+uNd#Ku(rhCdbtPBV=$mH!73ySt~Zt0)Y;U<*;#S^ z>P<0*alSggDsFHHs3pLCnNK-PnNDXO6+X}R!KD|Z8xiU5 zUO+-X>5xXGr9m2&?(XjHP(osnl$Mn466wyp_y64Y^J?Gib^XqqGiPSbcRmwk-0k-? zM$^@{jK+YJEw)PfiQO?4vyZpBtt)zgm2uUPkQ=FmX?zl8MS+PcBQn<$HYU;aS?!P=)vF3N~FugIFtyi^pcxJOA(6fITHfEcS+e1nCiJy(dYeF^8AC91Q$`8B27)c6#1ws4kLMWf z3a;T!AwH_|2|gZT(<%srvq0({qU12|?~PR~T4GI`9QoSjGeckH8)^JO@4owOo#o-@ z!1kh|$o4fMo-GBL&y!_GS7u*g5Vhx}e!({RO`^O}}w6)M+dmXu5lxdZDR3XrJxP@P_J2#5? 
zD8Fna&qSnlTKqjpoBt>BJDqqKai2KUn((B!y+wsKT~?WDW1QVL@2Ay^fu*@C45#7( zbOb;VxLTjjj?y^M`)PW$2FqA_gF0H&0b<~E_d|NzcAk~-^TQNP6gtu8=`H`XikCzH zIgEv6G53VZBVvVIZ97VgICt+~ zw)d;}pEYgnF7PX_(g_`p>JvPgLs1)OF!Pi=KA0b8+@;-(MwcxScZJ?PK2NOmYljx~ zgxi(ZZhBQqby`wP26OjvBh<_zj;od1KwX$Qbax6_fprKmg(Z`3!Qcq+?G z8I*_Z>uu$7Y1o;9s!W$X-s>J+vxCm3qL}AtOjBLmxuNo>;`bSI?1SFFL_0(YU*`cX z#W78~DWJg`nfJTu$Z#@TUV$i?DBai}(caq8g^PkfSGkv(sI8U0O?zXxVT{Gz(jO(~ zQGPxp)Sw?=S92Y*?i`Z!40G;gpq4HH zu$lz~Gu%vr5)Ga;DXsR26mROo9!po#JMKPp-}?P(ahNgt-|i<-O!|UfyNL02=GT4I zlFE_F9$>__xgIu;A~Q)CGAn>WujO|IEuP<*VZZ~?;NLa5E#^#j4S@Lz|Hj~Um*}$Y zb%U$pI^bFR6F!d2F(0PIZo}LG2jbTI$VW< z8v$Mi3;@3nm4(tw1YU+$y@M#KjsD)cIQ0*~2BeTGVL!{cS{;fa7xuXT`@Db%N(4#u zBlS$#angVN{+Vb1R}Jd*z69{72DWd@*3y$QAp?i2pr--C9WXzmn+vA_nyI$J$NQ@u ztDTI{eF~zAUmTz7cZuGEZu*eB9cT=M|D9tbMxIqiPVLUDC(wI zRNp($ee0v^2Sf({{(a6&<`^lrfe zN(CxP7XzVUe}d3kK~VC^0{`;-Gz77v0Wl$JvDS{)_WLSFe{dfGjP_vS;3)4mw(2e5 zP;!c++m<*cNX>s>zy1l?4*JEPLP8Oz`2F~I^Ff6Lvb5;jl&h;COu9u8?D(NDPHN$b6>@|UB-TYON8bT5^>_pM`Jdx6Hnu(@v)Do zm-3H!8(T{xVC3JI49vjl*R2lv+b5rse946h(2{hdnsp(% z%rhkAL*Hb`-%-AOhKx3|B_1o-63%?G9@q@h>I#G>LLhrnx}ijX+b=_A*N~G`ZJ+vF9{7TN1Wiw9%ue@_FjLe?3kHaLAisoC`b@5oaK2d0_#8v_(Nk&wo z${?*Jtjy^Ai}8w~zzpXQ72@GaqM$p-^WILUx2W5Bcy9)!j ze7CV%bk+O>Vb-TIB&~}sM7Eb+t~jN-ArCX{r)i4}oJGin+3SWb;3Fka>Pr|KYl=sa z!2Ij1I=|`X|I_PU;+C!7RdXA3Pd@Q+v!C##QZtMp0K92WQ1dM)Y zU4FydKE8>1bNj*Va90NEWtGgG!0h(@RY`>DuX(}lH*>vH>LeM;Eb20uIXo%iQe=)n z%zF&uIpbVSCp;t5Q0Dpx-y`cfs}?$@g3LWr$SQv&#@Uuz)zi=|%KR6LOv~*1^c15e z&7Q{Vx>~cNii$x0WU>u^#XGYwz-q$b*R`Lg`v$OyJ2r##VGpKTyo>);u+gg$Mb zxv~|6#cMYpUUHL|%l0IGuYGKGUG@j*c-&IHR=}mp?vrxEIFk&YRhESs}qIu6VXVw_**C0Y2ik5;s_l?xfVDEpy3K; z*3C$+wj+DF(@jMqD|{d=^y1qhrH-i`1RDD`w|X|WWeEvd80@~g+50y4Wh!%Kz@Kc? zi01Lxq{A*2#a$ZZ^!*tQg?ZO8v0zgYaSZBt+L9n&E-z2^@QNt^yP=DwXi%V3XKWBl*7A76r}&2K57WwB(A2YRrpqRQIk(aT z`BR$4Yt1tMd(?Qf@Bdi1Kj)`?L^w1Y?S8iPve%9`ulAP%D@ zCR4McHM=gY%*!rhZFA~NH#Xk0tkBn+gfE5cL!It8`6M8%a#{$a-gt@RJr3p__gur9 z0k^yN^A_~+cgHv*Z%M^% zRb)u@(I(H%h-Ov-4vdx?8ykfI^FOS)A9XvOSmklP*1ctBN8dgr|1geG%@10Vuwt%| zisBM~sv=F@tqkuJ6Sa&s@o~%m9_A0EKeIk6#gXK7syIDn=3y`Xh)?Zrov5nxc1Hsi z3O2kOoM&PBj`|u$ewk}k99`zle$bkW@r-Blq;|%i_iq4Yh1f@LqpJ5DhsW4lpDbZs zPA*(jSDZ)&A+uk%;s4robA+U3@93EM({mc%!2Gv^f5ijTv92k5Oo{$z{QWWR?1zzp zs43s82_wZOv%)he-+5dwecNn$lG6q5qe7KnsJ=kQqQ2pWs`@NQyl}*~O0F+-fLGXW zTpZDJ?8x1is(L?z7woOW*2O`dMEPs$26ex_YNGl8|0~Na9TJ);YvRcd_FtX?x&f_Q z(scRqHZ2uAV?TdR)!?2^IfF*uWvC@$q@3|}-H+V#->f||oa>>s;;d(-)kWG`6m+;_ zYuL1v=>l(;JLUbCV)g@D!ncol|DBRZ`-5pNopLm;@E<3ffTw>m?dl=QwGDvyyZs<+ z)6VrD+SmcXsP4IIjh5K*6 zCJLZMWC(O#**=}GFgybYZaQyoJ1?eQnXl6+hS*wFuIUsA1SYh-{q?n{B~$@0MV4YG z=reuNzJ=riUOqq!JSWK8*E>hLFHwn8??*2)X`&DnG@?k^H-FR#;*yH`x~{j!h(sPH zg6e9Ss^Gnh$2KJp@NW`GSKc3pL{AgoVag1GtgVd(Ib+1le$2Y~)ap{tBM!yp40Sk+ zx+0qELWqxuC(b&$Fpr6XQGb{1x~BUK1X<10*!=OH?P|Xx2LHLn{_$>K{!h!@5dw!W zUfz3blMMCpi*}ro@PO68HkqsSCY+r{HH2Sk{l!=G^!ICgi(`U0SLi_+U`8ib~SfdL_sV_O(|a3FCIPnMha7`;FPIu}?{Z)1atS1dJrWV`EN zGx?QD@^wQO)1nE_jQOI1UfMGI`Et;o<#_|~B(v?H3TT}}Ty^{C7<6_q-FrN@D5HEO zvnHEhSfnLYfy1kMoAY!+%;Rvw?%&q^*!e?-J52bDxI||Kh0q4SluT=()8Cw?fhvm> z-62TAjvRR^Gq$J+4fp&-WO!0=cTA3t)Fkvvh(M`9Fy-q)X_YuO!WL%P_4TfS+iY(e|g zGQnf@!y5*TKqli>h6q;`H0VEHc}C($8E@Xm0R&#=yxg75vZM$MmxeHGFf8GN@b{Qz z%yg$t=J!V5#<|}xK`T7i#qV6kYd&>9>`9VvK0l^yV)s}1>HK^JV69F=Jlg4+c_Il| zy^HooAzzRgbhL-^4c8dhLZ^_Dm1K%+J*a9xm zy9>9SiUWEaOBc%a-Ijh8s}`6@OD*eS0s?=eYV_!DbdP^g!6GQ?6-T>F-!Pq%yp9Or zb(u8O>G#!vr`*`!thX$`89BGQt}9|r0p`y0&}Kh78CO4%SQc`P(5uuAe; zrv*qj82W8{erk)=95ULODw`>WoC6vj9FvZ1!#4@nanj)KyY1;3Hq#iiOcNQjk&|mFT!)^tqxiwBy1}9u7^(Lm2E( z@`ox=Hm_P=c<(m9;Bf&1-->F@v)&zP4ye^ZhLfHK3YX|+d?y8o%z`bkiq_o^DZpe% 
z{lv=n&^wRC$5v4OSaopIEBs$r=JS8GC%Y)DTJCB`^M{f-kj8=G0ca2L=}1*#Z}ZxR zaL+k{bS&-4e%2nUxZTfME_K>k1d5E_vc`wWuvp}XJd6&hI4a7HQbEf!@g?TyHj^@b zZ7*@f_j37mnrb658#m$w;TEN&&3E)5>819hlrrp`=S|Q#oe~P zys`wuh$LWai6DgO!2a0#-SOqdIhyaoYwRgFe~N%4ad5_jJdmsRTrtd_iRR}9&E92C;j|W2gV7d+oa`5-&uYUK zNbh8>gWI-H$dy_Z&D!1S)uSx~KdY^VndNbNIPn2iwBLEdiJrEE?03C^Ige+iQ$njY zP%-U|pm?8pUVx*x>T~uCm&$TuJufDVJ?KH3NT>RPemt`G2|K=tx(DVmb{OSU#iB#> z)e|g~*g=!#zqBj%agN!-KRe{r-`J&(9^%J%ihJHX-eYs5jD}nlTv0~uZ-&P6>s9N_ z=+@Is_gdLcbNG5S{erx+WfLnXFWm%Y@j~D+)TO2ZF@7>7Fms-PYJdBnrLKm)HVAFu zml6}xE8&AoGa`EKEfu34`eVaQ3dzfGK8AJq!uXd7Er@p2S&81X zT3?qo62p0X9dm4jXz;IYaJ&{o7DT720pVAOHX1MM92x_&g)}d%k9qZ=_|*ysn4Gn< zpA$)?YG{L`Co%#Z`| z)$F7q$zXrdcT~rQ&7xmM_*s{U@+)*~DAC%L;WK9M)rlVU+0TnN3huwR19-`l-ha$F z7+#}XH@`<`4Z29P-#pe8g#TQ=8vMDH$1azxfrW{`=8 z1UFnagkOcQSJR_c^SZuKmxZ2=8MHj%m?z>M-?1JjeCQz9B6z5!w-!EJlWkk2Zzo=4 zO43Nk^ai%J`uUc* zLi|)yj;)~7$AqegV(rL;Vt*&+Rv8Fyr;8_AzX!n??Qc=jLGNL2ekIwW!T&J79}=o1 z=tfqmi0P0NTH*q=9Z47r+Y>L+dC`t|BJMOemY$>2DewAkze;RoKR0#6YoT1;^r38> z4Gbs@g~1}=N-TvDl(q~Y|MT6*C zIv9#5@M`iMqf~XwD9GCm80R*>690Knekr>lzvo~2f9Gb+Rct|Oa$=gjhn{@F&e@bk9UeK!m6Ksugf;Is@7&kqZH$@_t37@V{yv&w-p|6W>HV#vk1e{T zvr8ufo^dfJVX8cU?(`$hryrjsIXMxJhE5TMJv->!hZ9#W`_6eQUCp=7yOLkU=&o)N zmM4apTZ*-yF$xu0Xgn>iQVH98t#7YK-p2{(U4I?5QL}0KxLZnMcQx(BPjMvQY1OoN zt)5j$MlqsY@kMibhV@-MElQtD*U9ab2?Ue@f}p$w3f`~r#W(ziZNC8dHEHCY<_?T6kd!6+YnYR~m+bLN?xi zvXV|>v(o%;Ij{k|K$VbBA0z~(B%yVf-O^=OZq9DUUFSN%FDwyQn7PF0039?i9sm?M zJGhjX^*RP%{}@J>Kbh%H4LGjTpUTXFiqv(ofcjeCO1DPqVs3j&xMW__FHPbomuNrt z7;)_x91OhoeyjFRW}ytV{hvv{|H;3h?x(db>p;r4OZLw*&fTIz` zv?7Npne)ls9)?2qYcd?a!z)F*M!p?O#pU>Y6P!{Sw$}5olB!#A=O!D_*Zb?rx>ZrW zZ=1}&3Nu2l`*?jaRyE(SriBvS>-;p4s68)4d?6~Wcgv^apO$)WQX5SULeZ4qtlxj!5neu1F6a-1>#DMKv% z>jlYPtTzTqh*HKFPk*dBRzUdu-;EtlQT6hZ(#AU4?o3x+!}pk3y+KdA>rsOVC)NNs z+Ux;}Oz>}>_xYd_z0UDQdWH5I(ME(GD!{&coLig$K4ZI#Fn-2I1rT&eAJ~=LCi_W# zCuO76X-lu~{Vk6gL*_GTj-Yfa-*X&}N&Fd|)!fG{TVRzS+OLp?NJj^ZZ8&~046OM$ zo(ySy4-R&+*$Fs@|M>~Y$Imy}355D?p0+#AJns+rm1*p@d!LM`AH|8jhb@s@AFD{1m!eu6}c(r18y59UHv=o%rR$OveWq3 z0jJmNwokDSJ0h_KHBni?g z0@{wVPZ6|XitCawx+@)||7~nAd|XN3ab&T3EkFHW@vCvdIBwA5Zsqtv1CzG8BkN>o z>W?t`60^%IFY#1sbJdmKDiGs@OFAgCZ0b`?|b$NSt1=25o(6Np$l>n7f;BL(8;DF6Dbi@-Anmmy0OX z4kyLfUX~4@Uf90MJnrt-%3Q+R9EbtLhz}stSSh{Zr!i*h`Hh-!TSa;umzI_~vTT-u z_8!xDM$Vg&et(t+-=G zc!7>jmc{-*i3bdhcf@=9yGt$(i@uWRpZgguw|((OR97>ARyQK*A-z;jX>lU#zZhiP zKEI{JAd}emW)G(Q7t)L5gw5%TnPN^&ThynEXHqHqBd&gJXE}6dd;<0 zaau3K#j!VqAB?qm9$voe)^+?8qS&=CNSiPF(Ev^%>TKQ^W?^2d_;^NR6wGQvljpXV z$?~PkVr_6Y7jruO$B2PO(1*6_lp-~9$R(JINek4ocxg2~Jb0yse(U|7mbWM>FNTQL z@y@dQ;z@;hCc!@_E5{5DCef38#7Qc8YQICAp5=RX=xj!lIv&9qT|;$?l-vgt%j|E5 zI>hK4&A>wE4kpVq)C3UsJPDa@51IA$q>GOiVLF|p6?x@v?_c}Gd~ zzD17mr9c<;D`vmpzq7|$?=abx7h(ekD!xu3-JQXL1FF#nXY~~kP;RPKjkR@?cxRppCwuG(7Qy%K?y8LkC9^C^V z6(ORZ_u5Hp+Ko;%98-=RGxfo-kZyPsj9Yx}FryYHyx3q6JMxlOTnr3KYz|k)-wlwW z1NB;vUtEh`zKpSK_(FT?adGa(Wp$WVa!LuKqZo3CLc5~)T-T2w8A9_D5|0i+PWPB z=B<2KmHhk+@S`m!(Vs_TcW;=#!Ce>#HRv!%<&eZ#YR%Ror7$zagv&o|2o5aLFE&>r zTKR-t#Le1^6qh)e*wtE(Ihgrs0@1jBFTi;_F(~tI8h>#^t8TupzdCZlHCF`pmHS-= z5P!SLmyhGlR#`9LKrNR-jCsVW5r|wx9sn?!+++ZUg>z}g69+A9WFhQ{Xi58h6IsXG z^J7*vE3(Pod5*GCOgvUi<_sY_{rHI8x{YXmoO)RVopw6@RgQOAO;2=cTl0b_JwP;? 
z+>+%@BC(W>ntjocN$YYc%L-AK5JKdFB!yYa`gk9Fs9~EZnJypqc73 zBkw!=QlKU>qWpVpaxv;?DB{Cx;Obv5+XglT9BR`{ZaXxtmPVEpTvWUC-kN%?cL|S~ zdSJUgt9h57BI8f$A|@G^YS7%qZ4ebfLh40vRXRHOVi>+~xex>K--k*3`>+K=)io&m z^h`A_t<`{Q>-yffh647YiD&qh#S}$l&O^t6ccCQ>2r{d4RB7h4g->*J;1zoR@-lj%$LQHl^EZh_x7{)eZm7Vv11qD=jYV~s1w7e z{mqI@WVq#$5srM^DRX-5gA{iw%VwtPn$-fNJwSeMJk5bOov9h zs%teB6<879FiM>s*BjO(Xbh~DWB09&=){Y9$4Zt1nNh7Z2bE?Sjs0!?b=}lE0|=Edb3wg49pz*OT;AM zl_?ryl5Apdxibr;mLp2>T5X{4YI^CB{qDbI{J$m6STtAN7VYYo@cJ=G*|z)<*B{=a zh>j8>U`>cMjp?**tzdUzFYmyg+?KIsIyVUqE~ym{Rfsyk+sbR90Bojy&sX8jC7i4e z<;1Y15T|F9rZrMOEatiy*Ny+UWAdGX!oRer2z)S@G4n}%PC(#XH3ipXNW*umjsh@L zi;Qo(HJ?z=WDB^C!*r~GJ#|7*Vg`@{Aw~E;sjotau($n^m^`*F(Ke^sO{g?=R0E0Z z-xS&^+K9ODKQG)CXVS9ko&4luDDw%X=QXP;jb=roLd2N(aQ?S+8}!aCr8(!ap4#rb z?ez14OFKe$=*c;&!WnE200?7YTp4Xx86DzEI<`b-WFWxz{H2u{EZf6Z57XLq?w{xg zjd85Fn@t*3praXY#AJEI@lG*=IZ&iT*#|8(YQ$EzqMz~kd08P@m&>40l`6gd$oV~Us2tcL}9ZJd?nr%CTGi0o8VWw|wVst6Wr3EVHlDAgdV@~~yRH_P`u#IP^T^>k++ z`Zyu>Z8>eNed7!;QWy@ENYT|j-PgU;+hO?!vzR99efK$DIvj_(Bh7u!OLoX_jx2X{ z^1H>isswYHBRTtr#QVF=?+2aD5n7TdGe%f{B<&lHo8PGEvFXd{qf`CqtcW&0mIZ)S zLLO%;Y4OzT9(c#!D-Q-^y%<5EM~hZ}`P|%kzvRF_e9IOs($7mpXN-3x<3}&qAu};2 zByMw^Sy0XO3P88lGh7O9@xPl-66|3ApYoCwXuhEr^O1T4 zAq~y>Gsh<`%|Mq2vHkpUu8h5pS8W@0{DOCv+8gL&uTS}5-a|AWF(LHS6W@uHE66OqGX|z<0QaVWa4pm1^=XsV%n|*w9P!q|pV+8e zM#%h7C!HOd)kpXv=#41_f=ZRc(@Gn=6dgN=MriNm_P#-oaym~3M=R{ zm!jldTrM!HDdv4LD86Dk3-N5Cr&KwPIZ}Wcyq6hxY-AiJiRAi%&PzeDra&-1zIXYf zl68rachajI*{)orvo7CzJSdVv6ap<@xhsdc%etb{iv(*2L^r}KJGy8~HgJMYy6ko{p zg42{IW_J%=?0%F-McYX!0h@1BNfZuC5)4ifokKEE?feH00k~s-)3w{qVaiMmk!hwa zk0ot4zL<3PX*;HRnZI^p7UejarMj!RHIJI$f2`v%)WBfno=etcpU?Ga25k<$3-R9M zKeXLR94L;%*Y(CKH%Lv6nJCUD+vu#J`^9;3GS;c}nFnR0fq60@3Q-!Hz(iDx$BC(x zbxM;Fi>b`h&u8-z*%5#)J8~@eA z;6bIXh8!m<6GyLoJ3va9v5t~am;Bri3g2#a=SD?BCfsJQUOE%F-hb+9r2-aCPs4f1lx@B82t z`A{677`cWY;P(ipvLvSmF%4RNq6J*VhxrxxH9y=@A>mz z4oXt$FVWJ>ZzX@R4w3v#JeIZSWONx?2@e5?l9%^%e-SlY;JTTV&Lrr@Ge%OOJvnew zCUc3b+Pa<|ob=5KyX39 zTAlWA@uHEi5Kcgy)tV)=#3#XDS9CQ5a!H2}O_6XV>9c4e(Hg1FpYxFmc852`~Bk+6T#xQe;H31~#blPou+vMRyPb6o5!m!3Z z^0R}Fkd)_}D~pA{@yGtrOebf3WrogwRcf%ZIc53nJ#y+U_~|P*L@1pYqAa#udy%%2xCI%WlKqqNNWUj94&D||s zq9E_vqG}HD0+J`awM5mj<^nZ{CjOA)8vSl0sKthtVC&YyShcOWd5V$fMH(O*WA4K? 
z1|t(!IkN_{J|py;RNbcEMsq~GS9|?M9=ShHm9l4Lcp9F2PJPKtdb2lCI~e<`!V1I#DwB0rfwbPT;d=2BGnkDw9}S8$Uw zRZqN4{&sSuwnM8k?t+E4DM|AV6d`t2Hdv%=d#8qOp$TY^e$LRu1eB=FC9QlMbUbiQ z%L3zf^EBm`%T6EoTIN5?dSk<=r~J@MBhb* z3WH#SIAP#N2y)sVE;Z4;rb4}61|Dfdweogs;uf^>u)W(O~` zrz@LehpH9GH?w4Crahoc@(v@+S%I;KLncjYG)u!F#?CHDBEX&qQ`QtfC%g08q&M0S zRx#`1l^gde+|S!d=TeMx*wRl9ANg*QrT?;F0sg)>iA)-3&S0o;COR(BnFo=dAr87t zxX&Xx4~jbtIJ6TkU(+y05;=;JF{3vzz+Q{F`{F6$m--B`d4G9ZeY9T3sd?H!KxJHR z3Z7CkbG^LH*q{W0pu95@bi24=r|wYrb?s0Z27Z5#5nf}(VKpdk|MZT)Rsty|qb_-<^?PeP6>cvW@HC}-);!-N`EH!A3A#RrLT|7>05jCy_W z7d5Eqc=X^9S|PytDN6tAOV<5WUqy4n!NPk|UkJ@gu=s_66w~}H)gqx`ity@UGI%R0|z#RY<6S%fRwriJVDG+7* zM33YrFp*F$FYJ2*MG~oOE-e;vZcC>JF7ZjIql#+wF4#*>U3Gi5zUC5q9!;o-_Zx{q zH~Bc+C4&ibx$k;fRz|itQ}t=_E#x-&&Q)@>HxOWLGGHLMFPK>sJKwHweJ?fd+aa#w zrO-G~7G|+NyHLNPFKn4Z6CrTO0E)ht31Q+9xAT}}LK^hmq-qb8J~W&Br3CigN_ zz6UOl`bipE^DALfNm41OGmAeN#7Jpr;(FWdDM=MplV_uZ3=d&r|2SYa$;LDyKeqys zqf=0(^;-nvQJIg?$I(1qgzupLmL`6wpnY$}KURC^OJMGsIL$G)a%Z-jO~R-@!n=vA zHw%U^k=Sf3{=Oh>378s4UHBnw(u-h3++?VGvy46VJ+ zo6)&LRrbsAC#t71JeTnSWS*>JkVqgcGvhgXIlbh4b$-nbM33#s8dII>sI(%DP4F}Z zB9b}eSWnO9ca#M84h3O5b4zH}1+OQ7rjV|c-8}T|D$O-?z7T|a?Eo93>G9tGT{KVk zw7naO@x-~K1=4~=LdYlkE%&E4H}VCnbiQaTd_@&uKD@}%6WslWlx!O+{~T$ z?>N$1q(m?_5<=fgZZA8Fv;$2NdF^@TbdYn}6Ib$G`s0yiP5V9sMiFkAOr=DiSn}#_|2@ntj-XC7lw#r>8_NCX|mT zhxH@qE)g~lr1Fju)^M%WGEnQo$D26lE`rIO2o08_paQUpu=23hr9ie1PV4r)*eRRB z*+nEtg`W&i-51fLt5Fafdg8w|Fj>7TNaF^`a!f{G#|WhHDaoo1>>{PVOCaFbSSD|)@QNx zbfh;-){Ka^h5oe5AHG?_P@@9MLVR=`8yk|s{QuF(GyjwpRzustqJ|LkO^Gwf65ARA zF~nIz$zY@n#3d$c{+&kd{6cE5%3g9v}_Fcjy zSMc?DmGqAZW&p;6UziEty<0MkU&!nDM)C%K_SHAf68iKj6Y1)!!zQ`k)NY zDCMSi>>cviS!U2-$*ZvT##~I$s4#UU8uU71GI}LYVg0bVDFoIM|8*h1`)HK>^$Q|{Ji~FR%(8TPu zR3Ni#aN!7zA093>G|hS{inb&vswO*fwJho0%^4F!L>BB(C~hfQUY0Ls=|fEuUb7ga1>&1w zQ%F(Ia;&7k{IpdeK?j=tiHexfy;t9$vZrCPqbutIdl8rM*h47s+C$?M;o(H>w%!{V^LFyX&XIg*`46ITjIj~FqYC)V4Ks$f* zZBKR5KRb{dwdLEpE|Ffw&C#!ms0T7b3c^-ykJ#%Zi(g0vuhokM4l>-BGL-an3)rMa z3i6H^tE_|P$4L3~D<>@)_u4sp__kfkIE8=Cpb1>7yT%p#7dz)ND&NQDl&m@R9JC@*K-ul z3P1o7v%Zx^XLgRelNEPtTA#&j*LgPGNCGhTE@On>!lZost-r`i>i$-JVP}Fd^yMqR=P$9}Fns5As>1#$xx3lmCVc za7KLWH`JGp4=fTKs}u@g!E<8Qf!nuvp+maa^EReg#rGyrOK~=TGrO;(MFpFF#Hb)P zU)@`JH$QrhAl^*8hHY`{Ta|?Wsu$ED?_bf1S3Nee%9J8P7l3fTi;)=Ckb6=s&{a#B z`o@iFUr|Qjm6eH$#YPl9xd{2p?BLk|w&gOVRxV)Ievi)$%-0<;5{%g=3}DRYjJD_4f&dz7rnxC*5$7v^C#iBF9|GS1!zVZt_Vm9!1-^eg?VZR zLOG{KD=x1;lwOBUN%rR2blg%_3WrB8;#cr69|+|lyRT;*86*J=V_MUTp1vhKjg#e_ z(bHfY)J!t8)wJ(xk=*)Q5>#cPo38%|CIMk5*Ll$MsY}(9`j0u=y*ItjgWTK3 z!2OPLr>C)P=YGb+c4Am_C;bkqT zQ=y8Ok!&-{KUf=lWDJO{R_b`ltt`ME4Gg2Vp0$FVYNz@a5t>0-J^M>pY+W&9D5`k) zEd{4J_bsM>9H7=8vfzQg0;=Q8fVZcesZhIA?L(9rREkq^Kv~p`P=WpCusw;0@A6p3 z^Kf*3%{AKW%T-F$F+_=J73V$Z))5gQLU;%by?1+`weZ6_Q;@`m*PGGg72-Xf6Iij| z@0CNA@px?JpYufxtCQhGZOx$%8(zev0K4~0hu5vy4oHrorpO>O+nQ;Gub zpYxvFi#{a`M57hVY8DX3Wp0YDAHu?{Q>~=G`s9+3V%PJI3f%^NQuEK|geE2>-akR8 zymW!+&mUYo+9wZzSoK$5jtoMdnlJ5q*8InXq8jZ#TmNxHqWGiLIV5bkn8cy=BrX54>{JBuQ>f2A70Z133g=@INLFUmUD+N__GXB31ynDDGw)W~F1y8V74#OtWqt$wa5!n);g0+sC`i$GO{FK;WQ*i{ai!Da~UV+zR^Oca$d%a4Ft=$b< zzW3KaK^+6)YCUD9>2Z+Mc_F=x#UdavkqIrt91U$ttbPMa2gDS2o6WeB zF^YamV(FhMBQBs)iG;BBJ$;N)IsajgZzPP#~boEk8nQ*rruJfNGj zUoz|xy)u>DCcPO90JHY1|Hv}vBs9=W*iTA+H2H+yBn3qA9)DUj8^TWyEQ?eHt^b8w ztIc?qd+S%+JJ!#qeu}~WyN|y2_V)Vrn)CMJs2q_A=hy#lGaK{vtD{AVHgf=;_fZAl zEHVB@GJ0+H*jn8}GtWIf8eio}(}2+dIO?I_LzN*i_V|JbpA-0L&`j9QUNXBeM0lti z)ihQ4$0AQOS@x>KR76QsYm48ln}ZHVg-0rJB%;t2Z`48RSo_na3DnR$eW|a5*Ig>) z+Wb{Ed}i{|KI zeozIn%fGt#DE&JJ11Fo%r0rTckHZQ9xZ{_5p<|h7SE#!8b(Sm}3?MS-qn1cx_`aCXyzwl1?KIx~afZ~W1TVNETGOvqq5Q&;@abM$n}oT){7=XEr!t#tg7I&)z*jar;y z0`uKmgl^()%i14aNgMenW#1Er(}XTvUeIZ?Mn2Z7 
zG=q_tLxJpF9F57^4%St-O80Nle1GV2(wC8fViCl^Nf`~@DR%aDPhsLj3$UX=Ko3?P z($bb0ea`^hq`!2$?}Md0x4G6NT>dXAZ{|`D58TemthTj4UJ=q?YC6l;v&H3goil3# z0e42NuPu>kuD9~b=$0P_L=XX?3Apxr!19XG+uQqZe72MGkrIc=d-xgW3l>S+W1S>N zgHLz2bHdjDbf{PV){iuf*9Q|gF{t&Uc$(LCuixRiJsGO=mL@xf^9~bE5dZ5tmpNlW zzto+8ZBIlxBAw;2geFDySF#fTn$z^VPnxAis4Fw^gAkDd0 zZl4sU8)^0~<@BQ!`p2{AM;d704hQXV-O{qSm33RYMzVKDZ!}D4_vhC204Gcw-413A z_E1xUNmOTqIE4qp+c!7C+E^2VyB7y`*ufiO=QA0nI$Zlz%$OgQQ~^FuCmy#C`n`|T z^fw@7AD^i)pW4-G-oZIch6mQA(gA0H@OGX?v5uC(@lbkMlTUgb3svkG_=mL1iRXtb zDx6vtMiidNtpj4I*~sOon@65;>hKD6(gW>&k!Lf*hWB3|H(!q-w~P}@eFCGkaLL@= z9|NRk5@oEfuitsm7awDmOgfG$c4=u{2av#R^Nz^2QvGFPD!oj7^`eIJ4a~gLBlYp+ zl#wDYG>!sv)#1+}{9WYX!x=rifyQ-qx~VpHM2d;7q;igob8QJn0a!nHB3ZlCCn197 z>9RA(gIRg|7Q5{4Y@t?+ToFI{|1Vjfq)IfP};%P9nL}S20luu zif_(%*yAWm=Fi9kpq}M&PG)So#U1zgi(E$*)MnX(V+W#xEzLcp$rG2wk%P}-ifivHV@=k{B5g=ZBLj(v zoGP9~>icmO=!37(0d4CJguMuGp%`tG{m+n5Sf{z8QY?ikCeJ4McX)k9aibr3&QM!1 zs?ErjZZc#=zvb#7wXBT$X*s2`ny_B)5MsGHJ;WkPQT-{2PS-;j=6rf(WIJAK?NvfI zzC5xv6_2)S&_#2INS7VEEChhloEr+Md#`Lr2-stH5MzE1$l`qGMEM3L;%{Z#Ks@^3D5N{m&Z5=3w%nUuKKM7zgra56^_**aHipdYHDvsO^_A|M z$$|n_FiCqTf4xsJA|E_!#)9bqTodlvzt^S zMJ`hYKpx=5;U^b@p%VWKngAw0LzJJXW$d;4{_}$*$4vfCJ;gkGD)x{fVxEKhK z;~Ymnhlq4qKS{J?2AA9B)`VY#JG3G);P9Z*M43fqv{2uSAL)mmgwR>zg9HWsHSNdg zUUm$bt_b%eQhdV4bi{>PD)!YjdTLa3ZB~d$ul~7e#E(4kGGh@9h2#t+9mZ;phsg;%WPU3$ z{>Xgsku|}hXr`hQi!#-OsEIQoZGt84C|?(;&8mS5MktO3U?<&N@jNG%9Y-v6YfW{a za1i7AzqA|57dL`?&ih)YdodS)Z%wWl_h<6m#(PPb)QKGr4mS$S-fl;Il z^Dr|!r%kC^-VYI&81r)hZwiSG2kM)Z7t}=tqZiRLJvE(BXl9r2Q`Ys+p1haba*MuT zBh>3>h=$&k?J+)MUy}y%ue0wPvpXVlnytN(@#FqkZn&WY3p`@Vdqh!&AQW8ULyCKr z)>3EEqRf*U9XQ&;a$Si49(9Q1~aqMxqE>u<>mIjVz6P#KX)n7vJPZbe{L+h=nD+s;Rd3E{36Ehd%-*^jUkFk zqo_1JaSO%7x*_D0h~{#F6>0?`s@TMn`DoEeXyP9pVXOkj_VQuGNzmaxWT1gO<3mb& zdTkHhGzPES+Sp#qo@k9?R@z6dofQup##xnw5nqW-Id2|2~FDbU?) 
zf%mWnPYxVkys}jq$CFqsyZ^{|Yf?#Me5$4tOw&tntze_W@p!0-Af8-h1?t}578R-6 zZ^5ghKt{E1HM4@*Y*rWAk3UFiv}t&ZFZTJhDy%q2<6CU-U*A!n`pU10n8LUR{LMHuybbZN`ug+xuR^tJ z+ekR)%zrmXBXD6Sn`2pFgX1sb2b|etEC~TEel{Nktk*4ioj$|tYZua@(fQ1O;AYtp zPcyu{{j#aaelFc?2sC>2Z7Jp?n9R>)^=pgaK)%yBFMFsQRjs#7Bnb92xFI^3(cprF zqrnNqPf5x@1=$>;1*iDX^xzD*?5?_%yA2)zSNS;&-Ks~jPEpaR@OHM z3W0!(!!zijg%_1=W2EX)38IK&@L=(g;C$j^Z7~BUnh0S(z)n*wGGGuyJA-0V$V`{9 z%;Jx^9-_D9Om2jK5(C&wLNWi=#t?u6qPO%N*>}k@Lys$#2Jfj4R_}f|MAj}(4TKR+ zU0Zub({Oz(6Uh!a??Cx;kWaQtVDX#R3=<$IMnP77YZK?B9G`1)6Y+yE6x)2zV;vOOND5*?Ay;{Z%5uIqAJ;UNBrqV^Jb+x42G^E?O3etAfTz& zsTZ|TD!w9H2CFxXn+kEnG7UEPoF&Yzy!QxGfcsmELX{lAO}~C@`=&e+B=P#YtRZOw zh+9OWI{F$D5Paw~eE;wnz|eT9ExoUeo2ckjX65;88q@HxHs@ECcM5O-FN1m9NG2XS z!RSim#m^NWF*to#1{bYIwetkIK#~8jioz#iy*G*xz7HYpVsA>H5Nf(i{H-ZOQ_1Y8 z^`7L)Yh~5grxN9W$Y;Hm8fscRA~E~4d6fpg)Va_Qylx?qu(^Lj>dz1=O`q+ z_DH^IH?C3C_J&kjifw*yFnEPeMoaH-qA2{|i!!VUwe~@dKY%(ew~6 zi~|F<3!G8j!5Q_kg+RS4W*DqRK@<5(=Jd5lSX9|{@_mhF5>6*j@M&fJaKvYbJDGRk z${Cr%u{0-HYAX>BerIK6OI*;is7se%2@*-5NpCf9%^)t)*u-wK%0Ks*#^ekGu6WRB zK?V%XozeDb4NcPi>Z!L%=4W#sgWvi6^(WigF9f3&*F47Aew%aoo9@(0s?59-t(=(r zmuddbmytF7e`NS=Qgq*_bp0AuUUe<+4*HH?*?TTmKQHzw#gDx-E!Ug1jp#Z@xxd`s z&`}OdI4~-sH%*DFb{<~}V>&q2iA=h5WAkdYG62aY+?%`D*U~9PX;bf%zkXps`gDHA zBThfSB)F4?T++n7MvpPISTb}&kmhQ3{pUL;F=@~GtMth+w)QhGZ4Mn}O z+Yo;OpmG5!Lo)A|r8ae0m?jMX#1)Wz?VXk*34(%OBh9= zMNyJh`v8K89BvMpzVOje$J);pEUQIcNn$$kynp&0afXkLC}} z-nwc^rc9WpsV3Dk{Vu4SxBiBk{rsBh2tlGLF!mV2Er+aQ(A!V8mkiQ(DgkPUw z9>Y;R12r9VhA}1wT3wOGIz2z3qV)zw^i*)cUdOnG5o@sJ@gsLjC1?BO7&e;63{VsHbWjnxJP4}Cw*rKFsU8w|q zg@ZGsq+EWG#qyZd6qkD8!7C-UR|j<+=Tlm8e5VwoPXj9VM{HLZP($ptv09AIN!PqUthg+}8^Ly6&lgDL=s<3;{EtJA} zEfG|wG)8$aMQ}~PvS)L?&`IItMqHjq^zXk!_e$yXSr<`==9U*X&SYw9iam66 zY;4&W9%0>h2lV0W41TH1ax0XKZ&mgtwW#k)Z&`}`2=>FQR&ylK^u>es)TV^!G>E`6 z)}$F7y$t83hgslmtxy2$w!Jy~$$mOHjNxhKPU_!X>tt@of*b+QF@)Lvrz+=WIMc>GhS-PoplOhU(RfH-lC zK82HtBd~SR+cLCJ^_1kZEJklr0MJ2Yj_Al^<;J^hdmN>dQb+7_1CPF%1erqeQE^L& zT5^)Ge#(>pxV|xN_z7|;41OcWDDNFbKyLf0HhnfoD9>yr=10~p_GxJrsd^&?z9v2x zZpXvbI%)@%dr^Kp46pz;|IHqE#}#-Bl&Zr3Ou!92r!m#}65fu$barfIGRo{s!iP8A z6Se+Ue7XA)sVvo*k$t(RDp+dhIK$LvAFtzs#=r&J=fquVBzTaT1uXd%xXZ!-mg4c* zKEo%?)yp!2m0W%LvSnAx>F@v}H|+vN`BdnnMt<=bQBJlnJPvA>s$S0<=7|GuCC&V~0@^eurF1G0hj zQK9sT_7n7@AUK+-hDDOlJCCmL&DvU_}>qm}q&!@=iNa zZGHrGbjvPvxI7{ijnW0@uSksG*DDl`>omsSXT#Oh-V>o;5VJfKR1Z=sex1@W&{ORpAf+XW4=pdgM|3w0D&esa)qCPU# zqnF}i3&fs|aEInIas58m*jh2u>#_|$|73JUTX9@vz&$nr8wo-|YGrt?>O`@_!vwN7 z043r?vftbB1dA_@8M078b_D!cTghViO8hHKfU+rQdZ!fjtVxWPlX#e~&nL>Om_BoL z_4%&qHJn;6i_vHzzH~x7QH7v)3cN+m*94~(xK_=Wee-Bh;2WzOtksVASMg4D%E4=A zH`|xVHHpR#V{d5twYR=(!xQxI8f>*5UToa?MY<+tGckx7W$!YRI4L72V#y8y{JJ}R zRpiOSrG(~?1*SIF$!@~YertyymO~GX_P;|@Z=3uP8+6>cLH48b@b*rn*jnzym-^-n zN*rCOj~5b<=ed&C^8;r{kE{kn>)bw5nQCZA^7h?JuEngv8pi2t@lTX7i2teE1$AL> zFMVdmOXw~0I+#Ie*0@-;#X(^w7C@;P&dT)IBi-I@&kk}K95tn~j}i^BNpTKN^t5mi z$S)VfipJSTAK38v5Owq`<=Y{R#aQQIiLc4A&i1j&gZaW;koZLYWhbq9EP+#{vmeaG z1N-`Qri*ehX}sOnjOn6ym{fEid{c#$_dsHDppbtzC$*(-Y9b*X{di3^H!T^Vs5|o- z`q~xUe$-)}fr#(%K@NyczdlD50Vrtu5CQ-w0_L7EUdd662>_8IR=Q>OHC!ieh$Ri% zoK$0NzTx51dU|kxd0+FF>Ax9^fNJ~k7?-)?j|U5a2`4grfAB&+n?EY~gU^b_jb}1V zckC;cLxO*&r%Qcmc>fPi)Jw2e4ysOm+t=b*1_M174brl>m%==YGrWA9H6yqz?o-*meAa>jjWfb9ZPFNz z<`qo!o_mqae>L>W?LD1F+v09|@K8}sHDWreFTM=-O();nX}u#gxc_Z`k%+ru02;(ohwG`o%R~)KHSHX2(LZ^+ zJbSycQe03stJM+^d&tbmi8>bsiFS2evjzZa$TTNIp-)8xt8l*n^nnIlHncK11Hg>6Qw>IVah~GA0q+^!mj&!r?p0U zi~ijZMx125oD#MxSAC~-#KS=a7K%Z;`Lg_FgUI$1>xLBmi;1ph3Sbv6apZ+YhxD9$ z(`E_TdETLYs|4#P%it}zgz8N0gCXN-juX59HAt5KkT{ad95L5cNo%$Csgl#deM0H) zAR58!uV~@jQ+8J^DG^Oni(O61-AaCe&qSW|#bG?o^)-4qD{0&tOcrnBxZrsvD?iS8 
z1av?(5SZe_L@076+I7xyc6aafcUkkvzu24J6ub86KCZE2*Ga&_LL_PY_vF>s$nVG> zZ36hLX0kX=vw&D!z!{$2+sKnjdvm<9B;F@cG7?lhqYGcvw(S;|f6n@&IYQOse*goR zro;mTZ;qa-*hPflpfEv*%^hCN1rkl|W$XoESZH8(12m=wjbp2~l??(zxq5WAo+B4< zwr~An^K5xdsIMhL`CG|W`F%(ry)ZXhsVDC*O5B{weLZ6Y>rir*W46>62#A4E^+I+N ze2~Um-chQVXw*BSiVCl!{jvCl6qJCDCY#rjB>fx^H7pseHzDovVOLx zT6ats#eH78;#8ptkW}ChEl_wh=MjE!sREiX!x#m4X<13JD>4Y691~u5<)qtbBl?e_ z7##$LR$f2JphBuH9H9Av-=|be3t8ykDv^aiopf37LQqqXeb6wS)dqumpBQ;H3hXrn ze%|2(pd58ku-YyuWE@~VI*+3$L7rmfHjDi+OdANJX8YxQUy_+3Yc3{*ZVGj==o>w> zKs5^}mkOYwy+XQ$>bq`U;FLKa(f|GU#ZcOA+=X}+O@9QmS-J2` zO9hriud@>nPMoTcHhe*TR@1?nA#E1tS}rWE;}o*eYz0{0e!H%4@0^OdYYQdt9gu{w z=^WO%K#p8`IxY5sFeyJ=WIc-emRYJlBP%05Cizv;MDN?F#X`=0ol9?Q!L_+NR85?5O0UEb>K&qYYbmmwG3nO;rMqgJ}e_hIzC_v+rMt|ma2$UPV7?KkU*9P7iL$U+;Sj}(c$QtFI%t`7su@V#g9{L_lLb4Zp3V?x!L1;lP4u0nX#)RVj00^1wWFjPo|o>I}-DJ2-dtZQZvDP-#+3myl~o@ zg?Egwtc5Or{wnFWo6H-%4|#i&OBV7!uiIwVsh@U+S)UenjBfWKVQjCF#gTEk@^Am~ zbNK;4f$*Z&0Z>@B$sa1l3svzCjbFGaga?nm+L$Z#DXMXNXSEbl()DA0e;NuHi**w> zN&Qro&e0v8%4$9#olwUCIMUMDzH}|1)&DaFN_o;|D7V>5^(xrt}a#ioLh zP;O&3ucR1=PP1Wrge;Irp}o~6h;Qp6?=%Jg#O`{a!-KaV&I6|!hp6x+4`)-^g@yq0 z^+3K)RBTVgHe*I>Mtq<=0!AE|;B9c`zXc<8l{2TX?Zo^jiv7+B8z1iW z5UMr6Popoy%o4T<4-8Ba(+FmE6N~U?ll>-R;$=zez%YZMYpra5gT^$i;zCX! z+F6q9q53piUP6iW8;ouh-sLAEGax*BvSgAj|z5`zC?qemNb8_nPw6)-Z#R7EZiC1d@(4*kjiQM zi@I>YfxH^=pNarWn{I?x!qA^9qsa2b15!49|3%`m2HwcBJo$^iz~kO@F$R1fR8s17 zd5(@JJFg;H0W#Q@1?4t%g~$wuUBgFZREKWNlW#_1r*E-tjTKM=(EAI0Qt`z&j=8(L zv_B7Pq;Lh33BjbcTuz0W`y96w7Lm1ziyVxwDYF8FvbdSu{)t7qv`D?C^%3=`f zshTWu6GDu1cS02lvUfBK%49*sfhWk_(f}U{!HD9=KGM9*K89S9=lohU zYO<3A`*56*)n~^AVx}BJB12Fhng3 zwsq&gGBZjtBN!RT4TABWfI?3h0bLKgN2i>GA>(cLQK7YyrndVWQN4Eao5*plz6_YB zDdA+cN>K}~1ydL$*v7g{i#-3s)?0@~xpn`;_Y8xKl%UcgrGOxzq~y>_2uSxJ(jX`) z0yBt+fPjgD5`uJhGb2i;bf<)L_sqQasOLQA_r2bKyu8Glz4qEGKWpu^&&<4!E&L&A zB8iga9h4hwH@V@=P0pAsfy?p1C3`MMpr7;Gd>JCU2s!^_MPSwNzEGpLE_gw-@ElN z-8$%s;IjJ+7 zd>Sdv?#VWX9-ic~_{?^$2Orp8tnb;TlwR*j0rgBMI%86DwmeE@hD7q{iU=+}*l|zy z)U~ej*Vp3tWmX+mF}EL19GBBg2eobi{bqw##eTiToQphEAeHGu$h{QaG-hYh4sWzZ z$*ko$e@!tD!V;T^rlj;aU0rDzMWt%zb+UVwbA6IWye|d2I?to5ehwO?&Bo55((6M3 zq>2EXgDT~nMh_GR(0=Sgmgc^D3YER;5^*R*?i!r7Efzq_r2m{Oo}Z?EoC(4xI5Za{ zbzbB|asc^lXwX@U!ukL?QJ{A1rPvCENr1hoCgDz~mh<<=>a&aqT{VH9B)PR2uiJF) zARed5mVbg6s?A@BXKJiVu@Lr;lo(_VPN$R_p(BAnnqu;teVVm7n$~;`D;j73zOVKo zD0d5Kh&*AVs;xtzy~<@Wm2VcEoKInc>>CT z0iU}*=@&owZfanJM%u+s15Z*A6bOnb&l(;j7g=4VcF~u{z09WY$r#<~D&9Dxf!0RO zc969y_#83^1gJ-YFHH7I?7DFjTe++by!ndg!)Wn>(_9oG9pMVXz0s#D>4OydFYm6t zel$4iD=5-+N3I~&LLKXZ&nr38jpksrOI$`C7f>LYuSIp=X`Ow`jA`QpqMjVJ`YL|x z<$>ErkrdRuUW_9t7;j1KJ<=j1`0tio_p1A~Vk|$*v0eJT$t6*syYJf(0f8T}1orbOsmu&P&}UaEA-~*q_CDiFGdRH9fz&&hn+n2qff{ew^U}!zHkSsf_iR zoqlE41+}w+Y8}(_N76*Hyu@{S70vVqU$8~ELTIhQicU!OiMarRnXVQZ@uZ_BT4 zCefZOm7Oeop6GSr9T~_hCCm{V2p=DoyA()c4N@mV0Tcb?&{wf@L}__revI$JVZnT& zN#M+o^w;8fO#1F4Spmq@hi$HqG8mHXOq2}q#+5ViUc-R+>UO5FPd8Jo)4ZCA9L~_H z3H7!KJ{2X`AdJW8oFE*fs3B-!dES_;o5FHhV+8xa2tu>Z6i0lK6>MO}<#gsv$x>OP7| zHVB2tORT73E^8a%d?0|M!)ehkbULe}{r;SFcz6aAc$1O`F+SX?pM)`sG1qu@ zviB%(Cg2)g_{8EG-X2%D+VhRN1%7&nmh>@?1Lf~|7>FsNm2p3J{Z-vHIFfN=&G?9|{iU~kNer`^%C9ppC}n@z30Hx-t# z9F@Phb@xPLdGR<-9GA_Ip`{UD<$s%IOar+1e&RI5ZuMGw=w~?}Mv4-=DaX?rW#8M_ zcU^nDmgmMDpo=%bk&0ZFkDl_VF@hr2bsoZE;}Y-+=spnE_2+Z5=^z&_JlPEn!swkI z`*HWK{kgiYY+Y)@XGT~Kg~khC5DF!Q0upVQu;ggs{3`)sOPARMw(E-@ankC8y@>T= zlN1qFE!$+vFORD&)}1W1&5F}mtEG)Yd`uPj)|V&rT1t$|PuWley6 zzIniq3{qse!BrkYBRt_Br2x)5F`rSaY)}FFcWK}`5a`WohepliWqh!k?HGML-B$ov zLHr4fo*0csaMBGDFThbk&R^h=Gk*Vk6LYHfEO{uWBF!nRZ|oqev9&#?*Q0*N6u*0g zvb3@pV$2ZbE9~KS6G$$BA=U3S^l5o*1M_0LuDrfF%rz(0KN=HxY}Bk&XK-=feOP88 
zf8YNgFZrk|J*GPn|DauL+Asu7%iwapw|}ok)*2iy@sj?R&ysdiiod8seAzHlsJo<0D6P< zGn(mSTV2@~1=})jJRe?YjJMf+!&Rzk#Cq!bf^arnQ{2R2^a$%cS+KwxNxWOfGVZT? z0A8G+MPqNlT12y3#-K0cWC-qYqh)CO>-zW1NW@D+;=+xZ5y-q(jr&GcCEs7l`wmN8 z7h<_tT#;xdnqNWe6a@*LI%G#`)&-hxQ)-FHOmZC4Us-$NSjY2$@^VeYw~J9X=5G0W z`@e5*eOz-@&?WR2VfyY}P5B2c&>|1T0DldWud)gB<()Jy%~c-6ybh!(nB95Pu}|}WQbmG3FtWwgwLIK(zjuaJ z3#a6MOZjEYX?=K}ih73!d!Xu(wIf7?z3e;UK~M0a39O-l`#0GOVQ9GYl+ zjA`cR%UH|sh=0H-*Lf=@t(|y90J~}NX)c8&J@tH$+p5l;jg*K7d*hqk`UF~l*F<_# z(?tTP3!OH6hStY#=b3I;==R|s-1$kkI@j1+qpKNL8o#EC>b64SUgEDz%CNxZWBsGp>JQ5! zyK#JpN(m0*9F)jLw#2%^qw7cCj1IB(pWdHWz7%}rsrN*pUGJK^r1d=Ei|SN6Nu-_V^~{VM$WQj$*yhK)0HPG0e%)34P4 zH1y)&guy8C$8}lE^1UCtU8hWR-*97p{#=#7sKCJiyx=2-UaVP54+yg|8p`Xg#vJ`QDbPnlATv7DigKymI!f)RVr0G`_ zjPRMP`MiF$o;^l(_hA1r-1o9TKI4wxU{-so26?n$-47235F-tMX+t5e&0U&ebiwPs{QNt*iMnYp1 zHo%mSU*DAO8EGmUeinR}2OOVKz_fZk)%;?RqFE`WC(FfW^1r7pdYg81sZf4e48B3j zimc&Xs2^v8Z0@Nh6{yf&qWAXX=@_FVN$!D1&Ul801Qtx}4B>*pF8npm10;|)V*#2wF!GZi>`Y5;9Pz&w=36~U#rU9b2C7r_C zjbHp^+3GeGV}gHg|2>)Ow@F2&&~BVzf5#50&zHM_wRP{{ou%f5pXRhDZ`UQZ?@eA= zS4kk`YLqi=cwvP{xL)1S&|AK~6FhhPJ8K;Nhr{^edjv z{~qSu`gO|6icPx=`W;}00(nf$TZtA7XVDr5YfO52ntj~Knp;XA2w#y~+O1n(44mmZ z9-uyi9S%m_c_{#rlS?GWZ19WjXJV94q4?%48I6E%fVI6U@G-6)eNq_vf?q;5Fm{%B z$?CUD#n#eh1h#=MgSfSxJX=1nQ>SK!RMmZ4fhE1vIJ6UdyROw0vBN^sT&O0~9ByjV z*V+O;+z~;A1}pFbTwt`A>o${tqzf`Pv~YgZ7?-b?s=*iyo4B`~%h?rogdNBpR6ALN zOV)B8U8WVG5#4$s^O5gqQY-_^JoZB6amsAxUosF=~pBdjey$UCp)UoaNFUB7m z_Cw#M$M!e1%D*qtqkb?LrNoF#a1MLF&kOfqfQilu9VtnLRldLlIT)W{C|*41@@5Aq zT6Jj3XyAlC@O!E`U|zEg%ChlkQ2X=UUeP<xbAZ4YXtnG>A=~lT3xL+z=_j$E0z}V0fh#~r1795d;hY`2Z5Mm88_%vd$vyd zIUs%Iy~qWD^(-IS#Qnj!X0apD)(tD}Z>V2p!@W)(+i`~y%LHypcPjLfUS8X^p8G1; zvIBn6wnVnf;WH}7k(XhTy3snO-jbc%x~xsSfLAdnHn4r+QaJN-FtSXFT6qJfBf z>(;OGBSk`r+`cp%@EnXf5h}eV%TzL_1OR9EJ(~v$?~er^GL$LlN`uK$u4)3#6O&NO z2+n(2g#jzm>kw}h z@i!$D3AJz5vsrca*Ud&s9`&(`)lW@sH8--dgoCC{ykt6gsEuobhzw!yOM{{E&g-9E zqy_Kq-YOOcr*IYdZ5Dmyxgq4eqkCX}gNugfsK<~$!g83bnv-Zp310Ug?&+~G;CNML z)UjWAsPDz$g1^!o;Q6KJCGP*qGaqfnFBUzu?yGfiGs*xh;iP8#n98md6m9)k0Ek%mdDrQQ2PJf-UHQm8SM#)l2dlgAwU~x$h}_GW<~p6+$jGiLVQ^!40l% z2>2K(Q^Gox&r@?0A5k^1B)b$mq@32{RR+7d%z~^L1ouBwNc~9}z$b9PX`nl@KQGSk zd+c4y{%MFyqiuS3=I3aW4^5C{I|1x(yglTjy8!=|3x_z!Wa1+Ruy9Y~6D!)BR~>f5 zcAXBFa^Br=8Zg@`xt=3(Wn}e@pP_VL`QCo(5rSRdL923`$EJu89g5GGxbLDz2GyU4-~JHL%DY@_M=n|y(M+mRd*!+$zZdrT>l1>Bk%_4%txw*s;<{N-1Jbb*}+u zyV@>d^(vI#a`z5ISU@BQEbl?EN=eL*?o&cnC=29ScZ&@13XK2ZJAM<;O5B^Ba&?n8v)Or07Bx^Jj6dt!hs{KvxR!o*Y9*Ta0q!)D^b^R#d(^dse&gBzSWH_suH z!e>G8AzOsp?mCdV`8+ZAhYq;zTYG^+iwK-u(VoH2c|sS)n>TP1M161~Yxw3^!~_ zN$unn>|A!{F!TKDh=S(uI)p5JZ~kgL#XSA(FQiHF30t$)EYs*!p!8WtG$$!EIvBLe z7s(cU)xk}{A${x|o}EXtD|dnI#DQ@i@Vym{L-|C1kHoM(QD0IsWZB8R5;x)HZ)-pUw2!U*$iK8!uiPq45bVqj+ zF?E^->8%tkrlfYZ#~J#)%!=8G0z1vP9tR2Al+b7%19R#>%0M%OfE_&LfFYMLb#)II z&fedqx^$lzkP4j|Jw!#gA!We^cs-MyRgqEomg4oITR8ohi;jTivy`s|<%EvxYmJG6 z88yTL!w(`joT7B`3?(c<2-?{ul9C?@*dvmt9TkCLNOvdNMC&_kJ*39lMp(XDlJj5B zWI=;W!z{JTLhK2-&yV^(&qo`wFarxm%^YL%j9lnX!M_Gk6?E+Eq-UlsJolFTJ6-+& zLWh6a4t8?HJi_#ofGW7V22TXEElUMQ2JM^@BYp8xVN<;Ca0Wo{F7u@?UboUBR1 znp37aU)a5Cv4R4vE0eF$>D^}XV3xK!lH_p`u4UnbASGAon{ksjacuq6`g|lyX2N}pu=H*+iSY$6u z)ID2HtgVtS8ZE;VxeyxqBfRhJ22|J8^oc;t1Gaaw7j$7X2;?jj28iEoCEvG5VymT` zduXf@?EBf%Gjp#?WVK1`;|nR=*J$&CTT5ee!^_8Yr5#r=rNJ)7Kl=ehKo&Zoz#ZSO z;h_$`RD=K^Fq#UQ0^A3SrC8VVr!&;5Ej()dCZJ>4`^e`Ybz|=U2)g?9yvLsERDOeS zbv_p%g#?vDsnE)qt$e+cb#a3ryQ`SUcXq=@cJC&O=+MVVCtro!Uira-X2J(LK@0sn z2IsR@pGRq5zh~xl9Q4M>Jxm_mb%an&a)UE!!R>dmLslzF*?e9#uB(hB4wdfK)usmb z&a#y(;*JPgT`HmSy_sN9Ig$hu^0xlyH@F88hXCuA`)BOOSIUmUy($h5ZUjEMPblq(-9e*qV!J-Tx zl}eq|l&->saao=*XV`z-0L1BDmxifdrYLc)MqP_QQ#BzKh-^Yk$&%)zpmy)Nl^?Se 
z^c{254%(e9@EUrzntOG6l0SZ#jlUJCH$L(RFGM#~Htcpsh3rlT+J+VH=ZWs=%U6Z? zBWQgohzG6!o}S$ENQqJt#IUu7&rFtfg8%i4RPzM)?JshZn8|g5$I3Uo)n9s~(3K?7 zwr8*$$i349ODEGWsdgj8A|sX`YU?4k+Jc1);~N>^r8b z#&o9T?T~Vz7UQL zh0_2BG-4CIq(SbU4@>%i#)MkX21U^UA7IOsGg8BNki zOQWt{nd-NT6H5>rHT!t=)UqOxSq`5OxAz7GN6X6Sk*^k7 z(1Wtw%?lvO1+EY%FiLNSgR{FkLnxp_4fOFao&7D>4_{>OiCoJ$%V#0;@UZ@<|K#r( za0j&s;q&I^8 zY2Htsig~=D$h^&B&Mim5{Nn2q0GfZA2JoeNS`99|4=roUW6^qFi;|vMUrr(TnLl3l z%Sc;eU}oLeZ3ld;H05ZC0)UJrEn&%A*eKmTTDcp5dhDVXv08uCWHnprN8*zKTYWNM zrA{`mGGer^sW8D$*{M_?`s$BusU*~^b68ctxbdf-KDsuHlt=>*<#_Zmj__7?jMw}-Ul zeD{+L5}Y!o3I+B0Q*Vz`+C)3T7+s9_ zb1SNA&JnrvZKMUJOJ%XO0~0!vCbHkez{3O0mjs2entTub(vQ^iadqv%vdO|GaWV3# zyBVlCymx{#J)q(6_<`w1R-6@YZ*B(-wn#Tflj>Pv+G#1nyoSlAH1U_^s9*6HtHgAS zZGqT2P5egFk}uVHRXCy7*C$P#z5N>J8iLz0V(}tp-Zio<4R@V9-Lm z@&l|5-?O^0rJ{+U_*oS~u;7FlOb13uEFvFaQN5 zP9Q^B91;}3kg?yHZBdeEB(c1j++VWZuW?8Va(Et|G3ZN6TA+iqV;Hp<&Jh>?AaKV{ zN_xlTSVX$C!!K*4Q+(MXzvI+H2Xx45d_0>R!KExj8~F}iavNmea@Jd_kr;FrhVL6v zIKtvym|b1>+6h;@B%?E1qJsnhk(iCrp4cKUBr*`>BBB?Tvkuyoxl#8ESg8|llSpZ+ z@p5N+@U)XogQg-*hvKcZ5UePOpZZf`R)NjxgqoA+i@y7ei**+b6NZzPBfi%^E|fig z_K$pY!uwcJm*4hR9`0Th@y@k;6mM_$O#zUOcP&+4KFGiAw){ejB^-c8(z65Ii%Xx~ zBhP-|@fh3}GS3;dql0zp)CJui8~jjrJkq~>chC!wq}z}t{QI!so+enSSu39%S1GjL zMTEtuwV%bXhg7`3YjQucdhU>%hZi|owyr7%$B{!c0>*l102_y{sa1lua##Dx+`z9D z5Efb+aGYmwEjoSk2@^Z@BI$vv{8WPr1}BLPF^peuHu3uDQ&1B|)lf^lY+V+vDtpbSckZczRq=MQsNED`Yp z9bX2MP>jyL)q-`EhppR9v9lh!0u-v&Yjsh)UlTz4JoF|!`NN+Nev{w{J{zOJKt7Pc{aj`Ahu4!peRRzQFnsU&E?{m*m{pTxocOPhAGA%Hxb7+-0n;-k3 zZ!`K`qzM<$+N#~-FIib#v~df*Li`YyC$Ap;mu=uLK#Y=#La{vXadkntUjZhTQ1uBdu z(cIwRr{mT+eB}d(}vCetYW{O!pfwvzdh+p@Sku9F{KKL7YSrqsjaBn43R0+ z2x1YzGZm;f5?6%=a`sq}eYgNhyYTaB`8a=R0gXWmukr;zV^{v_0K z=it`k68GxDecVD-ex|r_Bt4JR@KnxV2syf|7RG7tWu9q3pt|X|$QP7DkC>u8Z|$VJ zN$m7mE{)t~E^k`l2wY+@sFuo@Nq17h-FD5+%yORRVnv>|Hhp%lhL_xy0O~-{89H`g z`hA-a>S%kLS6Q8$v9-#p(C^7|Be}8BBz?7|h2j2c+;4}9 z>>?FJEee!9-o4}2Kj-ndE<}JYkTp!GcH|Co?t{RClVXzy@hB|?R|V6kebyTqcW#ZsAHemieqpSV4Y7Nsi zpfCLGQouhdL?9n=PeF!>^CCq^Di+T47_DW$k?~0sDmwA4yh=b0yY!FOd}2Zdvo&Yk z7b;l7<@@2Xoz1>2RgUII8vV(1ezT^)2sE!l*-Oa<11z<(@UQC+W5uiDc`Y0cTq+ndsigXsULx2}6+#{q^=`rl9f5&VC=M^uMho zWkNsqz|qMM4dQH5qfV+#qsng_HQQgb*<*qt$$ODzEYp7M$D}Kb0Pw#!3}5> zuU(f!Bt1jkXRM2?K zgabMVKiQm?LCHM&!FFU-$#)^6R~}x+tRm<)lf0oHghm-GpuYHwg#5dW-$V>m0O^3s zr?+&R8)@jvg||{HWuffrmBl^BdQ`xHq7B4lVl;EY+T{K%&uwtl$6r7FTMe+!V2qFg zj+=(TSZUaA*PgE%O1S%I?_l;Z8E#^~c3*@QXQYYXA$>`TZ1^X{{Y$kmNcA%X6ILi( z)Cx&wU&z7WeI;vZtLR}YH)pt*U{+^u5`)1jpCgpbwuoq8T&UkS(w>7S;sohEztf)s zEWV8IAM${&&lsTnx%i8w{8*7zxC6I)xj8>^q&f8dY5Q|oBU>}L-S(TAy_&byF>(_@ z0F;dr0Zb|N`JREoOH{FOTuaCN0L@qtTrgae>($7oxDP`O?=Wz5=bW(cQm|2BD@Ryg-keM?mfT()n3~BUVz#062rpN-c@{tNv{f zwnLf@FYO?}R(We}%ubmvtEPbI&A=gS0<<3Gpfh)u=CKR`psqB=Vt~*`#tq_g`jQNgg;ZV5kmyMsjJhffrpWZpB~nlz-O&YT>q&F<@$2HQ!M-9u|pC z+J6|?87hD8lXuvzh3Pncqja;An<59lI)w2uGu#(i+uSdM?qbg2!e9>1-hV0&IA+Oj z>pxChII11w&97onm)NFCS(Qhs;lC+z?AgR`9w;VC7&Zt7{7lPw>&Wzavx0F%Fcy@T zva1LH*@n%z2!@FqI_3L09F7u#W^Z;kP^G8rDWu!=D~wF$j>rMy6py}gBx2==9w?YN zW{wE>8Fv^|4#&?TDF+V$ot%CgNO3ulr?TgI|R#M)xC9YoUgmV-UFw_8Q)+n|uW>dAJisWbZ{Z+zynS}HfJp7KfMnLkQ?1!VI1 zX2!21w->d1>kajz0+SsVGf=Y1LQLdA$x=i1FICr+aVdzOOiJOIpD)gv=#UwI!}pJv zUvM~&n7jf{YGKYLt!ljM7F}}ItUT9wym5tDrE)XZkelC(@DPTr8ugzg^V&Kcf17hu zL3=lUb>HiJ)*G7K$wet_dR3HAu&j6D9N1%vk9QcVBfX?jVMn>g*%cv0*n6zKPQWeO zoLkqCjcN7F;W`O)i6_hzodoY;iv#;8hNrSyWl@rYFK+#W48g>g5mT(7L!5wo?*)Me z!5{-lY*}lXMZxjz4~}?hWN2JYfJ6Lb&>9c3pc9q$a!r2mP&HTXkW}FA>aJn=ZQc8U z`K|Zj`}`hzk1l)o89H*UkUD!%o;%yeDq3Q)k*Cx@ zUQra2bh~=I;9SF?Ne**HxED-f*V4_P($o8=RXpL_LSlz_sog*@D7Fls*godB48t~( z))Y--i69rg$2-qS8GoV9q^4i;*6yHVMOha27jK>|cMcgiERQI?1|o1j{Oo|;C$&&W 
zVXZ^QJ|E7O-@N6GXw$l=%M-BnlP zjLWc+)E?L6FT;l;12=O#0GFgDddC2U8$gatca5dO_ygTSQT5ujr2EJ>&f0i$PuTK$ z-<+;>bw`gS^9xrVk$>-nieB*v_#B1G>aFd3x2U!8AM|EypkqANGkRNM3;p%*gZ$!Q*mi%W)FH-*YXJ)doSqps5!6P;9{ZWw zC!Yg`4EqNy0&!RRq)JrN8UXU$&YVKWUVuC90yb*QjnmOgw#2fSd}v`PH7vqaq8M|6Ch^0 zLDk=+doXX|kpT}`pVseW`5Y*aQo0>3_s)^3@}zL0WMcP@TQW_O0)m&?_Y9iJC8G4wGbT9MT5k^kx%dfDdPR#h^_q(iEZXrV%ertG_0@7-PD+Yy@2R|y^x&d))`Y8mV4snXPm2(r)P5l$mF zIWm{Un1mqjuCLuyS$?wAG#H3ly_-_)VzO!$jKh!GFq|3e+9U(Lw?i=)B#I9Tjt=++ z`L%x3v+td_no~dwzjA))X5B-#zHT$O9?iq%k3P;Wn;bl|_-1+3h;7K*(HOK$ptP2g z9#G-S$o9clAZQm1KyuugnQ4YTAmsg+_z1POLOW?v8xPXxMpuA+&mqY>lE^P2^qBPH z7<$L?Sq>I{5&qx6Akl}{(PlIm89$8ICli||pS|aIKP{%Ma^!mN*n)=hy8pJ{eGW9% z0@Mv+5>6511HTt+-R}`Noh;nn-6Zmn3E@^43)yiqXo1ADWUY6#1MBWT9f{BwG3fL8aiSnnjj82wn(fOF{?>>il&EmyGz-fs_1;t z^f&558bPH-Cl6CQR|j_*c0X0AA1v6nTFU5gW2?4N2PY!ccA%J_0E^ZbV063O~bBQ6nf)Bw;PKHZ3ws{7+j`1c&h5sYQf=J)HI6LWm!{61Jb#p1H zjV8-+|6TkU(O;ditOWV))$}-@{idZu`5uwr+VPv@QUCn~W}ura@VYApYBcO(f?Rtq zkK@7ZUsj7(t5xVKzhu?dW^8e>Ze#L$A?MjPp0ORO5pa0O`?F1fSZl0xh9u+in zt-zBf@91X>&+NLiL`zovv!lu7{n^KbXSDkdwdvqG&;)T8fzf|d;P*C0!E#N3fXP4v z*6t73mR%C@c<;a_9-n^|=oHu~FKIIVD~5kEkf3rRZvTt9s@e-AbUO)cfXnQVR- zdOiM0Mqy}Rn(p(3xQ`C82UmYea-b2j;NghYh4Zrihk4TU8EO}v>Br;4q;NJ~1PW4! z9rMc%hMkcNdCg-lb;G3dF3VjE@&S*>mxmV2@cxz2q*!h{qE_az8V2#btCfBMj*XG9&at-j2E6E z<;|<#LZ?NsMj+3S7Jbz@ z?PKv^>O_Cg#tX03S^diIm}ETg%V^cFb1J`PWyfT~U;IBZ2O>2Er22+o*fP?dIbk+~ z!?E3WUJ?T?HRMoUjxL16+H!FspF%X9!dCzaoFdk0{zfk_DjT%_TNu6)0p^`68d|!yVH4-!%%=P!fijw-2aEo!2>l5&wtf)#mv#vX-dRi>ZRKsz_P&H}1=3P} z^P65V)7Ljmn%x3F@89ih2twAyMqkF?VN6@viC)dXtff1tIm~m&m+n3D z*nOeAZhEeZ3yt_jw4ynRywuQTxPv$PR{jDCw3NnV1hrNQ#_UHawj z)n)%v>wgiOpco36#2|fLw`BdSPNlY&)U@%kRn8J>Km*?L~z_g12DE)dUvvwEdwOYuldm2k3N5m3PJ+5wD)9$%1KmH>3L@$E`1W8%3{`O$k`nTNrSkcSc^fh z=zpp}{F||=J23|RpOql~&7m4xx@=H*^mnEEKOdk$R&I#>pZ|?WA|VE_IxQ}o|B?9L z3&DT`>i@GKdp886Wd(7$IBc9A^f>1Tm*W}-Q#%i1_Z2)KPEXhvI2Xose9ps5jzK)^ z_uAdJAnWQ3W9F+t>Z`C)mOe{&4yp-(b8f3}kaUL#BCUOwMuV_aP!FSfvJ1T+T-mof zh`Rq}4UZ*7(U|Yr)UG1lf2DF;u>QMMhhnrBzJGXM(C;$|2}S9-n1RFn&P08|7Yt5n zC6Bg;#bH(42q4K>taHy>8=25Po+R-=*DL4SiA}>U*~r()!B>Ri@C~rc1Ouw}8;}uy z5XYAY_sS8HbEOnfUb6kTOzQJjcfSw4!-ur>MS-RGpaEj2p=D6S0XG+Vsao6M5UX|k z4gX*+OE<$m2f%V^LARO@w=%u=#G&YaM5XGAPDZgj6^EQ54T6mPq#|62sgk$G`nQgScczMt_3O?RR1JK|q4 zFxnar?% zXZ2TC>2acIsFf@}5N@FT(G5fOpp9ur=Fg_8xRW|jyfqO$Qx>0bsLXgmrH`CIRt+d8 zWZ<&@Mj_Ne%cMm?QiM*SofQy~G~+^0LA?%ei+5pY>F#eZAa7xo*gpg6=AJ=PcVOUW zTA++zr?Q=|Wal%`x$Jmd$_N~t(Zuh)hR%aI!wu=U9Im8z1oBoPP| zAbR(iR@+{hid#s}>wG;tzh3ra}Gm?}h`Bl<*6Xik7IRKSwcS1BnC* zXwlZ@e~NC(9l%2C)kr@IXSS8iGF?v)o(}<)*NlDhENq0^eUTdMDN%PF^euG-vpfIw zq{-vDXD}y8ByMc!O5WeoO=5+Xty?XPp;$=W_m<^xdqS&2SIeLf*nNHwb1b`4Lmfm7 zKk#Fm5)&bjOr`Yv>=p)<#$x}9Qa2RhwJjuqcV2g8MG5S-%ilY}V5fwj@x*dli<5l{ zt*;)jGs$+hcG>>a#=Epr_w3LT{l2i@-Z-d{-2cs{e1&Gv2?D&FGeAeGG}Q^p#2Awp z+fzg_Dc+Q_SnbyJwZn}}ZdeT)pO2MmNRT@2W++Wy&W$IDOMA6;ZltMyLGvIDh}Q0M z`Ur|}d%vj_kJ)*ZhNJp_s$_X!#JFf0SzHSrW8c`=v9^>;&@>;D$M3ZRg?6V39D{JE zA8cSf_Fz-^k6Lw{-SF_j^L!4E<~K?>ph=~Nx$Bnj7P}N_HXlsp93_jw$08A)5^Uen zLC4)o`j)Yvh!V?}-a+YZ=!arfJPENSAF$18YM~t4QUBuq$N2hlZFn`JMiWN^$M@F# z{4^U0ZZ9iB5Q88h-CfFT|CqMIg#AMNg<(p-t&?&Jr+EVqJO>}IfhPQ84;tbA@tg8( zMMBhxC~;@a7G5|MzM=`nd!37iNZ6hd*6!5v8nZ>2dXGq02FsaN{tMYVU~!7g;O3Rz zM{X*VBwM_#zO%5kd>vmNxIK&j#io=fHhPeNE9mmwRlK!_RxD> z<10OvdoJ+JJKuI%0Y)Pn)HS{^ZT|*!38;AfOh_EoMELXYL;57b3)?&R3pOCe`ek{&@X zR61M6>u|!DFY8D!-(F?{Ru-QSa~*q+(~sF=1byQlHvbdrR?}@Lv=Bixb8eEK4}3LtGGpkTSm3rHb_k9!14kpNr*PSw;c+|(LXhvPO%sD`b{jI zPbdbaG3l|8*L(+roL(q(-@d&a+SLfQOzgs;Sxi71K@(`my$;jEi?sU=^F{E>dqaC| zaYa6_oaM)58Os>|V^Sb6r=UUmQwVD6WB?#@xuvY2uah#1XyOvICv(@{<+M9BRsDwr 
zSaH~sVi&x(`h*x0Vf+dcp**Nliq_#hpyWAfkmy1M_2Kychg^6c_6v!gonqkouJ-)q@O5PDnd@DeXz(HVU^%F`5myV-`QZF#gv7*>o| z2JByOql1gfCLRBnWDsOB+;*}gJ9`|Ub&6(GR9T%ii85%{B;3t{O8c*S#U8_XtE2Yv z6_ieJzw38%n(K+X+m`MPqAc-pCBN{CCzvyQ`6@{^#4yir+C>ohk*4Cl9a^%}{QPB+ z4h5G;+EhqI9Zol$@8RcsE_zup-4EfO=jiIC{` zZJFIY5Q>JI)&m+Vu z)2_`_(iFbX`*d)ce~xUTYgs>A{rjU$$p?Rvl)nxr7V|ArhmQ-PXVn05Ne^t=A8Q)BE&9$ameN8sIFNy;BT z>%3k>MEn05{Q8%>#T41*+VZy0kBp$EAcW_D(Tb-|5&ju^H>6cq{~Uq6_%KC@K+yw!Z=iE zCKgwD+DKBYCi67J_w1-5jhU=FwYX8LW^rzf>+IUsTh=T3A8$8)B?Q?B0j~kd8oC?< z*0mlm|K1wO-djhA<<#yiWfQ6C3bOwiWPeS2w<3pTs*t=^D=aRxF{_*bF_>;DHyGr%6gC*`wun z?$c}MU2ugL*`O~SB2$jrEQSP-H)-6>7G>UO4pl6HL*S*C+$4y5rVnAxD8Ip5>vq%Fb zr`U%Ne*yr&L<|C~=n3-A_ zLIbu3n~P?oy(ADN;*1BSoQGANKcj|SKIi+mSyeKG=F;R^ovM`Tbm2-+&gy(M&-rJR zmZz}F!IP{wr5P3gT?Pj7(k>9**y5`chTxytve!?z{Scb?IBZ_wG&WvA)~so}!aQhm zQlqpu8Pd|qriZMvV-;tMFMcLZWS9qrrC$1qF8&hGX;xW#w-k9N%kpPR9fQ6AOAFA&OQR zjZbDx!tujWOrGAj6+_BMG^)75e<_L`o{vPq;hZ#apx(_HIIL=3P~wnL_75GMDWDiE zBqu6=Ne(czFnX9L#;I!MJv1l9HM^MXstY<9QyH)qmS(98TA%}E>80+<)8X7eRWayC zeCBu7pJnieym9m&dlvE3U!aPyy)?M-QA^QKrq-$7h}Hh$d^dvdaecrM7=kJT2Nls1rA@=q}4PL#v$1YQ|<~k-!-XJHOUk?C0#Y( zQLjW2ZhF@ZLR2In;15kaU}Bk3#TTMmXGPP#d-INCr(}m0si<+O&tjw2>9F6ozY%b{ zobxE;ksE^e?T}ALr@C2Fe^uT14$)yz+bWEzYAuVto+-8pr%esJ0V|cpxkZ8dIJG`G zV=X~i6DN@+9Byti!*Ut10n(XknUB!$Wz)L+wSZ=K=gwNnk;Q<`6KBO77y_d)Ro)Wp zXHfXEg=cg}e67ilF6PPND9TxL;s^LZr((SPdtboRK~|&*n|9kX+Qk5M%)>0A>@@9L z+b8b>=SeCAsCS2p-y6{=g79EC-A+yzN9sg74(Y}$<3HTBzv_Gs+LCgF#O)SX3IR(u zxJ$ZZBwu#NH)r#xB+O_>V&DJ4I6OEsIAQ=cym^yfiPxbFdvWnxJMmA4ioBxBO@)3w z)KwpnDrb|gD^Nn?GNv7#2fa@K3f_NG+JBRA(_~EZgFpO$9#Jc-XcaUZqI2g zJjK4=eO^-dIA<03L_hG+U0^_L1C&7Q$U{G~{MNClyxQ8K;8mPy8k>(fJh*iP_~ji4 z=wpcR+vw@A<XvvbCo>SZ z%KWrvMd2UBujT!uRp+}_)B}PPxZh3542`_4eE<1I+oCGli5GQ5Ag_4P+lxT?+e@_~ zm(%*R#}auEny&<Sb?GET{3+bL(!Zk_sIQT0b9z z%cCsY<(vs`#>29Up&z3yBw01=h)G^Few_DH14=H|8tK;NU;U8|&!qe_%_F1A@ofo@ z%SyN#=e^RdTE-D;OTVzd>v<*|AEQvI@1g z=cEkN>+6PoF;t2pf!wZ7#QDr=V9Kr3tf=4H(r=G_MK8p5d1>x6O-rF;)n-C!BA0X;?6BvO7An zdt4ZXKBr(?Q1;wa6lvt_@RNf#h&YJ2Sb|aA5B0btpWK5t6Ptv{w6nz&KS%*q4CWusn=IrKX5n$6UF)%uh@=|KK9qeJ`) z$X*clP0TLklp5_J+Of4$xX%``+ZX13r<9XCPfqK!J zXU*{fL81%RCTGg;LG>MMAC^9QwMrG|z@2DVUwd9ORcE0Jp=8$jyd>S=u>eo(&8x8T zQMf_{vj^XrPmf5irHAswa+2Qdv0L_VnF7uM{VPcJpaTcJ-3rN*$zwtIn|eT zEZ-S%>Fs;-)hxq+!=6jaNe>#jV|*oY|LNj@bTP^4qI6<7K7K+WAitH@{Al&hJtn;v(1Zvz?3%0C_1u18SahL@ z`jx;FJnfc549K#w0L6?zQZo!4K4!G2uAj6*QL|I!xsDBs1h|_f8>z1ZQlN1fup)Cd zkf!(g64T~fE1}ZCN0=!(qBQDD?tJi*U`5Ls>oT9y z?|oWFaZhx(xL6%uK;8Er1d=7!-W-^WGPiJ(PZJ}AU#W2ygv zVE&}vg4hM#L}EHlHDO|-x;Pe!Ho}haIV|{+hD$)hhMw zdVeV{sc1nVQH?X2yGoE`euEH!d3lw*p$V}motl+i;^ys@}Xs^ zJ~rHgTeH4$T*{AMdA+*YQT}u#OIqXFcC}qP3sbzH09I5~RJCg6UWQ>~`1W|8AZ1>` zQ$LD#TdF-uJx4*7L$SKkaYQ~Dp;flJ5)HhyD?TPzdnAIR%GyKm&wKYs-RxQjZbR{& zL^y*C)4NQbAFq)w4oWkRH9h&4C1V|My5QrdxA4|~#_1;-?M|H3XI+f8PK_@JHEMp3{BTww^xMoq$lrXvb zwO=(q13~{lVs}RmGIE03d%9w8zsJZxW5Q)HPDW zpm5}l*m*z56%;blHPZX512in$5Me19q_%;Pg`Kmfe`rKhbZlHgQd(J6Lt|4*$Kb@$ U>KUiV+&Kf5X94m%`zQVJFD104SpWb4 literal 0 HcmV?d00001 diff --git a/assets/windows/tiger.ico b/assets/windows/tiger.ico new file mode 100644 index 0000000000000000000000000000000000000000..a1b190cafe6b05cc0dbbafd9eb45ca1328e54016 GIT binary patch literal 17598 zcmdRWg;U$#6KwgRtqAbG?Y2Z#S=uc)J*xVqUos{UJ}mpR*?jU z+fNosZ_2mt)WNuX!)pA3erLyP^lRSx2L}fy z+O#LfL9fU5rik0CIyM8wf>yEUk_Wc_`AhzP^?=KgUM~%``>JIC+y&hO1^^O3=X>}N zqz(fProd2wk|KoG9nViW>VPSr#t{+DuxtcJ<1vQ?2OqWph6~nrkscYe@8GiBF(s&d z*yZk;a~SK$p{E6Vok&cY6aCoIxuhKfx-3;BHRv9s40fknf>HV?r6Ay7IN=Sghdv=| z)xbrCB=LV?!~jPUH~deyH@G>5a)PG{r7KeYfEzO8IfHsea_>L?0%hDU02P=A)A|&m 
z2PBv^4_-@$AaRjC>t7kv@7i$BB*!SI4#9O_wZyRfIgu|O*2g}e@4o!uKTkm)Zy(Oj z2Lkx>BY_rE{L)Esw$AHJjwIbK-wA#RdQxBkE$p7(gh`DUhg{VTm?i52qO%Tn{(T(T z)}t@ex_M_Ix=m-CxNNDKO6|g2Cl-??EG3q-GI1nBDsz*orO4Cg2kOCeJGzKY0AwdE zXPiS5Q-*waj6z&-m%$IxF=@6dv@P~2j!tN5Fr?VJicKs%zq|g3vs4|H0lp-LWZd@| z(cGRwP?YKC2+!N#wWFbvpW15ZruSRW+SYRj>L0>e>3I*$1W3UAkugjeIsC zYu!;{r|6z%m)&`>_6!cCz)mu(ap-VW9;p*gd%oalEoKHou}3gfq$pSvputdWsY}Ru z=-qRvw2pOt>W0}vs+m6g%-MD=e)(yB&9U{pMEh&yAYOdHD0&T=0aavr+{q4?P<#)? zN~RPv6DUyvV41W2#(?({wFzu7X{q6-{w#oMGz7=_H5L_u0ETJ%Enq}p!ec6oT_vAJ z{$Q)$;V;0;@)57{yJ)GFKLHV8d@A%7El|u9L&+Da>wQ`abiV)IF1N27p9^e4f-nTU)ZM+@YI-+p5%-g9 zpMHI^r?wzppBYrI<8G%Xt7@)Ro``bRvf1KmA9EggT_fCc$>>eT`M6#<-$VpR&gz*1 z*n^*A>2d(oilXQ(vd)7e<8K<#>t0ha0IBBBp70hg!@jwq$IUTptFMWrb?E}R?wd1omsB?r-X-wO8TAS$Uw3wvDM*cTsDT3n23JJ!p#R?#x} zhq~HiZ5&av&oz5quTrWI8WUY6i5b2H*lL_W5A)qM-LKuX@&vPKh^~diDYm>=jQd>` z!XvCBD8`bWipnssMX2_TSI=4;B#=TwF2Y6HNW2qRZ7&zG7mV;l@J_NxipHd|di8kI zdZgWk_#z+ixJ|#;3(8O@XPukpDQ(D8)h@vh^bulhyw}|$j`P^_^w=2z+1?FnUD(*m znSh`PY5bD7njH!6Q*NcG#pslNufp5wHx5NI)W3em*~#Q6ZQwc;ybLOoBF@qSS%lpN z5#@N67cYD#Jh!MuIN&>yEE>A=oLQbGOi$;hTGTl;4Xv94A9#^J8PxT1)_N$aBiF#4 z5bn2SOvQCL$bKP6DEh<8m^9{Hin%f^f0i76QU0E1J`Y<$R?bPbW$12=R)@=t6w=OH zz36K}6`ZbK`ka8UIBRGs3-o`-})=#DSBR}(#Bu8JQI8A4zwFCBC0f~GnCWop0oGc zLuS#BR$i*{YmmKtnh)yAN$@uPihXW}hflwwKjG#%KAELZer+w)x_R@8!F*!VpaB=W zCb(DEfEYB2%AQ6$xB4pV&%%c~aWDtG*LZjphAT&?bLfL0B0lYTaXirX_pBgqr(4LUMl z{c?B9anR|>mPGLyZ{4bcYw&Tj(UF9w+;hBotlJ*rWOKRg`11i-Czw*Dv1W=H@eC{n zT~ippYxJgk3qx{!!RXSjIu{ubq7-9H>>UahyVcG;UaH3yo;#H{;<}Y2j?gPcR8>%d z_9pi44nFI{Vr1@9nzv371j7J#RYF$P)VL@0FOuf~s!j%<#~Fu_U?Zx?4r;JzK{+Jp z*bsv829^ATAg!on98ysU#-(IFUN?`lJC6t(!|?PnuKH2NpteF^0})rbgSmEeKw<_jfaVc|~RhN9_(=NO~U^TzV+uwvt*A!2Ua>!;FGwPMC2 zEDW{dRT5a)(xm8GZB|{lD}3~(g_j6M^H_(?DJSf@jGQWWvNOD;|MW}wz%K4Wf_uT& zBR60V_3}21-Hk5c#9r%^`s2ci036wx-=_?Io9w7vL2vub!}qc&y2y6SN0#WKsMU%OV{-3Q?YV@ZqAo)5l!=$;KMiM^*UYk&7QU1PaG9<-?<=TJ3BCp$RX zIxiL!!%`d+8oIECOpr0chQ8RAF-t~6v`jm#rP~tQBm|}YY$3G?6623as{v~bpKT-v zL7uTXsoU`UlzLqRsD1@88NWfLphUU62aKEdBDk{^bG33x{Sm36KO4wQt8l_l#kXA{ zpT}-8J|*fOFaDBV3ZDcPCDl*^W!xJ;Kqy(${H&myL#0u<7F;5*LKKq+EKT<7x0J1K ziqeM7dKZFV!ezmU&*0};GIR{n^S_LR%&mW&7}#Op@x|9eQi$4=m;XF%uv-_$!ghmh znTM?rrEI;(Z+c-Ov~;I6Dp*Gexw{wk>lF;@C$1f~k0GnjQA=$SipvZ$EQ>LVsTw*2 z+N9eO9p^+e|Cu7_`_DKCHXM;(7s@X3J{Fk9?XZ+d+S zarxVT%a`$tx)M;P|FeDS&chGio)y(_F-q*s?T{uevBW!@BA4JW=E0TgHMT7K=}DUr zmj`8%EkJdoEa}Qz>r3wb$j~CY=}yc(bN>R&CK5n@;3JlB=*1`-Mu<`4sf`$G_2F)63~)82Q%R z^BA=WuHsmUOY6Y!^L2T*g%TKrS0fW#YB=^I5PwcTH zZC9HE8CITYW5YUyLOSm3jGo;9T;@ zjP3SV@Y#oZfwxA;6~?Pf-6C-958 zg)8Z?KP@tek((j0*H!b=Pqn6t+hc%(Wr5bOw}33P44Id@v_gbuazdySM&CBw;&Edr zabHOBD|V>8#r>-mocRxdKd|{O06PvF&e8bS7z$RaPvifg1A1b9$L$u)%@|1HwY6K{ zr!DJSzzJ#jr)Kd7Eq*!f(-5at|2$@6W1JxoO}HJ6zVNM;P;Me6i^Z<&$7PZ1J@NG_ z)cGe-6IDy%+@{h9&K_ zr$DR9!E$4!THhZf7q#W;&(~<6Nv#qv!nMTVXMNx^>95~!cArBP_FS!ruNYV#l&Bj` z#PPT>O*T1Q#I6dEd37Oa0byvXeZj8{#$>fcOfJNQ81#y>{t=XEiTzf1p#2PrEDG+F zVOzf1{qO#iy&wI`?aQ|HYubD!q)B3!cZFzDFo`W%5~W<8Bz|6u{C}oRP+HBFbzGZ) zm=sSB3U0rgHW?Hs`T#;q=SooP;(#be@s(2TqqpczLtp>tralZOu9X!AyO{iZWTSBO zaqBSFN`P{!31BW@_UFW>Xu3srvAQd|Ew^gojCIrYi6&fjArpY5xe=(cg#JgY(Y2~4 z2IHq&b+^4g&n5xfoeM`j3y=*<2_mr3mBH4Xt^WDyHZPF~=JIh}&XI&pT3p#U!B9FF zN0;BJ9NnOGojfd5xi?U1(yf0Q>yT1H{L=9mGLo03N%6O8_@3w6b3g@)x#)~@M=!@i&6}_iyg7k_UMt-Y| z3}tr5?)~nXS4Ec;okVR4JyV50T{KW2e2VP>+w1S~vRlx>>6GxaDD>aO3v01e3hrz_ zxwi%_gon6aQx`H^$50KN5M(F>4f`;1@TsL+pV&cE_^CULUi_hHdJz$dE}ipNg71LhdUZf66#5pfXijSzbc8;ic~99S-_ z*24P@*8F`P07oCHR)}=WV36}5Z+N?s(0i^AC&_VDa-^1JL7R`l=zye3j32Ev{&1Hh zv>mhLZMte*j^H@q;#utsGiPLC*W95ae17=3$0uT`Y))s1U&BIQuKG 
z#D(6CPqS8i@SoB~)SyQz-ZyJ~#oM!MP^h!6DgusBe?x~e4iKZe$%(gU=#xTWX413V zC5n6F3718I!k3A?+?tVJt4I7Ynb<>kkrtxt{kyyLu}x`)@CEA-tWP{kp1-Fg)R>Qw zi~AdLb6PuuKkW@1Ic@iL;m&x%A(Zp^pM%(eDU^WIH$L|!OM&K~PhY96v$bGX3Vn*# z3LE_`;Bh`Gg}tegG88-TtWYiTGOGq5mnX)Gu&MdcI`IKy1Ha{uqjT9WM{UslkQ;r2^!TGsp& z+MhpF1bYZczdIU}H@MKqq5#lHO5eiHSB}9L&%n%xP%5P5tB=yq=@Y~Ko9N*l^CXPz z=N$Q*+cgv}OYueWf=4+7Z@lvo#F60%v6w|IH!=OU>!}AEOFs-G}A6XihJAsZW zYwEDNmjfke8bz`!`Y#(ukp)5!(-Kjs9Wjt?-<3D=&EupcMx5&{$m0`Lqy&#^<1km3 zovfBe4XIO=imr5P#G_4s1Qo-qy}9dIPSp1cbHVx(SyS6hecP5UvNl20)L2u0w8|yl zw~cszScXshAVw^~VT)d+3qwlGAsj(Rv@}b?4j9m@^a$Bo>ia}nhjRs{#NPagb-&7} z8P%gkA?(Bm7^iF8^%o)((b8RTx0_pC!9YIc7a?Z2Q2AqMIHpk1$vZXmRXlfeGkwl6 zckP>IY*D^}X2Kh@%#*I>7LH5 zV*wTY8+Q3eW7^39qiY)6Ltb2UV~d!MQ?Wya01DxM!EAW*rIMF{>z^6apD;{?2gI-x zDceJO;xf?ORbd`M)BlJBC_7awuH$?;8AQ;)*e8aPA;ban~iiUc(bieQqQM*1i1jTjs5+ zX*d22u;#GN}Pw>%1R=hr=2P2P>`nJn(sMxys1VH zsUzVPd=!23)b_1AlR}M1k1JEdmBL%`DT#-L7de?G-cmI$NVvS1PK3eu16^O><*5PF zpS!D4AKV9F7jJD^@Ax2Bx4ca{T|m6e<}bt;zPTCcFcUAJuWS^%e*Erxwx*UXVOTFDS4PZHjXG2q(TURyKrp;Tb$UR2C)stP`W{oIudWo8a6Eq3{GBec3UcXURE6ZVTh(Lq;KMAkrpT zDE)kVIK&|L_^4TY`P1Gc-O>GRztD30MvZc6y*_Dh7W;qqDFKhYUzS4Az3h7mjMU{) zmFhs~k-tI^(a; zfrXQKPL;qyfXV7mIJ=!xD7qU#`6YeNx0R>@j1S9btv+48#8-H-gshdZ&95hF^WMv0&x;(N()iC_wStKrV68RIkp7XMr}NVml9J zeyQL{GU}2Uv;MA3;$*^M#hcE^nOD~KkGmWZ@}-lYD`{ny^~AxQADh@9-H^DA&b@%w z$V%)al>W_PuZAYtMskFhXSM3O8daowuj%9U$5AUMw^rY=C&IeSSXv6f#StuxBH_q* znI)rC^Q28cQUyX3g;+*Dt{&$wsMDbcl5)hDX3gaV*&EwRemdW$W=?0M@$Z2`%VUsyOF>y9J8_sO0=djKfsrDIz%K?|a zQ58-sT`0mQ-4f5}+Q=8P8}9+tcDxw@9ATH;3`AF?{|LmOd?FKC2mzBg-TH`AKNKOi z{XlE)VRgFuM`?)XA~6h2rOl0tc|?ZN#&;ozd30~GhC%=}5Bt)H82N&>Bs0G!x&9_u z#|OE;?d~Vnp2DzF0AKxsU&&qQ*d|@XC+3{T2G2ntm5>bD6K7&?SI2uve$tR+g$XU+ zwY`bOhC2?O@{!$rzw<@vOH!ApYDy2S-5Y%LtKT9AM_+^>oQ@#Pr@*M=R#aSPi>gtK5|#{NGFS7%gnt>}VY%Y`opia<}o;p2*YgQ*^OhR6HT0KfPI@e<>_{JMM<|t46KQ%ktgyG1PWDA$(?xmp@WL@ zhjg5YwH|3fxYueA)`M7LF9GG70ztcR+PBrGkAXUZ$VCzJd^!8SeIr>4IPqY~}<65qteKBE$8^fGk~D=7nM z4~m<(L!D{G!|9)4g^YeV!F+oq&HpsQc+0isC&f^(GRfiC_3*4#b_Zcfx7st_q1HO! z5wk5|7%W;OU^7eFE=3f_&42jd3f&sN4B%`&Z+XjHo^{=W;<+DLQukKiGo_7!(~2r! 
zci7Vk`jx_4c=!vJ#~T^1lJ=-$tU#nx2Ww&7F?Tu&z`8XJ=Er(XAsr|@CX{+ z1@mga#)b3YtY7H<2mQ!K0?%nfpWm%_DK<8G{tiwt%02l6IT)Kb@?~l-O4AAsPZPKg z#j5PGU9>{&NC=y3rP_qLDmok>)~WUd*4Ab&t6IFU>&JtR-%eD^=cHV8R90KW+#9V~`Sm;UHGS#J%pI@w zT=?8@qt3X|ldA2Tyb*u|W&mf=?z@K%RFO;BrDJjxZW|hv^bgKj@6P|JtUp#*AoVrH znz)q?j5;s5WNz+ze~Rrs!0K-VoQ68IiofuR6O-M^I7BUGToEj|tk(%acpYXVS^sZS z`@l%`-U9c5P`KJI@&Ozn4862~r+#UKfJM#2QN+K)m8y5?J|<| zkKZOsf*LLmlUDYFndNrx&3*yg;E5A)S>ZLDHm!kS2sBn8#a?(3ceu7EGv=FgzdnFbrUneXbjS4}IsSb?U3K@Emk8quS#<|v~sE<|eOEyFl@g3Ag z-`C%QvFi}b5t9`71m&xOo4qEB<<r ziXt2b>ZpQs@ii$5XQdxz4$g~|*D}^p1YcO_%x}45(u-#NL)Fq{v>iel^!Mn7zsO4{ z?S%i!(;nyIA9`5ApRcsb{XH1j0t}2%8n*Xk7mV$vwwS5E+-)o#ek#vCT`+VZ%BlY@ zF)#|f`e5OD&$sR3^;0+_mbv|t zx!i{*=iSEN(->zQLTt(vJ}b@OIUc;Sq94wf_gt?C&9Fbk8nLcH+QVez-zrUwgGETI z740G~{M zT<1VR-xwhAJ;%L}T!P}yaGW8xCmB)G0Ha)hFI8lD$)4i~o2`@)`5_|R=Fn)S!YN9U zn(~-N#GWh;%1L|$CmVrhpl|nb#k<*Hiu!@6A~$s`)OnejGT!k?$p%yW<3q|)9@fT^ zJP0(~s?=66Heagi1pQMd`EjEbyJis8)U>3v{L5?kY)ilBSEb4!i_W1?w6lQo{v zV!nbaOTp(&{wM6#uurs;WwT6RWTvO!hcd)9N(IK%Ir^MqQ4oPK#Zqy!ASqN~?qHUi zJ!bborcKv7)E)m5ep{$(>9-I{+++TBi$mk29KwAg0CW?jj&}mu(ALK7yqz8H`5H70Hy4<2R!pfi^ca4_D#!kv6>nArQ4CD!G&@Q-;-uO$+_1+=y?5oNbU)8RE z{*B>9;h*EwA80jgA$n98Pp4~Hn%ev)Oi$Y{*EyoZ*j5(p2;-^OiWtvOgbfDuG+`-U7@9A zda|7`uYYLjq!h+0vPaR@PUNtPPTz>BkSDk0>E4I$o#Cq==||H@0eUN~prSSBzK6wT zQcT9n8R4Z<4V6;f#?RR(nNah*C-X(;LiXIHVQ!qxQ?K*HXpMDK6pEW{Bg5_*D*>^W z!xowcl>mPXZ$lB~BMmUl1lNs-FM{JdXmmJ%^WELFQo1lymL@d3K8|!#B&~EC$XX1k z9=r>cBe0S|L(n*#bQ-4=RWhu7S9SmF=8)L|fbth~W1{R)I?p7`X0 zshwp*Cd!Cin?J&%T$~ON>`j~ZgscR`!y~-KWdfmtM*~*PVbz*tyP;)T^PbQz>=#}% z$N-^A_%f>|dyBxyznZ@b?IJ&4vt{pfHMworE|B|ULfD-8KOR)AC5&6AFMo7c__Fvt zkSXWVubhmPuPn)IMpS7Yd$l&O|bv6qns>njlgK3+L>NJ4?%A&(&&PfOJh)tP4pR;7F zlU#M21um9T?Bd@2g15p7a6Z8kjmI%&45IeC$SvMD9jQ4*R&c2uGao~1Im?IN=xsv+E!>r|qZoj7x^C~6ozJ>R&gSh{YCqm^Q zXoe`Lt0v&SL9Zt$?KZbsl2lyHDzH4z%uD>ny`r+^?X;zv1e8Xpqd^ms8#>!vfvn$_ zkne1E);^E2V%>L|##qrPrH60a}CcDrJACMks!^nesIN{5sVT=;fX`SDZ?tBtU@TE=_UkIk_7RMijWOI4!;Np_T^l5m;=bA@beeP12qSJ8t;}XD=W3*!r>2h)?DKLZ)~b- zXi&q(C&*3X&t2V%I+~~q@JLI&foeU@J?zILugagjWN!%)x;wIqaj7@l8XGU2E+1Oa zrSUr3isFZO20d)-{{{qv`VcqtSi_Fb21L;*-OSRw>BS%wOujb_jiTQExlITAi9yCzw?DSxy;|CE> zGLCA%kODSr%UHKTt{=u=@Q$H|X7rrOu3AMo8Ep@Vy(LY*uO$a^KOGp_m#^7{77g{* zd1TA9=q0q2N?W;?loL@xIf_zo-hb`S^!R?cvPc{#qY%uMo9=%o0efl7tGp1dJRxm7 zuJV))Wh%?YQtvmh#H%2(B578{9`77q2<`QF8>*}6Lc7kXA@+w)1Go@ zDD5&?ZoM^ln%LC4_|yr&eh9I^d=?EWO!8sBnt%_gYLP5P9E%8bVi}q>Vv}fBgi>T~ zouqZsUIO8?rVrH<0$ZdioRtjR8$%Umnx-FOJd5)qF-lw|L}>Bu<`|Fd$f!Q8Ve*`e zXQfp;*}TpPw^hZdQI6`d>-A(l5%kwGu9|`4m>HMgy#oBnXAS`(=Xbc_pD62Ovni`nd_}8~f@C_&Vm!D1us(KE@gc3O`!&-Iv0h;etb$ z`ycQY*pif-ub)v8`9BuNKF*ZXeUEbqR>q}%*nZfp^f&XHYT4>>@txOTW0c7y%bHWW z`UXY6pSahXzN?n8mav_``m}k{w$TcAe1Zzr0UFgK|}wJ zNrldoDiVi-V>@-^hq>}KHq96wm-WrPZwGGU-Qo6)<=4R)w)NhH$%gk%f*u_W_^kqjWm<6JgM4_~&k&biYm_W^4YaBr*P3d@ ztM9P`5vIq~X({}8AL*(*=l*kf-^b4slSb-D!lL3FtaxH=oN+QYNZf==c|}&_CaaNU zyM++^7KH*6Cqm6g-{N5Qe;nT^kwB8cG+QpNUylqyPluYXHkKSdts8j~BdrHqhrZGf zOzdNDZBY%exadwWc#_gl3-p5kC}q|drOXoSSZurU4?zV{_`pgHzeYKsPqrOUsbAAc zTltH3uyc<0`N+WTk^E`(cmNgq)WuNC^Zx+VRm z*5Y;3F7<4#8L#uPI04TBa>+i=AiUa#VUg@k>_2+d078{<@SS;RF|)VRP?NEiXw^}} z6y&IDl8pc|{>4Z<#aZOuSc~ zq<}QX?_ruPW@cBn9K;cX0PVOl9wln)+P0WY>9i%-sKHhSe;cA-)=NO(q|GS05uVXY zWm7ikkmCKCmHIRkqiu2@|NB=H-6F{$kD!z1Dotd4Qvk&97>)JQXvS-j)KBDh)QU`$etd?z(-B zW#O0?Hdr(y+@~+roepaAtG(fzCyFwdN&LhBrRF>AxIdgjpZb-xhsv_V(Q`8%auS)I z@lJ|o3}?rKnM(zRphxGrWemR^bS!=Q(f%`4^mMxS-4CG2ObG1sK?MsP${XkZg;ck{ zuZigoxmbEX=xfV{z2HdiyOU2nviG4?f$AkUQA=y4(M$Y$t@ZbzoZlu+nIo5Oundphgf&hNjPFT#Z zdERpf5Is`A+X@yt4#{{$E!l!g}6#Ko|nm07@EZ)vDz$g9%N^r1fu{K 
zOgbQ4krn`DY<&y!SS*UXZRgtsp+cKg4Ssq8*YB(syr^dVI zwbHZ`$(8@Ok6qb`G3#D7H`iY_!3E-f1YzW)i_%*`H_86+X@U?W3u0hJtQtEc9dx3% zo!cEVgaXwC(TzOmzA`bj-*goE)?%x3Yk6>Lne9DDMoLR!bSX+9hI-T~&LC1cXcHEXYuVQxQ@$2;*YWNv#n&)*$BalLorwSwzLY2&7&aH?C@RY* zvu$h=SBR;IKug0-qI=5q`WIG&(6{vQz|7PIJ-`r|2+JRf6 z#xIEJfxH_R%lwrPCAzA$(Mn4#oB6`I$-W6(@ z@zHOK3{Nb;jNYDXw70G|=kmhC zOfG7PcADtFoZz1D=}keeQ}bjZr<~xq?rU-kZGaHxhX%rHzP&IjNo*)dWu#^(G05WZ zkA+bpCIn`Pa=66fmTMlR8lZi`qi>Wi^?llwSA z2FT_zC89^sF`O~Iq<+NDTXHYYY2EAJ089XcDwB%`5rejw#q7E8og}ReR_MW>u$p^U z(J0{sKR!}|LaaG9gft_SIh+>HET=~Tw40>Ol7tc(hp!LgqO%xTJgFiH!1QmfU72js z43gTZp0VJ?iV;)wRz#P~qg=W+g;beT2za*e`^fn>(5Xt^uUc{eXY@2*qT;vE)4p|( z9<=e{EEv$CT4$};Q}@J4%W`mh7^a%9eaGpjLcoQuCN~-#?R1bs(zOjk$#7Ezzf*un!8zk$g{M#%DRAA?vnVzDFuJ- zrudhkly&ON1XgN5MYf6-d7dl#ochof`;g>C-cCS{)#)_cq7xgcJN<3LE|XU~%=Epn zYjQdLaA`dqg9?-DTzY^7Win_zYB|VPss*!QGEnkyU6CX@nYVN(55|gUk{W!-P^pU`ikTt;O<0Kqaynq-*IHx50lR*sWKzjP?yQuL4+Fx(S z9)F?qR7@J&Bc;6;`6F><+e3Av>9)owe$hRQ^Xz5MN&`wFBn|LcO|n~MM?8|kQPyFW25X^7nbNZ$W!wy>^5To)ZzQp5e_>OUf zD&kux=ykmD)Cx)h<6F3#Zggn=ZJ@tAQ|A1`dm}x$-w?h>vHouTD@vz0Y*76`*-{Kw zyY=d?#{~43<0142x})!U>2yCz^NKWj%LGU`xrShUQ?$6wti=?=o5%6jO}>6Mve0nS zjq3l_IKhI*onqr!+U;cDU8RKzad0jlKiTDuse_~)(KZ~A@^R?L@GAg9j1&-cKaMwO zMJ;?)t*S~D83Zs_mfDENP|Vds{(cd5++yU2MO``qUw$MqMW zl*|IDWj}C#l~Cqgy%kQGq!gV5$ewZxwEmJLD!Xsjg}k}>Bu5BR&e$BPd_X_?Nj)ed zgt9t^bb5cUYQgiui&6mWs9t?UrPm=2xN$@&VTT(}=V)xk0-=&s4lZQkClq5K3;skZ zfC~3Wv!BI#jVA;(^h@`+$Wk$eB0T{;3BGgY3=p)mQ7F3BoXH^a<8pW$j7L>); zq}HrcR!~vSof+nqdgUHN=PPXzpKGR9pJkvzUEf&*F#DhvMgSMNdVqmin_W5hw|@%| ziY9B;Re(0E0jr}gfL038`?sWNgd95^u3QRuxeJd(zUwIgyLx{K5zJO1h;D9Q1LmoE z0+S$*0R^Jv_2h_{*HNCwpwIt!0R<)X{lVxXXtxcPT^7uJBOFKT4!ig?VFgj6(yIR& z7?)-$=o6Io>rM4YDPPaM?9kBCOrQA`7*z>|^Ixip4|_`f5vu`PP@aM;*qiZt4Mj@e zA;zuWiwyASbaY8uHHUIOXJp&-DfLo`OrAlhjP*cc1=Sn`d-->5=hPrI7>Mt z_yOy1+pNw+tuWrL?_-W&+&EMYR_|Xw(j+R-NMxNy1k)OwkYZyxthQ89+fD6*zB9<3 zTWsxaC3|uM)N~gy9b9Oob2mXrD+2LWKONDf>J&wwA>%HD7ingg4^1Ttw>vQK`GxqZ z6j%>d)!YkV zj)Q%f0Fv*m6)eyl0bTe;*=k$jJ@W5uxs5<- zu;++n?$LG5ruQ4e-1LaWzshZ35t4ep(MP=AFsQ5$&5V@S=B*EwnqYYj3 zEa;5+8udF<`YDn)CZ#hN!g>;vfPC?&YriEaoYVd|uj)xic@07+%suA!M|+dirL(Wi z;FQ4@*kzEjpdpl}GUDjPt;Ls0#H^H%3Wh_f?&II>6stG^4tx2zoO){@Aw6t=2}hT&kE)noV5 zK@UL6IZ}(qTHL%nD;pA1DExx-~ zRRd0zJ6t>rgM39IT8%;LGt*Vidh4sBVkhWX_9a-*fmE^-d(ps5N;o=||84VMA}5g; zOb0a`N`ZlKpST9ckang%VeEpE=f9PqinFJkWKE3$K11pV9-;o#T6tjxZh~=x1aE_!VC&kQ1KcRa&!(@4R*T~I|l)$V| z+1=t#mu@RMjS~@u(~tX>HaLG-LrV?D-GC3)RFSmQ;1MD#`nb6jRV_h^Ayc7Us7{K< zCuh;@qFtI*MNftW*ZtSId}^pEUU)$c2Y#Sf87R8MNn`~nZxL#PHZ1+o8CvCn8g^1I zkY9;!90C}ZI`CR_@FZJPI|*v*37ZluQ~Y{suBwCbTZthtZ4$>Yc5s2uF?UQK|0E&g zRMN-vAKK#YYQ|d`!|J44S6tRT)-O$I2B@`W_C7I<3nOmj^3-CmWQP=p0IdK~vd(7$ z1ivE*8D++z6-a(Ho-Uy?_b;|ix%5yeNp$Q9h-Uupk&WHp8@3bZ10hnm(X{7HYAE)y zA@a!=ftvi_>Y7Hixq|D&h#?$lI*d&h0)RE9SmvfhUOu$8T+6@+3zggHJ-CQ&0<8Dm zBToN3?G_1IqM^Kc@nJZrrD75i@0Bh$lXyBvP;#Rcm+LcawWc^GN?S)l zpah)XXD`H_f@%8t?`O z>9}6Y^W)VnFa+I6p~3ivCl}i4i(kKmZ@2czQRW9=NYeCENiS_HXbr?8*Wcdx-e$1# z3QZc)xzt%G(`pZmK>gL0-=0UHyyzng#esi99xNV^hM8@gGTfIE6S-8CMF0Sr{{Oxg z;DySd`|`FQ4=4$1^Q*gAM!l6hHUKIFS&M{Nj~4$61BuOao+uI6&sGa#E4GYaK2eU$ z-wIdyVA7%tK+l|1AMN_3$!-|?2&PRd+6Hy^Uykh&;=a7^i$dg%oRVBJR)E$}nxIUM zS<3&MO+zVEPgZebFSZ4HDB4E9kLQPB98sqNvKF`R51|^U*z0+JB}4M-H_AQ`4YD6M zU=s?WQJoGZNc5t`;bXfp2$4N5&?&hu}%!e6SnD9R6;2Whsd5P_)y;k0viRP9j z)t%--8Q=qnSj`F%J1pQ44aTqO%$C!N2e(Q_&2x%|t*K4_ARyKRL-%x$UC`$uEi4;H z-zP-^chdi}od@=HIvaSTm02;IP5Hg$$J;cKTQCi;R>aO1Dg9?DxMOF}!m}$*uV=Mm-qSlvd*kfK7QjvabtMO1auoONR_uM{ zAaX9c&3eMZw=xgZCWl`K4s-!c8`$x$r-c4}Ip(E4qzObPxe*Kf8x;y9IlVe4M0oih2{k5ZgM 
zeiuIcslcFhV$*)vC_b4s7RP;8f~Sd|Xf0KY%C^7KZFWIzdcAZCBmYE&jH~+-SUtyifv-_E|YK5i;tX{QQBI0iU(w<1MoQ|Ls@f|%a+jd{(D0-^r za43V*z~-9ws*7odxEU(^IBuLXWHdUpm*s5O?*)exy)Q_8Q8RfV&fx9)^kJO*d66Zv z^7?E3=(}!y=B9D+Dc_X%f~7zIfBG2Fkd!GBruB8N(3|_pp%OEVy|l`XWlyx7w(4i{ kmA=D+ 0 || count[1] > 0) { + log.Printf("W! Deprecated inputs: %d and %d options", count[0], count[1]) + } + if count, found := c.Deprecations["aggregators"]; found && (count[0] > 0 || count[1] > 0) { + log.Printf("W! Deprecated aggregators: %d and %d options", count[0], count[1]) + } + if count, found := c.Deprecations["processors"]; found && (count[0] > 0 || count[1] > 0) { + log.Printf("W! Deprecated processors: %d and %d options", count[0], count[1]) + } + if count, found := c.Deprecations["outputs"]; found && (count[0] > 0 || count[1] > 0) { + log.Printf("W! Deprecated outputs: %d and %d options", count[0], count[1]) + } + + ag, err := agent.NewAgent(c) + if err != nil { + return err + } + + // Notify systemd that telegraf is ready + // SdNotify() only tries to notify if the NOTIFY_SOCKET environment is set, so it's safe to call when systemd isn't present. + // Ignore the return values here because they're not valid for platforms that don't use systemd. + // For platforms that use systemd, telegraf doesn't log if the notification failed. + _, _ = daemon.SdNotify(false, daemon.SdNotifyReady) + if *fRunOnce { wait := time.Duration(*fTestWait) * time.Second return ag.Once(ctx, wait) @@ -180,12 +322,6 @@ func runAgent(ctx context.Context, return ag.Test(ctx, wait) } - log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) - log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) - log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) - log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("I! Tags enabled: %s", c.ListTags()) - if *fPidfile != "" { f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644) if err != nil { @@ -235,7 +371,20 @@ func formatFullVersion() string { return strings.Join(parts, " ") } +func deleteEmpty(s []string) []string { + var r []string + for _, str := range s { + if str != "" { + r = append(r, str) + } + } + return r +} + func main() { + flag.Var(&fConfigs, "config", "configuration file to load") + flag.Var(&fConfigDirs, "config-directory", "directory containing additional *.conf files") + flag.Usage = func() { usageExit(0) } flag.Parse() args := flag.Args() @@ -261,6 +410,11 @@ func main() { logger.SetupLogging(logger.LogConfig{}) + // Configure version + if err := internal.SetVersion(version); err != nil { + log.Println("Telegraf version already configured to: " + internal.Version()) + } + // Load external plugins, if requested. if *fPlugins != "" { log.Printf("I! Loading external plugins from: %s", *fPlugins) @@ -292,7 +446,38 @@ func main() { fmt.Println(formatFullVersion()) return case "config": - config.PrintSampleConfig( + err := configCmd.Parse(args[1:]) + if err != nil { + log.Fatal("E! " + err.Error()) + } + + // The sub_Filters are populated when the filter flags are set after the subcommand config + // e.g. 
@@ -261,6 +410,11 @@ func main() {
 
     logger.SetupLogging(logger.LogConfig{})
 
+    // Configure version
+    if err := internal.SetVersion(version); err != nil {
+        log.Println("Telegraf version already configured to: " + internal.Version())
+    }
+
     // Load external plugins, if requested.
     if *fPlugins != "" {
         log.Printf("I! Loading external plugins from: %s", *fPlugins)
@@ -292,7 +446,38 @@ func main() {
         fmt.Println(formatFullVersion())
         return
     case "config":
-        config.PrintSampleConfig(
+        err := configCmd.Parse(args[1:])
+        if err != nil {
+            log.Fatal("E! " + err.Error())
+        }
+
+        // The sub_Filters are populated when the filter flags are set after the subcommand config
+        // e.g. telegraf config --section-filter inputs
+        subSectionFilters := deleteEmpty(strings.Split(":"+strings.TrimSpace(*fSubSectionFilters)+":", ":"))
+        subInputFilters := deleteEmpty(strings.Split(":"+strings.TrimSpace(*fSubInputFilters)+":", ":"))
+        subOutputFilters := deleteEmpty(strings.Split(":"+strings.TrimSpace(*fSubOutputFilters)+":", ":"))
+        subAggregatorFilters := deleteEmpty(strings.Split(":"+strings.TrimSpace(*fsubAggregatorFilters)+":", ":"))
+        subProcessorFilters := deleteEmpty(strings.Split(":"+strings.TrimSpace(*fSubProcessorFilters)+":", ":"))
+
+        // Overwrite the global filters if the subfilters are defined, this allows for backwards compatibility
+        // Now you can still filter the sample config like so: telegraf --section-filter inputs config
+        if len(subSectionFilters) > 0 {
+            sectionFilters = subSectionFilters
+        }
+        if len(subInputFilters) > 0 {
+            inputFilters = subInputFilters
+        }
+        if len(subOutputFilters) > 0 {
+            outputFilters = subOutputFilters
+        }
+        if len(subAggregatorFilters) > 0 {
+            aggregatorFilters = subAggregatorFilters
+        }
+        if len(subProcessorFilters) > 0 {
+            processorFilters = subProcessorFilters
+        }
+
+        printer.PrintSampleConfig(
             sectionFilters,
             inputFilters,
             outputFilters,
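The `":"+...+":"` wrapping before `strings.Split` in the hunk above is subtle: it guarantees that an unset sub-filter flag collapses to a zero-length slice after `deleteEmpty`, so the corresponding global filter is left alone. A minimal sketch of just that parsing step, reusing the `deleteEmpty` helper introduced earlier in this patch:

```go
package main

import (
	"fmt"
	"strings"
)

// deleteEmpty mirrors the helper added in cmd/telegraf/telegraf.go.
func deleteEmpty(s []string) []string {
	var r []string
	for _, str := range s {
		if str != "" {
			r = append(r, str)
		}
	}
	return r
}

func main() {
	// An empty flag value must yield an empty slice (not [""]); otherwise
	// it would clobber the global filters. The ":" padding plus deleteEmpty
	// achieves exactly that.
	for _, value := range []string{"", "inputs", "inputs:outputs"} {
		filters := deleteEmpty(strings.Split(":"+strings.TrimSpace(value)+":", ":"))
		fmt.Printf("%q -> %q\n", value, filters)
	}
	// Output:
	// "" -> []
	// "inputs" -> ["inputs"]
	// "inputs:outputs" -> ["inputs" "outputs"]
}
```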
%s and %s", err, err2) } return } - shortVersion := version - if shortVersion == "" { - shortVersion = "unknown" - } - - // Configure version - if err := internal.SetVersion(shortVersion); err != nil { - log.Println("Telegraf version already configured to: " + internal.Version()) - } - run( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } diff --git a/cmd/telegraf/telegraf_posix.go b/cmd/telegraf/telegraf_posix.go index ca28622f16752..21ad935b7147e 100644 --- a/cmd/telegraf/telegraf_posix.go +++ b/cmd/telegraf/telegraf_posix.go @@ -1,13 +1,12 @@ +//go:build !windows // +build !windows package main -func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func run(inputFilters, outputFilters []string) { stop = make(chan struct{}) reloadLoop( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go index 830e6eaa4f8a0..ab80fc2be0564 100644 --- a/cmd/telegraf/telegraf_windows.go +++ b/cmd/telegraf/telegraf_windows.go @@ -1,5 +1,8 @@ +//go:build windows // +build windows +//go:generate goversioninfo -icon=../../assets/windows/tiger.ico + package main import ( @@ -11,30 +14,27 @@ import ( "github.com/kardianos/service" ) -func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func run(inputFilters, outputFilters []string) { + // Register the eventlog logging target for windows. + logger.RegisterEventLogger(*fServiceName) + if runtime.GOOS == "windows" && windowsRunAsService() { runAsWindowsService( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } else { stop = make(chan struct{}) reloadLoop( inputFilters, outputFilters, - aggregatorFilters, - processorFilters, ) } } type program struct { - inputFilters []string - outputFilters []string - aggregatorFilters []string - processorFilters []string + inputFilters []string + outputFilters []string } func (p *program) Start(s service.Service) error { @@ -46,16 +46,17 @@ func (p *program) run() { reloadLoop( p.inputFilters, p.outputFilters, - p.aggregatorFilters, - p.processorFilters, ) + close(stop) } func (p *program) Stop(s service.Service) error { - close(stop) + var empty struct{} + stop <- empty // signal reloadLoop to finish (context cancel) + <-stop // wait for reloadLoop to finish and close channel return nil } -func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { +func runAsWindowsService(inputFilters, outputFilters []string) { programFiles := os.Getenv("ProgramFiles") if programFiles == "" { // Should never happen programFiles = "C:\\Program Files" @@ -69,10 +70,8 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process } prg := &program{ - inputFilters: inputFilters, - outputFilters: outputFilters, - aggregatorFilters: aggregatorFilters, - processorFilters: processorFilters, + inputFilters: inputFilters, + outputFilters: outputFilters, } s, err := service.New(prg, svcConfig) if err != nil { @@ -81,27 +80,31 @@ func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, process // Handle the --service flag here to prevent any issues with tooling that // may not have an interactive session, e.g. installing from Ansible. 
if *fService != "" { - if *fConfig != "" { - svcConfig.Arguments = []string{"--config", *fConfig} + if len(fConfigs) > 0 { + svcConfig.Arguments = []string{} + } + for _, fConfig := range fConfigs { + svcConfig.Arguments = append(svcConfig.Arguments, "--config", fConfig) } - if *fConfigDirectory != "" { - svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory) + + for _, fConfigDirectory := range fConfigDirs { + svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", fConfigDirectory) } + //set servicename to service cmd line, to have a custom name after relaunch as a service svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName) + if *fServiceAutoRestart { + svcConfig.Option = service.KeyValue{"OnFailure": "restart", "OnFailureDelayDuration": *fServiceRestartDelay} + } + err := service.Control(s, *fService) if err != nil { log.Fatal("E! " + err.Error()) } os.Exit(0) } else { - winlogger, err := s.Logger(nil) - if err == nil { - //When in service mode, register eventlog target andd setup default logging to eventlog - logger.RegisterEventLogger(winlogger) - logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog}) - } + logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog}) err = s.Run() if err != nil { diff --git a/config/README.md b/config/README.md new file mode 120000 index 0000000000000..5455122d9fbb5 --- /dev/null +++ b/config/README.md @@ -0,0 +1 @@ +../docs/CONFIGURATION.md \ No newline at end of file diff --git a/config/aws/credentials.go b/config/aws/credentials.go index f9c98edbf0a4f..358080ab3ba69 100644 --- a/config/aws/credentials.go +++ b/config/aws/credentials.go @@ -1,54 +1,87 @@ package aws import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/session" + "context" + awsV2 "github.com/aws/aws-sdk-go-v2/aws" + configV2 "github.com/aws/aws-sdk-go-v2/config" + credentialsV2 "github.com/aws/aws-sdk-go-v2/credentials" + stscredsV2 "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/service/sts" ) type CredentialConfig struct { - Region string - AccessKey string - SecretKey string - RoleARN string - Profile string - Filename string - Token string - EndpointURL string + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + RoleSessionName string `toml:"role_session_name"` + WebIdentityTokenFile string `toml:"web_identity_token_file"` } -func (c *CredentialConfig) Credentials() client.ConfigProvider { +func (c *CredentialConfig) Credentials() (awsV2.Config, error) { if c.RoleARN != "" { return c.assumeCredentials() - } else { - return c.rootCredentials() } + return c.rootCredentials() } -func (c *CredentialConfig) rootCredentials() client.ConfigProvider { - config := &aws.Config{ - Region: aws.String(c.Region), +func (c *CredentialConfig) rootCredentials() (awsV2.Config, error) { + options := []func(*configV2.LoadOptions) error{ + configV2.WithRegion(c.Region), } + if c.EndpointURL != "" { - config.Endpoint = &c.EndpointURL + resolver := awsV2.EndpointResolverFunc(func(service, region string) (awsV2.Endpoint, error) { + return 
awsV2.Endpoint{ + URL: c.EndpointURL, + HostnameImmutable: true, + Source: awsV2.EndpointSourceCustom, + }, nil + }) + options = append(options, configV2.WithEndpointResolver(resolver)) + } + + if c.Profile != "" { + options = append(options, configV2.WithSharedConfigProfile(c.Profile)) + } + if c.Filename != "" { + options = append(options, configV2.WithSharedCredentialsFiles([]string{c.Filename})) } + if c.AccessKey != "" || c.SecretKey != "" { - config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) - } else if c.Profile != "" || c.Filename != "" { - config.Credentials = credentials.NewSharedCredentials(c.Filename, c.Profile) + provider := credentialsV2.NewStaticCredentialsProvider(c.AccessKey, c.SecretKey, c.Token) + options = append(options, configV2.WithCredentialsProvider(provider)) } - return session.New(config) + return configV2.LoadDefaultConfig(context.Background(), options...) } -func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { - rootCredentials := c.rootCredentials() - config := &aws.Config{ - Region: aws.String(c.Region), - Endpoint: &c.EndpointURL, +func (c *CredentialConfig) assumeCredentials() (awsV2.Config, error) { + rootCredentials, err := c.rootCredentials() + if err != nil { + return awsV2.Config{}, err } - config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN) - return session.New(config) + + var provider awsV2.CredentialsProvider + stsService := sts.NewFromConfig(rootCredentials) + if c.WebIdentityTokenFile != "" { + provider = stscredsV2.NewWebIdentityRoleProvider(stsService, c.RoleARN, stscredsV2.IdentityTokenFile(c.WebIdentityTokenFile), func(opts *stscredsV2.WebIdentityRoleOptions) { + if c.RoleSessionName != "" { + opts.RoleSessionName = c.RoleSessionName + } + }) + } else { + provider = stscredsV2.NewAssumeRoleProvider(stsService, c.RoleARN, func(opts *stscredsV2.AssumeRoleOptions) { + if c.RoleSessionName != "" { + opts.RoleSessionName = c.RoleSessionName + } + }) + } + + rootCredentials.Credentials = awsV2.NewCredentialsCache(provider) + return rootCredentials, nil } diff --git a/config/config.go b/config/config.go index 4fd65139e2ab9..adab8ce3c7761 100644 --- a/config/config.go +++ b/config/config.go @@ -2,29 +2,36 @@ package config import ( "bytes" - "errors" + "crypto/tls" + _ "embed" "fmt" - "io/ioutil" + "io" "log" - "math" "net/http" "net/url" "os" "path/filepath" + "reflect" "regexp" "runtime" "sort" "strconv" "strings" + "sync" "time" + "github.com/coreos/go-semver/semver" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/aggregators" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/temporary/json_v2" + "github.com/influxdata/telegraf/plugins/parsers/temporary/xpath" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/toml" @@ -32,17 +39,6 @@ import ( ) var ( - // Default sections - sectionDefaults = []string{"global_tags", "agent", "outputs", - "processors", "aggregators", "inputs"} - - // Default input plugins - inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", - "processes", "disk", "diskio"} - - // Default output plugins - outputDefaults = []string{"influxdb"} - // envVarRe is a 
regex to find environment variables in the config file envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`) @@ -50,12 +46,22 @@ var ( `"`, `\"`, `\`, `\\`, ) + httpLoadConfigRetryInterval = 10 * time.Second + + // fetchURLRe is a regex to determine whether the requested file should + // be fetched from a remote or read from the filesystem. + fetchURLRe = regexp.MustCompile(`^\w+://`) ) // Config specifies the URL/user/password for the database that telegraf // will be logging to, as well as all the plugins that the user has // specified type Config struct { + toml *toml.Config + errs []error // config load errors. + UnusedFields map[string]bool + unusedFieldsMutex *sync.Mutex + Tags map[string]string InputFilters []string OutputFilters []string @@ -64,18 +70,28 @@ type Config struct { Inputs []*models.RunningInput Outputs []*models.RunningOutput Aggregators []*models.RunningAggregator + Parsers []*models.RunningParser // Processors have a slice wrapper type because they need to be sorted Processors models.RunningProcessors AggProcessors models.RunningProcessors + + Deprecations map[string][]int64 + version *semver.Version } +// NewConfig creates a new struct to hold the Telegraf config. +// For historical reasons, It holds the actual instances of the running plugins +// once the configuration is parsed. func NewConfig() *Config { c := &Config{ + UnusedFields: map[string]bool{}, + unusedFieldsMutex: &sync.Mutex{}, + // Agent defaults: Agent: &AgentConfig{ - Interval: internal.Duration{Duration: 10 * time.Second}, + Interval: Duration(10 * time.Second), RoundInterval: true, - FlushInterval: internal.Duration{Duration: 10 * time.Second}, + FlushInterval: Duration(10 * time.Second), LogTarget: "file", LogfileRotationMaxArchives: 5, }, @@ -83,44 +99,72 @@ func NewConfig() *Config { Tags: make(map[string]string), Inputs: make([]*models.RunningInput, 0), Outputs: make([]*models.RunningOutput, 0), + Parsers: make([]*models.RunningParser, 0), Processors: make([]*models.RunningProcessor, 0), AggProcessors: make([]*models.RunningProcessor, 0), InputFilters: make([]string, 0), OutputFilters: make([]string, 0), + Deprecations: make(map[string][]int64), } + + // Handle unknown version + version := internal.Version() + if version == "" || version == "unknown" { + version = "0.0.0-unknown" + } + c.version = semver.New(version) + + tomlCfg := &toml.Config{ + NormFieldName: toml.DefaultConfig.NormFieldName, + FieldToKey: toml.DefaultConfig.FieldToKey, + MissingField: c.missingTomlField, + } + c.toml = tomlCfg + return c } +// AgentConfig defines configuration that will be used by the Telegraf agent type AgentConfig struct { // Interval at which to gather information - Interval internal.Duration + Interval Duration // RoundInterval rounds collection interval to 'interval'. // ie, if Interval=10s then always collect on :00, :10, :20, etc. RoundInterval bool + // Collected metrics are rounded to the precision specified. Precision is + // specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). + // Valid time units are "ns", "us" (or "µs"), "ms", "s". + // // By default or when set to "0s", precision will be set to the same - // timestamp order as the collection interval, with the maximum being 1s. + // timestamp order as the collection interval, with the maximum being 1s: // ie, when interval = "10s", precision will be "1s" // when interval = "250ms", precision will be "1ms" + // // Precision will NOT be used for service inputs. 
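The precision rule restated in the comment above is easy to misread, so a small worked example may help: by default, precision snaps to the time unit of the collection interval and is never coarser than one second. This is a sketch of the documented rule only, not of Telegraf's actual implementation:

package main

import (
	"fmt"
	"time"
)

// effectivePrecision illustrates the documented default: precision matches
// the time unit of the collection interval, capped at one second.
func effectivePrecision(interval time.Duration) time.Duration {
	for _, unit := range []time.Duration{time.Second, time.Millisecond, time.Microsecond} {
		if interval >= unit {
			return unit
		}
	}
	return time.Nanosecond
}

func main() {
	fmt.Println(effectivePrecision(10 * time.Second))       // 1s
	fmt.Println(effectivePrecision(250 * time.Millisecond)) // 1ms
}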
It is up to each individual // service input to set the timestamp at the appropriate precision. - Precision internal.Duration + Precision Duration // CollectionJitter is used to jitter the collection by a random amount. // Each plugin will sleep for a random time within jitter before collecting. // This can be used to avoid many plugins querying things like sysfs at the // same time, which can have a measurable effect on the system. - CollectionJitter internal.Duration + CollectionJitter Duration + + // CollectionOffset is used to shift the collection by the given amount. + // This can be be used to avoid many plugins querying constraint devices + // at the same time by manually scheduling them in time. + CollectionOffset Duration // FlushInterval is the Interval at which to flush data - FlushInterval internal.Duration + FlushInterval Duration // FlushJitter Jitters the flush interval by a random amount. // This is primarily to avoid large write spikes for users running a large // number of telegraf instances. // ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - FlushJitter internal.Duration + FlushJitter Duration // MetricBatchSize is the maximum number of metrics that is wrote to an // output plugin in one call. @@ -136,12 +180,12 @@ type AgentConfig struct { // FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever // it fills up, regardless of FlushInterval. Setting this option to true // does _not_ deactivate FlushInterval. - FlushBufferWhenFull bool // deprecated in 0.13; has no effect + FlushBufferWhenFull bool `toml:"flush_buffer_when_full" deprecated:"0.13.0;2.0.0;option is ignored"` // TODO(cam): Remove UTC and parameter, they are no longer // valid for the agent config. Leaving them here for now for backwards- // compatibility - UTC bool `toml:"utc"` // deprecated in 1.0.0; has no effect + UTC bool `toml:"utc" deprecated:"1.0.0;option is ignored"` // Debug is the option for running in debug mode Debug bool `toml:"debug"` @@ -160,18 +204,25 @@ type AgentConfig struct { // The file will be rotated after the time interval specified. When set // to 0 no time based rotation is performed. - LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"` + LogfileRotationInterval Duration `toml:"logfile_rotation_interval"` // The logfile will be rotated when it becomes larger than the specified // size. When set to 0 no size based rotation is performed. - LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"` + LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"` // Maximum number of rotated archives to keep, any older logs are deleted. // If set to -1, no archives are removed. LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"` + // Pick a timezone to use when logging or type 'local' for local time. + LogWithTimezone string `toml:"log_with_timezone"` + Hostname string OmitHostname bool + + // Method for translating SNMP objects. 'netsnmp' to call external programs, + // 'gosmi' to use the built-in library. + SnmpTranslator string `toml:"snmp_translator"` } // InputNames returns a list of strings of the configured inputs. @@ -192,6 +243,15 @@ func (c *Config) AggregatorNames() []string { return PluginNameCounts(name) } +// ParserNames returns a list of strings of the configured parsers. 
+func (c *Config) ParserNames() []string { + var name []string + for _, parser := range c.Parsers { + name = append(name, parser.Config.DataFormat) + } + return PluginNameCounts(name) +} + // ProcessorNames returns a list of strings of the configured processors. func (c *Config) ProcessorNames() []string { var name []string @@ -244,371 +304,6 @@ func (c *Config) ListTags() string { return strings.Join(tags, " ") } -var header = `# Telegraf Configuration -# -# Telegraf is entirely plugin driven. All metrics are gathered from the -# declared inputs, and sent to the declared outputs. -# -# Plugins must be declared in here to be active. -# To deactivate a plugin, comment out the name and any variables. -# -# Use 'telegraf -config telegraf.conf -test' to see what metrics a config -# file would generate. -# -# Environment variables can be used anywhere in this config file, simply surround -# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), -# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) - -` -var globalTagsConfig = ` -# Global tags can be specified here in key="value" format. -[global_tags] - # dc = "us-east-1" # will tag all metrics with dc=us-east-1 - # rack = "1a" - ## Environment variables can be used as tags, and throughout the config file - # user = "$USER" - -` -var agentConfig = ` -# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. Increasing this value - ## allows for longer periods of output downtime without dropping metrics at the - ## cost of higher maximum memory usage. - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Log at debug level. - # debug = false - ## Log only error level messages. - # quiet = false - - ## Log target controls the destination for logs and can be one of "file", - ## "stderr" or, on Windows, "eventlog". 
When set to "file", the output file - ## is determined by the "logfile" setting. - # logtarget = "file" - - ## Name of the file to be logged to when using the "file" logtarget. If set to - ## the empty string then logs are written to stderr. - # logfile = "" - - ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. Logs are rotated only when - ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # logfile_rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # logfile_rotation_max_archives = 5 - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. - omit_hostname = false - -` - -var outputHeader = ` -############################################################################### -# OUTPUT PLUGINS # -############################################################################### - -` - -var processorHeader = ` -############################################################################### -# PROCESSOR PLUGINS # -############################################################################### - -` - -var aggregatorHeader = ` -############################################################################### -# AGGREGATOR PLUGINS # -############################################################################### - -` - -var inputHeader = ` -############################################################################### -# INPUT PLUGINS # -############################################################################### - -` - -var serviceInputHeader = ` -############################################################################### -# SERVICE INPUT PLUGINS # -############################################################################### - -` - -// PrintSampleConfig prints the sample config -func PrintSampleConfig( - sectionFilters []string, - inputFilters []string, - outputFilters []string, - aggregatorFilters []string, - processorFilters []string, -) { - // print headers - fmt.Printf(header) - - if len(sectionFilters) == 0 { - sectionFilters = sectionDefaults - } - printFilteredGlobalSections(sectionFilters) - - // print output plugins - if sliceContains("outputs", sectionFilters) { - if len(outputFilters) != 0 { - if len(outputFilters) >= 3 && outputFilters[1] != "none" { - fmt.Printf(outputHeader) - } - printFilteredOutputs(outputFilters, false) - } else { - fmt.Printf(outputHeader) - printFilteredOutputs(outputDefaults, false) - // Print non-default outputs, commented - var pnames []string - for pname := range outputs.Outputs { - if !sliceContains(pname, outputDefaults) { - pnames = append(pnames, pname) - } - } - sort.Strings(pnames) - printFilteredOutputs(pnames, true) - } - } - - // print processor plugins - if sliceContains("processors", sectionFilters) { - if len(processorFilters) != 0 { - if len(processorFilters) >= 3 && processorFilters[1] != "none" { - fmt.Printf(processorHeader) - } - printFilteredProcessors(processorFilters, false) - } else { - fmt.Printf(processorHeader) - pnames := []string{} - for pname := range processors.Processors { - pnames = append(pnames, pname) - } - sort.Strings(pnames) - printFilteredProcessors(pnames, true) - } - 
} - - // print aggregator plugins - if sliceContains("aggregators", sectionFilters) { - if len(aggregatorFilters) != 0 { - if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" { - fmt.Printf(aggregatorHeader) - } - printFilteredAggregators(aggregatorFilters, false) - } else { - fmt.Printf(aggregatorHeader) - pnames := []string{} - for pname := range aggregators.Aggregators { - pnames = append(pnames, pname) - } - sort.Strings(pnames) - printFilteredAggregators(pnames, true) - } - } - - // print input plugins - if sliceContains("inputs", sectionFilters) { - if len(inputFilters) != 0 { - if len(inputFilters) >= 3 && inputFilters[1] != "none" { - fmt.Printf(inputHeader) - } - printFilteredInputs(inputFilters, false) - } else { - fmt.Printf(inputHeader) - printFilteredInputs(inputDefaults, false) - // Print non-default inputs, commented - var pnames []string - for pname := range inputs.Inputs { - if !sliceContains(pname, inputDefaults) { - pnames = append(pnames, pname) - } - } - sort.Strings(pnames) - printFilteredInputs(pnames, true) - } - } -} - -func printFilteredProcessors(processorFilters []string, commented bool) { - // Filter processors - var pnames []string - for pname := range processors.Processors { - if sliceContains(pname, processorFilters) { - pnames = append(pnames, pname) - } - } - sort.Strings(pnames) - - // Print Outputs - for _, pname := range pnames { - creator := processors.Processors[pname] - output := creator() - printConfig(pname, output, "processors", commented) - } -} - -func printFilteredAggregators(aggregatorFilters []string, commented bool) { - // Filter outputs - var anames []string - for aname := range aggregators.Aggregators { - if sliceContains(aname, aggregatorFilters) { - anames = append(anames, aname) - } - } - sort.Strings(anames) - - // Print Outputs - for _, aname := range anames { - creator := aggregators.Aggregators[aname] - output := creator() - printConfig(aname, output, "aggregators", commented) - } -} - -func printFilteredInputs(inputFilters []string, commented bool) { - // Filter inputs - var pnames []string - for pname := range inputs.Inputs { - if sliceContains(pname, inputFilters) { - pnames = append(pnames, pname) - } - } - sort.Strings(pnames) - - // cache service inputs to print them at the end - servInputs := make(map[string]telegraf.ServiceInput) - // for alphabetical looping: - servInputNames := []string{} - - // Print Inputs - for _, pname := range pnames { - if pname == "cisco_telemetry_gnmi" { - continue - } - creator := inputs.Inputs[pname] - input := creator() - - switch p := input.(type) { - case telegraf.ServiceInput: - servInputs[pname] = p - servInputNames = append(servInputNames, pname) - continue - } - - printConfig(pname, input, "inputs", commented) - } - - // Print Service Inputs - if len(servInputs) == 0 { - return - } - sort.Strings(servInputNames) - - fmt.Printf(serviceInputHeader) - for _, name := range servInputNames { - printConfig(name, servInputs[name], "inputs", commented) - } -} - -func printFilteredOutputs(outputFilters []string, commented bool) { - // Filter outputs - var onames []string - for oname := range outputs.Outputs { - if sliceContains(oname, outputFilters) { - onames = append(onames, oname) - } - } - sort.Strings(onames) - - // Print Outputs - for _, oname := range onames { - creator := outputs.Outputs[oname] - output := creator() - printConfig(oname, output, "outputs", commented) - } -} - -func printFilteredGlobalSections(sectionFilters []string) { - if sliceContains("global_tags", 
sectionFilters) { - fmt.Printf(globalTagsConfig) - } - - if sliceContains("agent", sectionFilters) { - fmt.Printf(agentConfig) - } -} - -func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) { - comment := "" - if commented { - comment = "# " - } - fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment, - op, name) - - config := p.SampleConfig() - if config == "" { - fmt.Printf("\n%s # no configuration\n\n", comment) - } else { - lines := strings.Split(config, "\n") - for i, line := range lines { - if i == 0 || i == len(lines)-1 { - fmt.Print("\n") - continue - } - fmt.Print(strings.TrimRight(comment+line, " ") + "\n") - } - } -} - func sliceContains(name string, list []string) bool { for _, b := range list { if b == name { @@ -618,26 +313,7 @@ func sliceContains(name string, list []string) bool { return false } -// PrintInputConfig prints the config usage of a single input. -func PrintInputConfig(name string) error { - if creator, ok := inputs.Inputs[name]; ok { - printConfig(name, creator(), "inputs", false) - } else { - return errors.New(fmt.Sprintf("Input %s not found", name)) - } - return nil -} - -// PrintOutputConfig prints the config usage of a single output. -func PrintOutputConfig(name string) error { - if creator, ok := outputs.Outputs[name]; ok { - printConfig(name, creator(), "outputs", false) - } else { - return errors.New(fmt.Sprintf("Output %s not found", name)) - } - return nil -} - +// LoadDirectory loads all toml config files found in the specified path, recursively. func (c *Config) LoadDirectory(path string) error { walkfn := func(thispath string, info os.FileInfo, _ error) error { if info == nil { @@ -683,6 +359,10 @@ func getDefaultConfigPath() (string, error) { etcfile = programFiles + `\Telegraf\telegraf.conf` } for _, path := range []string{envfile, homefile, etcfile} { + if isURL(path) { + log.Printf("I! Using config url: %s", path) + return path, nil + } if _, err := os.Stat(path); err == nil { log.Printf("I! 
Using config file: %s", path) return path, nil @@ -694,6 +374,12 @@ func getDefaultConfigPath() (string, error) { " in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile) } +// isURL checks if string is valid url +func isURL(str string) bool { + u, err := url.Parse(str) + return err == nil && u.Scheme != "" && u.Host != "" +} + // LoadConfig loads the given config file and applies it to c func (c *Config) LoadConfig(path string) error { var err error @@ -727,8 +413,8 @@ func (c *Config) LoadConfigData(data []byte) error { if !ok { return fmt.Errorf("invalid configuration, bad table name %q", tableName) } - if err = toml.UnmarshalTable(subTable, c.Tags); err != nil { - return fmt.Errorf("error parsing table name %q: %w", tableName, err) + if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil { + return fmt.Errorf("error parsing table name %q: %s", tableName, err) } } } @@ -739,8 +425,8 @@ func (c *Config) LoadConfigData(data []byte) error { if !ok { return fmt.Errorf("invalid configuration, error parsing agent table") } - if err = toml.UnmarshalTable(subTable, c.Agent); err != nil { - return fmt.Errorf("error parsing agent table: %w", err) + if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil { + return fmt.Errorf("error parsing [agent]: %w", err) } } @@ -757,6 +443,15 @@ func (c *Config) LoadConfigData(data []byte) error { c.Tags["host"] = c.Agent.Hostname } + // Set snmp agent translator default + if c.Agent.SnmpTranslator == "" { + c.Agent.SnmpTranslator = "netsnmp" + } + + if len(c.UnusedFields) > 0 { + return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields)) + } + // Parse all the rest of the plugins: for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) @@ -772,18 +467,21 @@ func (c *Config) LoadConfigData(data []byte) error { // legacy [outputs.influxdb] support case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addOutput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s array, %s", pluginName, err) + return fmt.Errorf("error parsing %s array, %w", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s", + return fmt.Errorf("unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } case "inputs", "plugins": for pluginName, pluginVal := range subTable.Fields { @@ -791,18 +489,21 @@ func (c *Config) LoadConfigData(data []byte) error { // legacy [inputs.cpu] support case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addInput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, 
keys(c.UnusedFields)) + } } case "processors": for pluginName, pluginVal := range subTable.Fields { @@ -810,13 +511,16 @@ func (c *Config) LoadConfigData(data []byte) error { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addProcessor(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", pluginName, err) + return fmt.Errorf("error parsing %s, %w", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } case "aggregators": for pluginName, pluginVal := range subTable.Fields { @@ -831,6 +535,9 @@ func (c *Config) LoadConfigData(data []byte) error { return fmt.Errorf("Unsupported config format: %s", pluginName) } + if len(c.UnusedFields) > 0 { + return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) + } } // Assume it's an input input for legacy config file support if no other // identifiers are present @@ -861,19 +568,22 @@ func escapeEnv(value string) string { } func loadConfig(config string) ([]byte, error) { - u, err := url.Parse(config) - if err != nil { - return nil, err - } + if fetchURLRe.MatchString(config) { + u, err := url.Parse(config) + if err != nil { + return nil, err + } - switch u.Scheme { - case "https", "http": - return fetchConfig(u) - default: - // If it isn't a https scheme, try it as a file. + switch u.Scheme { + case "https", "http": + return fetchConfig(u) + default: + return nil, fmt.Errorf("scheme %q not supported", u.Scheme) + } } - return ioutil.ReadFile(config) + // If it isn't a https scheme, try it as a file + return os.ReadFile(config) } func fetchConfig(u *url.URL) ([]byte, error) { @@ -887,17 +597,27 @@ func fetchConfig(u *url.URL) ([]byte, error) { } req.Header.Add("Accept", "application/toml") req.Header.Set("User-Agent", internal.ProductToken()) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to retrieve remote config: %s", resp.Status) + retries := 3 + for i := 0; i <= retries; i++ { + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("Retry %d of %d failed connecting to HTTP config server %s", i, retries, err) + } + + if resp.StatusCode != http.StatusOK { + if i < retries { + log.Printf("Error getting HTTP config. Retry %d of %d in %s. 
Status=%d", i, retries, httpLoadConfigRetryInterval, resp.StatusCode) + time.Sleep(httpLoadConfigRetryInterval) + continue + } + return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status) + } + defer resp.Body.Close() + return io.ReadAll(resp.Body) } - defer resp.Body.Close() - return ioutil.ReadAll(resp.Body) + return nil, nil } // parseConfig loads a TOML configuration from a provided path and @@ -912,19 +632,19 @@ func parseConfig(contents []byte) (*ast.Table, error) { continue } - var env_var []byte + var envVar []byte if parameter[1] != nil { - env_var = parameter[1] + envVar = parameter[1] } else if parameter[2] != nil { - env_var = parameter[2] + envVar = parameter[2] } else { continue } - env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$")) + envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$")) if ok { - env_val = escapeEnv(env_val) - contents = bytes.Replace(contents, parameter[0], []byte(env_val), 1) + envVal = escapeEnv(envVal) + contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1) } } @@ -934,16 +654,25 @@ func parseConfig(contents []byte) (*ast.Table, error) { func (c *Config) addAggregator(name string, table *ast.Table) error { creator, ok := aggregators.Aggregators[name] if !ok { + // Handle removed, deprecated plugins + if di, deprecated := aggregators.Deprecations[name]; deprecated { + printHistoricPluginDeprecationNotice("aggregators", name, di) + return fmt.Errorf("plugin deprecated") + } return fmt.Errorf("Undefined but requested aggregator: %s", name) } aggregator := creator() - conf, err := buildAggregator(name, table) + conf, err := c.buildAggregator(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, aggregator); err != nil { + if err := c.toml.UnmarshalTable(table, aggregator); err != nil { + return err + } + + if err := c.printUserDeprecation("aggregators", name, aggregator); err != nil { return err } @@ -951,25 +680,63 @@ func (c *Config) addAggregator(name string, table *ast.Table) error { return nil } +func (c *Config) probeParser(table *ast.Table) bool { + var dataformat string + c.getFieldString(table, "data_format", &dataformat) + + _, ok := parsers.Parsers[dataformat] + return ok +} + +func (c *Config) addParser(parentname string, table *ast.Table) (*models.RunningParser, error) { + var dataformat string + c.getFieldString(table, "data_format", &dataformat) + + creator, ok := parsers.Parsers[dataformat] + if !ok { + return nil, fmt.Errorf("Undefined but requested parser: %s", dataformat) + } + parser := creator(parentname) + + conf, err := c.buildParser(parentname, table) + if err != nil { + return nil, err + } + + if err := c.toml.UnmarshalTable(table, parser); err != nil { + return nil, err + } + + running := models.NewRunningParser(parser, conf) + c.Parsers = append(c.Parsers, running) + + return running, nil +} + func (c *Config) addProcessor(name string, table *ast.Table) error { creator, ok := processors.Processors[name] if !ok { + // Handle removed, deprecated plugins + if di, deprecated := processors.Deprecations[name]; deprecated { + printHistoricPluginDeprecationNotice("processors", name, di) + return fmt.Errorf("plugin deprecated") + } return fmt.Errorf("Undefined but requested processor: %s", name) } - processorConfig, err := buildProcessor(name, table) + processorConfig, err := c.buildProcessor(name, table) if err != nil { return err } - rf, err := c.newRunningProcessor(creator, processorConfig, name, table) + rf, err := 
c.newRunningProcessor(creator, processorConfig, table) if err != nil { return err } c.Processors = append(c.Processors, rf) // save a copy for the aggregator - rf, err = c.newRunningProcessor(creator, processorConfig, name, table) + rf, err = c.newRunningProcessor(creator, processorConfig, table) if err != nil { return err } @@ -981,21 +748,24 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { func (c *Config) newRunningProcessor( creator processors.StreamingCreator, processorConfig *models.ProcessorConfig, - name string, table *ast.Table, ) (*models.RunningProcessor, error) { processor := creator() if p, ok := processor.(unwrappable); ok { - if err := toml.UnmarshalTable(table, p.Unwrap()); err != nil { + if err := c.toml.UnmarshalTable(table, p.Unwrap()); err != nil { return nil, err } } else { - if err := toml.UnmarshalTable(table, processor); err != nil { + if err := c.toml.UnmarshalTable(table, processor); err != nil { return nil, err } } + if err := c.printUserDeprecation("processors", processorConfig.Name, processor); err != nil { + return nil, err + } + rf := models.NewRunningProcessor(processor, processorConfig) return rf, nil } @@ -1006,32 +776,45 @@ func (c *Config) addOutput(name string, table *ast.Table) error { } creator, ok := outputs.Outputs[name] if !ok { - return fmt.Errorf("Undefined but requested output: %s", name) + // Handle removed, deprecated plugins + if di, deprecated := outputs.Deprecations[name]; deprecated { + printHistoricPluginDeprecationNotice("outputs", name, di) + return fmt.Errorf("plugin deprecated") + } + return fmt.Errorf("undefined but requested output: %s", name) } output := creator() // If the output has a SetSerializer function, then this means it can write // arbitrary types of output, so build the serializer and set it. - switch t := output.(type) { - case serializers.SerializerOutput: - serializer, err := buildSerializer(name, table) + if t, ok := output.(serializers.SerializerOutput); ok { + serializer, err := c.buildSerializer(table) if err != nil { return err } t.SetSerializer(serializer) } - outputConfig, err := buildOutput(name, table) + outputConfig, err := c.buildOutput(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, output); err != nil { + if err := c.toml.UnmarshalTable(table, output); err != nil { + return err + } + + if err := c.printUserDeprecation("outputs", name, output); err != nil { return err } - ro := models.NewRunningOutput(name, output, outputConfig, - c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) + if c, ok := interface{}(output).(interface{ TLSConfig() (*tls.Config, error) }); ok { + if _, err := c.TLSConfig(); err != nil { + return err + } + } + + ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) c.Outputs = append(c.Outputs, ro) return nil } @@ -1040,56 +823,161 @@ func (c *Config) addInput(name string, table *ast.Table) error { if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) { return nil } - // Legacy support renaming io input to diskio - if name == "io" { - name = "diskio" - } + + // For inputs with parsers we need to compute the set of + // options that is not covered by both, the parser and the input. + // We achieve this by keeping a local book of missing entries + // that counts the number of misses. In case we have a parser + // for the input both need to miss the entry. We count the + // missing entries at the end. 
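The miss-counting scheme described in the comment above is subtle: when an input has a parser, a TOML key counts as unused only if both the input and the parser failed to claim it. A toy illustration of the counting idea, detached from Telegraf's types — missCount and missThreshold echo the names in the diff, while record is a stand-in helper:

package main

import "fmt"

func main() {
	// Each consumer that fails to recognise a key bumps its miss count.
	missCount := map[string]int{}
	record := func(unknownKeys []string) {
		for _, k := range unknownKeys {
			missCount[k]++
		}
	}

	// Suppose both the input and its parser were offered the same table:
	record([]string{"json_query", "bogus_option"}) // keys the input didn't use
	record([]string{"bogus_option"})               // keys the parser didn't use

	// With a parser present, a key must be missed by both to be an error.
	missThreshold := 1
	for key, count := range missCount {
		if count > missThreshold {
			fmt.Printf("configuration specified %q, but it wasn't used\n", key)
		}
	}
}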
+ missThreshold := 0 + missCount := make(map[string]int) + c.setLocalMissingTomlFieldTracker(missCount) + defer c.resetMissingTomlFieldTracker() creator, ok := inputs.Inputs[name] if !ok { + // Handle removed, deprecated plugins + if di, deprecated := inputs.Deprecations[name]; deprecated { + printHistoricPluginDeprecationNotice("inputs", name, di) + return fmt.Errorf("plugin deprecated") + } + return fmt.Errorf("Undefined but requested input: %s", name) } input := creator() - // If the input has a SetParser function, then this means it can accept - // arbitrary types of input, so build the parser and set it. + // If the input has a SetParser or SetParserFunc function, it can accept + // arbitrary data-formats, so build the requested parser and set it. + if t, ok := input.(telegraf.ParserInput); ok { + missThreshold = 1 + if parser, err := c.addParser(name, table); err == nil { + t.SetParser(parser) + } else { + missThreshold = 0 + // Fallback to the old way of instantiating the parsers. + config, err := c.getParserConfig(name, table) + if err != nil { + return err + } + parser, err := c.buildParserOld(name, config) + if err != nil { + return err + } + t.SetParser(parser) + } + } + + // Keep the old interface for backward compatibility if t, ok := input.(parsers.ParserInput); ok { - parser, err := buildParser(name, table) - if err != nil { - return err + // DEPRECATED: Please switch your plugin to telegraf.ParserInput. + missThreshold = 1 + if parser, err := c.addParser(name, table); err == nil { + t.SetParser(parser) + } else { + missThreshold = 0 + // Fallback to the old way of instantiating the parsers. + config, err := c.getParserConfig(name, table) + if err != nil { + return err + } + parser, err := c.buildParserOld(name, config) + if err != nil { + return err + } + t.SetParser(parser) + } + } + + if t, ok := input.(telegraf.ParserFuncInput); ok { + missThreshold = 1 + if c.probeParser(table) { + t.SetParserFunc(func() (telegraf.Parser, error) { + parser, err := c.addParser(name, table) + if err != nil { + return nil, err + } + err = parser.Init() + return parser, err + }) + } else { + missThreshold = 0 + // Fallback to the old way + config, err := c.getParserConfig(name, table) + if err != nil { + return err + } + t.SetParserFunc(func() (telegraf.Parser, error) { + return c.buildParserOld(name, config) + }) } - t.SetParser(parser) } if t, ok := input.(parsers.ParserFuncInput); ok { - config, err := getParserConfig(name, table) - if err != nil { - return err + // DEPRECATED: Please switch your plugin to telegraf.ParserFuncInput. 
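The wiring repeated across these ParserInput/ParserFuncInput hunks is probe-then-fallback: try to build the parser from the new registry first, and only fall back to the legacy constructor when the format is not registered there yet. A reduced sketch of that control flow; newRegistryParser and legacyParser are placeholders, not Telegraf APIs:

package main

import (
	"errors"
	"fmt"
)

// newRegistryParser stands in for the new per-plugin parser registry;
// legacyParser stands in for the old parsers.NewParser path.
func newRegistryParser(format string) (string, error) {
	if format == "influx" {
		return "registry:influx", nil
	}
	return "", errors.New("not in registry")
}

func legacyParser(format string) (string, error) {
	return "legacy:" + format, nil
}

func buildParser(format string) (string, error) {
	if p, err := newRegistryParser(format); err == nil {
		return p, nil // preferred: the new, per-plugin parser instances
	}
	return legacyParser(format) // fallback keeps old data formats working
}

func main() {
	for _, f := range []string{"influx", "grok"} {
		p, err := buildParser(f)
		fmt.Println(p, err)
	}
}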
+ missThreshold = 1 + if c.probeParser(table) { + t.SetParserFunc(func() (parsers.Parser, error) { + parser, err := c.addParser(name, table) + if err != nil { + return nil, err + } + err = parser.Init() + return parser, err + }) + } else { + missThreshold = 0 + // Fallback to the old way + config, err := c.getParserConfig(name, table) + if err != nil { + return err + } + t.SetParserFunc(func() (parsers.Parser, error) { + return c.buildParserOld(name, config) + }) } - t.SetParserFunc(func() (parsers.Parser, error) { - return parsers.NewParser(config) - }) } - pluginConfig, err := buildInput(name, table) + pluginConfig, err := c.buildInput(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, input); err != nil { + if err := c.toml.UnmarshalTable(table, input); err != nil { + return err + } + + if err := c.printUserDeprecation("inputs", name, input); err != nil { return err } + if c, ok := interface{}(input).(interface{ TLSConfig() (*tls.Config, error) }); ok { + if _, err := c.TLSConfig(); err != nil { + return err + } + } + rp := models.NewRunningInput(input, pluginConfig) rp.SetDefaultTags(c.Tags) c.Inputs = append(c.Inputs, rp) + + // Check the number of misses against the threshold + for key, count := range missCount { + if count <= missThreshold { + continue + } + if err := c.missingTomlField(nil, key); err != nil { + return err + } + } + return nil } // buildAggregator parses Aggregator specific items from the ast.Table, // builds the filter and returns a // models.AggregatorConfig to be inserted into models.RunningAggregator -func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { +func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { conf := &models.AggregatorConfig{ Name: name, Delay: time.Millisecond * 100, @@ -1097,115 +985,66 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err Grace: time.Second * 0, } - if err := getConfigDuration(tbl, "period", &conf.Period); err != nil { - return nil, err - } + c.getFieldDuration(tbl, "period", &conf.Period) + c.getFieldDuration(tbl, "delay", &conf.Delay) + c.getFieldDuration(tbl, "grace", &conf.Grace) + c.getFieldBool(tbl, "drop_original", &conf.DropOriginal) + c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix) + c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix) + c.getFieldString(tbl, "name_override", &conf.NameOverride) + c.getFieldString(tbl, "alias", &conf.Alias) - if err := getConfigDuration(tbl, "delay", &conf.Delay); err != nil { - return nil, err + conf.Tags = make(map[string]string) + if node, ok := tbl.Fields["tags"]; ok { + if subtbl, ok := node.(*ast.Table); ok { + if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil { + return nil, fmt.Errorf("could not parse tags for input %s", name) + } + } } - if err := getConfigDuration(tbl, "grace", &conf.Grace); err != nil { - return nil, err + if c.hasErrs() { + return nil, c.firstErr() } - if node, ok := tbl.Fields["drop_original"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - conf.DropOriginal, err = strconv.ParseBool(b.Value) - if err != nil { - return nil, fmt.Errorf("error parsing boolean value for %s: %s", name, err) - } - } - } + var err error + conf.Filter, err = c.buildFilter(tbl) + if err != nil { + return conf, err } + return conf, nil +} - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := 
kv.Value.(*ast.String); ok { - conf.MeasurementPrefix = str.Value - } - } +// buildParser parses Parser specific items from the ast.Table, +// builds the filter and returns a +// models.ParserConfig to be inserted into models.RunningParser +func (c *Config) buildParser(name string, tbl *ast.Table) (*models.ParserConfig, error) { + var dataformat string + c.getFieldString(tbl, "data_format", &dataformat) + + conf := &models.ParserConfig{ + Parent: name, + DataFormat: dataformat, } - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.MeasurementSuffix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.NameOverride = str.Value - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.Alias = str.Value - } - } - } - - conf.Tags = make(map[string]string) - if node, ok := tbl.Fields["tags"]; ok { - if subtbl, ok := node.(*ast.Table); ok { - if err := toml.UnmarshalTable(subtbl, conf.Tags); err != nil { - return nil, fmt.Errorf("could not parse tags for input %s", name) - } - } - } - - delete(tbl.Fields, "drop_original") - delete(tbl.Fields, "name_prefix") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "tags") - var err error - conf.Filter, err = buildFilter(tbl) - if err != nil { - return conf, err - } return conf, nil } // buildProcessor parses Processor specific items from the ast.Table, // builds the filter and returns a // models.ProcessorConfig to be inserted into models.RunningProcessor -func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { +func (c *Config) buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { conf := &models.ProcessorConfig{Name: name} - if node, ok := tbl.Fields["order"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Integer); ok { - var err error - conf.Order, err = strconv.ParseInt(b.Value, 10, 64) - if err != nil { - return nil, fmt.Errorf("error parsing int value for %s: %s", name, err) - } - } - } - } + c.getFieldInt64(tbl, "order", &conf.Order) + c.getFieldString(tbl, "alias", &conf.Alias) - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - conf.Alias = str.Value - } - } + if c.hasErrs() { + return nil, c.firstErr() } - delete(tbl.Fields, "alias") - delete(tbl.Fields, "order") var err error - conf.Filter, err = buildFilter(tbl) + conf.Filter, err = c.buildFilter(tbl) if err != nil { return conf, err } @@ -1216,1004 +1055,575 @@ func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error // (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to // be inserted into the models.OutputConfig/models.InputConfig // to be used for glob filtering on tags and measurements -func buildFilter(tbl *ast.Table) (models.Filter, error) { +func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) { f := models.Filter{} - if node, ok := tbl.Fields["namepass"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.NamePass = append(f.NamePass, str.Value) - } - } - } - } - } - - if node, ok := 
tbl.Fields["namedrop"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.NameDrop = append(f.NameDrop, str.Value) - } - } - } - } - } + c.getFieldStringSlice(tbl, "namepass", &f.NamePass) + c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop) - fields := []string{"pass", "fieldpass"} - for _, field := range fields { - if node, ok := tbl.Fields[field]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.FieldPass = append(f.FieldPass, str.Value) - } - } - } - } - } - } + c.getFieldStringSlice(tbl, "pass", &f.FieldPass) + c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass) - fields = []string{"drop", "fielddrop"} - for _, field := range fields { - if node, ok := tbl.Fields[field]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.FieldDrop = append(f.FieldDrop, str.Value) - } - } - } - } - } - } + c.getFieldStringSlice(tbl, "drop", &f.FieldDrop) + c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop) - if node, ok := tbl.Fields["tagpass"]; ok { - if subtbl, ok := node.(*ast.Table); ok { - for name, val := range subtbl.Fields { - if kv, ok := val.(*ast.KeyValue); ok { - tagfilter := &models.TagFilter{Name: name} - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - tagfilter.Filter = append(tagfilter.Filter, str.Value) - } - } - } - f.TagPass = append(f.TagPass, *tagfilter) - } - } - } - } + c.getFieldTagFilter(tbl, "tagpass", &f.TagPass) + c.getFieldTagFilter(tbl, "tagdrop", &f.TagDrop) - if node, ok := tbl.Fields["tagdrop"]; ok { - if subtbl, ok := node.(*ast.Table); ok { - for name, val := range subtbl.Fields { - if kv, ok := val.(*ast.KeyValue); ok { - tagfilter := &models.TagFilter{Name: name} - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - tagfilter.Filter = append(tagfilter.Filter, str.Value) - } - } - } - f.TagDrop = append(f.TagDrop, *tagfilter) - } - } - } - } + c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude) + c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude) - if node, ok := tbl.Fields["tagexclude"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.TagExclude = append(f.TagExclude, str.Value) - } - } - } - } + if c.hasErrs() { + return f, c.firstErr() } - if node, ok := tbl.Fields["taginclude"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - f.TagInclude = append(f.TagInclude, str.Value) - } - } - } - } - } if err := f.Compile(); err != nil { return f, err } - delete(tbl.Fields, "namedrop") - delete(tbl.Fields, "namepass") - delete(tbl.Fields, "fielddrop") - delete(tbl.Fields, "fieldpass") - delete(tbl.Fields, "drop") - delete(tbl.Fields, "pass") - delete(tbl.Fields, "tagdrop") - delete(tbl.Fields, "tagpass") - delete(tbl.Fields, "tagexclude") - delete(tbl.Fields, "taginclude") return f, nil } // buildInput parses input specific items from the ast.Table, // builds the filter and returns a // models.InputConfig to be inserted into 
models.RunningInput -func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { +func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { cp := &models.InputConfig{Name: name} - - if err := getConfigDuration(tbl, "interval", &cp.Interval); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "precision", &cp.Precision); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "collection_jitter", &cp.CollectionJitter); err != nil { - return nil, err - } - - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.MeasurementPrefix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.MeasurementSuffix = str.Value - } - } - } - - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.NameOverride = str.Value - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - cp.Alias = str.Value - } - } - } + c.getFieldDuration(tbl, "interval", &cp.Interval) + c.getFieldDuration(tbl, "precision", &cp.Precision) + c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter) + c.getFieldDuration(tbl, "collection_offset", &cp.CollectionOffset) + c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix) + c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix) + c.getFieldString(tbl, "name_override", &cp.NameOverride) + c.getFieldString(tbl, "alias", &cp.Alias) cp.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { - if err := toml.UnmarshalTable(subtbl, cp.Tags); err != nil { - return nil, fmt.Errorf("could not parse tags for input %s\n", name) + if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil { + return nil, fmt.Errorf("could not parse tags for input %s", name) } } } - delete(tbl.Fields, "name_prefix") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "tags") + if c.hasErrs() { + return nil, c.firstErr() + } + var err error - cp.Filter, err = buildFilter(tbl) + cp.Filter, err = c.buildFilter(tbl) if err != nil { return cp, err } return cp, nil } -// buildParser grabs the necessary entries from the ast.Table for creating +// buildParserOld grabs the necessary entries from the ast.Table for creating // a parsers.Parser object, and creates it, which can then be added onto // an Input object. 
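The refactor running through buildFilter and buildInput above replaces dozens of hand-rolled ast.Table type assertions with small getField* accessors that record errors on the Config instead of returning one per call. A toy version of that accessor style over a plain map rather than the TOML AST, with illustrative names:

package main

import "fmt"

type conf struct {
	fields map[string]interface{}
	errs   []error
}

// getFieldString copies fields[key] into target when present, collecting a
// typed error instead of returning one at every call site.
func (c *conf) getFieldString(key string, target *string) {
	raw, ok := c.fields[key]
	if !ok {
		return
	}
	s, ok := raw.(string)
	if !ok {
		c.errs = append(c.errs, fmt.Errorf("%q: expected string, got %T", key, raw))
		return
	}
	*target = s
}

func main() {
	c := &conf{fields: map[string]interface{}{
		"name_override": "disk_metrics",
		"alias":         42, // wrong type on purpose
	}}

	var nameOverride, alias string
	c.getFieldString("name_override", &nameOverride)
	c.getFieldString("alias", &alias)

	fmt.Println("name_override:", nameOverride)
	fmt.Println("errors:", c.errs)
}

Call sites then consult the accumulated errors once, which is what the hasErrs/firstErr pair in the diff does, instead of threading an error value through every assignment.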
-func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { - config, err := getParserConfig(name, tbl) +func (c *Config) buildParserOld(name string, config *parsers.Config) (telegraf.Parser, error) { + parser, err := parsers.NewParser(config) if err != nil { return nil, err } - return parsers.NewParser(config) + logger := models.NewLogger("parsers", config.DataFormat, name) + models.SetLoggerOnPlugin(parser, logger) + if initializer, ok := parser.(telegraf.Initializer); ok { + if err := initializer.Init(); err != nil { + return nil, err + } + } + + return parser, nil } -func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { - c := &parsers.Config{ +func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { + pc := &parsers.Config{ JSONStrict: true, } - if node, ok := tbl.Fields["data_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataFormat = str.Value - } - } - } + c.getFieldString(tbl, "data_format", &pc.DataFormat) // Legacy support, exec plugin originally parsed JSON by default. - if name == "exec" && c.DataFormat == "" { - c.DataFormat = "json" - } else if c.DataFormat == "" { - c.DataFormat = "influx" - } + if name == "exec" && pc.DataFormat == "" { + pc.DataFormat = "json" + } else if pc.DataFormat == "" { + pc.DataFormat = "influx" + } + + c.getFieldString(tbl, "separator", &pc.Separator) + + c.getFieldStringSlice(tbl, "templates", &pc.Templates) + c.getFieldStringSlice(tbl, "tag_keys", &pc.TagKeys) + c.getFieldStringSlice(tbl, "json_string_fields", &pc.JSONStringFields) + c.getFieldString(tbl, "json_name_key", &pc.JSONNameKey) + c.getFieldString(tbl, "json_query", &pc.JSONQuery) + c.getFieldString(tbl, "json_time_key", &pc.JSONTimeKey) + c.getFieldString(tbl, "json_time_format", &pc.JSONTimeFormat) + c.getFieldString(tbl, "json_timezone", &pc.JSONTimezone) + c.getFieldBool(tbl, "json_strict", &pc.JSONStrict) + c.getFieldString(tbl, "data_type", &pc.DataType) + c.getFieldString(tbl, "collectd_auth_file", &pc.CollectdAuthFile) + c.getFieldString(tbl, "collectd_security_level", &pc.CollectdSecurityLevel) + c.getFieldString(tbl, "collectd_parse_multivalue", &pc.CollectdSplit) + + c.getFieldStringSlice(tbl, "collectd_typesdb", &pc.CollectdTypesDB) + + c.getFieldString(tbl, "dropwizard_metric_registry_path", &pc.DropwizardMetricRegistryPath) + c.getFieldString(tbl, "dropwizard_time_path", &pc.DropwizardTimePath) + c.getFieldString(tbl, "dropwizard_time_format", &pc.DropwizardTimeFormat) + c.getFieldString(tbl, "dropwizard_tags_path", &pc.DropwizardTagsPath) + c.getFieldStringMap(tbl, "dropwizard_tag_paths", &pc.DropwizardTagPathsMap) - if node, ok := tbl.Fields["separator"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Separator = str.Value + //for grok data_format + c.getFieldStringSlice(tbl, "grok_named_patterns", &pc.GrokNamedPatterns) + c.getFieldStringSlice(tbl, "grok_patterns", &pc.GrokPatterns) + c.getFieldString(tbl, "grok_custom_patterns", &pc.GrokCustomPatterns) + c.getFieldStringSlice(tbl, "grok_custom_pattern_files", &pc.GrokCustomPatternFiles) + c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone) + c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp) + + c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) + + c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName) + + // for influx parser + c.getFieldString(tbl, "influx_parser_type", 
&pc.InfluxParserType) + + // for XPath parser family + if choice.Contains(pc.DataFormat, []string{"xml", "xpath_json", "xpath_msgpack", "xpath_protobuf"}) { + c.getFieldString(tbl, "xpath_protobuf_file", &pc.XPathProtobufFile) + c.getFieldString(tbl, "xpath_protobuf_type", &pc.XPathProtobufType) + c.getFieldStringSlice(tbl, "xpath_protobuf_import_paths", &pc.XPathProtobufImportPaths) + c.getFieldBool(tbl, "xpath_print_document", &pc.XPathPrintDocument) + + // Determine the actual xpath configuration tables + node, xpathOK := tbl.Fields["xpath"] + if !xpathOK { + // Add this for backward compatibility + node, xpathOK = tbl.Fields[pc.DataFormat] + } + if xpathOK { + if subtbls, ok := node.([]*ast.Table); ok { + pc.XPathConfig = make([]xpath.Config, len(subtbls)) + for i, subtbl := range subtbls { + subcfg := pc.XPathConfig[i] + c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery) + c.getFieldString(subtbl, "metric_selection", &subcfg.Selection) + c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp) + c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt) + c.getFieldStringMap(subtbl, "tags", &subcfg.Tags) + c.getFieldStringMap(subtbl, "fields", &subcfg.Fields) + c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt) + c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection) + c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand) + c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery) + c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery) + c.getFieldString(subtbl, "tag_selection", &subcfg.TagSelection) + c.getFieldBool(subtbl, "tag_name_expansion", &subcfg.TagNameExpand) + c.getFieldString(subtbl, "tag_name", &subcfg.TagNameQuery) + c.getFieldString(subtbl, "tag_value", &subcfg.TagValueQuery) + pc.XPathConfig[i] = subcfg + } } } } - if node, ok := tbl.Fields["templates"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.Templates = append(c.Templates, str.Value) + // for JSON_v2 parser + if node, ok := tbl.Fields["json_v2"]; ok { + if metricConfigs, ok := node.([]*ast.Table); ok { + pc.JSONV2Config = make([]json_v2.Config, len(metricConfigs)) + for i, metricConfig := range metricConfigs { + mc := pc.JSONV2Config[i] + c.getFieldString(metricConfig, "measurement_name", &mc.MeasurementName) + if mc.MeasurementName == "" { + mc.MeasurementName = name + } + c.getFieldString(metricConfig, "measurement_name_path", &mc.MeasurementNamePath) + c.getFieldString(metricConfig, "timestamp_path", &mc.TimestampPath) + c.getFieldString(metricConfig, "timestamp_format", &mc.TimestampFormat) + c.getFieldString(metricConfig, "timestamp_timezone", &mc.TimestampTimezone) + + mc.Fields = getFieldSubtable(c, metricConfig) + mc.Tags = getTagSubtable(c, metricConfig) + + if objectconfigs, ok := metricConfig.Fields["object"]; ok { + if objectconfigs, ok := objectconfigs.([]*ast.Table); ok { + for _, objectConfig := range objectconfigs { + var o json_v2.Object + c.getFieldString(objectConfig, "path", &o.Path) + c.getFieldBool(objectConfig, "optional", &o.Optional) + c.getFieldString(objectConfig, "timestamp_key", &o.TimestampKey) + c.getFieldString(objectConfig, "timestamp_format", &o.TimestampFormat) + c.getFieldString(objectConfig, "timestamp_timezone", &o.TimestampTimezone) + c.getFieldBool(objectConfig, "disable_prepend_keys", &o.DisablePrependKeys) + c.getFieldStringSlice(objectConfig, "included_keys", 
&o.IncludedKeys) + c.getFieldStringSlice(objectConfig, "excluded_keys", &o.ExcludedKeys) + c.getFieldStringSlice(objectConfig, "tags", &o.Tags) + c.getFieldStringMap(objectConfig, "renames", &o.Renames) + c.getFieldStringMap(objectConfig, "fields", &o.Fields) + + o.FieldPaths = getFieldSubtable(c, objectConfig) + o.TagPaths = getTagSubtable(c, objectConfig) + + mc.JSONObjects = append(mc.JSONObjects, o) + } } } + + pc.JSONV2Config[i] = mc } } } - if node, ok := tbl.Fields["tag_keys"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.TagKeys = append(c.TagKeys, str.Value) - } - } - } - } + pc.MetricName = name + + if c.hasErrs() { + return nil, c.firstErr() } - if node, ok := tbl.Fields["json_string_fields"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.JSONStringFields = append(c.JSONStringFields, str.Value) - } - } + return pc, nil +} + +func getFieldSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet { + var fields []json_v2.DataSet + + if fieldConfigs, ok := metricConfig.Fields["field"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var f json_v2.DataSet + c.getFieldString(fieldconfig, "path", &f.Path) + c.getFieldString(fieldconfig, "rename", &f.Rename) + c.getFieldString(fieldconfig, "type", &f.Type) + c.getFieldBool(fieldconfig, "optional", &f.Optional) + fields = append(fields, f) } } } - if node, ok := tbl.Fields["json_name_key"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONNameKey = str.Value + return fields +} + +func getTagSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet { + var tags []json_v2.DataSet + + if fieldConfigs, ok := metricConfig.Fields["tag"]; ok { + if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok { + for _, fieldconfig := range fieldConfigs { + var t json_v2.DataSet + c.getFieldString(fieldconfig, "path", &t.Path) + c.getFieldString(fieldconfig, "rename", &t.Rename) + t.Type = "string" + tags = append(tags, t) + c.getFieldBool(fieldconfig, "optional", &t.Optional) } } } - if node, ok := tbl.Fields["json_query"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONQuery = str.Value - } - } + return tags +} + +// buildSerializer grabs the necessary entries from the ast.Table for creating +// a serializers.Serializer object, and creates it, which can then be added onto +// an Output object. 
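
Both subtable loops above (xpath and json_v2) use the same copy-out/write-back shape: `subcfg := pc.XPathConfig[i]` and `mc := pc.JSONV2Config[i]` take value copies, the getField* helpers mutate the copy through pointers, and each iteration must assign the result back (`pc.XPathConfig[i] = subcfg`, `pc.JSONV2Config[i] = mc`). A compact sketch of why the write-back is required in Go:

```go
package main

import "fmt"

type xpathConfig struct{ Selection string }

func main() {
	cfgs := make([]xpathConfig, 1)

	// Indexing by value copies the element; writes through pointers
	// into the copy never reach the slice itself.
	sub := cfgs[0]
	set := func(target *string) { *target = "/metrics/*" }
	set(&sub.Selection)
	fmt.Printf("before write-back: %q\n", cfgs[0].Selection) // ""

	// The explicit write-back is what persists the parsed values.
	cfgs[0] = sub
	fmt.Printf("after write-back:  %q\n", cfgs[0].Selection) // "/metrics/*"
}
```
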
+func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) { + sc := &serializers.Config{TimestampUnits: 1 * time.Second} + + c.getFieldString(tbl, "data_format", &sc.DataFormat) + + if sc.DataFormat == "" { + sc.DataFormat = "influx" } - if node, ok := tbl.Fields["json_time_key"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimeKey = str.Value - } - } + c.getFieldString(tbl, "prefix", &sc.Prefix) + c.getFieldString(tbl, "template", &sc.Template) + c.getFieldStringSlice(tbl, "templates", &sc.Templates) + c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format) + c.getFieldString(tbl, "carbon2_sanitize_replace_char", &sc.Carbon2SanitizeReplaceChar) + c.getFieldBool(tbl, "csv_column_prefix", &sc.CSVPrefix) + c.getFieldBool(tbl, "csv_header", &sc.CSVHeader) + c.getFieldString(tbl, "csv_separator", &sc.CSVSeparator) + c.getFieldString(tbl, "csv_timestamp_format", &sc.TimestampFormat) + c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes) + c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields) + c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport) + c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport) + c.getFieldString(tbl, "graphite_tag_sanitize_mode", &sc.GraphiteTagSanitizeMode) + + c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator) + + c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits) + c.getFieldString(tbl, "json_timestamp_format", &sc.TimestampFormat) + + c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting) + c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric) + + c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride) + c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict) + c.getFieldBool(tbl, "wavefront_disable_prefix_conversion", &sc.WavefrontDisablePrefixConversion) + + c.getFieldBool(tbl, "prometheus_export_timestamp", &sc.PrometheusExportTimestamp) + c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics) + c.getFieldBool(tbl, "prometheus_string_as_label", &sc.PrometheusStringAsLabel) + + if c.hasErrs() { + return nil, c.firstErr() } - if node, ok := tbl.Fields["json_time_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimeFormat = str.Value - } - } + return serializers.NewSerializer(sc) +} + +// buildOutput parses output specific items from the ast.Table, +// builds the filter and returns an +// models.OutputConfig to be inserted into models.RunningInput +// Note: error exists in the return for future calls that might require error +func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { + filter, err := c.buildFilter(tbl) + if err != nil { + return nil, err + } + oc := &models.OutputConfig{ + Name: name, + Filter: filter, } - if node, ok := tbl.Fields["json_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.JSONTimezone = str.Value - } - } + // TODO: support FieldPass/FieldDrop on outputs + + c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval) + c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter) + + c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit) + c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize) + c.getFieldString(tbl, "alias", &oc.Alias) + c.getFieldString(tbl, "name_override", &oc.NameOverride) + c.getFieldString(tbl, 
"name_suffix", &oc.NameSuffix) + c.getFieldString(tbl, "name_prefix", &oc.NamePrefix) + + if c.hasErrs() { + return nil, c.firstErr() } - if node, ok := tbl.Fields["json_strict"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.JSONStrict, err = b.Boolean() - if err != nil { - return nil, err - } - } - } + return oc, nil +} + +func (c *Config) missingTomlField(_ reflect.Type, key string) error { + switch key { + // General options to ignore + case "alias", + "collection_jitter", "collection_offset", + "data_format", "delay", "drop", "drop_original", + "fielddrop", "fieldpass", "flush_interval", "flush_jitter", + "grace", + "interval", + "lvm", // What is this used for? + "metric_batch_size", "metric_buffer_limit", + "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", + "order", + "pass", "period", "precision", + "tagdrop", "tagexclude", "taginclude", "tagpass", "tags": + + // Parser options to ignore + case "data_type", "separator", "tag_keys", + // "templates", // shared with serializers + "grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns", + "grok_timezone", "grok_unique_timestamp", + "influx_parser_type", + "prometheus_ignore_timestamp", // not used anymore? + "value_field_name": + + // Serializer options to ignore + case "prefix", "template", "templates", + "carbon2_format", "carbon2_sanitize_replace_char", + "csv_column_prefix", "csv_header", "csv_separator", "csv_timestamp_format", + "graphite_tag_sanitize_mode", "graphite_tag_support", "graphite_separator", + "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", + "json_timestamp_format", "json_timestamp_units", + "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", + "splunkmetric_hec_routing", "splunkmetric_multimetric", + "wavefront_disable_prefix_conversion", "wavefront_source_override", "wavefront_use_strict": + default: + c.unusedFieldsMutex.Lock() + c.UnusedFields[key] = true + c.unusedFieldsMutex.Unlock() } + return nil +} - if node, ok := tbl.Fields["data_type"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataType = str.Value - } +func (c *Config) setLocalMissingTomlFieldTracker(counter map[string]int) { + f := func(_ reflect.Type, key string) error { + if c, ok := counter[key]; ok { + counter[key] = c + 1 + } else { + counter[key] = 1 } + return nil } + c.toml.MissingField = f +} + +func (c *Config) resetMissingTomlFieldTracker() { + c.toml.MissingField = c.missingTomlField +} - if node, ok := tbl.Fields["collectd_auth_file"]; ok { +func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { - c.CollectdAuthFile = str.Value + *target = str.Value } } } +} - if node, ok := tbl.Fields["collectd_security_level"]; ok { +func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { - c.CollectdSecurityLevel = str.Value + d, err := time.ParseDuration(str.Value) + if err != nil { + c.addError(tbl, fmt.Errorf("error parsing duration: %w", err)) + return + } + targetVal := reflect.ValueOf(target).Elem() + targetVal.Set(reflect.ValueOf(d)) } } } +} - if node, ok := tbl.Fields["collectd_parse_multivalue"]; ok 
{ +func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) { + var err error + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CollectdSplit = str.Value + switch t := kv.Value.(type) { + case *ast.Boolean: + *target, err = t.Boolean() + if err != nil { + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value)) + return + } + case *ast.String: + *target, err = strconv.ParseBool(t.Value) + if err != nil { + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value)) + return + } + default: + c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value.Source())) + return } } } +} - if node, ok := tbl.Fields["collectd_typesdb"]; ok { +func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CollectdTypesDB = append(c.CollectdTypesDB, str.Value) - } + if iAst, ok := kv.Value.(*ast.Integer); ok { + i, err := iAst.Int() + if err != nil { + c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) + return } + *target = int(i) } } } +} - if node, ok := tbl.Fields["dropwizard_metric_registry_path"]; ok { +func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardMetricRegistryPath = str.Value + if iAst, ok := kv.Value.(*ast.Integer); ok { + i, err := iAst.Int() + if err != nil { + c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) + return + } + *target = i } } } - if node, ok := tbl.Fields["dropwizard_time_path"]; ok { +} + +func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) { + if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTimePath = str.Value + ary, ok := kv.Value.(*ast.Array) + if !ok { + c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format", fieldName)) + return + } + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + *target = append(*target, str.Value) + } } } } - if node, ok := tbl.Fields["dropwizard_time_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTimeFormat = str.Value - } - } - } - if node, ok := tbl.Fields["dropwizard_tags_path"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTagsPath = str.Value - } - } - } - c.DropwizardTagPathsMap = make(map[string]string) - if node, ok := tbl.Fields["dropwizard_tag_paths"]; ok { +} + +func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) { + if node, ok := tbl.Fields[fieldName]; ok { if subtbl, ok := node.(*ast.Table); ok { for name, val := range subtbl.Fields { if kv, ok := val.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DropwizardTagPathsMap[name] = str.Value - } - } - } - } - } - - //for grok data_format - if node, ok := tbl.Fields["grok_named_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - 
if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokNamedPatterns = append(c.GrokNamedPatterns, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["grok_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokPatterns = append(c.GrokPatterns, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["grok_custom_patterns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokCustomPatterns = str.Value - } - } - } - - if node, ok := tbl.Fields["grok_custom_pattern_files"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.GrokCustomPatternFiles = append(c.GrokCustomPatternFiles, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["grok_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokTimezone = str.Value - } - } - } - - if node, ok := tbl.Fields["grok_unique_timestamp"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GrokUniqueTimestamp = str.Value - } - } - } - - //for csv parser - if node, ok := tbl.Fields["csv_column_names"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVColumnNames = append(c.CSVColumnNames, str.Value) + ary, ok := kv.Value.(*ast.Array) + if !ok { + c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format on each entry", fieldName)) + return } - } - } - } - } - - if node, ok := tbl.Fields["csv_column_types"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVColumnTypes = append(c.CSVColumnTypes, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["csv_tag_columns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.CSVTagColumns = append(c.CSVTagColumns, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["csv_delimiter"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVDelimiter = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_comment"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVComment = str.Value - } - } - } - if node, ok := tbl.Fields["csv_measurement_column"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVMeasurementColumn = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_timestamp_column"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVTimestampColumn = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_timestamp_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.CSVTimestampFormat = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_timezone"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - 
c.CSVTimezone = str.Value - } - } - } - - if node, ok := tbl.Fields["csv_header_row_count"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVHeaderRowCount = int(v) - } - } - } - - if node, ok := tbl.Fields["csv_skip_rows"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVSkipRows = int(v) - } - } - } - - if node, ok := tbl.Fields["csv_skip_columns"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.CSVSkipColumns = int(v) - } - } - } - - if node, ok := tbl.Fields["csv_trim_space"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.Boolean); ok { - //for config with no quotes - val, err := strconv.ParseBool(str.Value) - c.CSVTrimSpace = val - if err != nil { - return nil, fmt.Errorf("E! parsing to bool: %v", err) - } - } - } - } - - if node, ok := tbl.Fields["form_urlencoded_tag_keys"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.FormUrlencodedTagKeys = append(c.FormUrlencodedTagKeys, str.Value) + tagFilter := models.TagFilter{Name: name} + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + tagFilter.Filter = append(tagFilter.Filter, str.Value) + } } + *target = append(*target, tagFilter) } } } } - - c.MetricName = name - - delete(tbl.Fields, "data_format") - delete(tbl.Fields, "separator") - delete(tbl.Fields, "templates") - delete(tbl.Fields, "tag_keys") - delete(tbl.Fields, "json_name_key") - delete(tbl.Fields, "json_query") - delete(tbl.Fields, "json_string_fields") - delete(tbl.Fields, "json_time_format") - delete(tbl.Fields, "json_time_key") - delete(tbl.Fields, "json_timezone") - delete(tbl.Fields, "json_strict") - delete(tbl.Fields, "data_type") - delete(tbl.Fields, "collectd_auth_file") - delete(tbl.Fields, "collectd_security_level") - delete(tbl.Fields, "collectd_typesdb") - delete(tbl.Fields, "collectd_parse_multivalue") - delete(tbl.Fields, "dropwizard_metric_registry_path") - delete(tbl.Fields, "dropwizard_time_path") - delete(tbl.Fields, "dropwizard_time_format") - delete(tbl.Fields, "dropwizard_tags_path") - delete(tbl.Fields, "dropwizard_tag_paths") - delete(tbl.Fields, "grok_named_patterns") - delete(tbl.Fields, "grok_patterns") - delete(tbl.Fields, "grok_custom_patterns") - delete(tbl.Fields, "grok_custom_pattern_files") - delete(tbl.Fields, "grok_timezone") - delete(tbl.Fields, "grok_unique_timestamp") - delete(tbl.Fields, "csv_column_names") - delete(tbl.Fields, "csv_column_types") - delete(tbl.Fields, "csv_comment") - delete(tbl.Fields, "csv_delimiter") - delete(tbl.Fields, "csv_field_columns") - delete(tbl.Fields, "csv_header_row_count") - delete(tbl.Fields, "csv_measurement_column") - delete(tbl.Fields, "csv_skip_columns") - delete(tbl.Fields, "csv_skip_rows") - delete(tbl.Fields, "csv_tag_columns") - delete(tbl.Fields, "csv_timestamp_column") - delete(tbl.Fields, "csv_timestamp_format") - delete(tbl.Fields, "csv_timezone") - delete(tbl.Fields, "csv_trim_space") - delete(tbl.Fields, "form_urlencoded_tag_keys") - - return c, nil } -// buildSerializer grabs the necessary entries from the ast.Table for creating -// a 
serializers.Serializer object, and creates it, which can then be added onto -// an Output object. -func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) { - c := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)} - - if node, ok := tbl.Fields["data_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.DataFormat = str.Value - } - } - } - - if c.DataFormat == "" { - c.DataFormat = "influx" - } - - if node, ok := tbl.Fields["prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Prefix = str.Value - } - } - } - - if node, ok := tbl.Fields["template"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Template = str.Value - } - } - } - - if node, ok := tbl.Fields["templates"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.Templates = append(c.Templates, str.Value) - } - } - } - } - } - - if node, ok := tbl.Fields["carbon2_format"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.Carbon2Format = str.Value - } - } - } - - if node, ok := tbl.Fields["influx_max_line_bytes"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - c.InfluxMaxLineBytes = int(v) - } - } - } - - if node, ok := tbl.Fields["influx_sort_fields"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.InfluxSortFields, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["influx_uint_support"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.InfluxUintSupport, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["graphite_tag_support"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.GraphiteTagSupport, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["graphite_separator"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - c.GraphiteSeparator = str.Value - } - } - } - - if node, ok := tbl.Fields["json_timestamp_units"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - timestampVal, err := time.ParseDuration(str.Value) - if err != nil { - return nil, fmt.Errorf("Unable to parse json_timestamp_units as a duration, %s", err) - } - // now that we have a duration, truncate it to the nearest - // power of ten (just in case) - nearest_exponent := int64(math.Log10(float64(timestampVal.Nanoseconds()))) - new_nanoseconds := int64(math.Pow(10.0, float64(nearest_exponent))) - c.TimestampUnits = time.Duration(new_nanoseconds) - } - } - } - - if node, ok := tbl.Fields["splunkmetric_hec_routing"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.HecRouting, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["splunkmetric_multimetric"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - 
c.SplunkmetricMultiMetric, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["wavefront_source_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if ary, ok := kv.Value.(*ast.Array); ok { - for _, elem := range ary.Value { - if str, ok := elem.(*ast.String); ok { - c.WavefrontSourceOverride = append(c.WavefrontSourceOverride, str.Value) +func (c *Config) getFieldStringMap(tbl *ast.Table, fieldName string, target *map[string]string) { + *target = map[string]string{} + if node, ok := tbl.Fields[fieldName]; ok { + if subtbl, ok := node.(*ast.Table); ok { + for name, val := range subtbl.Fields { + if kv, ok := val.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + (*target)[name] = str.Value } } } } } - - if node, ok := tbl.Fields["wavefront_use_strict"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.WavefrontUseStrict, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_export_timestamp"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusExportTimestamp, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_sort_metrics"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusSortMetrics, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - if node, ok := tbl.Fields["prometheus_string_as_label"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if b, ok := kv.Value.(*ast.Boolean); ok { - var err error - c.PrometheusStringAsLabel, err = b.Boolean() - if err != nil { - return nil, err - } - } - } - } - - delete(tbl.Fields, "carbon2_format") - delete(tbl.Fields, "influx_max_line_bytes") - delete(tbl.Fields, "influx_sort_fields") - delete(tbl.Fields, "influx_uint_support") - delete(tbl.Fields, "graphite_tag_support") - delete(tbl.Fields, "graphite_separator") - delete(tbl.Fields, "data_format") - delete(tbl.Fields, "prefix") - delete(tbl.Fields, "template") - delete(tbl.Fields, "templates") - delete(tbl.Fields, "json_timestamp_units") - delete(tbl.Fields, "splunkmetric_hec_routing") - delete(tbl.Fields, "splunkmetric_multimetric") - delete(tbl.Fields, "wavefront_source_override") - delete(tbl.Fields, "wavefront_use_strict") - delete(tbl.Fields, "prometheus_export_timestamp") - delete(tbl.Fields, "prometheus_sort_metrics") - delete(tbl.Fields, "prometheus_string_as_label") - return serializers.NewSerializer(c) } -// buildOutput parses output specific items from the ast.Table, -// builds the filter and returns an -// models.OutputConfig to be inserted into models.RunningInput -// Note: error exists in the return for future calls that might require error -func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { - filter, err := buildFilter(tbl) - if err != nil { - return nil, err - } - oc := &models.OutputConfig{ - Name: name, - Filter: filter, - } - - // TODO - // Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass - if len(oc.Filter.FieldDrop) > 0 { - oc.Filter.NameDrop = oc.Filter.FieldDrop - } - if len(oc.Filter.FieldPass) > 0 { - oc.Filter.NamePass = oc.Filter.FieldPass - } - - if err := getConfigDuration(tbl, "flush_interval", &oc.FlushInterval); err != nil { - return nil, err - } - - if err := getConfigDuration(tbl, "flush_jitter", 
&oc.FlushJitter); err != nil { - return nil, err - } - - if node, ok := tbl.Fields["metric_buffer_limit"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - oc.MetricBufferLimit = int(v) - } - } - } - - if node, ok := tbl.Fields["metric_batch_size"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if integer, ok := kv.Value.(*ast.Integer); ok { - v, err := integer.Int() - if err != nil { - return nil, err - } - oc.MetricBatchSize = int(v) - } - } - } - - if node, ok := tbl.Fields["alias"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.Alias = str.Value - } - } - } - - if node, ok := tbl.Fields["name_override"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NameOverride = str.Value - } - } +func keys(m map[string]bool) []string { + result := []string{} + for k := range m { + result = append(result, k) } + return result +} - if node, ok := tbl.Fields["name_suffix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NameSuffix = str.Value - } - } - } +func (c *Config) hasErrs() bool { + return len(c.errs) > 0 +} - if node, ok := tbl.Fields["name_prefix"]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - oc.NamePrefix = str.Value - } - } +func (c *Config) firstErr() error { + if len(c.errs) == 0 { + return nil } + return c.errs[0] +} - delete(tbl.Fields, "metric_buffer_limit") - delete(tbl.Fields, "metric_batch_size") - delete(tbl.Fields, "alias") - delete(tbl.Fields, "name_override") - delete(tbl.Fields, "name_suffix") - delete(tbl.Fields, "name_prefix") - - return oc, nil +func (c *Config) addError(tbl *ast.Table, err error) { + c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err)) } // unwrappable lets you retrieve the original telegraf.Processor from the @@ -2222,19 +1632,3 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { type unwrappable interface { Unwrap() telegraf.Processor } - -func getConfigDuration(tbl *ast.Table, key string, target *time.Duration) error { - if node, ok := tbl.Fields[key]; ok { - if kv, ok := node.(*ast.KeyValue); ok { - if str, ok := kv.Value.(*ast.String); ok { - d, err := time.ParseDuration(str.Value) - if err != nil { - return err - } - delete(tbl.Fields, key) - *target = d - } - } - } - return nil -} diff --git a/config/config_test.go b/config/config_test.go index 42aefff151761..b8a7dfc3b6bdd 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,34 +1,38 @@ package config import ( + "fmt" + "net/http" + "net/http/httptest" "os" + "reflect" + "runtime" + "strings" + "sync" "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/exec" - "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" - "github.com/influxdata/telegraf/plugins/inputs/memcached" - "github.com/influxdata/telegraf/plugins/inputs/procstat" - "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" - httpOut "github.com/influxdata/telegraf/plugins/outputs/http" 
+ "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + _ "github.com/influxdata/telegraf/plugins/parsers/all" // Blank import to have all parsers for testing ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { c := NewConfig() - err := os.Setenv("MY_TEST_SERVER", "192.168.1.1") - assert.NoError(t, err) - err = os.Setenv("TEST_INTERVAL", "10s") - assert.NoError(t, err) + require.NoError(t, os.Setenv("MY_TEST_SERVER", "192.168.1.1")) + require.NoError(t, os.Setenv("TEST_INTERVAL", "10s")) c.LoadConfig("./testdata/single_plugin_env_vars.toml") - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"192.168.1.1"} + input := inputs.Inputs["memcached"]().(*MockupInputPlugin) + input.Servers = []string{"192.168.1.1"} filter := models.Filter{ NameDrop: []string{"metricname2"}, @@ -48,26 +52,27 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filter.Compile()) + inputConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 10 * time.Second, } - mConfig.Tags = make(map[string]string) + inputConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + // Ignore Log and Parser + c.Inputs[0].Input.(*MockupInputPlugin).Log = nil + c.Inputs[0].Input.(*MockupInputPlugin).parser = nil + require.Equal(t, input, c.Inputs[0].Input, "Testdata did not produce a correct mockup struct.") + require.Equal(t, inputConfig, c.Inputs[0].Config, "Testdata did not produce correct input metadata.") } func TestConfig_LoadSingleInput(t *testing.T) { c := NewConfig() c.LoadConfig("./testdata/single_plugin.toml") - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"localhost"} + input := inputs.Inputs["memcached"]().(*MockupInputPlugin) + input.Servers = []string{"localhost"} filter := models.Filter{ NameDrop: []string{"metricname2"}, @@ -87,35 +92,34 @@ func TestConfig_LoadSingleInput(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filter.Compile()) + inputConfig := &models.InputConfig{ Name: "memcached", Filter: filter, Interval: 5 * time.Second, } - mConfig.Tags = make(map[string]string) + inputConfig.Tags = make(map[string]string) - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + // Ignore Log and Parser + c.Inputs[0].Input.(*MockupInputPlugin).Log = nil + c.Inputs[0].Input.(*MockupInputPlugin).parser = nil + require.Equal(t, input, c.Inputs[0].Input, "Testdata did not produce a correct memcached struct.") + require.Equal(t, inputConfig, c.Inputs[0].Config, "Testdata did not produce correct memcached metadata.") } func TestConfig_LoadDirectory(t *testing.T) { c := NewConfig() - err := c.LoadConfig("./testdata/single_plugin.toml") - if err != nil { - t.Error(err) - } - err = c.LoadDirectory("./testdata/subconfig") - if err != nil { - t.Error(err) - } + require.NoError(t, c.LoadConfig("./testdata/single_plugin.toml")) + require.NoError(t, 
c.LoadDirectory("./testdata/subconfig")) - memcached := inputs.Inputs["memcached"]().(*memcached.Memcached) - memcached.Servers = []string{"localhost"} + // Create the expected data + expectedPlugins := make([]*MockupInputPlugin, 4) + expectedConfigs := make([]*models.InputConfig, 4) - filter := models.Filter{ + expectedPlugins[0] = inputs.Inputs["memcached"]().(*MockupInputPlugin) + expectedPlugins[0].Servers = []string{"localhost"} + + filterMockup := models.Filter{ NameDrop: []string{"metricname2"}, NamePass: []string{"metricname1"}, FieldDrop: []string{"other", "stuff"}, @@ -133,121 +137,152 @@ func TestConfig_LoadDirectory(t *testing.T) { }, }, } - assert.NoError(t, filter.Compile()) - mConfig := &models.InputConfig{ + require.NoError(t, filterMockup.Compile()) + expectedConfigs[0] = &models.InputConfig{ Name: "memcached", - Filter: filter, + Filter: filterMockup, Interval: 5 * time.Second, } - mConfig.Tags = make(map[string]string) - - assert.Equal(t, memcached, c.Inputs[0].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[0].Config, - "Testdata did not produce correct memcached metadata.") + expectedConfigs[0].Tags = make(map[string]string) - ex := inputs.Inputs["exec"]().(*exec.Exec) - p, err := parsers.NewParser(&parsers.Config{ + expectedPlugins[1] = inputs.Inputs["exec"]().(*MockupInputPlugin) + parserConfig := &parsers.Config{ MetricName: "exec", DataFormat: "json", JSONStrict: true, - }) - assert.NoError(t, err) - ex.SetParser(p) - ex.Command = "/usr/bin/myothercollector --foo=bar" - eConfig := &models.InputConfig{ - Name: "exec", - MeasurementSuffix: "_myothercollector", } - eConfig.Tags = make(map[string]string) - - exec := c.Inputs[1].Input.(*exec.Exec) - require.NotNil(t, exec.Log) - exec.Log = nil + p, err := parsers.NewParser(parserConfig) + require.NoError(t, err) - assert.Equal(t, ex, c.Inputs[1].Input, - "Merged Testdata did not produce a correct exec struct.") - assert.Equal(t, eConfig, c.Inputs[1].Config, - "Merged Testdata did not produce correct exec metadata.") + // Inject logger to have proper struct for comparison + models.SetLoggerOnPlugin(p, models.NewLogger("parsers", parserConfig.DataFormat, parserConfig.MetricName)) - memcached.Servers = []string{"192.168.1.1"} - assert.Equal(t, memcached, c.Inputs[2].Input, - "Testdata did not produce a correct memcached struct.") - assert.Equal(t, mConfig, c.Inputs[2].Config, - "Testdata did not produce correct memcached metadata.") + expectedPlugins[1].SetParser(p) + expectedPlugins[1].Command = "/usr/bin/myothercollector --foo=bar" + expectedConfigs[1] = &models.InputConfig{ + Name: "exec", + MeasurementSuffix: "_myothercollector", + } + expectedConfigs[1].Tags = make(map[string]string) - pstat := inputs.Inputs["procstat"]().(*procstat.Procstat) - pstat.PidFile = "/var/run/grafana-server.pid" + expectedPlugins[2] = inputs.Inputs["memcached"]().(*MockupInputPlugin) + expectedPlugins[2].Servers = []string{"192.168.1.1"} - pConfig := &models.InputConfig{Name: "procstat"} - pConfig.Tags = make(map[string]string) + filterMemcached := models.Filter{ + NameDrop: []string{"metricname2"}, + NamePass: []string{"metricname1"}, + FieldDrop: []string{"other", "stuff"}, + FieldPass: []string{"some", "strings"}, + TagDrop: []models.TagFilter{ + { + Name: "badtag", + Filter: []string{"othertag"}, + }, + }, + TagPass: []models.TagFilter{ + { + Name: "goodtag", + Filter: []string{"mytag"}, + }, + }, + } + require.NoError(t, filterMemcached.Compile()) + expectedConfigs[2] = 
&models.InputConfig{ + Name: "memcached", + Filter: filterMemcached, + Interval: 5 * time.Second, + } + expectedConfigs[2].Tags = make(map[string]string) + + expectedPlugins[3] = inputs.Inputs["procstat"]().(*MockupInputPlugin) + expectedPlugins[3].PidFile = "/var/run/grafana-server.pid" + expectedConfigs[3] = &models.InputConfig{Name: "procstat"} + expectedConfigs[3].Tags = make(map[string]string) + + // Check the generated plugins + require.Len(t, c.Inputs, len(expectedPlugins)) + require.Len(t, c.Inputs, len(expectedConfigs)) + for i, plugin := range c.Inputs { + input := plugin.Input.(*MockupInputPlugin) + // Check the logger and ignore it for comparison + require.NotNil(t, input.Log) + input.Log = nil + + // Ignore the parser if not expected + if expectedPlugins[i].parser == nil { + input.parser = nil + } + + require.Equalf(t, expectedPlugins[i], plugin.Input, "Plugin %d: incorrect struct produced", i) + require.Equalf(t, expectedConfigs[i], plugin.Config, "Plugin %d: incorrect config produced", i) + } +} - assert.Equal(t, pstat, c.Inputs[3].Input, - "Merged Testdata did not produce a correct procstat struct.") - assert.Equal(t, pConfig, c.Inputs[3].Config, - "Merged Testdata did not produce correct procstat metadata.") +func TestConfig_WrongCertPath(t *testing.T) { + c := NewConfig() + require.Error(t, c.LoadConfig("./testdata/wrong_cert_path.toml")) } func TestConfig_LoadSpecialTypes(t *testing.T) { c := NewConfig() - err := c.LoadConfig("./testdata/special_types.toml") - assert.NoError(t, err) - require.Equal(t, 1, len(c.Inputs)) + require.NoError(t, c.LoadConfig("./testdata/special_types.toml")) + require.Len(t, c.Inputs, 1) - inputHTTPListener, ok := c.Inputs[0].Input.(*http_listener_v2.HTTPListenerV2) - assert.Equal(t, true, ok) + input, ok := c.Inputs[0].Input.(*MockupInputPlugin) + require.True(t, ok) // Tests telegraf duration parsing. - assert.Equal(t, internal.Duration{Duration: time.Second}, inputHTTPListener.WriteTimeout) + require.Equal(t, Duration(time.Second), input.WriteTimeout) // Tests telegraf size parsing. - assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize) - // Tests toml multiline basic strings. - assert.Equal(t, "/path/to/my/cert\n", inputHTTPListener.TLSCert) + require.Equal(t, Size(1024*1024), input.MaxBodySize) + // Tests toml multiline basic strings on single line. + require.Equal(t, "./testdata/special_types.pem", input.TLSCert) + // Tests toml multiline basic strings on single line. + require.Equal(t, "./testdata/special_types.key", input.TLSKey) + // Tests toml multiline basic strings on multiple lines. 
+ require.Equal(t, "/path/", strings.TrimRight(input.Paths[0], "\r\n")) } func TestConfig_FieldNotDefined(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/invalid_field.toml") require.Error(t, err, "invalid field name") - assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: Error parsing http_listener_v2, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error()) - + require.Equal(t, "Error loading config file ./testdata/invalid_field.toml: plugin inputs.http_listener_v2: line 1: configuration specified the fields [\"not_a_field\"], but they weren't used", err.Error()) } func TestConfig_WrongFieldType(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/wrong_field_type.toml") require.Error(t, err, "invalid field type") - assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) + require.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Port) cannot unmarshal TOML string into int", err.Error()) c = NewConfig() err = c.LoadConfig("./testdata/wrong_field_type2.toml") require.Error(t, err, "invalid field type2") - assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) + require.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: error parsing http_listener_v2, line 2: (config.MockupInputPlugin.Methods) cannot unmarshal TOML string into []string", err.Error()) } func TestConfig_InlineTables(t *testing.T) { // #4098 c := NewConfig() - err := c.LoadConfig("./testdata/inline_table.toml") - assert.NoError(t, err) - require.Equal(t, 2, len(c.Outputs)) - - outputHTTP, ok := c.Outputs[1].Output.(*httpOut.HTTP) - assert.Equal(t, true, ok) - assert.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, outputHTTP.Headers) - assert.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) + require.NoError(t, c.LoadConfig("./testdata/inline_table.toml")) + require.Len(t, c.Outputs, 2) + + output, ok := c.Outputs[1].Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, output.Headers) + require.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) } func TestConfig_SliceComment(t *testing.T) { t.Skipf("Skipping until #3642 is resolved") c := NewConfig() - err := c.LoadConfig("./testdata/slice_comment.toml") - assert.NoError(t, err) - require.Equal(t, 1, len(c.Outputs)) + require.NoError(t, c.LoadConfig("./testdata/slice_comment.toml")) + require.Len(t, c.Outputs, 1) - outputHTTP, ok := c.Outputs[0].Output.(*httpOut.HTTP) - assert.Equal(t, []string{"test"}, outputHTTP.Scopes) - assert.Equal(t, true, ok) + output, ok := c.Outputs[0].Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, []string{"test"}, output.Scopes) } func TestConfig_BadOrdering(t *testing.T) { @@ -256,25 +291,432 @@ func TestConfig_BadOrdering(t *testing.T) { c := NewConfig() err := c.LoadConfig("./testdata/non_slice_slice.toml") require.Error(t, err, "bad ordering") - assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: 
Error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) + require.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) } func TestConfig_AzureMonitorNamespacePrefix(t *testing.T) { // #8256 Cannot use empty string as the namespace prefix c := NewConfig() - defaultPrefixConfig := `[[outputs.azure_monitor]]` - err := c.LoadConfigData([]byte(defaultPrefixConfig)) - assert.NoError(t, err) - azureMonitor, ok := c.Outputs[0].Output.(*azure_monitor.AzureMonitor) - assert.Equal(t, "Telegraf/", azureMonitor.NamespacePrefix) - assert.Equal(t, true, ok) + require.NoError(t, c.LoadConfig("./testdata/azure_monitor.toml")) + require.Len(t, c.Outputs, 2) + + expectedPrefix := []string{"Telegraf/", ""} + for i, plugin := range c.Outputs { + output, ok := plugin.Output.(*MockupOuputPlugin) + require.True(t, ok) + require.Equal(t, expectedPrefix[i], output.NamespacePrefix) + } +} - c = NewConfig() - customPrefixConfig := `[[outputs.azure_monitor]] - namespace_prefix = ""` - err = c.LoadConfigData([]byte(customPrefixConfig)) - assert.NoError(t, err) - azureMonitor, ok = c.Outputs[0].Output.(*azure_monitor.AzureMonitor) - assert.Equal(t, "", azureMonitor.NamespacePrefix) - assert.Equal(t, true, ok) +func TestConfig_URLRetries3Fails(t *testing.T) { + httpLoadConfigRetryInterval = 0 * time.Second + responseCounter := 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + responseCounter++ + })) + defer ts.Close() + + expected := fmt.Sprintf("Error loading config file %s: Retry 3 of 3 failed to retrieve remote config: 404 Not Found", ts.URL) + + c := NewConfig() + err := c.LoadConfig(ts.URL) + require.Error(t, err) + require.Equal(t, expected, err.Error()) + require.Equal(t, 4, responseCounter) +} + +func TestConfig_URLRetries3FailsThenPasses(t *testing.T) { + httpLoadConfigRetryInterval = 0 * time.Second + responseCounter := 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if responseCounter <= 2 { + w.WriteHeader(http.StatusNotFound) + } else { + w.WriteHeader(http.StatusOK) + } + responseCounter++ + })) + defer ts.Close() + + c := NewConfig() + require.NoError(t, c.LoadConfig(ts.URL)) + require.Equal(t, 4, responseCounter) +} + +func TestConfig_getDefaultConfigPathFromEnvURL(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + c := NewConfig() + err := os.Setenv("TELEGRAF_CONFIG_PATH", ts.URL) + require.NoError(t, err) + configPath, err := getDefaultConfigPath() + require.NoError(t, err) + require.Equal(t, ts.URL, configPath) + err = c.LoadConfig("") + require.NoError(t, err) +} + +func TestConfig_URLLikeFileName(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("http:##www.example.com.conf") + require.Error(t, err) + + if runtime.GOOS == "windows" { + // The error file not found error message is different on windows + require.Equal(t, "Error loading config file http:##www.example.com.conf: open http:##www.example.com.conf: The system cannot find the file specified.", err.Error()) + } else { + require.Equal(t, "Error loading config file http:##www.example.com.conf: open http:##www.example.com.conf: no such file or directory", err.Error()) + } +} + +func TestConfig_ParserInterfaceNewFormat(t *testing.T) { + 
formats := []string{ + "collectd", + "csv", + "dropwizard", + "form_urlencoded", + "graphite", + "grok", + "influx", + "json", + "json_v2", + "logfmt", + "nagios", + "prometheus", + "prometheusremotewrite", + "value", + "wavefront", + "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf", + } + + c := NewConfig() + require.NoError(t, c.LoadConfig("./testdata/parsers_new.toml")) + require.Len(t, c.Inputs, len(formats)) + + cfg := parsers.Config{ + CSVHeaderRowCount: 42, + DropwizardTagPathsMap: make(map[string]string), + GrokPatterns: []string{"%{COMBINED_LOG_FORMAT}"}, + JSONStrict: true, + MetricName: "parser_test_new", + } + + override := map[string]struct { + param map[string]interface{} + mask []string + }{ + "csv": { + param: map[string]interface{}{ + "HeaderRowCount": cfg.CSVHeaderRowCount, + }, + mask: []string{"TimeFunc"}, + }, + "xpath_protobuf": { + param: map[string]interface{}{ + "ProtobufMessageDef": "testdata/addressbook.proto", + "ProtobufMessageType": "addressbook.AddressBook", + }, + }, + } + + expected := make([]telegraf.Parser, 0, len(formats)) + for _, format := range formats { + formatCfg := &cfg + formatCfg.DataFormat = format + + logger := models.NewLogger("parsers", format, cfg.MetricName) + + // Try with the new format + if creator, found := parsers.Parsers[format]; found { + t.Logf("using new format parser for %q...", format) + parserNew := creator(formatCfg.MetricName) + if settings, found := override[format]; found { + s := reflect.Indirect(reflect.ValueOf(parserNew)) + for key, value := range settings.param { + v := reflect.ValueOf(value) + s.FieldByName(key).Set(v) + } + } + models.SetLoggerOnPlugin(parserNew, logger) + if p, ok := parserNew.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + expected = append(expected, parserNew) + continue + } + + // Try with the old format + parserOld, err := parsers.NewParser(formatCfg) + if err == nil { + t.Logf("using old format parser for %q...", format) + models.SetLoggerOnPlugin(parserOld, logger) + if p, ok := parserOld.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + expected = append(expected, parserOld) + continue + } + require.Containsf(t, err.Error(), "invalid data format:", "setup %q failed: %v", format, err) + require.Failf(t, "%q neither found in old nor new format", format) + } + require.Len(t, expected, len(formats)) + + actual := make([]interface{}, 0) + generated := make([]interface{}, 0) + for _, plugin := range c.Inputs { + input, ok := plugin.Input.(*MockupInputPluginParserNew) + require.True(t, ok) + // Get the parser set with 'SetParser()' + if p, ok := input.Parser.(*models.RunningParser); ok { + require.NoError(t, p.Init()) + actual = append(actual, p.Parser) + } else { + actual = append(actual, input.Parser) + } + // Get the parser set with 'SetParserFunc()' + g, err := input.ParserFunc() + require.NoError(t, err) + if rp, ok := g.(*models.RunningParser); ok { + generated = append(generated, rp.Parser) + } else { + generated = append(generated, g) + } + } + require.Len(t, actual, len(formats)) + + for i, format := range formats { + // Determine the underlying type of the parser + stype := reflect.Indirect(reflect.ValueOf(expected[i])).Interface() + // Ignore all unexported fields and fields not relevant for functionality + options := []cmp.Option{ + cmpopts.IgnoreUnexported(stype), + cmpopts.IgnoreTypes(sync.Mutex{}), + cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}), + } + if settings, found := override[format]; found { + options = append(options, 
cmpopts.IgnoreFields(stype, settings.mask...)) + } + + // Do a manual comparision as require.EqualValues will also work on unexported fields + // that cannot be cleared or ignored. + diff := cmp.Diff(expected[i], actual[i], options...) + require.Emptyf(t, diff, "Difference in SetParser() for %q", format) + diff = cmp.Diff(expected[i], generated[i], options...) + require.Emptyf(t, diff, "Difference in SetParserFunc() for %q", format) + } +} + +func TestConfig_ParserInterfaceOldFormat(t *testing.T) { + formats := []string{ + "collectd", + "csv", + "dropwizard", + "form_urlencoded", + "graphite", + "grok", + "influx", + "json", + "json_v2", + "logfmt", + "nagios", + "prometheus", + "prometheusremotewrite", + "value", + "wavefront", + "xml", "xpath_json", "xpath_msgpack", "xpath_protobuf", + } + + c := NewConfig() + require.NoError(t, c.LoadConfig("./testdata/parsers_old.toml")) + require.Len(t, c.Inputs, len(formats)) + + cfg := parsers.Config{ + CSVHeaderRowCount: 42, + DropwizardTagPathsMap: make(map[string]string), + GrokPatterns: []string{"%{COMBINED_LOG_FORMAT}"}, + JSONStrict: true, + MetricName: "parser_test_old", + } + + override := map[string]struct { + param map[string]interface{} + mask []string + }{ + "csv": { + param: map[string]interface{}{ + "HeaderRowCount": cfg.CSVHeaderRowCount, + }, + mask: []string{"TimeFunc"}, + }, + "xpath_protobuf": { + param: map[string]interface{}{ + "ProtobufMessageDef": "testdata/addressbook.proto", + "ProtobufMessageType": "addressbook.AddressBook", + }, + }, + } + + expected := make([]telegraf.Parser, 0, len(formats)) + for _, format := range formats { + formatCfg := &cfg + formatCfg.DataFormat = format + + logger := models.NewLogger("parsers", format, cfg.MetricName) + + // Try with the new format + if creator, found := parsers.Parsers[format]; found { + t.Logf("using new format parser for %q...", format) + parserNew := creator(formatCfg.MetricName) + if settings, found := override[format]; found { + s := reflect.Indirect(reflect.ValueOf(parserNew)) + for key, value := range settings.param { + v := reflect.ValueOf(value) + s.FieldByName(key).Set(v) + } + } + models.SetLoggerOnPlugin(parserNew, logger) + if p, ok := parserNew.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + expected = append(expected, parserNew) + continue + } + + // Try with the old format + parserOld, err := parsers.NewParser(formatCfg) + if err == nil { + t.Logf("using old format parser for %q...", format) + models.SetLoggerOnPlugin(parserOld, logger) + if p, ok := parserOld.(telegraf.Initializer); ok { + require.NoError(t, p.Init()) + } + expected = append(expected, parserOld) + continue + } + require.Containsf(t, err.Error(), "invalid data format:", "setup %q failed: %v", format, err) + require.Failf(t, "%q neither found in old nor new format", format) + } + require.Len(t, expected, len(formats)) + + actual := make([]interface{}, 0) + generated := make([]interface{}, 0) + for _, plugin := range c.Inputs { + input, ok := plugin.Input.(*MockupInputPluginParserOld) + require.True(t, ok) + // Get the parser set with 'SetParser()' + if p, ok := input.Parser.(*models.RunningParser); ok { + require.NoError(t, p.Init()) + actual = append(actual, p.Parser) + } else { + actual = append(actual, input.Parser) + } + // Get the parser set with 'SetParserFunc()' + g, err := input.ParserFunc() + require.NoError(t, err) + if rp, ok := g.(*models.RunningParser); ok { + generated = append(generated, rp.Parser) + } else { + generated = append(generated, g) + } + } + 
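
The comparison loops in both parser tests depend on go-cmp options to skip unexported state, mutexes, and injected loggers that would otherwise make a deep comparison panic or spuriously fail. A minimal illustration of the technique, assuming go-cmp is on the module path (the parser struct below is a stand-in, not a real telegraf parser):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// parser mixes exported configuration with unexported runtime state,
// like the real parsers compared in the tests above.
type parser struct {
	Name string
	mu   sync.Mutex // functionally irrelevant for equality
}

func main() {
	a := &parser{Name: "influx"}
	b := &parser{Name: "influx"}

	// Without IgnoreUnexported, cmp.Diff panics on the private mutex
	// field; with it, only the exported configuration is compared.
	opts := []cmp.Option{cmpopts.IgnoreUnexported(parser{})}
	if diff := cmp.Diff(a, b, opts...); diff != "" {
		fmt.Println("unexpected difference:", diff)
		return
	}
	fmt.Println("parsers are equivalent")
}
```
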
require.Len(t, actual, len(formats)) + + for i, format := range formats { + // Determine the underlying type of the parser + stype := reflect.Indirect(reflect.ValueOf(expected[i])).Interface() + // Ignore all unexported fields and fields not relevant for functionality + options := []cmp.Option{ + cmpopts.IgnoreUnexported(stype), + cmpopts.IgnoreTypes(sync.Mutex{}), + cmpopts.IgnoreInterfaces(struct{ telegraf.Logger }{}), + } + if settings, found := override[format]; found { + options = append(options, cmpopts.IgnoreFields(stype, settings.mask...)) + } + + // Do a manual comparison as require.EqualValues will also work on unexported fields + // that cannot be cleared or ignored. + diff := cmp.Diff(expected[i], actual[i], options...) + require.Emptyf(t, diff, "Difference in SetParser() for %q", format) + diff = cmp.Diff(expected[i], generated[i], options...) + require.Emptyf(t, diff, "Difference in SetParserFunc() for %q", format) + } +} + +/*** Mockup INPUT plugin for (old) parser testing to avoid cyclic dependencies ***/ +type MockupInputPluginParserOld struct { + Parser parsers.Parser + ParserFunc parsers.ParserFunc +} + +func (m *MockupInputPluginParserOld) SampleConfig() string { return "Mockup old parser test plugin" } +func (m *MockupInputPluginParserOld) Gather(acc telegraf.Accumulator) error { return nil } +func (m *MockupInputPluginParserOld) SetParser(parser parsers.Parser) { m.Parser = parser } +func (m *MockupInputPluginParserOld) SetParserFunc(f parsers.ParserFunc) { m.ParserFunc = f } + +/*** Mockup INPUT plugin for (new) parser testing to avoid cyclic dependencies ***/ +type MockupInputPluginParserNew struct { + Parser telegraf.Parser + ParserFunc telegraf.ParserFunc +} + +func (m *MockupInputPluginParserNew) SampleConfig() string { return "Mockup new parser test plugin" } +func (m *MockupInputPluginParserNew) Gather(acc telegraf.Accumulator) error { return nil } +func (m *MockupInputPluginParserNew) SetParser(parser telegraf.Parser) { m.Parser = parser } +func (m *MockupInputPluginParserNew) SetParserFunc(f telegraf.ParserFunc) { m.ParserFunc = f } + +/*** Mockup INPUT plugin for testing to avoid cyclic dependencies ***/ +type MockupInputPlugin struct { + Servers []string `toml:"servers"` + Methods []string `toml:"methods"` + Timeout Duration `toml:"timeout"` + ReadTimeout Duration `toml:"read_timeout"` + WriteTimeout Duration `toml:"write_timeout"` + MaxBodySize Size `toml:"max_body_size"` + Paths []string `toml:"paths"` + Port int `toml:"port"` + Command string + PidFile string + Log telegraf.Logger `toml:"-"` + tls.ServerConfig + + parser telegraf.Parser +} + +func (m *MockupInputPlugin) SampleConfig() string { return "Mockup test input plugin" } +func (m *MockupInputPlugin) Gather(acc telegraf.Accumulator) error { return nil } +func (m *MockupInputPlugin) SetParser(parser telegraf.Parser) { m.parser = parser } + +/*** Mockup OUTPUT plugin for testing to avoid cyclic dependencies ***/ +type MockupOuputPlugin struct { + URL string `toml:"url"` + Headers map[string]string `toml:"headers"` + Scopes []string `toml:"scopes"` + NamespacePrefix string `toml:"namespace_prefix"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig +} + +func (m *MockupOuputPlugin) Connect() error { return nil } +func (m *MockupOuputPlugin) Close() error { return nil } +func (m *MockupOuputPlugin) SampleConfig() string { return "Mockup test output plugin" } +func (m *MockupOuputPlugin) Write(metrics []telegraf.Metric) error { return nil } + +// Register the mockup plugin on loading +func init() { +
// Register the mockup input plugin for the required names + inputs.Add("parser_test_new", func() telegraf.Input { return &MockupInputPluginParserNew{} }) + inputs.Add("parser_test_old", func() telegraf.Input { return &MockupInputPluginParserOld{} }) + inputs.Add("exec", func() telegraf.Input { return &MockupInputPlugin{Timeout: Duration(time.Second * 5)} }) + inputs.Add("http_listener_v2", func() telegraf.Input { return &MockupInputPlugin{} }) + inputs.Add("memcached", func() telegraf.Input { return &MockupInputPlugin{} }) + inputs.Add("procstat", func() telegraf.Input { return &MockupInputPlugin{} }) + + // Register the mockup output plugin for the required names + outputs.Add("azure_monitor", func() telegraf.Output { return &MockupOuputPlugin{NamespacePrefix: "Telegraf/"} }) + outputs.Add("http", func() telegraf.Output { return &MockupOuputPlugin{} }) } diff --git a/config/deprecation.go b/config/deprecation.go new file mode 100644 index 0000000000000..f8c13904b5a3a --- /dev/null +++ b/config/deprecation.go @@ -0,0 +1,330 @@ +package config + +import ( + "fmt" + "log" //nolint:revive // log is ok here as the logging facility is not set up yet + "reflect" + "sort" + "strings" + + "github.com/coreos/go-semver/semver" + "github.com/fatih/color" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/plugins/aggregators" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/processors" +) + +// deprecationInfo contains all important information to describe a deprecated entity +type deprecationInfo struct { + // Name of the plugin or plugin option + Name string + // LogLevel is the level of deprecation which currently corresponds to a log-level + LogLevel telegraf.Escalation + info telegraf.DeprecationInfo +} + +func (di *deprecationInfo) determineEscalation(telegrafVersion *semver.Version) error { + di.LogLevel = telegraf.None + if di.info.Since == "" { + return nil + } + + since, err := semver.NewVersion(di.info.Since) + if err != nil { + return fmt.Errorf("cannot parse 'since' version %q: %v", di.info.Since, err) + } + + var removal *semver.Version + if di.info.RemovalIn != "" { + removal, err = semver.NewVersion(di.info.RemovalIn) + if err != nil { + return fmt.Errorf("cannot parse 'removal' version %q: %v", di.info.RemovalIn, err) + } + } else { + removal = &semver.Version{Major: since.Major} + removal.BumpMajor() + di.info.RemovalIn = removal.String() + } + + // Drop potential pre-release tags + version := semver.Version{ + Major: telegrafVersion.Major, + Minor: telegrafVersion.Minor, + Patch: telegrafVersion.Patch, + } + if !version.LessThan(*removal) { + di.LogLevel = telegraf.Error + } else if !version.LessThan(*since) { + di.LogLevel = telegraf.Warn + } + return nil +} + +// pluginDeprecationInfo holds all information about a deprecated plugin or its options +type pluginDeprecationInfo struct { + deprecationInfo + + // Options deprecated for this plugin + Options []deprecationInfo +} + +func (c *Config) incrementPluginDeprecations(category string) { + newcounts := []int64{1, 0} + if counts, found := c.Deprecations[category]; found { + newcounts = []int64{counts[0] + 1, counts[1]} + } + c.Deprecations[category] = newcounts +} + +func (c *Config) incrementPluginOptionDeprecations(category string) { + newcounts := []int64{0, 1} + if counts, found := c.Deprecations[category]; found { + newcounts = []int64{counts[0], counts[1] + 1} + } + 
c.Deprecations[category] = newcounts +} + +func (c *Config) collectDeprecationInfo(category, name string, plugin interface{}, all bool) pluginDeprecationInfo { + info := pluginDeprecationInfo{ + deprecationInfo: deprecationInfo{ + Name: category + "." + name, + LogLevel: telegraf.None, + }, + } + + // First check if the whole plugin is deprecated + switch category { + case "aggregators": + if pi, deprecated := aggregators.Deprecations[name]; deprecated { + info.deprecationInfo.info = pi + } + case "inputs": + if pi, deprecated := inputs.Deprecations[name]; deprecated { + info.deprecationInfo.info = pi + } + case "outputs": + if pi, deprecated := outputs.Deprecations[name]; deprecated { + info.deprecationInfo.info = pi + } + case "processors": + if pi, deprecated := processors.Deprecations[name]; deprecated { + info.deprecationInfo.info = pi + } + } + if err := info.determineEscalation(c.version); err != nil { + panic(fmt.Errorf("plugin %q: %v", info.Name, err)) + } + if info.LogLevel != telegraf.None { + c.incrementPluginDeprecations(category) + } + + // Allow checking for names only. + if plugin == nil { + return info + } + + // Check for deprecated options + walkPluginStruct(reflect.ValueOf(plugin), func(field reflect.StructField, value reflect.Value) { + // Try to report only those fields that are set + if !all && value.IsZero() { + return + } + + tags := strings.SplitN(field.Tag.Get("deprecated"), ";", 3) + if len(tags) < 1 || tags[0] == "" { + return + } + optionInfo := deprecationInfo{Name: field.Name} + optionInfo.info.Since = tags[0] + + if len(tags) > 1 { + optionInfo.info.Notice = tags[len(tags)-1] + } + if len(tags) > 2 { + optionInfo.info.RemovalIn = tags[1] + } + if err := optionInfo.determineEscalation(c.version); err != nil { + panic(fmt.Errorf("plugin %q option %q: %v", info.Name, field.Name, err)) + } + + if optionInfo.LogLevel != telegraf.None { + c.incrementPluginOptionDeprecations(category) + } + + // Get the toml field name + option := field.Tag.Get("toml") + if option != "" { + optionInfo.Name = option + } + info.Options = append(info.Options, optionInfo) + }) + + return info +} + +func (c *Config) printUserDeprecation(category, name string, plugin interface{}) error { + info := c.collectDeprecationInfo(category, name, plugin, false) + models.PrintPluginDeprecationNotice(info.LogLevel, info.Name, info.info) + + if info.LogLevel == telegraf.Error { + return fmt.Errorf("plugin deprecated") + } + + // Print deprecated options + deprecatedOptions := make([]string, 0) + for _, option := range info.Options { + models.PrintOptionDeprecationNotice(option.LogLevel, info.Name, option.Name, option.info) + if option.LogLevel == telegraf.Error { + deprecatedOptions = append(deprecatedOptions, option.Name) + } + } + + if len(deprecatedOptions) > 0 { + return fmt.Errorf("plugin options %q deprecated", strings.Join(deprecatedOptions, ",")) + } + + return nil +} + +func (c *Config) CollectDeprecationInfos(inFilter, outFilter, aggFilter, procFilter []string) map[string][]pluginDeprecationInfo { + infos := make(map[string][]pluginDeprecationInfo) + + infos["inputs"] = make([]pluginDeprecationInfo, 0) + for name, creator := range inputs.Inputs { + if len(inFilter) > 0 && !sliceContains(name, inFilter) { + continue + } + + plugin := creator() + info := c.collectDeprecationInfo("inputs", name, plugin, true) + + if info.LogLevel != telegraf.None || len(info.Options) > 0 { + infos["inputs"] = append(infos["inputs"], info) + } + } + + infos["outputs"] = make([]pluginDeprecationInfo, 0) + for 
name, creator := range outputs.Outputs { + if len(outFilter) > 0 && !sliceContains(name, outFilter) { + continue + } + + plugin := creator() + info := c.collectDeprecationInfo("outputs", name, plugin, true) + + if info.LogLevel != telegraf.None || len(info.Options) > 0 { + infos["outputs"] = append(infos["outputs"], info) + } + } + + infos["processors"] = make([]pluginDeprecationInfo, 0) + for name, creator := range processors.Processors { + if len(procFilter) > 0 && !sliceContains(name, procFilter) { + continue + } + + plugin := creator() + info := c.collectDeprecationInfo("processors", name, plugin, true) + + if info.LogLevel != telegraf.None || len(info.Options) > 0 { + infos["processors"] = append(infos["processors"], info) + } + } + + infos["aggregators"] = make([]pluginDeprecationInfo, 0) + for name, creator := range aggregators.Aggregators { + if len(aggFilter) > 0 && !sliceContains(name, aggFilter) { + continue + } + + plugin := creator() + info := c.collectDeprecationInfo("aggregators", name, plugin, true) + + if info.LogLevel != telegraf.None || len(info.Options) > 0 { + infos["aggregators"] = append(infos["aggregators"], info) + } + } + + return infos +} + +func (c *Config) PrintDeprecationList(plugins []pluginDeprecationInfo) { + sort.Slice(plugins, func(i, j int) bool { return plugins[i].Name < plugins[j].Name }) + + for _, plugin := range plugins { + switch plugin.LogLevel { + case telegraf.Warn, telegraf.Error: + _, _ = fmt.Printf( + " %-40s %-5s since %-5s removal in %-5s %s\n", + plugin.Name, plugin.LogLevel, plugin.info.Since, plugin.info.RemovalIn, plugin.info.Notice, + ) + } + + if len(plugin.Options) < 1 { + continue + } + sort.Slice(plugin.Options, func(i, j int) bool { return plugin.Options[i].Name < plugin.Options[j].Name }) + for _, option := range plugin.Options { + _, _ = fmt.Printf( + " %-40s %-5s since %-5s removal in %-5s %s\n", + plugin.Name+"/"+option.Name, option.LogLevel, option.info.Since, option.info.RemovalIn, option.info.Notice, + ) + } + } +} + +func printHistoricPluginDeprecationNotice(category, name string, info telegraf.DeprecationInfo) { + prefix := "E! " + color.RedString("DeprecationError") + log.Printf( + "%s: Plugin %q deprecated since version %s and removed: %s", + prefix, category+"."+name, info.Since, info.Notice, + ) +} + +// walkPluginStruct iterates over the fields of a structure in depth-first search (to cover nested structures) +// and calls the given function for every visited field. +func walkPluginStruct(value reflect.Value, fn func(f reflect.StructField, fv reflect.Value)) { + v := reflect.Indirect(value) + t := v.Type() + + // Only works on structs + if t.Kind() != reflect.Struct { + return + } + + // Walk over the struct fields and call the given function. If we encounter more complex embedded + // elements (stucts, slices/arrays, maps) we need to descend into those elements as they might + // contain structures nested in the current structure. 
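+ // Note that unexported fields (field.PkgPath != "") are skipped entirely, and for + // slice, array and map fields the callback fires once per element (after descending + // into it) and once more for the container field itself after the switch below.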
+ for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + fieldValue := v.Field(i) + + if field.PkgPath != "" { + continue + } + switch field.Type.Kind() { + case reflect.Struct: + walkPluginStruct(fieldValue, fn) + case reflect.Array, reflect.Slice: + for j := 0; j < fieldValue.Len(); j++ { + element := fieldValue.Index(j) + // The array might contain structs + walkPluginStruct(element, fn) + fn(field, element) + } + case reflect.Map: + iter := fieldValue.MapRange() + for iter.Next() { + element := iter.Value() + // The map might contain structs + walkPluginStruct(element, fn) + fn(field, element) + } + } + fn(field, fieldValue) + } +} diff --git a/config/printer/agent.conf b/config/printer/agent.conf new file mode 100644 index 0000000000000..99bbb2d57597e --- /dev/null +++ b/config/printer/agent.conf @@ -0,0 +1,90 @@ +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Collection offset is used to shift the collection by the given amount. + ## This can be be used to avoid many plugins querying constraint devices + ## at the same time by manually scheduling them in time. + # collection_offset = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## Collected metrics are rounded to the precision specified. Precision is + ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + ## + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s: + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + precision = "0s" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. 
If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0h" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. + ## Example: America/Chicago + # log_with_timezone = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false + + ## Method of translating SNMP objects. Can be "netsnmp" which + ## translates by calling external programs snmptranslate and snmptable, + ## or "gosmi" which translates using the built-in gosmi library. + # snmp_translator = "netsnmp" diff --git a/config/printer/printer.go b/config/printer/printer.go new file mode 100644 index 0000000000000..f1521d1351e3a --- /dev/null +++ b/config/printer/printer.go @@ -0,0 +1,381 @@ +package printer + +import ( + _ "embed" + "fmt" + "sort" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/processors" +) + +var ( + // Default sections + sectionDefaults = []string{"global_tags", "agent", "outputs", + "processors", "aggregators", "inputs"} + + // Default input plugins + inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", + "processes", "disk", "diskio"} + + // Default output plugins + outputDefaults = []string{"influxdb"} +) + +var header = `# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + +` +var globalTagsConfig = ` +# Global tags can be specified here in key="value" format. +[global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + +` + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the agentConfig data. 
+//go:embed agent.conf +var agentConfig string + +var outputHeader = ` +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +` + +var processorHeader = ` +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +` + +var aggregatorHeader = ` +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + +` + +var inputHeader = ` +############################################################################### +# INPUT PLUGINS # +############################################################################### + +` + +var serviceInputHeader = ` +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + +` + +func sliceContains(name string, list []string) bool { + for _, b := range list { + if b == name { + return true + } + } + return false +} + +// PrintSampleConfig prints the sample config +func PrintSampleConfig( + sectionFilters []string, + inputFilters []string, + outputFilters []string, + aggregatorFilters []string, + processorFilters []string, +) { + // print headers + fmt.Print(header) + + if len(sectionFilters) == 0 { + sectionFilters = sectionDefaults + } + printFilteredGlobalSections(sectionFilters) + + // print output plugins + if sliceContains("outputs", sectionFilters) { + if len(outputFilters) != 0 { + if len(outputFilters) >= 3 && outputFilters[1] != "none" { + fmt.Print(outputHeader) + } + printFilteredOutputs(outputFilters, false) + } else { + fmt.Print(outputHeader) + printFilteredOutputs(outputDefaults, false) + // Print non-default outputs, commented + var pnames []string + for pname := range outputs.Outputs { + if !sliceContains(pname, outputDefaults) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + printFilteredOutputs(pnames, true) + } + } + + // print processor plugins + if sliceContains("processors", sectionFilters) { + if len(processorFilters) != 0 { + if len(processorFilters) >= 3 && processorFilters[1] != "none" { + fmt.Print(processorHeader) + } + printFilteredProcessors(processorFilters, false) + } else { + fmt.Print(processorHeader) + pnames := []string{} + for pname := range processors.Processors { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + printFilteredProcessors(pnames, true) + } + } + + // print aggregator plugins + if sliceContains("aggregators", sectionFilters) { + if len(aggregatorFilters) != 0 { + if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" { + fmt.Print(aggregatorHeader) + } + printFilteredAggregators(aggregatorFilters, false) + } else { + fmt.Print(aggregatorHeader) + pnames := []string{} + for pname := range aggregators.Aggregators { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + printFilteredAggregators(pnames, true) + } + } + + // print input plugins + if sliceContains("inputs", sectionFilters) { + if len(inputFilters) != 0 { + if len(inputFilters) >= 3 && inputFilters[1] != "none" { + fmt.Print(inputHeader) + } + printFilteredInputs(inputFilters, false) + } else { + fmt.Print(inputHeader) + printFilteredInputs(inputDefaults, false) + // Print non-default inputs, 
commented + var pnames []string + for pname := range inputs.Inputs { + if !sliceContains(pname, inputDefaults) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + printFilteredInputs(pnames, true) + } + } +} + +// PluginNameCounts returns a list of sorted plugin names and their count +func PluginNameCounts(plugins []string) []string { + names := make(map[string]int) + for _, plugin := range plugins { + names[plugin]++ + } + + var namecount []string + for name, count := range names { + if count == 1 { + namecount = append(namecount, name) + } else { + namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count)) + } + } + + sort.Strings(namecount) + return namecount +} + +func printFilteredProcessors(processorFilters []string, commented bool) { + // Filter processors + var pnames []string + for pname := range processors.Processors { + if sliceContains(pname, processorFilters) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + + // Print Outputs + for _, pname := range pnames { + creator := processors.Processors[pname] + output := creator() + printConfig(pname, output, "processors", commented, processors.Deprecations[pname]) + } +} + +func printFilteredAggregators(aggregatorFilters []string, commented bool) { + // Filter outputs + var anames []string + for aname := range aggregators.Aggregators { + if sliceContains(aname, aggregatorFilters) { + anames = append(anames, aname) + } + } + sort.Strings(anames) + + // Print Outputs + for _, aname := range anames { + creator := aggregators.Aggregators[aname] + output := creator() + printConfig(aname, output, "aggregators", commented, aggregators.Deprecations[aname]) + } +} + +func printFilteredInputs(inputFilters []string, commented bool) { + // Filter inputs + var pnames []string + for pname := range inputs.Inputs { + if sliceContains(pname, inputFilters) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + + // cache service inputs to print them at the end + servInputs := make(map[string]telegraf.ServiceInput) + // for alphabetical looping: + servInputNames := []string{} + + // Print Inputs + for _, pname := range pnames { + // Skip inputs that are registered twice for backward compatibility + switch pname { + case "cisco_telemetry_gnmi", "io", "KNXListener": + continue + } + creator := inputs.Inputs[pname] + input := creator() + + if p, ok := input.(telegraf.ServiceInput); ok { + servInputs[pname] = p + servInputNames = append(servInputNames, pname) + continue + } + + printConfig(pname, input, "inputs", commented, inputs.Deprecations[pname]) + } + + // Print Service Inputs + if len(servInputs) == 0 { + return + } + sort.Strings(servInputNames) + + fmt.Print(serviceInputHeader) + for _, name := range servInputNames { + printConfig(name, servInputs[name], "inputs", commented, inputs.Deprecations[name]) + } +} + +func printFilteredOutputs(outputFilters []string, commented bool) { + // Filter outputs + var onames []string + for oname := range outputs.Outputs { + if sliceContains(oname, outputFilters) { + onames = append(onames, oname) + } + } + sort.Strings(onames) + + // Print Outputs + for _, oname := range onames { + creator := outputs.Outputs[oname] + output := creator() + printConfig(oname, output, "outputs", commented, outputs.Deprecations[oname]) + } +} + +func printFilteredGlobalSections(sectionFilters []string) { + if sliceContains("global_tags", sectionFilters) { + fmt.Print(globalTagsConfig) + } + + if sliceContains("agent", sectionFilters) { + fmt.Print(agentConfig) + } +} + +func 
printConfig(name string, p telegraf.PluginDescriber, op string, commented bool, di telegraf.DeprecationInfo) { + comment := "" + if commented { + comment = "# " + } + + if di.Since != "" { + removalNote := "" + if di.RemovalIn != "" { + removalNote = " and will be removed in " + di.RemovalIn + } + fmt.Printf("\n%s ## DEPRECATED: The '%s' plugin is deprecated in version %s%s, %s.", comment, name, di.Since, removalNote, di.Notice) + } + + config := p.SampleConfig() + if config == "" { + fmt.Printf("\n#[[%s.%s]]", op, name) + fmt.Printf("\n%s # no configuration\n\n", comment) + } else { + lines := strings.Split(config, "\n") + fmt.Print("\n") + for i, line := range lines { + if i == len(lines)-1 { + fmt.Print("\n") + continue + } + fmt.Print(strings.TrimRight(comment+line, " ") + "\n") + } + } +} + +// PrintInputConfig prints the config usage of a single input. +func PrintInputConfig(name string) error { + creator, ok := inputs.Inputs[name] + if !ok { + return fmt.Errorf("input %s not found", name) + } + + printConfig(name, creator(), "inputs", false, inputs.Deprecations[name]) + return nil +} + +// PrintOutputConfig prints the config usage of a single output. +func PrintOutputConfig(name string) error { + creator, ok := outputs.Outputs[name] + if !ok { + return fmt.Errorf("output %s not found", name) + } + + printConfig(name, creator(), "outputs", false, outputs.Deprecations[name]) + return nil +} diff --git a/config/testdata/addressbook.proto b/config/testdata/addressbook.proto new file mode 100644 index 0000000000000..3ed0eb566a987 --- /dev/null +++ b/config/testdata/addressbook.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package addressbook; + +message Person { + string name = 1; + int32 id = 2; // Unique ID number for this person. + string email = 3; + uint32 age = 4; + + enum PhoneType { + MOBILE = 0; + HOME = 1; + WORK = 2; + } + + message PhoneNumber { + string number = 1; + PhoneType type = 2; + } + + repeated PhoneNumber phones = 5; +} + +message AddressBook { + repeated Person people = 1; + repeated string tags = 2; +} diff --git a/config/testdata/azure_monitor.toml b/config/testdata/azure_monitor.toml new file mode 100644 index 0000000000000..6151bea9020c5 --- /dev/null +++ b/config/testdata/azure_monitor.toml @@ -0,0 +1,4 @@ +[[outputs.azure_monitor]] + +[[outputs.azure_monitor]] + namespace_prefix = "" diff --git a/config/testdata/parsers_new.toml b/config/testdata/parsers_new.toml new file mode 100644 index 0000000000000..515d6924339c5 --- /dev/null +++ b/config/testdata/parsers_new.toml @@ -0,0 +1,60 @@ +[[inputs.parser_test_new]] + data_format = "collectd" + +[[inputs.parser_test_new]] + data_format = "csv" + csv_header_row_count = 42 + +[[inputs.parser_test_new]] + data_format = "dropwizard" + +[[inputs.parser_test_new]] + data_format = "form_urlencoded" + +[[inputs.parser_test_new]] + data_format = "graphite" + +[[inputs.parser_test_new]] + data_format = "grok" + grok_patterns = ["%{COMBINED_LOG_FORMAT}"] + +[[inputs.parser_test_new]] + data_format = "influx" + +[[inputs.parser_test_new]] + data_format = "json" + +[[inputs.parser_test_new]] + data_format = "json_v2" + +[[inputs.parser_test_new]] + data_format = "logfmt" + +[[inputs.parser_test_new]] + data_format = "nagios" + +[[inputs.parser_test_new]] + data_format = "prometheus" + +[[inputs.parser_test_new]] + data_format = "prometheusremotewrite" + +[[inputs.parser_test_new]] + data_format = "value" + +[[inputs.parser_test_new]] + data_format = "wavefront" + +[[inputs.parser_test_new]] + data_format = "xml" + 
+[[inputs.parser_test_new]] + data_format = "xpath_json" + +[[inputs.parser_test_new]] + data_format = "xpath_msgpack" + +[[inputs.parser_test_new]] + data_format = "xpath_protobuf" + xpath_protobuf_file = "testdata/addressbook.proto" + xpath_protobuf_type = "addressbook.AddressBook" diff --git a/config/testdata/parsers_old.toml b/config/testdata/parsers_old.toml new file mode 100644 index 0000000000000..6a0b946a7ee51 --- /dev/null +++ b/config/testdata/parsers_old.toml @@ -0,0 +1,60 @@ +[[inputs.parser_test_old]] + data_format = "collectd" + +[[inputs.parser_test_old]] + data_format = "csv" + csv_header_row_count = 42 + +[[inputs.parser_test_old]] + data_format = "dropwizard" + +[[inputs.parser_test_old]] + data_format = "form_urlencoded" + +[[inputs.parser_test_old]] + data_format = "graphite" + +[[inputs.parser_test_old]] + data_format = "grok" + grok_patterns = ["%{COMBINED_LOG_FORMAT}"] + +[[inputs.parser_test_old]] + data_format = "influx" + +[[inputs.parser_test_old]] + data_format = "json" + +[[inputs.parser_test_old]] + data_format = "json_v2" + +[[inputs.parser_test_old]] + data_format = "logfmt" + +[[inputs.parser_test_old]] + data_format = "nagios" + +[[inputs.parser_test_old]] + data_format = "prometheus" + +[[inputs.parser_test_old]] + data_format = "prometheusremotewrite" + +[[inputs.parser_test_old]] + data_format = "value" + +[[inputs.parser_test_old]] + data_format = "wavefront" + +[[inputs.parser_test_old]] + data_format = "xml" + +[[inputs.parser_test_old]] + data_format = "xpath_json" + +[[inputs.parser_test_old]] + data_format = "xpath_msgpack" + +[[inputs.parser_test_old]] + data_format = "xpath_protobuf" + xpath_protobuf_file = "testdata/addressbook.proto" + xpath_protobuf_type = "addressbook.AddressBook" diff --git a/config/testdata/single_plugin_env_vars.toml b/config/testdata/single_plugin_env_vars.toml index b1f71ea8adb78..de7c47cf72c43 100644 --- a/config/testdata/single_plugin_env_vars.toml +++ b/config/testdata/single_plugin_env_vars.toml @@ -1,3 +1,18 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. 
For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + [[inputs.memcached]] servers = ["$MY_TEST_SERVER"] namepass = ["metricname1", "ip_${MY_TEST_SERVER}_name"] diff --git a/config/testdata/special_types.key b/config/testdata/special_types.key new file mode 100644 index 0000000000000..25db3c98dd19a --- /dev/null +++ b/config/testdata/special_types.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIFYI4Hm+jRW3OC3zvoWDaCig6E7X0Ql9l8elHPU3e5+toAoGCCqGSM49 +AwEHoUQDQgAEGOw1XQ84Ai3GTZJ5o5u1yTFgA3VLZTTT0oHol06LRj5Md3oRy0MQ +QO5OhsAGGz16SYcPHf77aZmf2Of6ixYaLQ== +-----END EC PRIVATE KEY----- diff --git a/config/testdata/special_types.pem b/config/testdata/special_types.pem new file mode 100644 index 0000000000000..8097a52fc6cf4 --- /dev/null +++ b/config/testdata/special_types.pem @@ -0,0 +1,11 @@ +-----BEGIN CERTIFICATE----- +MIIBjTCCATOgAwIBAgIRALJ1hlgDYCh5dWfr6tdrBEYwCgYIKoZIzj0EAwIwFDES +MBAGA1UEAxMJbG9jYWxob3N0MB4XDTIyMDExMjA3NTgyMloXDTIyMDExMzA3NTgy +MlowFDESMBAGA1UEAxMJbG9jYWxob3N0MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcD +QgAEGOw1XQ84Ai3GTZJ5o5u1yTFgA3VLZTTT0oHol06LRj5Md3oRy0MQQO5OhsAG +Gz16SYcPHf77aZmf2Of6ixYaLaNmMGQwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUuKpGXAb1DaVSffJ/xuF6 +FE31CC8wFAYDVR0RBA0wC4IJbG9jYWxob3N0MAoGCCqGSM49BAMCA0gAMEUCIHCb +m2phe189gftRke2Mo45lDsEAGaXsjA4lO/IOMo5lAiEA5k2X0bQfFhSfAcZPFtDI +iUwvC9SD3+CnzkP35O0jo+c= +-----END CERTIFICATE----- diff --git a/config/testdata/special_types.toml b/config/testdata/special_types.toml index 24b73ae45f1d3..b38773f28e963 100644 --- a/config/testdata/special_types.toml +++ b/config/testdata/special_types.toml @@ -1,9 +1,8 @@ [[inputs.http_listener_v2]] write_timeout = "1s" max_body_size = "1MiB" - tls_cert = """ -/path/to/my/cert -""" - tls_key = ''' -/path/to/my/key -''' + paths = [ """ +/path/ +""" ] + tls_cert = """./testdata/special_types.pem""" + tls_key = '''./testdata/special_types.key''' diff --git a/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml index f71b98206e5e8..6967d6e862277 100644 --- a/config/testdata/telegraf-agent.toml +++ b/config/testdata/telegraf-agent.toml @@ -176,14 +176,6 @@ # If no servers are specified, then 127.0.0.1 is used as the host and 4020 as the port. 
servers = ["127.0.0.1:4021"] -# Read metrics from local Lustre service on OST, MDS -[[inputs.lustre2]] - # An array of /proc globs to search for Lustre stats - # If not specified, the default will work on Lustre 2.5.x - # - # ost_procfiles = ["/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats"] - # mds_procfiles = ["/proc/fs/lustre/mdt/*/md_stats"] - # Read metrics about memory usage [[inputs.mem]] # no configuration diff --git a/config/testdata/wrong_cert_path.toml b/config/testdata/wrong_cert_path.toml new file mode 100644 index 0000000000000..99d359f1ce3d3 --- /dev/null +++ b/config/testdata/wrong_cert_path.toml @@ -0,0 +1,5 @@ +[[inputs.http_listener_v2]] + write_timeout = "1s" + max_body_size = "1MiB" + tls_cert = "invalid.pem" + tls_key = "invalid.key" diff --git a/config/types.go b/config/types.go index 7c1c50b9e3690..227b1e18d27a3 100644 --- a/config/types.go +++ b/config/types.go @@ -1,8 +1,8 @@ package config import ( - "bytes" "strconv" + "strings" "time" "github.com/alecthomas/units" @@ -16,40 +16,44 @@ type Size int64 // UnmarshalTOML parses the duration from the TOML config file func (d *Duration) UnmarshalTOML(b []byte) error { - var err error - b = bytes.Trim(b, `'`) - - // see if we can directly convert it - dur, err := time.ParseDuration(string(b)) - if err == nil { - *d = Duration(dur) - return nil - } - - // Parse string duration, ie, "1s" - if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 { - dur, err := time.ParseDuration(uq) - if err == nil { - *d = Duration(dur) - return nil - } - } + // convert to string + durStr := string(b) + // Value is a TOML number (e.g. 3, 10, 3.5) // First try parsing as integer seconds - sI, err := strconv.ParseInt(string(b), 10, 64) + sI, err := strconv.ParseInt(durStr, 10, 64) if err == nil { dur := time.Second * time.Duration(sI) *d = Duration(dur) return nil } // Second try parsing as float seconds - sF, err := strconv.ParseFloat(string(b), 64) + sF, err := strconv.ParseFloat(durStr, 64) if err == nil { dur := time.Second * time.Duration(sF) *d = Duration(dur) return nil } + // Finally, try value is a TOML string (e.g. "3s", 3s) or literal (e.g. '3s') + durStr = strings.ReplaceAll(durStr, "'", "") + durStr = strings.ReplaceAll(durStr, "\"", "") + if durStr == "" { + durStr = "0s" + } + // special case: logging interval had a default of 0d, which silently + // failed, but in order to prevent issues with default configs that had + // uncommented the option, change it from zero days to zero hours. 
+ if durStr == "0d" { + durStr = "0h" + } + + dur, err := time.ParseDuration(durStr) + if err != nil { + return err + } + + *d = Duration(dur) return nil } diff --git a/config/types_test.go b/config/types_test.go index 8e35de6111c82..7fe445d87ef91 100644 --- a/config/types_test.go +++ b/config/types_test.go @@ -29,3 +29,62 @@ func TestConfigDuration(t *testing.T) { require.Equal(t, p.MaxParallelLookups, 13) require.Equal(t, p.Ordered, true) } + +func TestDuration(t *testing.T) { + var d config.Duration + + require.NoError(t, d.UnmarshalTOML([]byte(`"1s"`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`1s`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`'1s'`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`10`))) + require.Equal(t, 10*time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`1.5`))) + require.Equal(t, time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(``))) + require.Equal(t, 0*time.Second, time.Duration(d)) + + d = config.Duration(0) + require.NoError(t, d.UnmarshalTOML([]byte(`""`))) + require.Equal(t, 0*time.Second, time.Duration(d)) + + require.Error(t, d.UnmarshalTOML([]byte(`"1"`))) // string missing unit + require.Error(t, d.UnmarshalTOML([]byte(`'2'`))) // string missing unit + require.Error(t, d.UnmarshalTOML([]byte(`'ns'`))) // string missing time + require.Error(t, d.UnmarshalTOML([]byte(`'us'`))) // string missing time +} + +func TestSize(t *testing.T) { + var s config.Size + + require.NoError(t, s.UnmarshalTOML([]byte(`"1B"`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`1`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`'1'`))) + require.Equal(t, int64(1), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`"1GB"`))) + require.Equal(t, int64(1000*1000*1000), int64(s)) + + s = config.Size(0) + require.NoError(t, s.UnmarshalTOML([]byte(`"12GiB"`))) + require.Equal(t, int64(12*1024*1024*1024), int64(s)) +} diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 4e94b8f012eab..0000000000000 --- a/docker-compose.yml +++ /dev/null @@ -1,105 +0,0 @@ -version: '3' - -services: - aerospike: - image: aerospike/aerospike-server:4.9.0.11 - ports: - - "3000:3000" - - "3001:3001" - - "3002:3002" - - "3003:3003" - zookeeper: - image: wurstmeister/zookeeper - environment: - - JAVA_OPTS="-Xms256m -Xmx256m" - ports: - - "2181:2181" - kafka: - image: wurstmeister/kafka - environment: - - KAFKA_ADVERTISED_HOST_NAME=localhost - - KAFKA_ADVERTISED_PORT=9092 - - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 - - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 - - JAVA_OPTS="-Xms256m -Xmx256m" - ports: - - "9092:9092" - depends_on: - - zookeeper - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0 - environment: - - "ES_JAVA_OPTS=-Xms256m -Xmx256m" - - discovery.type=single-node - - xpack.security.enabled=false - ports: - - "9200:9200" - mysql: - image: mysql - environment: - - MYSQL_ALLOW_EMPTY_PASSWORD=yes - ports: - - "3306:3306" - memcached: - image: memcached - ports: - - "11211:11211" - pgbouncer: - image: mbentley/ubuntu-pgbouncer - environment: - - 
PG_ENV_POSTGRESQL_USER=pgbouncer - - PG_ENV_POSTGRESQL_PASS=pgbouncer - ports: - - "6432:6432" - postgres: - image: postgres:alpine - environment: - - POSTGRES_HOST_AUTH_METHOD=trust - ports: - - "5432:5432" - rabbitmq: - image: rabbitmq:3-management - ports: - - "15672:15672" - - "5672:5672" - redis: - image: redis:alpine - ports: - - "6379:6379" - nsq: - image: nsqio/nsq - ports: - - "4150:4150" - command: "/nsqd" - mqtt: - image: ncarlier/mqtt - ports: - - "1883:1883" - riemann: - image: stealthly/docker-riemann - ports: - - "5555:5555" - nats: - image: nats - ports: - - "4222:4222" - openldap: - image: cobaugh/openldap-alpine - environment: - - SLAPD_CONFIG_ROOTDN="cn=manager,cn=config" - - SLAPD_CONFIG_ROOTPW="secret" - ports: - - "389:389" - - "636:636" - crate: - image: crate/crate - ports: - - "4200:4200" - - "4230:4230" - - "6543:5432" - command: - - crate - - -Cnetwork.host=0.0.0.0 - - -Ctransport.host=localhost - environment: - - CRATE_HEAP_SIZE=128m diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md index a5930a3e0df6d..bd75e4da28071 100644 --- a/docs/AGGREGATORS.md +++ b/docs/AGGREGATORS.md @@ -1,132 +1,124 @@ -### Aggregator Plugins +# Aggregator Plugins This section is for developers who want to create a new aggregator plugin. -### Aggregator Plugin Guidelines +## Aggregator Plugin Guidelines * A aggregator must conform to the [telegraf.Aggregator][] interface. * Aggregators should call `aggregators.Add` in their `init` function to register themselves. See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. -- The `SampleConfig` function should return valid toml that describes how the - plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. -* The `Description` function should say in one line what this aggregator does. +* Each plugin requires a file called `sample.conf` containing the sample configuration + for the plugin in TOML format. + Please consult the [Sample Config][] page for the latest style guidelines. +* Each plugin `README.md` file should include the `sample.conf` file in a section + describing the configuration by specifying a `toml` section in the form `toml @sample.conf`. The specified file(s) are then injected automatically into the Readme. * The Aggregator plugin will need to keep caches of metrics that have passed through it. This should be done using the builtin `HashID()` function of each metric. * When the `Reset()` function is called, all caches should be cleared. -- Follow the recommended [CodeStyle][]. +* Follow the recommended [Code Style][]. ### Aggregator Plugin Example ```go +//go:generate ../../../tools/readme_config_includer/generator package min // min.go import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/aggregators" + _ "embed" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type Min struct { - // caches for metric fields, names, and tags - fieldCache map[uint64]map[string]float64 - nameCache map[uint64]string - tagCache map[uint64]map[string]string + // caches for metric fields, names, and tags + fieldCache map[uint64]map[string]float64 + nameCache map[uint64]string + tagCache map[uint64]map[string]string } func NewMin() telegraf.Aggregator { - m := &Min{} - m.Reset() - return m -} - -var sampleConfig = ` - ## period is the flush & clear interval of the aggregator. - period = "30s" - ## If true drop_original will drop the original metrics and - ## only send aggregates. - drop_original = false -` - -func (m *Min) Init() error { - return nil + m := &Min{} + m.Reset() + return m } -func (m *Min) SampleConfig() string { - return sampleConfig +func (*Min) SampleConfig() string { + return sampleConfig } -func (m *Min) Description() string { - return "Keep the aggregate min of each metric passing through." +func (m *Min) Init() error { + return nil } func (m *Min) Add(in telegraf.Metric) { - id := in.HashID() - if _, ok := m.nameCache[id]; !ok { - // hit an uncached metric, create caches for first time: - m.nameCache[id] = in.Name() - m.tagCache[id] = in.Tags() - m.fieldCache[id] = make(map[string]float64) - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - m.fieldCache[id][k] = fv - } - } - } else { - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - if _, ok := m.fieldCache[id][k]; !ok { - // hit an uncached field of a cached metric - m.fieldCache[id][k] = fv - continue - } - if fv < m.fieldCache[id][k] { + id := in.HashID() + if _, ok := m.nameCache[id]; !ok { + // hit an uncached metric, create caches for first time: + m.nameCache[id] = in.Name() + m.tagCache[id] = in.Tags() + m.fieldCache[id] = make(map[string]float64) + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + m.fieldCache[id][k] = fv + } + } + } else { + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + if _, ok := m.fieldCache[id][k]; !ok { + // hit an uncached field of a cached metric + m.fieldCache[id][k] = fv + continue + } + if fv < m.fieldCache[id][k] { // set new minimum - m.fieldCache[id][k] = fv - } - } - } - } + m.fieldCache[id][k] = fv + } + } + } + } } func (m *Min) Push(acc telegraf.Accumulator) { - for id, _ := range m.nameCache { - fields := map[string]interface{}{} - for k, v := range m.fieldCache[id] { - fields[k+"_min"] = v - } - acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) - } + for id, _ := range m.nameCache { + fields := map[string]interface{}{} + for k, v := range m.fieldCache[id] { + fields[k+"_min"] = v + } + acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) + } } func (m *Min) Reset() { - m.fieldCache = make(map[uint64]map[string]float64) - m.nameCache = make(map[uint64]string) - m.tagCache = make(map[uint64]map[string]string) + m.fieldCache = make(map[uint64]map[string]float64) + m.nameCache = make(map[uint64]string) + m.tagCache = make(map[uint64]map[string]string) } func convert(in interface{}) (float64, bool) { - switch v := in.(type) { - case float64: - return v, true - case int64: - return float64(v), true - default: - return 0, false - } + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + default: + return 0, false + } } func init() { - aggregators.Add("min", func() telegraf.Aggregator { - return NewMin() - }) + aggregators.Add("min", func() telegraf.Aggregator { + return NewMin() + }) } ``` - 
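As a quick illustration of the example above, a minimal test sketch (not part of this patch; it assumes the `MustMetric` and `Accumulator` helpers from `github.com/influxdata/telegraf/testutil`) could exercise the aggregator like this:

```go
package min

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf/testutil"
)

func TestMinKeepsSmallestValue(t *testing.T) {
	m := NewMin()

	// Two samples of the same series; the aggregate keeps the smaller value.
	m.Add(testutil.MustMetric("system", map[string]string{}, map[string]interface{}{"load1": 3.5}, time.Now()))
	m.Add(testutil.MustMetric("system", map[string]string{}, map[string]interface{}{"load1": 1.2}, time.Now()))

	// Push emits the aggregate with a "_min" suffix on each field.
	var acc testutil.Accumulator
	m.Push(&acc)
	acc.AssertContainsFields(t, "system", map[string]interface{}{"load1_min": 1.2})
}
```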
-[telegraf.Aggregator]: https://godoc.org/github.com/influxdata/telegraf#Aggregator -[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig -[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index 7be34aed5cef4..389138cec7a94 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -5,7 +5,7 @@ As of release 1.1.0, Telegraf has the concept of Aggregator and Processor Plugin These plugins sit in-between Input & Output plugins, aggregating and processing metrics as they pass through Telegraf: -``` +```text ┌───────────┐ │ │ │ CPU │───┐ @@ -17,8 +17,8 @@ metrics as they pass through Telegraf: │ Memory │───┤ ┌──▶│ InfluxDB │ │ │ │ │ │ │ └───────────┘ │ ┌─────────────┐ ┌─────────────┐ │ └───────────┘ - │ │ │ │Aggregate │ │ -┌───────────┐ │ │Process │ │ - mean │ │ ┌───────────┐ + │ │ │ │Aggregators │ │ +┌───────────┐ │ │Processors │ │ - mean │ │ ┌───────────┐ │ │ │ │ - transform │ │ - quantiles │ │ │ │ │ MySQL │───┼───▶│ - decorate │────▶│ - min/max │───┼──▶│ File │ │ │ │ │ - filter │ │ - count │ │ │ │ @@ -44,12 +44,14 @@ to control which metrics are passed through a processor or aggregator. If a metric is filtered out the metric bypasses the plugin and is passed downstream to the next plugin. -### Processor +## Processor + Processor plugins process metrics as they pass through and immediately emit results based on the values they process. For example, this could be printing all metrics or adding a tag to all metrics that pass through. -### Aggregator +## Aggregator + Aggregator plugins, on the other hand, are a bit more complicated. Aggregators are typically for emitting new _aggregate_ metrics, such as a running mean, minimum, maximum, or standard deviation. For this reason, all _aggregator_ @@ -62,6 +64,6 @@ emit the aggregates and not the original metrics. Since aggregates are created for each measurement, field, and unique tag combination the plugin receives, you can make use of `taginclude` to group -aggregates by specific tags only. +aggregates by specific tags only. **Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included. diff --git a/docs/COMMANDS_AND_FLAGS.md b/docs/COMMANDS_AND_FLAGS.md new file mode 100644 index 0000000000000..d906fd651f1c9 --- /dev/null +++ b/docs/COMMANDS_AND_FLAGS.md @@ -0,0 +1,68 @@ +# Telegraf Commands & Flags + +## Usage + +```shell +telegraf [commands] +telegraf [flags] +``` + +## Commands + +|command|description| +|--------|-----------------------------------------------| +|`config` |print out full sample configuration to stdout| +|`version`|print the version to stdout| + +## Flags + +|flag|description| +|-------------------|------------| +|`--aggregator-filter ` |filter the aggregators to enable, separator is `:`| +|`--config ` |configuration file to load| +|`--config-directory ` |directory containing additional *.conf files| +|`--watch-config` |Telegraf will restart on local config changes. Monitor changes using either fs notifications or polling. Valid values: `inotify` or `poll`. Monitoring is off by default.| +|`--plugin-directory` |directory containing *.so files, this directory will be searched recursively. 
Any Plugin found will be loaded and namespaced.| +|`--debug` |turn on debug logging| +|`--deprecation-list` |print all deprecated plugins or plugin options| +|`--input-filter ` |filter the inputs to enable, separator is `:`| +|`--input-list` |print available input plugins.| +|`--output-filter ` |filter the outputs to enable, separator is `:`| +|`--output-list` |print available output plugins.| +|`--pidfile ` |file to write our pid to| +|`--pprof-addr ` |pprof address to listen on, don't activate pprof if empty| +|`--processor-filter ` |filter the processors to enable, separator is `:`| +|`--quiet` |run in quiet mode| +|`--section-filter` |filter config sections to output, separator is `:`. Valid values are `agent`, `global_tags`, `outputs`, `processors`, `aggregators` and `inputs`| +|`--sample-config` |print out full sample configuration| +|`--once` |enable once mode: gather metrics once, write them, and exit| +|`--test` |enable test mode: gather metrics once and print them. **No outputs are executed!**| +|`--test-wait` |wait up to this many seconds for service inputs to complete in test or once mode. **Implies `--test` if not used with `--once`**| +|`--usage ` |print usage for a plugin, ie, `telegraf --usage mysql`| +|`--version` |display the version and exit| + +## Examples + +**Generate a telegraf config file:** + +`telegraf config > telegraf.conf` + +**Generate config with only cpu input & influxdb output plugins defined:** + +`telegraf config --input-filter cpu --output-filter influxdb` + +**Run a single telegraf collection, outputting metrics to stdout:** + +`telegraf --config telegraf.conf --test` + +**Run telegraf with all plugins defined in config file:** + +`telegraf --config telegraf.conf` + +**Run telegraf, enabling the cpu & memory input, and influxdb output plugins:** + +`telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb` + +**Run telegraf with pprof:** + +`telegraf --config telegraf.conf --pprof-addr localhost:6060` diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 9b8b07263b700..e1b22023b6aa3 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -1,3 +1,5 @@ + + # Configuration Telegraf's configuration file is written using [TOML][] and is composed of @@ -5,9 +7,10 @@ three sections: [global tags][], [agent][] settings, and [plugins][]. View the default [telegraf.conf][] config file with all available plugins. -### Generating a Configuration File +## Generating a Configuration File A default config file can be generated by telegraf: + ```sh telegraf config > telegraf.conf ``` @@ -16,10 +19,12 @@ To generate a file with specific inputs and outputs, you can use the --input-filter and --output-filter flags: ```sh -telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config +telegraf config --input-filter cpu:mem:net:swap --output-filter influxdb:kafka ``` -### Configuration Loading +[View the full list][flags] of Telegraf commands and flags or by running `telegraf --help`. + +## Configuration Loading The location of the configuration file can be set via the `--config` command line flag. @@ -32,7 +37,7 @@ On most systems, the default locations are `/etc/telegraf/telegraf.conf` for the main configuration file and `/etc/telegraf/telegraf.d` for the directory of configuration files. -### Environment Variables +## Environment Variables Environment variables can be used anywhere in the config file, simply surround them with `${}`. Replacement occurs before file parsing. For strings @@ -47,14 +52,17 @@ in the `/etc/default/telegraf` file.
`/etc/default/telegraf`: For InfluxDB 1.x: -``` + +```shell USER="alice" INFLUX_URL="http://localhost:8086" INFLUX_SKIP_DATABASE_CREATION="true" INFLUX_PASSWORD="monkey123" ``` + For InfluxDB OSS 2: -``` + +```shell INFLUX_HOST="http://localhost:8086" # used to be 9999 INFLUX_TOKEN="replace_with_your_token" INFLUX_ORG="your_username" @@ -62,7 +70,8 @@ INFLUX_BUCKET="replace_with_your_bucket_name" ``` For InfluxDB Cloud 2: -``` + +```shell # For AWS West (Oregon) INFLUX_HOST="https://us-west-2-1.aws.cloud2.influxdata.com" # Other Cloud URLs at https://v2.docs.influxdata.com/v2.0/reference/urls/#influxdb-cloud-urls @@ -72,6 +81,7 @@ INFLUX_BUCKET="replace_with_your_bucket_name" ``` `/etc/telegraf.conf`: + ```toml [global_tags] user = "${USER}" @@ -87,20 +97,21 @@ INFLUX_BUCKET="replace_with_your_bucket_name" # For InfluxDB OSS 2: [[outputs.influxdb_v2]] urls = ["${INFLUX_HOST}"] - token = ["${INFLUX_TOKEN}"] - org = ["${INFLUX_ORG}"] - bucket = ["${INFLUX_BUCKET}"] + token = "${INFLUX_TOKEN}" + organization = "${INFLUX_ORG}" + bucket = "${INFLUX_BUCKET}" # For InfluxDB Cloud 2: [[outputs.influxdb_v2]] urls = ["${INFLUX_HOST}"] - token = ["${INFLUX_TOKEN}"] - org = ["${INFLUX_ORG}"] - bucket = ["${INFLUX_BUCKET}"] + token = "${INFLUX_TOKEN}" + organization = "${INFLUX_ORG}" + bucket = "${INFLUX_BUCKET}" ``` The above files will produce the following effective configuration file to be parsed: + ```toml [global_tags] user = "alice" @@ -117,7 +128,7 @@ parsed: [[outputs.influxdb_v2]] urls = ["http://127.0.0.1:8086"] # double check the port. could be 9999 if using OSS Beta token = "replace_with_your_token" - org = "your_username" + organization = "your_username" bucket = "replace_with_your_bucket_name" # For InfluxDB Cloud 2: @@ -126,31 +137,33 @@ parsed: INFLUX_HOST="https://us-west-2-1.aws.cloud2.influxdata.com" # Other Cloud URLs at https://v2.docs.influxdata.com/v2.0/reference/urls/#influxdb-cloud-urls token = "replace_with_your_token" - org = "yourname@yourcompany.com" + organization = "yourname@yourcompany.com" bucket = "replace_with_your_bucket_name" ``` -### Intervals +## Intervals Intervals are durations of time and can be specified for supporting settings by combining an integer value and time unit as a string value. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. + ```toml [agent] interval = "10s" ``` -### Global Tags +## Global Tags Global tags can be specified in the `[global_tags]` table in key="value" format. All metrics that are gathered will be tagged with the tags specified. +Global tags are overriden by tags set by plugins. ```toml [global_tags] dc = "us-east-1" ``` -### Agent +## Agent The agent table configures Telegraf and the defaults used across all plugins. @@ -175,6 +188,11 @@ The agent table configures Telegraf and the defaults used across all plugins. This can be used to avoid many plugins querying things like sysfs at the same time, which can have a measurable effect on the system. +- **collection_offset**: + Collection offset is used to shift the collection by the given [interval][]. + This can be be used to avoid many plugins querying constraint devices + at the same time by manually scheduling them in time. + - **flush_interval**: Default flushing [interval][] for all outputs. Maximum flush_interval will be flush_interval + flush_jitter. @@ -206,7 +224,6 @@ The agent table configures Telegraf and the defaults used across all plugins. Name of the file to be logged to when using the "file" logtarget. 
+
 - **flush_interval**:
   Default flushing [interval][] for all outputs. Maximum flush_interval will
   be flush_interval + flush_jitter.
@@ -206,7 +224,6 @@ The agent table configures Telegraf and the defaults used across all plugins.
   Name of the file to be logged to when using the "file" logtarget.
   If set to the empty string then logs are written to stderr.
-
 - **logfile_rotation_interval**:
   The logfile will be rotated after the time interval specified. When set to
   0 no time based rotation is performed.
@@ -219,12 +236,22 @@ The agent table configures Telegraf and the defaults used across all plugins.
   Maximum number of rotated archives to keep, any older logs are deleted. If
   set to -1, no archives are removed.
 
+- **log_with_timezone**:
+  Pick a timezone to use when logging, or type 'local' for local time. Example: 'America/Chicago'.
+  [See this page for options/formats.](https://socketloop.com/tutorials/golang-display-list-of-timezones-with-gmt)
+
 - **hostname**:
   Override default hostname, if empty use os.Hostname()
+
 - **omit_hostname**:
   If set to true, do not set the "host" tag in the telegraf agent.
 
-### Plugins
+- **snmp_translator**:
+  Method of translating SNMP objects. Can be "netsnmp" which
+  translates by calling external programs snmptranslate and snmptable,
+  or "gosmi" which translates using the built-in gosmi library.
+
+## Plugins
 
 Telegraf plugins are divided into 4 types: [inputs][], [outputs][],
 [processors][], and [aggregators][].
@@ -265,6 +292,11 @@ Parameters that can be used with any input plugin:
   plugin. Collection jitter is used to jitter the collection by a random
   [interval][].
 
+- **collection_offset**:
+  Overrides the `collection_offset` setting of the [agent][Agent] for the
+  plugin. Collection offset is used to shift the collection by the given
+  [interval][].
+
 - **name_override**:
   Override the base name of the measurement. (Default is the name of the
   input).
@@ -280,6 +312,7 @@ emitted from the input plugin.
 
 #### Examples
 
 Use the name_suffix parameter to emit measurements with the name `cpu_total`:
+
 ```toml
 [[inputs.cpu]]
   name_suffix = "_total"
@@ -288,6 +321,7 @@ Use the name_suffix parameter to emit measurements with the name `cpu_total`:
 ```
 
 Use the name_override parameter to emit measurements with the name `foobar`:
+
 ```toml
 [[inputs.cpu]]
   name_override = "foobar"
@@ -300,6 +334,7 @@ Emit measurements with two additional tags: `tag1=foo` and `tag2=bar`
 
 > **NOTE**: With TOML, order matters. Parameters belong to the last defined
 > table header, place `[inputs.cpu.tags]` table at the _end_ of the plugin
 > definition.
+
 ```toml
 [[inputs.cpu]]
   percpu = false
@@ -311,6 +346,7 @@ Emit measurements with two additional tags: `tag1=foo` and `tag2=bar`
 
 Utilize `name_override`, `name_prefix`, or `name_suffix` config options to
 avoid measurement collisions when defining multiple plugins:
+
 ```toml
 [[inputs.cpu]]
   percpu = false
@@ -350,6 +386,7 @@ emitted from the output plugin.
 
 #### Examples
 
 Override flush parameters for a single output:
+
 ```toml
 [agent]
   flush_interval = "10s"
@@ -387,6 +424,7 @@ processor.
 
 If the order in which processors are applied matters, you must set order on
 all involved processors:
+
 ```toml
 [[processors.rename]]
   order = 1
@@ -428,7 +466,7 @@ Parameters that can be used with any aggregator plugin:
   the name of the input).
 - **name_prefix**: Specifies a prefix to attach to the measurement name.
 - **name_suffix**: Specifies a suffix to attach to the measurement name.
-- **tags**: A map of tags to apply to a specific input's measurements.
+- **tags**: A map of tags to apply to the measurement; behavior varies based on the aggregator.
 
 The [metric filtering][] parameters can be used to limit what metrics are
 handled by the aggregator. Excluded metrics are passed downstream to the next
@@ -438,6 +476,7 @@ aggregator.
 Collect and emit the min/max of the system load1 metric every 30s, dropping
 the originals.
+
 ```toml
 [[inputs.system]]
   fieldpass = ["load1"] # collects system load1 metric.
@@ -453,6 +492,7 @@ the originals.
 
 Collect and emit the min/max of the swap metrics every 30s, dropping the
 originals. The aggregator will not be applied to the system load metrics due
 to the `namepass` parameter.
+
 ```toml
 [[inputs.swap]]
 
@@ -468,14 +508,13 @@ to the `namepass` parameter.
     files = ["stdout"]
 ```
 
-
-### Metric Filtering
+## Metric Filtering
 
 Metric filtering can be configured per plugin on any input, output, processor,
 and aggregator plugin. Filters fall under two categories: Selectors and
 Modifiers.
 
-#### Selectors
+### Selectors
 
 Selector filters include or exclude entire metrics. When a metric is excluded
 from an Input or an Output plugin, the metric is dropped. If a metric is
@@ -500,10 +539,10 @@ The inverse of `tagpass`. If a match is found the metric is discarded. This
 is tested on metrics after they have passed the `tagpass` test.
 
 > NOTE: Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters must be
-defined at the *_end_* of the plugin definition, otherwise subsequent plugin config
+defined at the **end** of the plugin definition, otherwise subsequent plugin config
 options will be interpreted as part of the tagpass/tagdrop tables.
 
-#### Modifiers
+### Modifiers
 
 Modifier filters remove tags and fields from a metric. If all fields are
 removed the metric is removed.
@@ -529,9 +568,10 @@ The inverse of `taginclude`. Tags with a tag key matching one of the patterns
 will be discarded from the metric. Any tag can be filtered including global
 tags and the agent `host` tag.
 
-#### Filtering Examples
+### Filtering Examples
+
+#### Using tagpass and tagdrop
 
-##### Using tagpass and tagdrop:
 ```toml
 [[inputs.cpu]]
   percpu = true
@@ -564,7 +604,8 @@ tags and the agent `host` tag.
       instance = ["isatap*", "Local*"]
 ```
 
-##### Using fieldpass and fielddrop:
+#### Using fieldpass and fielddrop
+
 ```toml
 # Drop all metrics for guest & steal CPU usage
 [[inputs.cpu]]
@@ -577,7 +618,8 @@ tags and the agent `host` tag.
   fieldpass = ["inodes*"]
 ```
 
-##### Using namepass and namedrop:
+#### Using namepass and namedrop
+
 ```toml
 # Drop all metrics about containers for kubelet
 [[inputs.prometheus]]
@@ -590,7 +632,8 @@ tags and the agent `host` tag.
   namepass = ["rest_client_*"]
 ```
 
-##### Using taginclude and tagexclude:
+#### Using taginclude and tagexclude
+
 ```toml
 # Only include the "cpu" tag in the measurements for the cpu plugin.
 [[inputs.cpu]]
@@ -603,7 +646,8 @@ tags and the agent `host` tag.
   tagexclude = ["fstype"]
 ```
 
-##### Metrics can be routed to different outputs using the metric name and tags:
+#### Metrics can be routed to different outputs using the metric name and tags
+
 ```toml
 [[outputs.influxdb]]
   urls = [ "http://localhost:8086" ]
@@ -625,7 +669,7 @@ tags and the agent `host` tag.
       cpu = ["cpu0"]
 ```
 
-##### Routing metrics to different outputs based on the input.
+#### Routing metrics to different outputs based on the input
 
 Metrics are tagged with `influxdb_database` in the input, which is then used
 to select the output. The tag is removed in the outputs before writing.
@@ -649,7 +693,7 @@ select the output. The tag is removed in the outputs before writing.
       influxdb_database = "other"
 ```
 
-### Transport Layer Security (TLS)
+## Transport Layer Security (TLS)
 
 Reference the detailed [TLS][] documentation.
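+
+As an illustration, most plugins that support TLS accept the common client
+options sketched below (the plugin choice and file paths are illustrative;
+the TLS docs are the authoritative reference for the full option set):
+
+```toml
+[[inputs.http]]
+  urls = ["https://localhost:8443/metrics"]
+  tls_ca = "/etc/telegraf/ca.pem"
+  tls_cert = "/etc/telegraf/cert.pem"
+  tls_key = "/etc/telegraf/key.pem"
+  insecure_skip_verify = false
+```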
@@ -666,3 +710,4 @@
 [telegraf.conf]: /etc/telegraf.conf
 [TLS]: /docs/TLS.md
 [glob pattern]: https://github.com/gobwas/glob#syntax
+[flags]: /docs/COMMANDS_AND_FLAGS.md
diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md
index b716501683bf8..cb04d3e009030 100644
--- a/docs/DATA_FORMATS_INPUT.md
+++ b/docs/DATA_FORMATS_INPUT.md
@@ -5,17 +5,21 @@ using a configurable parser into [metrics][]. This allows, for example, the
 `kafka_consumer` input plugin to process messages in either InfluxDB Line
 Protocol or JSON format.
 
-- [InfluxDB Line Protocol](/plugins/parsers/influx)
 - [Collectd](/plugins/parsers/collectd)
 - [CSV](/plugins/parsers/csv)
 - [Dropwizard](/plugins/parsers/dropwizard)
 - [Graphite](/plugins/parsers/graphite)
 - [Grok](/plugins/parsers/grok)
+- [InfluxDB Line Protocol](/plugins/parsers/influx)
 - [JSON](/plugins/parsers/json)
+- [JSON v2](/plugins/parsers/json_v2)
 - [Logfmt](/plugins/parsers/logfmt)
 - [Nagios](/plugins/parsers/nagios)
+- [Prometheus](/plugins/parsers/prometheus)
+- [PrometheusRemoteWrite](/plugins/parsers/prometheusremotewrite)
 - [Value](/plugins/parsers/value), e.g. 45 or "booyah"
 - [Wavefront](/plugins/parsers/wavefront)
+- [XPath](/plugins/parsers/xpath) (supports XML, JSON, MessagePack, Protocol Buffers)
 
 Any input plugin containing the `data_format` option can use it to select the
 desired parser:
@@ -29,9 +33,6 @@ desired parser:
   name_suffix = "_mycollector"
 
   ## Data format to consume.
-  ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
   data_format = "json"
 ```
diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md
index 2b3e953601218..d7ad8367bcb78 100644
--- a/docs/DATA_FORMATS_OUTPUT.md
+++ b/docs/DATA_FORMATS_OUTPUT.md
@@ -6,12 +6,15 @@ plugins.
 
 1. [InfluxDB Line Protocol](/plugins/serializers/influx)
 1. [Carbon2](/plugins/serializers/carbon2)
+1. [CSV](/plugins/serializers/csv)
 1. [Graphite](/plugins/serializers/graphite)
 1. [JSON](/plugins/serializers/json)
+1. [MessagePack](/plugins/serializers/msgpack)
 1. [Prometheus](/plugins/serializers/prometheus)
+1. [Prometheus Remote Write](/plugins/serializers/prometheusremotewrite)
+1. [ServiceNow Metrics](/plugins/serializers/nowmetric)
 1. [SplunkMetric](/plugins/serializers/splunkmetric)
 1. [Wavefront](/plugins/serializers/wavefront)
-1. [ServiceNow Metrics](/plugins/serializers/nowmetric)
 
 You will be able to identify the plugins with support by the presence of a
 `data_format` config option, for example, in the `file` output plugin:
@@ -22,8 +25,5 @@ You will be able to identify the plugins with support by the presence of a
     files = ["stdout"]
 
     ## Data format to output.
-    ## Each data format has its own unique set of configuration options, read
-    ## more about them here:
-    ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
     data_format = "influx"
 ```
diff --git a/docs/DOCKER.md b/docs/DOCKER.md
new file mode 100644
index 0000000000000..5d0484e10be5a
--- /dev/null
+++ b/docs/DOCKER.md
@@ -0,0 +1,3 @@
+# Telegraf Docker Images
+
+Docker images for Telegraf are kept in the [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker/tree/master/telegraf) repo.
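+
+A typical way to run the image is to mount a local configuration file (the
+official image reads `/etc/telegraf/telegraf.conf` by default; the local path
+here is illustrative):
+
+```shell
+docker run --rm \
+  -v $PWD/telegraf.conf:/etc/telegraf/telegraf.conf:ro \
+  telegraf
+```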
diff --git a/docs/EXTERNAL_PLUGINS.md b/docs/EXTERNAL_PLUGINS.md
index aa3b5058aa8b4..f3dc0699ca2df 100644
--- a/docs/EXTERNAL_PLUGINS.md
+++ b/docs/EXTERNAL_PLUGINS.md
@@ -1,15 +1,18 @@
-### External Plugins
+# External Plugins
 
-[External plugins](/EXTERNAL_PLUGINS.md) are external programs that are built outside
-of Telegraf that can run through an `execd` plugin. These external plugins allow for
-more flexibility compared to internal Telegraf plugins.
+[External plugins](/EXTERNAL_PLUGINS.md) are external programs that are built outside
+of Telegraf and can be run through an `execd` plugin. These external plugins allow for
+more flexibility compared to internal Telegraf plugins.
 
 - External plugins can be written in any language (internal Telegraf plugins can only be written in Go)
 - External plugins can access libraries not written in Go
 - Utilize licensed software that isn't available to the open source community
 - Can include large dependencies that would otherwise bloat Telegraf
+- You don't need to wait for the Telegraf team to publish your plugin to start working with it.
+- Using the [shim](/plugins/common/shim) you can easily convert plugins between internal and external use
+
+## External Plugin Guidelines
 
-### External Plugin Guidelines
 The guidelines for writing external plugins follow those for our general
 [input](/docs/INPUTS.md), [output](/docs/OUTPUTS.md), [processor](/docs/PROCESSORS.md), and [aggregator](/docs/AGGREGATORS.md) plugins.
 Please reference the documentation on how to create these plugins written in Go.
@@ -17,52 +20,55 @@ Please reference the documentation on how to create these plugins written in Go.
 
 _For listed [external plugins](/EXTERNAL_PLUGINS.md), the author of the external plugin is also responsible for the maintenance
 and feature development of external plugins. Expect to have users open plugin issues on its respective GitHub repository._
 
-#### Execd Go Shim
+### Execd Go Shim
+
 For Go plugins, there is an [Execd Go Shim](/plugins/common/shim/) that will make it trivial to extract an internal input,
 processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This shim allows anyone to build
 and run it as a separate app using one of the `execd` plugins:
+
 - [inputs.execd](/plugins/inputs/execd)
 - [processors.execd](/plugins/processors/execd)
 - [outputs.execd](/plugins/outputs/execd)
 
 Follow the [Steps to externalize a plugin](/plugins/common/shim#steps-to-externalize-a-plugin) and
 [Steps to build and run your plugin](/plugins/common/shim#steps-to-build-and-run-your-plugin) to properly
 set up your plugin with the Execd Go Shim
 
-#### Step-by-Step guidelines
-This is a guide to help you set up your plugin to use it with `execd`
-1. Write your Telegraf plugin. Depending on the plugin, follow the guidelines on how to create the plugin itself using InfluxData's best practices:
+### Step-by-Step guidelines
+
+This is a guide to help you set up your plugin to use it with `execd`:
+
+1. Write your Telegraf plugin. Depending on the plugin, follow the guidelines on how to create the plugin itself using InfluxData's best practices:
    - [Input Plugins](/docs/INPUTS.md)
   - [Processor Plugins](/docs/PROCESSORS.md)
   - [Aggregator Plugins](/docs/AGGREGATORS.md)
   - [Output Plugins](/docs/OUTPUTS.md)
 2. If your plugin is written in Go, include the steps for the [Execd Go Shim](/plugins/common/shim#steps-to-build-and-run-your-plugin)
-   1. Move the project to an external repo, it's recommended to preserve the path
-      structure, (but not strictly necessary).
-      eg if your plugin was at
-      `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu`
-      in the new repo. For a further example of what this might look like, take a
-      look at [ssoroka/rand](https://github.com/ssoroka/rand) or
-      [danielnelson/telegraf-execd-openvpn](https://github.com/danielnelson//telegraf-execd-openvpn)
-   1. Copy [main.go](/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder.
-      This will be the entrypoint to the plugin when run as a stand-alone program, and
-      it will call the shim code for you to make that happen. It's recommended to
-      have only one plugin per repo, as the shim is not designed to run multiple
-      plugins at the same time (it would vastly complicate things).
-   1. Edit the main.go file to import your plugin. Within Telegraf this would have
-      been done in an all.go file, but here we don't split the two apart, and the change
-      just goes in the top of main.go. If you skip this step, your plugin will do nothing.
-      eg: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"`
-   1. Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration
-      specific to your plugin. Note that this config file **must be separate from the
-      rest of the config for Telegraf, and must not be in a shared directory where
-      Telegraf is expecting to load all configs**. If Telegraf reads this config file
-      it will not know which plugin it relates to. Telegraf instead uses an execd config
-      block to look for this plugin.
-   1. Add usage and development instructions in the homepage of your repository for running
-      your plugin with its respective `execd` plugin. Please refer to
-      [openvpn](/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](/vipinvkmenon/awsalarms#installation)
-      for examples. Include the following steps:
+   - Move the project to an external repo; it's recommended to preserve the path
+     structure (but not strictly necessary). E.g. if your plugin was at
+     `plugins/inputs/cpu`, it's recommended that it also be under `plugins/inputs/cpu`
+     in the new repo. For a further example of what this might look like, take a
+     look at [ssoroka/rand](https://github.com/ssoroka/rand) or
+     [danielnelson/telegraf-execd-openvpn](https://github.com/danielnelson/telegraf-execd-openvpn)
+   - Copy [main.go](/plugins/common/shim/example/cmd/main.go) into your project under the `cmd` folder.
+     This will be the entrypoint to the plugin when run as a stand-alone program, and
+     it will call the shim code for you to make that happen. It's recommended to
+     have only one plugin per repo, as the shim is not designed to run multiple
+     plugins at the same time (it would vastly complicate things).
+   - Edit the main.go file to import your plugin. Within Telegraf this would have
+     been done in an all.go file, but here we don't split the two apart; the change
+     just goes at the top of main.go. If you skip this step, your plugin will do nothing.
+     E.g.: `_ "github.com/me/my-plugin-telegraf/plugins/inputs/cpu"`
+   - Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration
+     specific to your plugin. Note that this config file **must be separate from the
+     rest of the config for Telegraf, and must not be in a shared directory where
+     Telegraf is expecting to load all configs**. If Telegraf reads this config file
+     it will not know which plugin it relates to. Telegraf instead uses an execd config
+     block to look for this plugin, as sketched below.
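+
+     A minimal sketch of such a block (the binary path and flags here are
+     hypothetical):
+
+     ```toml
+     [[inputs.execd]]
+       command = ["/usr/local/bin/myplugin", "-config", "/etc/telegraf/plugin.conf"]
+       signal = "none"
+     ```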
+   - Add usage and development instructions on the homepage of your repository for running
+     your plugin with its respective `execd` plugin. Please refer to
+     [openvpn](https://github.com/danielnelson/telegraf-execd-openvpn#usage) and [awsalarms](https://github.com/vipinvkmenon/awsalarms#installation)
+     for examples. Include the following steps:
     1. How to download the release package for your platform or how to clone the binary for
        your external plugin
-    1. The commands to unpack or build your binary
+    1. The commands to build your binary
     1. Location to edit your `telegraf.conf`
-    1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd),
+    1. Configuration to run your external plugin with [inputs.execd](/plugins/inputs/execd),
       [processors.execd](/plugins/processors/execd) or [outputs.execd](/plugins/outputs/execd)
-    1. Note that restart or reload of Telegraf is required
-  1. Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md)
-     list. Please include the plugin name, link to the plugin repository and a short description of the plugin.
+   - Submit your plugin by opening a PR to add your external plugin to the [/EXTERNAL_PLUGINS.md](/EXTERNAL_PLUGINS.md)
+     list. Please include the plugin name, link to the plugin repository and a short description of the plugin.
diff --git a/docs/FAQ.md b/docs/FAQ.md
index 4fe28db8b9cbc..c702a91564994 100644
--- a/docs/FAQ.md
+++ b/docs/FAQ.md
@@ -1,24 +1,23 @@
 # Frequently Asked Questions
 
-### Q: How can I monitor the Docker Engine Host from within a container?
+## Q: How can I monitor the Docker Engine Host from within a container?
 
 You will need to set up several volume mounts as well as some environment
 variables:
-```
+
+```shell
 docker run --name telegraf \
-  -v /:/hostfs:ro \
-  -e HOST_ETC=/hostfs/etc \
-  -e HOST_PROC=/hostfs/proc \
-  -e HOST_SYS=/hostfs/sys \
-  -e HOST_VAR=/hostfs/var \
-  -e HOST_RUN=/hostfs/run \
-  -e HOST_MOUNT_PREFIX=/hostfs \
-  telegraf
+    -v /:/hostfs:ro \
+    -e HOST_ETC=/hostfs/etc \
+    -e HOST_PROC=/hostfs/proc \
+    -e HOST_SYS=/hostfs/sys \
+    -e HOST_VAR=/hostfs/var \
+    -e HOST_RUN=/hostfs/run \
+    -e HOST_MOUNT_PREFIX=/hostfs \
+    telegraf
 ```
-
-### Q: Why do I get a "no such host" error resolving hostnames that other
-programs can resolve?
+## Q: Why do I get a "no such host" error resolving hostnames that other programs can resolve?
 
 Go uses a pure Go resolver by default for [name resolution](https://golang.org/pkg/net/#hdr-Name_Resolution).
 This resolver behaves differently than the C library functions but is more
@@ -29,16 +28,18 @@ that are unsupported by the pure Go resolver, you can switch to the cgo
 resolver.
 
 If running manually, set:
-```
+
+```shell
 export GODEBUG=netdns=cgo
 ```
 
 If running as a service, add the environment variable to `/etc/default/telegraf`:
-```
+
+```shell
 GODEBUG=netdns=cgo
 ```
 
-### Q: How can I manage series cardinality?
+## Q: How can I manage series cardinality?
 
 High [series cardinality][], when not properly managed, can cause high load on
 your database. Telegraf attempts to avoid creating series with high
@@ -50,8 +51,6 @@ You can use the following techniques to avoid cardinality issues:
 
 - Use [metric filtering][] options to exclude unneeded measurements and tags.
 - Write to a database with an appropriate [retention policy][].
-- Limit series cardinality in your database using the
-  [max-series-per-database][] and [max-values-per-tag][] settings.
 - Consider using the [Time Series Index][tsi].
 - Monitor your databases using the [show cardinality][] commands.
 - Consult the [InfluxDB documentation][influx docs] for the most up-to-date
   techniques.
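+
+As an illustration, a high-cardinality tag can be dropped with the
+`tagexclude` modifier (the plugin and tag names here are placeholders):
+
+```toml
+[[inputs.example]]
+  ## Drop a tag whose values are unbounded, e.g. a per-request id
+  tagexclude = ["request_id"]
+```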
@@ -59,13 +58,6 @@
 [series cardinality]: https://docs.influxdata.com/influxdb/v1.7/concepts/glossary/#series-cardinality
 [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
 [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
-[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
-[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000
 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/
 [show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
 [influx docs]: https://docs.influxdata.com/influxdb/latest/
-
-### Q: When will the next version be released?
-
-The latest release date estimate can be viewed on the
-[milestones](https://github.com/influxdata/telegraf/milestones) page.
diff --git a/docs/INPUTS.md b/docs/INPUTS.md
index 179b674442d6d..8dc5725fd1f94 100644
--- a/docs/INPUTS.md
+++ b/docs/INPUTS.md
@@ -1,4 +1,4 @@
-### Input Plugins
+# Input Plugins
 
 This section is for developers who want to create new collection inputs.
 Telegraf is entirely plugin driven. This interface allows for operators to
@@ -8,54 +8,52 @@ to create new ways of generating metrics.
 
 Plugin authorship is kept as simple as possible to encourage people to develop
 and submit new inputs.
 
-### Input Plugin Guidelines
+## Input Plugin Guidelines
 
 - A plugin must conform to the [telegraf.Input][] interface.
 - Input Plugins should call `inputs.Add` in their `init` function to register
   themselves. See below for a quick example.
 - Input Plugins must be added to the
   `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file.
-- The `SampleConfig` function should return valid toml that describes how the
-  plugin can be configured. This is included in `telegraf config`. Please
-  consult the [SampleConfig][] page for the latest style
-  guidelines.
-- The `Description` function should say in one line what this plugin does.
-- Follow the recommended [CodeStyle][].
+- Each plugin requires a file called `sample.conf` containing the sample
+  configuration for the plugin in TOML format.
+  Please consult the [Sample Config][] page for the latest style guidelines.
+- Each plugin `README.md` file should include the `sample.conf` file in a
+  section describing the configuration, by marking a fenced `toml` section
+  with `toml @sample.conf`. The specified file(s) are then injected
+  automatically into the README.
+- Follow the recommended [Code Style][].
 
 Let's say you've written a plugin that emits metrics about processes on the
 current host.
 
-### Input Plugin Example
+## Input Plugin Example
 
 ```go
+//go:generate ../../../tools/readme_config_includer/generator
 package simple
 
-// simple.go
-
 import (
+    _ "embed"
+
     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/plugins/inputs"
 )
 
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
 type Simple struct {
     Ok  bool            `toml:"ok"`
     Log telegraf.Logger `toml:"-"`
 }
 
-func (s *Simple) Description() string {
-    return "a demo plugin"
-}
-
-func (s *Simple) SampleConfig() string {
-    return `
-  ## Indicate if everything is fine
-  ok = true
-`
+func (*Simple) SampleConfig() string {
+    return sampleConfig
 }
 
 // Init is for setup, and validating config.
 func (s *Simple) Init() error {
-    return nil
+    return nil
 }
 
 func (s *Simple) Gather(acc telegraf.Accumulator) error {
@@ -75,15 +73,15 @@ func init() {
 
 ### Development
 
-* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker
+- Run `make static` followed by `make plugin-[pluginName]` to spin up a docker
   dev environment using docker-compose.
-* ***[Optional]*** When developing a plugin, add a `dev` directory with a
+- ***[Optional]*** When developing a plugin, add a `dev` directory with a
   `docker-compose.yml` and `telegraf.conf` as well as any other supporting files,
   where sensible.
 
 ### Typed Metrics
 
-In addition the the `AddFields` function, the accumulator also supports
+In addition to the `AddFields` function, the accumulator also supports
 functions to add typed metrics: `AddGauge`, `AddCounter`, etc. Metric types
 are ignored by the InfluxDB output, but can be used for other outputs, such as
 [prometheus][prom metric types].
@@ -101,7 +99,7 @@ You can then utilize the parser internally in your plugin, parsing data as you
 see fit. Telegraf's configuration layer will take care of instantiating and
 creating the `Parser` object.
 
-Add the following to the `SampleConfig()`:
+Add the following to the sample configuration in the README.md:
 
 ```toml
   ## Data format to consume.
@@ -143,8 +141,8 @@ Check the [amqp_consumer][] for an example implementation.
 
 [amqp_consumer]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amqp_consumer
 [prom metric types]: https://prometheus.io/docs/concepts/metric_types/
 [input data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig
-[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle
+[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md
+[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md
 [telegraf.Input]: https://godoc.org/github.com/influxdata/telegraf#Input
 [telegraf.ServiceInput]: https://godoc.org/github.com/influxdata/telegraf#ServiceInput
 [telegraf.Accumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator
diff --git a/docs/INTEGRATION_TESTS.md b/docs/INTEGRATION_TESTS.md
new file mode 100644
index 0000000000000..4d5e60347c4fb
--- /dev/null
+++ b/docs/INTEGRATION_TESTS.md
@@ -0,0 +1,166 @@
+# Integration Tests
+
+## Running
+
+To run all named integration tests:
+
+```shell
+make test-integration
+```
+
+To run all tests, including unit and integration tests:
+
+```shell
+go test -count 1 -race ./...
+```
+
+## Developing
+
+To run integration tests against a service, the project uses
+[testcontainers][1]. This makes it very easy to create and clean up
+container-based tests.
+
+The `testutil/container.go` file has a `Container` type that wraps
+testcontainers to easily create containers for testing in Telegraf.
+A typical test looks like
+the following:
+
+```go
+servicePort := "5432"
+
+container := testutil.Container{
+    Image:        "postgres:alpine",
+    ExposedPorts: []string{servicePort},
+    Env: map[string]string{
+        "POSTGRES_HOST_AUTH_METHOD": "trust",
+    },
+    WaitingFor: wait.ForAll(
+        wait.ForLog("database system is ready to accept connections"),
+        wait.ForListeningPort(nat.Port(servicePort)),
+    ),
+}
+
+err := container.Start()
+require.NoError(t, err, "failed to start container")
+
+defer func() {
+    require.NoError(t, container.Terminate(), "terminating container failed")
+}()
+```
+
+Users should start the container and then defer termination of the container.
+
+The `testutil.Container` type requires at least an image, ports to expose, and a
+wait stanza. See the following to learn more:
+
+### Images
+
+Images are pulled from [DockerHub][2] by default. When looking for and
+selecting an image from DockerHub, please use the following priority order:
+
+1. [Official Images][3]: these images are generally produced by the publisher
+   themselves and are fully supported with great documentation. These images are
+   easy to spot as they do not have an author in the name (e.g. "mysql")
+2. Publisher produced: not all software has an entry in the above Official
+   Images. This may be due to the project being smaller or moving faster. In
+   this case, pull directly from the publisher's DockerHub whenever possible.
+3. [Bitnami][4]: If neither of the above images exist, look at the images
+   produced and maintained by Bitnami. They go to great efforts to create images
+   for the most popular software, produce great documentation, and ensure that
+   images are maintained.
+4. Other images: If, and only if, none of the above images will work for a
+   particular use-case, then another image can be used. Be prepared to justify
+   the use of these types of images.
+
+### Ports
+
+When the port is specified as a single value (e.g. `11211`), testcontainers
+will generate a random port for the service to start on. This way multiple
+tests can be run without ports conflicting.
+
+The test container will expect an array of ports to expose for testing. For
+most tests only a single port is used, but a user can specify more than one,
+for example to test whether another port is open.
+
+On each container's DockerHub page, the README will usually specify what ports
+are used by the container by default. For many containers this port can be
+changed or specified with an environment variable.
+
+If no ports are specified, a user can view the image tag and its various
+image layers. Find an image layer with the `EXPOSE` keyword to determine what
+ports are used by the container.
+
+### Wait Stanza
+
+The wait stanza lays out what test containers will wait for to determine that
+the container has started and is ready for use by the test. It is best to
+provide not only a port, but also a log message. Ports can come up very early
+in the container, and the service may not be ready.
+
+To find a good log message, it is suggested to launch the container manually
+and see what final message is printed. Usually this is something to the
+effect of "ready for connections" or "setup complete". Also ensure that this
+message only shows up once; a message that can appear multiple times is not a
+reliable signal that the service is actually ready.
+
+### Other Parameters
+
+There are other optional parameters that a user can make use of for additional
+configuration of the test containers:
+
+- `BindMounts`: used to mount local test data into the container. The map uses
+  the location in the container as the key and the local file as the value.
+- `Entrypoint`: if a user wishes to override the entrypoint with a custom
+  command
+- `Env`: to pass environment variables to the container similar to Docker
+  CLI's `--env` option
+- `Name`: if a container needs a hostname set or expects a certain name, use
+  this option to set the container's hostname
+- `Networks`: if the user creates a custom network
+
+[1]: https://github.com/testcontainers/testcontainers-go "testcontainers-go"
+[2]: https://hub.docker.com "DockerHub"
+[3]: https://docs.docker.com/docker-hub/official_images/ "DockerHub Official Images"
+[4]: https://hub.docker.com/u/bitnami "Bitnami Images"
+
+## Network
+
+By default the containers will use the bridge network, where other containers
+cannot talk to each other.
+
+If a custom network is required for running tests, for example if containers
+do need to communicate, then users can set that up with the following code:
+
+```go
+networkName := "test-network"
+net, err := testcontainers.GenericNetwork(ctx, testcontainers.GenericNetworkRequest{
+    NetworkRequest: testcontainers.NetworkRequest{
+        Name:           networkName,
+        Attachable:     true,
+        CheckDuplicate: true,
+    },
+})
+require.NoError(t, err)
+defer func() {
+    require.NoError(t, net.Remove(ctx), "terminating network failed")
+}()
+```
+
+Then specify the network name in the container startup:
+
+```go
+zookeeper := testutil.Container{
+    Image:        "wurstmeister/zookeeper",
+    ExposedPorts: []string{"2181:2181"},
+    Networks:     []string{networkName},
+    WaitingFor:   wait.ForLog("binding to port"),
+    Name:         "telegraf-test-zookeeper",
+}
+```
+
+## Contributing
+
+When adding integration tests, please do the following:
+
+- Add `Integration` to the end of the test name
+- Use testcontainers when an external service is required
+- Use the testutil.Container to set up and configure testcontainers
+- Ensure the testcontainer wait stanza is well-tested
diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md
index 66dc38b43eb08..ac74e618b24fe 100644
--- a/docs/LICENSE_OF_DEPENDENCIES.md
+++ b/docs/LICENSE_OF_DEPENDENCIES.md
@@ -8,101 +8,211 @@ following works:
 
 - collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD)
 - github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE)
 - github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE)
+- github.com/Azure/azure-kusto-go [MIT License](https://github.com/Azure/azure-kusto-go/blob/master/LICENSE)
 - github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE)
 - github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE)
+- github.com/Azure/azure-sdk-for-go/sdk/azcore [MIT License](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azcore/LICENSE.txt)
+- github.com/Azure/azure-sdk-for-go/sdk/internal [MIT License](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/internal/LICENSE.txt)
+- github.com/Azure/azure-sdk-for-go/sdk/storage/azblob [MIT License](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/LICENSE.txt)
+- github.com/Azure/azure-storage-blob-go [MIT License](https://github.com/Azure/azure-storage-blob-go/blob/master/LICENSE)
 - github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE)
 - github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE)
+- github.com/Azure/go-ansiterm [MIT
License](https://github.com/Azure/go-ansiterm/blob/master/LICENSE) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) +- github.com/Azure/go-ntlmssp [MIT License](https://github.com/Azure/go-ntlmssp/blob/master/LICENSE) +- github.com/ClickHouse/clickhouse-go [MIT License](https://github.com/ClickHouse/clickhouse-go/blob/master/LICENSE) - github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) -- github.com/Microsoft/ApplicationInsights-Go [MIT License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) +- github.com/Microsoft/hcsshim [MIT License](https://github.com/microsoft/hcsshim/blob/master/LICENSE) +- github.com/PuerkitoBio/purell [BSD 3-Clause "New" or "Revised" License](https://github.com/PuerkitoBio/purell/blob/master/LICENSE) +- github.com/PuerkitoBio/urlesc [BSD 3-Clause "New" or "Revised" License](https://github.com/PuerkitoBio/urlesc/blob/master/LICENSE) - github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) -- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) - github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) +- github.com/alecthomas/participle [MIT License](https://github.com/alecthomas/participle/blob/master/COPYING) - github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) +- github.com/aliyun/alibaba-cloud-sdk-go [Apache License 2.0](https://github.com/aliyun/alibaba-cloud-sdk-go/blob/master/LICENSE) - github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) +- github.com/antchfx/jsonquery [MIT License](https://github.com/antchfx/jsonquery/blob/master/LICENSE) +- github.com/antchfx/xmlquery [MIT License](https://github.com/antchfx/xmlquery/blob/master/LICENSE) +- github.com/antchfx/xpath [MIT License](https://github.com/antchfx/xpath/blob/master/LICENSE) +- github.com/apache/arrow/go/arrow [Apache License 2.0](https://github.com/apache/arrow/blob/master/LICENSE.txt) - github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) - github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) - github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) -- github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) +- github.com/armon/go-metrics [MIT License](https://github.com/armon/go-metrics/blob/master/LICENSE) +- github.com/aws/aws-sdk-go-v2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/aws/protocol/eventstream/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/config [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/config/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/credentials [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/credentials/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue [Apache License 
2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/dynamodb/attributevalue/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/ec2/imds [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/ec2/imds/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/feature/s3/manager [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/feature/s3/manager/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/configsources [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/configsources/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/endpoints [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/endpoints/v2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/internal/ini [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/internal/ini/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/cloudwatch [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/cloudwatch/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/cloudwatchlogs/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/dynamodb [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/dynamodb/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/dynamodbstreams [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/dynamodbstreams/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/ec2 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/accept-encoding/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/endpoint-discovery/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/presigned-url/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/internal/s3shared [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/internal/s3shared/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/kinesis [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/kinesis/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/s3 [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/s3/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/sso [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/ec2/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/sts [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/sts/LICENSE.txt) +- github.com/aws/aws-sdk-go-v2/service/timestreamwrite [Apache License 2.0](https://github.com/aws/aws-sdk-go-v2/blob/main/service/timestreamwrite/LICENSE.txt) +- github.com/aws/smithy-go [Apache License 2.0](https://github.com/aws/smithy-go/blob/main/LICENSE) +- github.com/awslabs/kinesis-aggregation/go [Apache License 2.0](https://github.com/awslabs/kinesis-aggregation/blob/master/LICENSE.txt) - github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE) - github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) +- github.com/bmatcuk/doublestar [MIT 
License](https://github.com/bmatcuk/doublestar/blob/master/LICENSE) - github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) - github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) - github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/containerd/containerd [Apache License 2.0](https://github.com/containerd/containerd/blob/master/LICENSE) +- github.com/coreos/go-semver [Apache License 2.0](https://github.com/coreos/go-semver/blob/main/LICENSE) +- github.com/coreos/go-systemd [Apache License 2.0](https://github.com/coreos/go-systemd/blob/main/LICENSE) +- github.com/cornelk/hashmap [Apache License 2.0](https://github.com/cornelk/hashmap/blob/master/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) - github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) -- github.com/couchbase/goutils [COUCHBASE INC. COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md) +- github.com/couchbase/goutils [Apache License 2.0](https://github.com/couchbase/goutils/blob/master/LICENSE.md) - github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/dchest/siphash [Creative Commons Zero v1.0 Universal](https://github.com/dchest/siphash/blob/master/LICENSE) - github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) - github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) -- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) +- github.com/dgryski/go-rendezvous [MIT License](https://github.com/dgryski/go-rendezvous/blob/master/LICENSE) - github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) +- github.com/djherbis/times [MIT License](https://github.com/djherbis/times/blob/master/LICENSE) - github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) - github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) - github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE) - github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE) -- github.com/docker/libnetwork [Apache License 2.0](https://github.com/docker/libnetwork/blob/master/LICENSE) +- github.com/doclambda/protobufquery [MIT License](https://github.com/doclambda/protobufquery/blob/master/LICENSE) +- github.com/dynatrace-oss/dynatrace-metric-utils-go [Apache License 2.0](https://github.com/dynatrace-oss/dynatrace-metric-utils-go/blob/master/LICENSE) - github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE) - github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) - github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) - github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 
1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) -- github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE) +- github.com/emicklei/go-restful [MIT License](https://github.com/emicklei/go-restful/blob/v3/LICENSE) +- github.com/fatih/color [MIT License](https://github.com/fatih/color/blob/master/LICENSE.md) +- github.com/form3tech-oss/jwt-go [MIT License](https://github.com/form3tech-oss/jwt-go/blob/master/LICENSE) - github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE) -- github.com/glinton/ping [MIT License](https://github.com/glinton/ping/blob/master/LICENSE) +- github.com/go-asn1-ber/asn1-ber [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) +- github.com/go-ldap/ldap [MIT License](https://github.com/go-ldap/ldap/blob/v3.4.1/LICENSE) - github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) +- github.com/go-logr/logr [Apache License 2.0](https://github.com/go-logr/logr/blob/master/LICENSE) - github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE) +- github.com/go-openapi/jsonpointer [Apache License 2.0](https://github.com/go-openapi/jsonpointer/blob/master/LICENSE) +- github.com/go-openapi/jsonreference [Apache License 2.0](https://github.com/go-openapi/jsonreference/blob/master/LICENSE) +- github.com/go-openapi/swag [Apache License 2.0](https://github.com/go-openapi/swag/blob/master/LICENSE) +- github.com/go-ping/ping [MIT License](https://github.com/go-ping/ping/blob/master/LICENSE) - github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) - github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) -- github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE) -- github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/LICENSE) +- github.com/go-stack/stack [MIT License](https://github.com/go-stack/stack/blob/master/LICENSE.md) - github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) - github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) - github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang-jwt/jwt [MIT License](https://github.com/golang-jwt/jwt/blob/main/LICENSE) +- github.com/golang-sql/civil [Apache License 2.0](https://github.com/golang-sql/civil/blob/master/LICENSE) +- github.com/golang-sql/sqlexp [BSD 3-Clause "New" or "Revised" License](https://github.com/golang-sql/sqlexp/blob/master/LICENSE) - github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) - github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) - github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) - github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/google/flatbuffers [Apache License 2.0](https://github.com/google/flatbuffers/blob/master/LICENSE.txt) +- github.com/google/gnostic [Apache License 2.0](https://github.com/google/gnostic/blob/master/LICENSE) +- github.com/google/gnxi [Apache License 
2.0](https://github.com/google/gnxi/blob/master/LICENSE) - github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-cmp/blob/master/LICENSE) - github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE) - github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE) +- github.com/google/gofuzz [Apache License 2.0](https://github.com/google/gofuzz/blob/master/LICENSE) +- github.com/google/uuid [BSD 3-Clause "New" or "Revised" License](https://github.com/google/uuid/blob/master/LICENSE) +- github.com/googleapis/enterprise-certificate-proxy [Apache License 2.0](https://github.com/googleapis/enterprise-certificate-proxy/blob/main/LICENSE) - github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) - github.com/gopcua/opcua [MIT License](https://github.com/gopcua/opcua/blob/master/LICENSE) +- github.com/gophercloud/gophercloud [Apache License 2.0](https://github.com/gophercloud/gophercloud/blob/master/LICENSE) - github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) +- github.com/gorilla/websocket [BSD 2-Clause "Simplified" License](https://github.com/gorilla/websocket/blob/master/LICENSE) +- github.com/gosnmp/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/gosnmp/gosnmp/blob/master/LICENSE) +- github.com/grid-x/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/grid-x/modbus/blob/master/LICENSE) +- github.com/grid-x/serial [MIT License](https://github.com/grid-x/serial/blob/master/LICENSE) +- github.com/gwos/tcg/sdk [MIT License](https://github.com/gwos/tcg/blob/master/LICENSE) - github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) - github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) -- github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) +- github.com/hashicorp/consul/api [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) +- github.com/hashicorp/errwrap [Mozilla Public License 2.0](https://github.com/hashicorp/errwrap/blob/master/LICENSE) - github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE) +- github.com/hashicorp/go-hclog [Mozilla Public License 2.0](https://github.com/hashicorp/go-hclog/LICENSE) +- github.com/hashicorp/go-immutable-radix [Mozilla Public License 2.0](https://github.com/hashicorp/go-immutable-radix/LICENSE) +- github.com/hashicorp/go-multierror [Mozilla Public License 2.0](https://github.com/hashicorp/go-multierror/blob/master/LICENSE) - github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE) -- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/LICENSE) +- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/blob/master/LICENSE) +- github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE) - github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) - github.com/influxdata/go-syslog 
[MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) +- github.com/influxdata/influxdb-observability/common [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) +- github.com/influxdata/influxdb-observability/influx2otel [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) +- github.com/influxdata/influxdb-observability/otel2influx [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) +- github.com/influxdata/line-protocol [MIT License](https://github.com/influxdata/line-protocol/blob/v2/LICENSE) - github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) - github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) - github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) +- github.com/intel/iaevents [Apache License 2.0](https://github.com/intel/iaevents/blob/main/LICENSE) +- github.com/jackc/chunkreader [MIT License](https://github.com/jackc/chunkreader/blob/master/LICENSE) +- github.com/jackc/pgconn [MIT License](https://github.com/jackc/pgconn/blob/master/LICENSE) +- github.com/jackc/pgio [MIT License](https://github.com/jackc/pgio/blob/master/LICENSE) +- github.com/jackc/pgpassfile [MIT License](https://github.com/jackc/pgpassfile/blob/master/LICENSE) +- github.com/jackc/pgproto3 [MIT License](https://github.com/jackc/pgproto3/blob/master/LICENSE) +- github.com/jackc/pgservicefile [MIT License](https://github.com/jackc/pgservicefile/blob/master/LICENSE) +- github.com/jackc/pgtype [MIT License](https://github.com/jackc/pgtype/blob/master/LICENSE) - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) +- github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) +- github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE) +- github.com/jcmturner/aescts [Apache License 2.0](https://github.com/jcmturner/aescts/blob/master/LICENSE) +- github.com/jcmturner/dnsutils [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) +- github.com/jcmturner/gokrb5 [Apache License 2.0](https://github.com/jcmturner/gokrb5/blob/master/LICENSE) +- github.com/jcmturner/rpc [Apache License 2.0](https://github.com/jcmturner/rpc/blob/master/LICENSE) +- github.com/jhump/protoreflect [Apache License 2.0](https://github.com/jhump/protoreflect/blob/master/LICENSE) - github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) +- github.com/josharian/intern [MIT License](https://github.com/josharian/intern/blob/master/license.md) +- github.com/josharian/native [MIT License](https://github.com/josharian/native/blob/main/license) - github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) +- github.com/json-iterator/go [MIT License](https://github.com/json-iterator/go/blob/master/LICENSE) - github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) - github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE) - github.com/kballard/go-shellquote [MIT 
License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) - github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE) -- github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE) -- github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) +- github.com/kylelemons/godebug [Apache License 2.0](https://github.com/kylelemons/godebug/blob/master/LICENSE) - github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) +- github.com/magiconair/properties [BSD 2-Clause "Simplified" License](https://github.com/magiconair/properties/blob/main/LICENSE.md) - github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) +- github.com/mattn/go-colorable [MIT License](https://github.com/mattn/go-colorable/blob/master/LICENSE) +- github.com/mattn/go-ieproxy [MIT License](https://github.com/mattn/go-ieproxy/blob/master/LICENSE) - github.com/mattn/go-isatty [MIT License](https://github.com/mattn/go-isatty/blob/master/LICENSE) - github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) - github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) - github.com/mdlayher/genetlink [MIT License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) - github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md) +- github.com/mdlayher/socket [MIT License](https://github.com/mdlayher/socket/blob/master/LICENSE.md) +- github.com/microsoft/ApplicationInsights-Go [MIT License](https://github.com/microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) +- github.com/minio/highwayhash [Apache License 2.0](https://github.com/minio/highwayhash/blob/master/LICENSE) - github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) +- github.com/moby/ipvs [Apache License 2.0](https://github.com/moby/ipvs/blob/master/LICENSE) +- github.com/moby/sys/mount [Apache License 2.0](https://github.com/moby/sys/blob/main/LICENSE) +- github.com/moby/sys/mountinfo [Apache License 2.0](https://github.com/moby/sys/blob/main/LICENSE) +- github.com/moby/term [Apache License 2.0](https://github.com/moby/term/blob/master/LICENSE) +- github.com/modern-go/concurrent [Apache License 2.0](https://github.com/modern-go/concurrent/blob/master/LICENSE) +- github.com/modern-go/reflect2 [Apache License 2.0](https://github.com/modern-go/reflect2/blob/master/LICENSE) +- github.com/morikuni/aec [MIT License](https://github.com/morikuni/aec/blob/master/LICENSE) - github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE) +- github.com/munnerz/goautoneg [BSD 3-Clause Clear License](https://github.com/munnerz/goautoneg/blob/master/LICENSE) - github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE) - github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE) - 
github.com/nats-io/nats-server [Apache License 2.0](https://github.com/nats-io/nats-server/blob/master/LICENSE) @@ -111,29 +221,52 @@ following works: - github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE) - github.com/newrelic/newrelic-telemetry-sdk-go [Apache License 2.0](https://github.com/newrelic/newrelic-telemetry-sdk-go/blob/master/LICENSE.md) - github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE) +- github.com/olivere/elastic [MIT License](https://github.com/olivere/elastic/blob/release-branch.v7/LICENSE) - github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE) - github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) - github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) -- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) +- github.com/opencontainers/runc [Apache License 2.0](https://github.com/opencontainers/runc/blob/main/LICENSE) +- github.com/opentracing/opentracing-go [Apache License 2.0](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) +- github.com/pborman/ansi [BSD 3-Clause "New" or "Revised" License](https://github.com/pborman/ansi/blob/master/LICENSE) +- github.com/philhofer/fwd [MIT License](https://github.com/philhofer/fwd/blob/master/LICENSE.md) - github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) +- github.com/pion/dtls [MIT License](https://github.com/pion/dtls/blob/master/LICENSE) +- github.com/pion/logging [MIT License](https://github.com/pion/logging/blob/master/LICENSE) +- github.com/pion/transport [MIT License](https://github.com/pion/transport/blob/master/LICENSE) +- github.com/pion/udp [MIT License](https://github.com/pion/udp/blob/master/LICENSE) +- github.com/pkg/browser [BSD 2-Clause "Simplified" License](https://github.com/pkg/browser/blob/master/LICENSE) - github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) - github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE) - github.com/prometheus/client_golang [Apache License 2.0](https://github.com/prometheus/client_golang/blob/master/LICENSE) - github.com/prometheus/client_model [Apache License 2.0](https://github.com/prometheus/client_model/blob/master/LICENSE) - github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE) - github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) +- github.com/prometheus/prometheus [Apache License 2.0](https://github.com/prometheus/prometheus/blob/master/LICENSE) +- github.com/rabbitmq/amqp091-go [BSD 2-Clause "Simplified" License](https://github.com/rabbitmq/amqp091-go/blob/main/LICENSE) - github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) +- github.com/remyoudompheng/bigfft [BSD 3-Clause "New" or "Revised" License](https://github.com/remyoudompheng/bigfft/blob/master/LICENSE) +- github.com/riemann/riemann-go-client [MIT License](https://github.com/riemann/riemann-go-client/blob/master/LICENSE) - github.com/safchain/ethtool [Apache License 
2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) - github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) - github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) +- github.com/showwin/speedtest-go [MIT License](https://github.com/showwin/speedtest-go/blob/master/LICENSE) +- github.com/signalfx/com_signalfx_metrics_protobuf [Apache License 2.0](https://github.com/signalfx/com_signalfx_metrics_protobuf/blob/master/LICENSE) +- github.com/signalfx/gohistogram [MIT License](https://github.com/signalfx/gohistogram/blob/master/LICENSE) +- github.com/signalfx/golib [Apache License 2.0](https://github.com/signalfx/golib/blob/master/LICENSE) +- github.com/signalfx/sapm-proto [Apache License 2.0](https://github.com/signalfx/sapm-proto/blob/master/LICENSE) - github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) -- github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE) -- github.com/streadway/amqp [BSD 2-Clause "Simplified" License](https://github.com/streadway/amqp/blob/master/LICENSE) +- github.com/sleepinggenius2/gosmi [MIT License](https://github.com/sleepinggenius2/gosmi/blob/master/LICENSE) +- github.com/snowflakedb/gosnowflake [Apache License 2.0](https://github.com/snowflakedb/gosnowflake/blob/master/LICENSE) - github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE) -- github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE) +- github.com/stretchr/testify [MIT License](https://github.com/stretchr/testify/blob/master/LICENSE) +- github.com/testcontainers/testcontainers-go [MIT License](https://github.com/testcontainers/testcontainers-go/blob/main/LICENSE) - github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE) - github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) - github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE) +- github.com/tinylib/msgp [MIT License](https://github.com/tinylib/msgp/blob/master/LICENSE) +- github.com/tklauser/go-sysconf [BSD 3-Clause "New" or "Revised" License](https://github.com/tklauser/go-sysconf/blob/master/LICENSE) +- github.com/tklauser/numcpus [Apache License 2.0](https://github.com/tklauser/numcpus/blob/master/LICENSE) +- github.com/vapourismo/knx-go [MIT License](https://github.com/vapourismo/knx-go/blob/master/LICENSE) - github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) - github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) - github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE) @@ -141,42 +274,61 @@ following works: - github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE) - github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) +- github.com/xdg-go/pbkdf2 [Apache License 2.0](https://github.com/xdg-go/pbkdf2/blob/main/LICENSE) +- github.com/xdg-go/scram [Apache License 2.0](https://github.com/xdg-go/scram/blob/master/LICENSE) +- 
github.com/xdg-go/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) - github.com/xdg/scram [Apache License 2.0](https://github.com/xdg-go/scram/blob/master/LICENSE) - github.com/xdg/stringprep [Apache License 2.0](https://github.com/xdg-go/stringprep/blob/master/LICENSE) +- github.com/youmark/pkcs8 [MIT License](https://github.com/youmark/pkcs8/blob/master/LICENSE) - github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) +- github.com/yusufpapurcu/wmi [MIT License](https://github.com/yusufpapurcu/wmi/blob/master/LICENSE) +- go.mongodb.org/mongo-driver [Apache License 2.0](https://github.com/mongodb/mongo-go-driver/blob/master/LICENSE) - go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) +- go.opentelemetry.io/collector/pdata [Apache License 2.0](https://github.com/open-telemetry/opentelemetry-collector/blob/main/LICENSE) - go.starlark.net [BSD 3-Clause "New" or "Revised" License](https://github.com/google/starlark-go/blob/master/LICENSE) +- go.uber.org/atomic [MIT License](https://pkg.go.dev/go.uber.org/atomic?tab=licenses) +- go.uber.org/multierr [MIT License](https://pkg.go.dev/go.uber.org/multierr?tab=licenses) - golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) +- golang.org/x/exp [BSD 3-Clause Clear License](https://github.com/golang/exp/blob/master/LICENSE) - golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) - golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE) - golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE) - golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) +- golang.org/x/term [BSD 3-Clause License](https://pkg.go.dev/golang.org/x/term?tab=licenses) - golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) - golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) -- golang.org/x/xerrors [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) +- golang.org/x/xerrors [BSD 3-Clause Clear License](https://github.com/golang/xerrors/blob/master/LICENSE) - golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) - golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) +- gonum.org/v1/gonum [BSD 3-Clause "New" or "Revised" License](https://github.com/gonum/gonum/blob/master/LICENSE) - google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) - google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) - google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) -- gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) +- google.golang.org/protobuf [BSD 3-Clause "New" or "Revised" License](https://pkg.go.dev/google.golang.org/protobuf?tab=licenses) - gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) - gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) - 
gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) - gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) -- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE) -- gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE) -- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE) -- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE) -- gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE) -- gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE) +- gopkg.in/ini.v1 [Apache License 2.0](https://github.com/go-ini/ini/blob/master/LICENSE) - gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) - gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) +- gopkg.in/tomb.v2 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v2/LICENSE) - gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) - gopkg.in/yaml.v3 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v3/LICENSE) +- k8s.io/api [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/apimachinery [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/client-go [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/klog [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/kube-openapi [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- k8s.io/utils [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) - modernc.org/libc [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/libc/-/blob/master/LICENSE) +- modernc.org/mathutil [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/mathutil/-/blob/master/LICENSE) - modernc.org/memory [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/memory/-/blob/master/LICENSE) - modernc.org/sqlite [BSD 3-Clause "New" or "Revised" License](https://gitlab.com/cznic/sqlite/-/blob/master/LICENSE) +- sigs.k8s.io/json [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- sigs.k8s.io/structured-merge-diff [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) +- sigs.k8s.io/yaml [Apache License 2.0](https://github.com/kubernetes/client-go/blob/master/LICENSE) + +## Telegraf used and modified code from these projects -## telegraf used and modified code from these projects - github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/docs/NIGHTLIES.md b/docs/NIGHTLIES.md new file mode 100644 index 0000000000000..ee264a0846eb8 --- /dev/null +++ b/docs/NIGHTLIES.md @@ -0,0 +1,32 @@ + +# Nightly Builds + +These builds are generated from the master branch each night: + +| DEB | RPM | TAR GZ | ZIP | +| --------------- | --------------- | ------------------------------| --- | +| [amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) | 
[aarch64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.aarch64.rpm) | [darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) | [windows_amd64.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_amd64.zip) | +| [arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) | [armel.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armel.rpm) | [darwin_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_arm64.tar.gz) | [windows_i386.zip](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_windows_i386.zip) | +| [armel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armel.deb) | [armv6hl.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.armv6hl.rpm) | [freebsd_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_amd64.tar.gz) | | +| [armhf.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_armhf.deb) | [i386.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.i386.rpm) | [freebsd_armv7.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_armv7.tar.gz) | | +| [i386.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_i386.deb) | [ppc64le.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.ppc64le.rpm) | [freebsd_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_freebsd_i386.tar.gz) | | +| [mips.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mips.deb) | [riscv64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.riscv64.rpm) | [linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_amd64.tar.gz) | | +| [mipsel.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_mipsel.deb) | [s390x.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.s390x.rpm) | [linux_arm64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_arm64.tar.gz) | | +| [ppc64el.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_ppc64el.deb) | [x86_64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.x86_64.rpm) | [linux_armel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armel.tar.gz) | | +| [riscv64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_riscv64.deb) | | [linux_armhf.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_armhf.tar.gz) | | +| [s390x.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_s390x.deb) | | [linux_i386.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_i386.tar.gz) | | +| | | [linux_mips.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_mips.tar.gz) | | +| | | [linux_mipsel.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_mipsel.tar.gz) | | +| | | [linux_ppc64le.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_ppc64le.tar.gz) | | +| | | [linux_riscv64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_riscv64.tar.gz) | | +| | | [linux_s390x.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_linux_s390x.tar.gz) | | +| | | [static_linux_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_static_linux_amd64.tar.gz) | | + +Nightly docker images are 
available on [quay.io](https://quay.io/repository/influxdb/telegraf-nightly?tab=tags): + +```shell +# Debian-based image +docker pull quay.io/influxdb/telegraf-nightly:latest +# Alpine-based image +docker pull quay.io/influxdb/telegraf-nightly:alpine +``` diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md index 1a27ca515f118..b1f127ba57e6f 100644 --- a/docs/OUTPUTS.md +++ b/docs/OUTPUTS.md @@ -1,52 +1,54 @@ -### Output Plugins +# Output Plugins This section is for developers who want to create a new output sink. Outputs are created in a similar manner as collection plugins, and their interface has similar constructs. -### Output Plugin Guidelines +## Output Plugin Guidelines - An output must conform to the [telegraf.Output][] interface. - Outputs should call `outputs.Add` in their `init` function to register themselves. See below for a quick example. - To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file. -- The `SampleConfig` function should return valid toml that describes how the - plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. -- The `Description` function should say in one line what this output does. -- Follow the recommended [CodeStyle][]. +- Each plugin requires a file called `sample.conf` containing the sample + configuration for the plugin in TOML format. + Please consult the [Sample Config][] page for the latest style guidelines. +- Each plugin `README.md` file should include the `sample.conf` file in a section + describing the configuration by specifying a `toml` section in the form `toml @sample.conf`. The specified file(s) are then injected automatically into the Readme. +- Follow the recommended [Code Style][]. -### Output Plugin Example +## Output Plugin Example ```go +//go:generate ../../../tools/readme_config_includer/generator package simpleoutput // simpleoutput.go import ( + _ "embed" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Simple struct { Ok bool `toml:"ok"` Log telegraf.Logger `toml:"-"` } -func (s *Simple) Description() string { - return "a demo output" -} - -func (s *Simple) SampleConfig() string { - return ` - ok = true -` +func (*Simple) SampleConfig() string { + return sampleConfig } // Init is for setup, and validating config. 
func (s *Simple) Init() error { - return nil + return nil } func (s *Simple) Connect() error { @@ -103,6 +105,7 @@ You should also add the following to your `SampleConfig()`: ## Flushing Metrics to Outputs Metrics are flushed to outputs when any of the following events happen: + - `flush_interval + rand(flush_jitter)` has elapsed since start or the last flush interval - At least `metric_batch_size` count of metrics are waiting in the buffer - The telegraf process has received a SIGUSR1 signal @@ -115,6 +118,6 @@ or investigate other reasons why the writes might be taking longer than expected [file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file [output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md -[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig -[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md +[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md [telegraf.Output]: https://godoc.org/github.com/influxdata/telegraf#Output diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md index 25566fe323fd2..aa92396bd926a 100644 --- a/docs/PROCESSORS.md +++ b/docs/PROCESSORS.md @@ -1,72 +1,69 @@ -### Processor Plugins +# Processor Plugins This section is for developers who want to create a new processor plugin. -### Processor Plugin Guidelines +## Processor Plugin Guidelines * A processor must conform to the [telegraf.Processor][] interface. * Processors should call `processors.Add` in their `init` function to register themselves. See below for a quick example. * To be available within Telegraf itself, plugins must add themselves to the `github.com/influxdata/telegraf/plugins/processors/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the - processor can be configured. This is include in the output of `telegraf - config`. -- The `SampleConfig` function should return valid toml that describes how the - plugin can be configured. This is included in `telegraf config`. Please - consult the [SampleConfig][] page for the latest style guidelines. -* The `Description` function should say in one line what this processor does. -- Follow the recommended [CodeStyle][]. +* Each plugin requires a file called `sample.conf` containing the sample + configuration for the plugin in TOML format. + Please consult the [Sample Config][] page for the latest style guidelines. +* Each plugin `README.md` file should include the `sample.conf` file in a section + describing the configuration by specifying a `toml` section in the form `toml @sample.conf`. The specified file(s) are then injected automatically into the Readme. +* Follow the recommended [Code Style][]. -### Processor Plugin Example +## Processor Plugin Example ```go +//go:generate ../../../tools/readme_config_includer/generator package printer // printer.go import ( - "fmt" + _ "embed" + "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" ) -type Printer struct { - Log telegraf.Logger `toml:"-"` -} +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string -var sampleConfig = ` -` - -func (p *Printer) SampleConfig() string { - return sampleConfig +type Printer struct { + Log telegraf.Logger `toml:"-"` } -func (p *Printer) Description() string { - return "Print all metrics that pass through this filter." +func (*Printer) SampleConfig() string { + return sampleConfig } // Init is for setup, and validating config. func (p *Printer) Init() error { - return nil + return nil } func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { - for _, metric := range in { - fmt.Println(metric.String()) - } - return in + for _, metric := range in { + fmt.Println(metric.String()) + } + return in } func init() { - processors.Add("printer", func() telegraf.Processor { - return &Printer{} - }) + processors.Add("printer", func() telegraf.Processor { + return &Printer{} + }) } ``` -### Streaming Processors +## Streaming Processors Streaming processors are a new processor type available to you. They are particularly useful to implement processor types that use background processes @@ -84,38 +81,37 @@ Some differences from classic Processors: * Processors should call `processors.AddStreaming` in their `init` function to register themselves. See below for a quick example. -### Streaming Processor Example +## Streaming Processor Example ```go +//go:generate ../../../tools/readme_config_includer/generator package printer // printer.go import ( - "fmt" + _ "embed" + "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" ) -type Printer struct { - Log telegraf.Logger `toml:"-"` -} +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string -var sampleConfig = ` -` - -func (p *Printer) SampleConfig() string { - return sampleConfig +type Printer struct { + Log telegraf.Logger `toml:"-"` } -func (p *Printer) Description() string { - return "Print all metrics that pass through this filter." +func (*Printer) SampleConfig() string { + return sampleConfig } // Init is for setup, and validating config. func (p *Printer) Init() error { - return nil + return nil } // Start is called once when the plugin starts; it is only called once per @@ -135,13 +131,13 @@ func (p *Printer) Start(acc telegraf.Accumulator) error { // Metrics you don't want to pass downstream should have metric.Drop() called, // rather than simply omitting the acc.AddMetric() call func (p *Printer) Add(metric telegraf.Metric, acc telegraf.Accumulator) error { - // print! - fmt.Println(metric.String()) - // pass the metric downstream, or metric.Drop() it. - // Metric will be dropped if this function returns an error. - acc.AddMetric(metric) + // print! + fmt.Println(metric.String()) + // pass the metric downstream, or metric.Drop() it. + // Metric will be dropped if this function returns an error. + acc.AddMetric(metric) - return nil + return nil } // Stop gives you an opportunity to gracefully shut down the processor. 
@@ -154,13 +150,13 @@ func (p *Printer) Stop() error { } func init() { - processors.AddStreaming("printer", func() telegraf.StreamingProcessor { - return &Printer{} - }) + processors.AddStreaming("printer", func() telegraf.StreamingProcessor { + return &Printer{} + }) } ``` -[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig -[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[Sample Config]: https://github.com/influxdata/telegraf/blob/master/docs/developers/SAMPLE_CONFIG.md +[Code Style]: https://github.com/influxdata/telegraf/blob/master/docs/developers/CODE_STYLE.md [telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor [telegraf.StreamingProcessor]: https://godoc.org/github.com/influxdata/telegraf#StreamingProcessor diff --git a/docs/PROFILING.md b/docs/PROFILING.md index a0851c8f18b12..428158e690576 100644 --- a/docs/PROFILING.md +++ b/docs/PROFILING.md @@ -6,7 +6,7 @@ By default, the profiling is turned off. To enable profiling you need to specify address to config parameter `pprof-addr`, for example: -``` +```shell telegraf --config telegraf.conf --pprof-addr localhost:6060 ``` @@ -21,4 +21,3 @@ or to look at a 30-second CPU profile: `go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30` To view all available profiles, open `http://localhost:6060/debug/pprof/` in your browser. - diff --git a/docs/README.md b/docs/README.md index b7b55336c5a04..431118259ebce 100644 --- a/docs/README.md +++ b/docs/README.md @@ -10,6 +10,8 @@ - [Profiling][profiling] - [Windows Service][winsvc] - [FAQ][faq] +- Developer Builds + - [Nightlies](nightlies) [conf]: /docs/CONFIGURATION.md [metrics]: /docs/METRICS.md @@ -19,3 +21,4 @@ [profiling]: /docs/PROFILING.md [winsvc]: /docs/WINDOWS_SERVICE.md [faq]: /docs/FAQ.md +[nightlies]: /docs/NIGHTLIES.md diff --git a/docs/SQL_DRIVERS_INPUT.md b/docs/SQL_DRIVERS_INPUT.md new file mode 100644 index 0000000000000..f68103e0ff71a --- /dev/null +++ b/docs/SQL_DRIVERS_INPUT.md @@ -0,0 +1,51 @@ +# Available SQL drivers for the SQL input plugin + +This is a list of available drivers for the SQL input plugin. The data-source-name (DSN) is driver specific and +might change between versions. Please check the driver documentation for available options and the format. 
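+
+As a rough illustration of how a driver name and a DSN pair up in Go's
+[golang sql framework](https://golang.org/pkg/database/sql/), here is a
+minimal sketch (not part of the plugin itself; the `modernc.org/sqlite`
+import registers the pure-Go `sqlite` driver from the table below):
+
+```go
+package main
+
+import (
+	"database/sql"
+
+	_ "modernc.org/sqlite" // registers the "sqlite" driver
+)
+
+func main() {
+	// For SQLite the DSN is simply a filename.
+	db, err := sql.Open("sqlite", "telegraf.db")
+	if err != nil {
+		panic(err)
+	}
+	defer db.Close()
+}
+```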
+ +| database | driver | aliases | example DSN | comment | +| -------------------- | --------------------------------------------------------- | --------------- | -------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | +| CockroachDB | [cockroach](https://github.com/jackc/pgx) | postgres or pgx | see _postgres_ driver | uses PostgreSQL driver | +| MariaDB | [maria](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | +| Microsoft SQL Server | [sqlserver](https://github.com/denisenkom/go-mssqldb) | mssql | `username:password@host/instance?param1=value&param2=value` | uses newer _sqlserver_ driver | +| MySQL | [mysql](https://github.com/go-sql-driver/mysql) | | `[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]` | see [driver docs](https://github.com/go-sql-driver/mysql) for more information | +| PostgreSQL | [postgres](https://github.com/jackc/pgx) | pgx | `[user[:password]@][netloc][:port][,...][/dbname][?param1=value1&...]` | see [postgres docs](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information | +| SQLite | [sqlite](https://gitlab.com/cznic/sqlite) | | `filename` | see [driver docs](https://pkg.go.dev/modernc.org/sqlite) for more information | +| TiDB | [tidb](https://github.com/go-sql-driver/mysql) | mysql | see _mysql_ driver | uses MySQL driver | +| ClickHouse | [clickhouse](https://github.com/ClickHouse/clickhouse-go) | | `tcp://host:port[?param1=value&...&paramN=value]` | see [clickhouse-go docs](https://github.com/ClickHouse/clickhouse-go#dsn) for more information | + +## Comments + +### Driver aliases + +Some database drivers are supported through another driver (e.g. CockroachDB). For other databases we provide a more +obvious name (e.g. postgres) compared to the driver name. For all of those drivers you might use an _alias_ name +during configuration. + +### Example data-source-name DSN + +The given examples are just that, so please check the driver documentation for the exact format +and available options and parameters. Please note that the format of a DSN might also change +between driver versions. + +### Type conversions + +Telegraf relies on type conversion of the database driver and/or the golang sql framework. In case you find +any problems, please open an issue! + +## Help + +If nothing seems to work, you might find help in the Telegraf forum or in the chat. + +### The documentation is wrong + +Please open an issue or, even better, send a pull request! + +### I found a bug + +Please open an issue or, even better, send a pull request! + +### My database is not supported + +We currently cannot support CGO drivers in Telegraf! Please check if a **pure Go** driver for the [golang sql framework](https://golang.org/pkg/database/sql/) exists. +If you found such a driver, please let us know by opening an issue or, even better, by sending a pull request! diff --git a/docs/SUPPORTED_PLATFORMS.md b/docs/SUPPORTED_PLATFORMS.md new file mode 100644 index 0000000000000..9df5dfa2cbaf9 --- /dev/null +++ b/docs/SUPPORTED_PLATFORMS.md @@ -0,0 +1,199 @@ +# Supported Platforms + +Telegraf is a cross-platform application. This doc helps define which +operating systems, distributions, and releases Telegraf supports. + +Telegraf is supported on Linux, FreeBSD, Windows, and macOS. 
It is +written in Go, which supports these operating systems and +more. Telegraf may work on Go's other operating systems and users are +welcome to build their own binaries for them. Bug reports should be +submitted only for supported platforms. + +Golang.org has a [table][go-table] of valid OS and architecture +combinations and the Go wiki has more specific [minimum +requirements][go-reqs] for Go itself. + +[go-table]: https://golang.org/doc/install/source#environment +[go-reqs]: https://github.com/golang/go/wiki/MinimumRequirements#operating-systems + +## Linux + +Telegraf intent: *Support latest versions of major Linux +distributions* + +Telegraf supports RHEL, Fedora, Debian, and Ubuntu. InfluxData +provides package repositories for these distributions. Instructions +for using the package repositories can be found on +[docs.influxdata.com][repo-docs]. Bug reports should be submitted only +for supported distributions and releases. + +Telegraf's Debian or Ubuntu packages are likely to work on other +Debian-based distributions, although these are not +supported. Similarly, Telegraf's Fedora and RHEL packages are likely +to work on other Red Hat-based distributions, although again these are +not supported. + +Telegraf releases include .tar.gz packages for use with other +distributions, for building container images, or for installation +without a package manager. As part of Telegraf's release process we +publish [official images][docker-hub] to Docker Hub. + +DistroWatch lists [major distributions][dw-major] and tracks +[popularity][dw-pop] of distributions. Wikipedia lists [Linux +distributions][wp-distro] by the major distribution they're based on. + +[repo-docs]: https://docs.influxdata.com/telegraf/latest/introduction/installation/ +[docker-hub]: https://hub.docker.com/_/telegraf +[dw-major]: https://distrowatch.com/dwres.php?resource=major +[dw-pop]: https://distrowatch.com/dwres.php?resource=popularity +[wp-distro]: https://en.wikipedia.org/wiki/List_of_Linux_distributions + +### RHEL + +Red Hat makes a major release every four to five years and supports +each release in production for ten years. Extended support is +available for three or more years. + +Telegraf intent: *Support releases in RHEL production, but not in +extended support.* + +Red Hat publishes [release history][rh-history] and Wikipedia has a +[summary timeline][wp-rhel]. + +As of April 2021, 7 and 8 are production releases. + +[rh-history]: https://access.redhat.com/articles/3078 +[wp-rhel]: https://en.wikipedia.org/wiki/Red_Hat_Enterprise_Linux#Version_history_and_timeline + +### Ubuntu + +Ubuntu makes two releases a year. Every two years one of the releases +is an LTS (long-term support) release. Interim (non-LTS) releases are +in standard support for nine months. LTS releases are in maintenance +for five years, then in extended security maintenance for up to three +more years. + +Telegraf intent: *Support interim releases and LTS releases in Ubuntu +maintenance, but not in extended security maintenance.* + +Ubuntu publishes [release history][ub-history] and Wikipedia has a +[table][wp-ub] of all releases and support status. + +As of April 2021, Ubuntu 20.10 is in standard support. Ubuntu 18.04 +LTS and 20.04 LTS are in maintenance. + +[ub-history]: https://ubuntu.com/about/release-cycle +[wp-ub]: https://en.wikipedia.org/wiki/Ubuntu_version_history#Table_of_versions + +### Debian + +Debian generally makes major releases every two years and provides +security support for each release for three years. 
After security +support expires the release enters long-term support (LTS) until at +least five years after release. + +Telegraf intent: *Support releases under Debian security support* + +Debian publishes [releases and support status][deb-history] and +Wikipedia has a [summary table][wp-deb]. + +As of April 2021, Debian 10 is in security support. + +[deb-history]: https://www.debian.org/releases/ +[wp-deb]: https://en.wikipedia.org/wiki/Debian_version_history#Release_table + +### Fedora + +Fedora makes two releases a year and supports each release for a year. + +Telegraf intent: *Support releases supported by Fedora* + +Fedora publishes [release history][fed-history] and Wikipedia has a +[summary table][wp-fed]. + +[fed-history]: https://fedoraproject.org/wiki/Releases +[wp-fed]: https://en.wikipedia.org/wiki/Fedora_version_history#Version_history + +## FreeBSD + +FreeBSD makes major releases about every two years. Releases reach end +of life after five years. + +Telegraf intent: *Support releases under FreeBSD security support* + +FreeBSD publishes [release history][freebsd-history] and Wikipedia has +a [summary table][wp-freebsd]. + +As of April 2021, releases 11 and 12 are under security support. + +[freebsd-history]: https://www.freebsd.org/security/#sup +[wp-freebsd]: https://en.wikipedia.org/wiki/FreeBSD#Version_history + +## Windows + +Telegraf intent: *Support current versions of Windows and Windows +Server* + +Microsoft has two release channels: the semi-annual channel (SAC) and +the long-term servicing channel (LTSC). The semi-annual channel is for +mainstream feature releases. + +Microsoft publishes [lifecycle policy by release][ms-lifecycle] and a +[product lifecycle FAQ][ms-lifecycle-faq]. + +[ms-lifecycle]: https://docs.microsoft.com/en-us/lifecycle/products/?terms=windows +[ms-lifecycle-faq]: https://docs.microsoft.com/en-us/lifecycle/faq/windows + +### Windows 10 + +Windows 10 makes SAC releases twice a year and supports those releases +for [18 or 30 months][w10-timeline]. There are also LTSC releases, +which are supported for 10 years but are intended only for medical or +industrial devices that require a static feature set. + +Telegraf intent: *Support semi-annual channel releases supported by +Microsoft* + +Microsoft publishes Windows 10 [release information][w10-history], and +[servicing channels][w10-channels]. Wikipedia has a [summary +table][wp-w10] of support status. + +As of April 2021, versions 19H2, 20H1, and 20H2 are supported. + +[w10-timeline]: https://docs.microsoft.com/en-us/lifecycle/faq/windows#what-is-the-servicing-timeline-for-a-version-feature-update-of-windows-10 +[w10-history]: https://docs.microsoft.com/en-us/windows/release-health/release-information +[w10-channels]: https://docs.microsoft.com/en-us/windows/deployment/update/get-started-updates-channels-tools +[wp-w10]: https://en.wikipedia.org/wiki/Windows_10_version_history#Channels + +### Windows Server + +Windows Server makes SAC releases that are supported for 18 months +and LTSC releases that are supported for five years under mainstream +support and five more years under extended support. + +Telegraf intent: *Support current semi-annual channel releases +supported by Microsoft and long-term releases under mainstream +support* + +Microsoft publishes Windows Server [release information][ws-history] +and [servicing channels][ws-channels]. 
+ +As of April 2021, Server 2016 (version 1607) and Server 2019 (version +1809) are LTSC releases under mainstream support, and versions 1909, +2004, and 20H2 are supported SAC releases. + +[ws-history]: https://docs.microsoft.com/en-us/windows-server/get-started/windows-server-release-info +[ws-channels]: https://docs.microsoft.com/en-us/windows-server/get-started-19/servicing-channels-19 + +## macOS + +Apple makes one major macOS release a year and provides support for each +release for three years. + +Telegraf intent: *Support releases supported by Apple* + +Release history is available from [Wikipedia][wp-macos]. + +As of April 2021, 10.14, 10.15, and 11 are supported. + +[wp-macos]: https://en.wikipedia.org/wiki/MacOS#Release_history diff --git a/docs/TEMPLATE_PATTERN.md b/docs/TEMPLATE_PATTERN.md index 4244369d7dcab..74443a24bbd2a 100644 --- a/docs/TEMPLATE_PATTERN.md +++ b/docs/TEMPLATE_PATTERN.md @@ -4,7 +4,8 @@ Template patterns are a mini language that describes how a dot delimited string should be mapped to and from [metrics][]. A template has the form: -``` + +```text "host.mytag.mytag.measurement.measurement.field*" ``` @@ -22,11 +23,12 @@ correspond to the field name. Any part of the template that is not a keyword is treated as a tag key. This can also be specified multiple times. +**NOTE:** `measurement` must be specified in your template. **NOTE:** `field*` cannot be used in conjunction with `measurement*`. -### Examples +## Examples -#### Measurement & Tag Templates +### Measurement & Tag Templates The most basic template is to specify a single transformation to apply to all incoming metrics. So the following template: @@ -39,7 +41,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text us.west.cpu.load 100 => cpu.load,region=us.west value=100 ``` @@ -54,7 +56,7 @@ templates = [ ] ``` -#### Field Templates +### Field Templates The field keyword tells Telegraf to give the metric that field name. So the following template: @@ -68,7 +70,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text cpu.usage.idle.percent.eu-east 100 => cpu_usage,region=eu-east idle_percent=100 ``` @@ -85,12 +87,12 @@ templates = [ which would result in the following Graphite -> Telegraf transformation. -``` +```text cpu.usage.eu-east.idle.percentage 100 => cpu_usage,region=eu-east idle_percentage=100 ``` -#### Filter Templates +### Filter Templates Users can also filter the template(s) to use based on the name of the bucket, using glob matching, like so: @@ -104,7 +106,7 @@ templates = [ which would result in the following transformation: -``` +```text cpu.load.eu-east 100 => cpu_load,region=eu-east value=100 @@ -112,7 +114,7 @@ mem.cached.localhost 256 => mem_cached,host=localhost value=256 ``` -#### Adding Tags +### Adding Tags Additional tags can be added to a metric that don't exist on the received metric. You can add additional tags by specifying them after the pattern. @@ -127,7 +129,7 @@ templates = [ would result in the following Graphite -> Telegraf transformation. -``` +```text cpu.usage.idle.eu-east 100 => cpu_usage,region=eu-east,datacenter=1a idle=100 ``` diff --git a/docs/TLS.md b/docs/TLS.md index 3cd6a1025fc4b..133776b7faf73 100644 --- a/docs/TLS.md +++ b/docs/TLS.md @@ -5,9 +5,10 @@ possible, plugins will provide the standard settings described below. With the exception of the advanced configuration, available TLS settings will be documented in the sample configuration. 
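+
+As a minimal sketch of how a plugin typically consumes these settings
+(assuming the common `plugins/common/tls` helper package and its
+`ClientConfig` type; individual plugins may differ):
+
+```go
+package myplugin
+
+import (
+	"net/http"
+
+	tlsint "github.com/influxdata/telegraf/plugins/common/tls"
+)
+
+type MyPlugin struct {
+	// Embedding ClientConfig gives the plugin the standard tls_* options.
+	tlsint.ClientConfig
+}
+
+func (p *MyPlugin) createClient() (*http.Client, error) {
+	// TLSConfig() turns the tls_* settings into a *tls.Config.
+	tlsCfg, err := p.ClientConfig.TLSConfig()
+	if err != nil {
+		return nil, err
+	}
+	return &http.Client{
+		Transport: &http.Transport{TLSClientConfig: tlsCfg},
+	}, nil
+}
+```
+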
-### Client Configuration +## Client Configuration For client TLS support we have the following options: + ```toml ## Root certificates for verifying server certificates encoded in PEM format. # tls_ca = "/etc/telegraf/ca.pem" @@ -18,6 +19,8 @@ For client TLS support we have the following options: # tls_key = "/etc/telegraf/key.pem" ## Skip TLS verification. # insecure_skip_verify = false +## Send the specified TLS server name via SNI. +# tls_server_name = "foo.example.com" ``` ### Server Configuration @@ -29,6 +32,12 @@ The server TLS configuration provides support for TLS mutual authentication: ## enable mutually authenticated TLS connections. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +## Set one or more allowed DNS names to enable a whitelist ## to verify incoming client certificates. ## It will go through all available SANs in the certificate; ## if one of them matches, the request is accepted. # tls_allowed_dns_names = ["client.example.org"] + ## Add service certificate and key. # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -44,23 +53,23 @@ for the interest of brevity. ## Define list of allowed cipher suites. If not defined, the default ciphers ## supported by Go will be used. ## ex: tls_cipher_suites = [ -## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", -## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", -## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", -## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", -## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", -## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", -## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", -## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", -## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", -## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", -## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", -## "TLS_RSA_WITH_AES_128_GCM_SHA256", -## "TLS_RSA_WITH_AES_256_GCM_SHA384", -## "TLS_RSA_WITH_AES_128_CBC_SHA256", -## "TLS_RSA_WITH_AES_128_CBC_SHA", -## "TLS_RSA_WITH_AES_256_CBC_SHA" +## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", +## "TLS_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_RSA_WITH_AES_128_CBC_SHA", +## "TLS_RSA_WITH_AES_256_CBC_SHA" ## ] # tls_cipher_suites = [] @@ -72,6 +81,7 @@ for the interest of brevity. ``` Cipher suites for use with `tls_cipher_suites`: + - `TLS_RSA_WITH_RC4_128_SHA` - `TLS_RSA_WITH_3DES_EDE_CBC_SHA` - `TLS_RSA_WITH_AES_128_CBC_SHA` @@ -99,6 +109,7 @@ Cipher suites for use with `tls_cipher_suites`: - `TLS_CHACHA20_POLY1305_SHA256` TLS versions for use with `tls_min_version` or `tls_max_version`: + - `TLS10` - `TLS11` - `TLS12` diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index b0b6ee5adf358..39a672c633e55 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -9,29 +9,31 @@ the general steps to set it up. 3. Place the telegraf.exe and the telegraf.conf config file into `C:\Program Files\Telegraf` 4. 
To install the service into the Windows Service Manager, run the following in PowerShell as an administrator (if necessary, you can wrap any spaces in the file paths in double quotes ""): - ``` + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install ``` 5. Edit the configuration file to meet your needs 6. To check that it works, run: - ``` + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --config C:\"Program Files"\Telegraf\telegraf.conf --test ``` 7. To start collecting data, run: - ``` + ```shell > net start telegraf ``` ## Config Directory You can also specify a `--config-directory` for the service to use: + 1. Create a directory for config snippets: `C:\Program Files\Telegraf\telegraf.d` 2. Include the `--config-directory` option when registering the service: - ``` + + ```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install --config C:\"Program Files"\Telegraf\telegraf.conf --config-directory C:\"Program Files"\Telegraf\telegraf.d ``` @@ -54,17 +56,21 @@ filtering options. However, if you do need to run multiple telegraf instances on a single system, you can install the service with the `--service-name` and `--service-display-name` flags to give the services unique names: -``` +```shell > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 --service-display-name "Telegraf 1" > C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2" ``` +## Auto restart and restart delay + +By default, the service will not automatically restart on failure. Providing the `--service-auto-restart` flag during installation will always restart the service, with a default delay of 5 minutes. To modify this to, for example, 3 minutes, provide the additional flag `--service-restart-delay 3m`. The delay can be any valid `time.Duration` string. + ## Troubleshooting When Telegraf runs as a Windows service, Telegraf logs messages to the Windows event log before the configuration file with the logging settings is loaded. If the Telegraf service reports a failure on start, check the event log for an error reported by the `telegraf` service: Event Viewer->Windows Logs->Application -**Troubleshooting common error #1067** +### Common error #1067 When installing as a service on Windows, always double-check that you specify the full path of the config file; otherwise the Windows service will fail to start diff --git a/docs/developers/CODE_STYLE.md b/docs/developers/CODE_STYLE.md new file mode 100644 index 0000000000000..61485aa8c8f98 --- /dev/null +++ b/docs/developers/CODE_STYLE.md @@ -0,0 +1,8 @@ +# Code Style + +Code is required to be formatted using `gofmt`; this covers most code style +requirements. It is also highly recommended to use `goimports` to +automatically order imports. + +Please try to keep line length under 80 characters; the exact number of +characters is not strict, but shorter lines generally help with readability. diff --git a/docs/developers/DEPRECATION.md b/docs/developers/DEPRECATION.md new file mode 100644 index 0000000000000..62b5b986e87a2 --- /dev/null +++ b/docs/developers/DEPRECATION.md @@ -0,0 +1,88 @@ +# Deprecation + +Deprecation is the primary tool for making changes in Telegraf. A deprecation +indicates that the community should move away from using a feature, and +documents that the feature will be removed in the next major update (2.0). + +Key to deprecation is that the feature remains in Telegraf and the behavior is +not changed. + +We do not have a strict definition of a breaking change. 
All code changes +change behavior; the decision to deprecate or to make the change immediately is +based on the impact. + +## Deprecate plugins + +Add an entry to the plugins deprecation list (e.g. in `plugins/inputs/deprecations.go`). Include the deprecation version +and any replacement, e.g. + +```golang + "logparser": { + Since: "1.15.0", + Notice: "use 'inputs.tail' with 'grok' data format instead", + }, +``` + +The entry can contain an optional `RemovalIn` field specifying the planned version for removal of the plugin. + +Also add the deprecation warning to the plugin's README: + +```markdown +# Logparser Input Plugin + +### **Deprecated in 1.10**: Please use the [tail][] plugin along with the +`grok` [data format][]. + +[tail]: /plugins/inputs/tail/README.md +[data format]: /docs/DATA_FORMATS_INPUT.md +``` + +Telegraf will automatically check if a deprecated plugin is configured and print a warning: + +```text +2022-01-26T20:08:15Z W! DeprecationWarning: Plugin "inputs.logparser" deprecated since version 1.15.0 and will be removed in 2.0.0: use 'inputs.tail' with 'grok' data format instead +``` + +## Deprecate options + +Mark the option as deprecated in the sample config; include the deprecation +version and any replacement. + +```toml + ## Broker URL + ## deprecated in 1.7; use the brokers option + # url = "amqp://localhost:5672/influxdb" +``` + +In the plugin's configuration struct, add a `deprecated` tag to the option: + +```go +type AMQPConsumer struct { + URL string `toml:"url" deprecated:"1.7.0;use brokers"` +} +``` + +The `deprecated` tag has the format `<since version>[;removal version];<notice>`, where the `removal version` is optional. The specified deprecation info will automatically be displayed by Telegraf if the option is used in the config: + +```text +2022-01-26T20:08:15Z W! DeprecationWarning: Option "url" of plugin "inputs.amqp_consumer" deprecated since version 1.7.0 and will be removed in 2.0.0: use brokers +``` + +## Deprecate metrics + +In the README, document the metric as deprecated. If there is a replacement field, +tag, or measurement, mention it. + +```markdown +- system + - fields: + - uptime_format (string, deprecated in 1.10: use `uptime` field) +``` + +Add filtering to the sample config, but leave it commented out. + +```toml +[[inputs.system]] + ## Uncomment to remove deprecated metrics. + # fielddrop = ["uptime_format"] +``` diff --git a/docs/developers/LOGGING.md b/docs/developers/LOGGING.md new file mode 100644 index 0000000000000..e009968c4df36 --- /dev/null +++ b/docs/developers/LOGGING.md @@ -0,0 +1,79 @@ +# Logging + +## Plugin Logging + +You can access the Logger for a plugin by defining a field named `Log`. This +`Logger` is configured internally with the plugin name and alias so they do not +need to be specified for each log call. + +```go +type MyPlugin struct { + Log telegraf.Logger `toml:"-"` +} +``` + +You can then use this Logger in the plugin. Use the method corresponding to +the log level of the message. + +```go +p.Log.Errorf("Unable to write to file: %v", err) +``` + +## Agent Logging + +In other sections of the code it is required to add the log level and module +manually: + +```go +log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err) +``` + +## When to Log + +Log a message if an error occurs but the plugin can continue working. For +example, if the plugin handles several servers and only one of them has a fatal +error, it can be logged as an error. + +Use logging judiciously for debug purposes. 
Since Telegraf does not currently +support setting the log level on a per-module basis, it is especially important +not to overdo it with debug logging. + +If the plugin is listening on a socket, log a message with the address of the socket: + +```go +p.Log.Infof("Listening on %s://%s", protocol, l.Addr()) +``` + +## When not to Log + +Don't use logging to emit performance data or other metadata about the plugin; +instead use the `internal` plugin and the `selfstat` package. + +Don't log fatal errors in the plugin that require the plugin to return; instead +return them from the function and Telegraf will handle the logging. + +Don't log static configuration errors; check for them in the plugin's `Init()` +function and return an error there. + +Don't log a warning every time a plugin is called for situations that are +normal on some systems. + +## Log Level + +The log level is indicated by a single character at the start of the log +message. Adding this prefix is not required when using the Plugin Logger. + +- `D!` Debug +- `I!` Info +- `W!` Warning +- `E!` Error + +## Style + +Log messages should be capitalized and kept to a single line. + +If a message includes data received from another system or process, such as the text +of an error message, the text should be quoted with `%q`. + +Use the `%v` format verb for the Go error type instead of `%s` to ensure a nil error +is still printed. diff --git a/docs/developers/METRIC_FORMAT_CHANGES.md b/docs/developers/METRIC_FORMAT_CHANGES.md new file mode 100644 index 0000000000000..7d6477c253aca --- /dev/null +++ b/docs/developers/METRIC_FORMAT_CHANGES.md @@ -0,0 +1,49 @@ +# Metric Format Changes + +When making changes to an existing input plugin, care must be taken not to change the metric format in ways that will cause trouble for existing users. This document helps developers understand how to make metric format changes safely. + +## Changes can cause incompatibilities + +If the metric format changes, data collected in the new format can be incompatible with data in the old format. Database queries designed around the old format may not work with the new format. This can cause application failures. + +Some metric format changes don't cause incompatibilities. Also, some unsafe changes are necessary. How do you know which changes are safe, and what should you do if your change isn't safe? + +## Guidelines + +The main guideline is simply to keep compatibility in mind when making changes. Often developers are focused on making a change that fixes their particular problem and forget that many people use the existing code and will upgrade. When you're coding, keep existing users and applications in mind. + +### Renaming, removing, reusing + +Database queries refer to the metric and its tags and fields by name. Any Telegraf code change that changes those names has the potential to break an existing query. Similarly, removing tags or fields can break queries. + +Changing the meaning of an existing tag value or field value, or reusing an existing one in a new way, isn't safe. Although queries that use these tags/fields may not break, they will not work as they did before the change. + +Adding a field doesn't break existing queries. Queries that select all fields and/or tags (like "select * from") will return an extra series, but this is often useful. + +### Performance and storage + +Time series databases can store large amounts of data but many of them don't perform well on high cardinality data. 
### Make unsafe changes opt-in

If your change has the potential to seriously affect existing users, the change must be opt-in. To do this, add a plugin configuration setting that lets the user select the metric format. Make the setting's default value select the old metric format. When new users add the plugin they can choose the new format and get its benefits. When existing users upgrade, their config files won't have the new setting, so the default ensures nothing changes for them.

When adding a setting, avoid using a boolean and consider instead a string or int for future flexibility. A boolean can only handle two formats but a string can handle many. For example, compare `use_new_format = true` and `features = ["enable_foo_fields"]`; the latter is much easier to extend and still very descriptive.

If you want to encourage existing users to use the new format you can log a warning once on startup when the old format is selected. The warning should tell users in a gentle way that they can upgrade to a better metric format. If it doesn't make sense to maintain multiple metric formats forever, you can change the default on a major release or even remove the old format completely. See [Deprecation](DEPRECATION.md) for details.
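As an illustration of such an opt-in setting (hypothetical plugin and option names), the sample config might look like:

```toml
[[inputs.example]]
  ## Opt-in feature flags. The default (empty) keeps the old metric format,
  ## so existing configurations are unaffected on upgrade; a string list
  ## leaves room for future formats without adding another boolean.
  # features = ["enable_foo_fields"]
```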
### Utility

Changes should be useful to many or most users. A change that is only useful for a small number of users may not be accepted, even if it's off by default.

## Summary table

| | delete | rename | add |
| ------- | ------ | ------ | --- |
| metric | unsafe | unsafe | safe |
| tag | unsafe | unsafe | be careful with cardinality |
| field | unsafe | unsafe | ok as long as it's useful for existing users and is worth the added space |

## References

InfluxDB Documentation: "Schema and data layout"

diff --git a/docs/developers/PACKAGING.md b/docs/developers/PACKAGING.md
new file mode 100644
index 0000000000000..d3e5780cce57b
--- /dev/null
+++ b/docs/developers/PACKAGING.md
@@ -0,0 +1,66 @@

# Packaging

Building the packages for Telegraf is automated using [Make](https://en.wikipedia.org/wiki/Make_(software)). Just running `make` will build a Telegraf binary for the operating system and architecture you are using (if it is supported). If you need to build a different package you can run `make package`, which will build all the supported packages. You will most likely only want a subset; you can define a subset of packages to be built by overriding the `include_packages` variable, like so: `make package include_packages="amd64.deb"`. You can also build all packages for a specific architecture, like so: `make package include_packages="$(make amd64)"`.

The packaging steps require certain tools to be set up beforehand. These dependencies are listed in the ci.docker file, which you can find in the scripts directory. It is therefore recommended to use Docker to build the artifacts; see more details below.

## Go Version

Telegraf will be built using the latest version of Go whenever possible.

### Update CI image

Incrementing the version is handled by the core Telegraf team, because it requires access to an internal docker repository that hosts the docker CI images. When a new version is released, the following process is followed:

1. Within the `Makefile` and `.circleci/config.yml`, update the Go versions to the new version number
2. Run `make ci`; this requires quay.io internal permissions
3. The files `scripts/installgo_linux.sh`, `scripts/installgo_mac.sh`, and `scripts/installgo_windows.sh` need to be updated as well with the new Go version and SHA
4. Create a pull request with these new changes, and verify that CI passes and uses the new docker image

See the [previous PRs](https://github.com/influxdata/telegraf/search?q=chore+update+go&type=commits) as examples.

### Access to quay.io

A member of the team needs to invite you to the quay.io organization.
To push new images, the user needs to do the following:

1. Create a password if the user logged in using Google authentication
2. Download an encrypted username/password from the quay.io user page
3. Run `docker login quay.io` and enter the encrypted username and password
   from the previous step

## Package using Docker

This packaging method uses the CI images, and is very similar to how the
official packages are created on release. This is the recommended method for
building the rpm/deb as it is less system dependent.

Pull the CI images from quay; the version corresponds to the version of Go
that is used to build the binary:

```shell
docker pull quay.io/influxdb/telegraf-ci:1.9.7
```

Start a shell in the container:

```shell
docker run -ti quay.io/influxdb/telegraf-ci:1.9.7 /bin/bash
```

From within the container:

1. `go get -d github.com/influxdata/telegraf`
2. `cd /go/src/github.com/influxdata/telegraf`
3. `git checkout release-1.10`
   * Replace tag `release-1.10` with the version of Telegraf you would like to build
4. `git reset --hard 1.10.2`
5. `make deps`
6. `make package include_packages="amd64.deb"`
   * Change `include_packages` to change what package you want; run `make help` to see possible values

From the host system, copy the build artifacts out of the container:

```shell
docker cp romantic_ptolemy:/go/src/github.com/influxdata/telegraf/build/telegraf-1.10.2-1.x86_64.rpm .
```
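Note that `romantic_ptolemy` above is simply the name Docker auto-assigned to the container. One way to avoid looking it up with `docker ps` (a suggestion, not part of the documented flow) is to name the container when starting it:

```shell
# Give the build container a predictable name
docker run -ti --name telegraf-build quay.io/influxdb/telegraf-ci:1.9.7 /bin/bash

# ...run the build steps inside the container, then from the host:
docker cp telegraf-build:/go/src/github.com/influxdata/telegraf/build/telegraf-1.10.2-1.x86_64.rpm .
```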
diff --git a/docs/developers/PROFILING.md b/docs/developers/PROFILING.md
new file mode 100644
index 0000000000000..c1f02e4080d4c
--- /dev/null
+++ b/docs/developers/PROFILING.md
@@ -0,0 +1,66 @@

# Profiling

This article describes how to collect performance traces and memory profiles
from Telegraf. If you are submitting this for an issue, please include the
version.txt generated below.

Use the `--pprof-addr` option to enable the profiler; the easiest way to do
this may be to add this line to `/etc/default/telegraf`:

```shell
TELEGRAF_OPTS="--pprof-addr localhost:6060"
```

Restart Telegraf to activate the profile address.

## Trace Profile

Collect a trace while the performance issue is occurring. This example
collects a 10 second trace, so the first command runs for about 10 seconds:

```shell
curl 'http://localhost:6060/debug/pprof/trace?seconds=10' > trace.bin
telegraf --version > version.txt
go env GOOS GOARCH >> version.txt
```

The `trace.bin` and `version.txt` files can be sent in for analysis or, if desired, you can
analyze the trace with:

```shell
go tool trace trace.bin
```

## Memory Profile

Collect a heap memory profile:

```shell
curl 'http://localhost:6060/debug/pprof/heap' > mem.prof
telegraf --version > version.txt
go env GOOS GOARCH >> version.txt
```

Analyze:

```shell
$ go tool pprof mem.prof
(pprof) top5
```

## CPU Profile

Collect a 30s CPU profile:

```shell
curl 'http://localhost:6060/debug/pprof/profile' > cpu.prof
telegraf --version > version.txt
go env GOOS GOARCH >> version.txt
```

Analyze:

```shell
$ go tool pprof cpu.prof
(pprof) top5
```
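If the symptom is a hang or deadlock rather than high CPU usage, the same pprof endpoint can also produce a goroutine dump (standard Go `net/http/pprof` behavior, not specific to the examples above):

```shell
curl 'http://localhost:6060/debug/pprof/goroutine?debug=2' > goroutines.txt
```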
diff --git a/docs/developers/README.md b/docs/developers/README.md
new file mode 120000
index 0000000000000..f939e75f21a8b
--- /dev/null
+++ b/docs/developers/README.md
@@ -0,0 +1 @@
../../CONTRIBUTING.md
\ No newline at end of file

diff --git a/docs/developers/REVIEWS.md b/docs/developers/REVIEWS.md
new file mode 100644
index 0000000000000..ebf1379c11b49
--- /dev/null
+++ b/docs/developers/REVIEWS.md
@@ -0,0 +1,174 @@

# Reviews

Pull requests require two approvals before being merged. Expect several rounds of back and forth on
reviews; non-trivial changes are rarely accepted on the first pass. It might take some time
until you see a first review, so please be patient.

All pull requests should follow the style and best practices in the
[CONTRIBUTING.md](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md)
document.

## Process

The review process is roughly structured as follows:

1. Submit a pull request.
Please check that you signed the [CLA](https://www.influxdata.com/legal/cla/) (and the [Corporate CLA](https://www.influxdata.com/legal/ccla/) if you are contributing code as an employee of your company). Provide a short description of your submission and reference any issues that you potentially close. Make sure the CI tests are all green and there are no linter issues.
1. Get feedback from a first reviewer and a `ready for final review` tag.
Please work constructively with the reviewer to get your code into a mergeable state (see also [below](#reviewing-plugin-code)).
1. Get a final review by one of the InfluxData maintainers.
Please fix any issues raised.
1. Wait for the pull request to be merged.
It might take some time until your PR gets merged, depending on the release cycle and the type of
your pull request (bugfix, enhancement of existing code, new plugin, etc). Remember, it might be necessary to rebase your code before merge to resolve conflicts.

Please read the review comments carefully, fix the related parts of the code, and/or respond in case anything is unclear. Maintainers will add the `waiting for response` tag to PRs to make it clear we are waiting on the submitter for updates. __Once the tag is added, if there is no activity on the pull request or the contributor does not respond, our bot will automatically close the PR after two weeks!__ If you expect a longer period of inactivity or you want to abandon a pull request, please let us know.

In case you still want to continue with the PR, feel free to reopen it.

## Reviewing Plugin Code

- Avoid variables scoped to the package. Everything should be scoped to the plugin struct, since multiple instances of the same plugin are allowed and package-level variables will cause race conditions.
- SampleConfig must match the readme, but not include the plugin name.
- structs should include toml tags for fields that are expected to be editable from the config, e.g. `toml:"command"` (snake_case)
- plugins that want to log should declare the Telegraf logger, not use the log package, e.g.:

```go
    Log telegraf.Logger `toml:"-"`
```

(in tests, you can do `myPlugin.Log = testutil.Logger{}`)

- Initialization and config checking should be done in the `Init() error` function, not in the Connect, Gather, or Start functions (see the sketch after this list).
- `Init() error` should not contain connections to external services. If anything fails in Init, Telegraf will consider it a configuration error and refuse to start.
- plugins should avoid synchronization code if they are not starting goroutines. Plugin functions are never called in parallel.
- avoid goroutines when you don't need them and removing them would simplify the code
- errors should almost always be checked.
- avoid boolean fields when a string or enumerated type would be better for future extension. Lots of boolean fields also make the code difficult to maintain.
- use config.Duration instead of internal.Duration
- compose tls.ClientConfig as opposed to specifying all the TLS fields manually
- http.Client should be declared once in `Init() error` and reused (or better yet, on the package if there's no client-specific configuration). http.Client has built-in concurrency protection and reuses connections transparently when possible.
- avoid doing network calls in loops where possible, as this has a large performance cost. This isn't always possible to avoid.
- when processing batches of records with multiple network requests (some outputs that need to partition writes do this), return an error when you want the whole batch to be retried, and log the error when you want the batch to continue without the record
- consider using the StreamingProcessor interface instead of the (legacy) Processor interface
- avoid network calls in processors when at all possible. If it's necessary, it's possible, but complicated (see processor.reversedns).
- avoid dependencies when:
  - they require cgo
  - they pull in massive projects instead of small libraries
  - they could be replaced by a simple http call
  - they seem unnecessary, superfluous, or gratuitous
- consider adding build tags if plugins have OS-specific considerations
- use the right logger log levels so that Telegraf is normally quiet, e.g. `plugin.Log.Debugf()` only shows up when running Telegraf with `--debug`
- consistent field types: dynamically setting the type of a field should be strongly avoided, as it causes problems that are difficult to solve later, made worse by having to worry about backwards compatibility in future changes. For example, if a numeric value comes from a string field and it is not clear whether the field can sometimes be a float, the author should pick either a float or an int, and parse that field consistently every time. It is better to sometimes truncate a float, or to always store ints as floats, than to change the field type, which causes downstream problems with output databases.
- backwards compatibility: We work hard not to break existing configurations during new changes. Upgrading Telegraf should be a seamless transition. Possible tools to make this transition smooth are:
  - enumerable type fields that allow you to customize behavior (avoid boolean feature flags)
  - version fields that can be used to opt in to newer changed behavior without breaking the old (see inputs.mysql for an example)
  - a new version of the plugin if it has changed significantly (e.g. outputs.influxdb and outputs.influxdb_v2)
  - Logger and README deprecation warnings
  - changing the default value of a field can be okay, but will affect users who have not specified the field and should be approached cautiously.
  - The general rule here is "don't surprise me": users should not be caught off-guard by unexpected or breaking changes.
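Several of these points combine into a common skeleton. A minimal sketch (a hypothetical `example` plugin; the option names are illustrative): the struct carries toml tags and a `Log` field, uses `config.Duration`, and validates static configuration in `Init()` without connecting anywhere:

```go
package example

import (
	"errors"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/config"
)

type Example struct {
	Command string          `toml:"command"`
	Timeout config.Duration `toml:"timeout"`

	Log telegraf.Logger `toml:"-"`
}

// Init only checks static configuration; connections to external
// services belong in Connect/Start, not here.
func (e *Example) Init() error {
	if e.Command == "" {
		return errors.New("'command' must not be empty")
	}
	return nil
}
```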
## Linting

Each pull request will have the appropriate linters checking the files for any common mistakes. The github action Super Linter is used: [super-linter](https://github.com/github/super-linter). If it is failing, you can click on the action and read the logs to figure out the issue. You can also run the github action locally by following these instructions: [run-linter-locally.md](https://github.com/github/super-linter/blob/main/docs/run-linter-locally.md). You can find more information on each of the linters in the super linter readme.

## Testing

Sufficient unit tests must be created. New plugins must always contain
some unit tests. Bug fixes and enhancements should include new tests, but
they may be accepted without them if the reviewer thinks it would not be worth the effort.

[Table Driven Tests](https://github.com/golang/go/wiki/TableDrivenTests) are
encouraged to reduce boilerplate in unit tests.

The [stretchr/testify](https://github.com/stretchr/testify) library should be
used for assertions within the tests when possible, with preference towards
github.com/stretchr/testify/require.

Primarily use the require package to avoid cascading errors:

```go
assert.Equal(t, lhs, rhs) // avoid
require.Equal(t, lhs, rhs) // good
```
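A table-driven test using require might look like the following sketch (`parseValue` is a hypothetical function under test):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseValue(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected int64
	}{
		{name: "zero", input: "0", expected: 0},
		{name: "positive value", input: "42", expected: 42},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual, err := parseValue(tt.input)
			require.NoError(t, err)
			require.Equal(t, tt.expected, actual)
		})
	}
}
```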
## Configuration

The config file is the primary interface and should be carefully scrutinized.

Ensure the [SampleConfig](SAMPLE_CONFIG.md) and
[README](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md)
match the current standards.

READMEs should:

- use spaces, not tabs
- be indented consistently, matching other READMEs
- have two `#` for comments
- have one `#` for defaults, which should always match the default value of the plugin
- include all appropriate types as a list for enumerable field types
- include a useful example, avoiding "example", "test", etc.
- include tips for any common problems
- include example output from the plugin, if it is an input/processor/aggregator/parser/serializer

## Metric Schema

Telegraf metrics are heavily based on InfluxDB points, but have some
extensions to support other outputs and metadata.

New metrics must follow the recommended
[schema design](https://docs.influxdata.com/influxdb/latest/concepts/schema_and_data_layout/).
Each metric should be evaluated for _series cardinality_, proper use of tags vs
fields, and should use existing patterns for encoding metrics.

Metrics use `snake_case` naming style.

### Enumerations

Generally enumeration data should be encoded as a tag. In some cases it may
be desirable to also include the data as an integer field:

```shell
net_response,result=success result_code=0i
```

### Histograms

Use tags for each range with the `le` tag, and `+Inf` for the values out of
range. This format is inspired by the Prometheus project:

```shell
cpu,le=0.0 usage_idle_bucket=0i 1486998330000000000
cpu,le=50.0 usage_idle_bucket=2i 1486998330000000000
cpu,le=100.0 usage_idle_bucket=2i 1486998330000000000
cpu,le=+Inf usage_idle_bucket=2i 1486998330000000000
```

### Lists

Lists are tricky, but the general technique is to encode using a tag, creating
one series per item in the list.

### Counters

Counters retrieved from other projects are often in one of two styles:
monotonically increasing without reset, or reset on each interval. No attempt
should be made to switch between these two styles, but if given the option it
is preferred to use the non-resetting variant. This style is more resilient in
the face of downtime and does not contain a fixed time element.

## Go Best Practices

In general, code should follow the best practices described in [Code Review
Comments](https://github.com/golang/go/wiki/CodeReviewComments).

### Networking

All network operations should have appropriate timeouts. The ability to
cancel the operation, preferably using a context, is desirable but not always
worth the implementation complexity.

### Channels

Channels should be used judiciously, as they often complicate the design and
can easily be used improperly. Only use them when they are needed.

diff --git a/docs/developers/SAMPLE_CONFIG.md b/docs/developers/SAMPLE_CONFIG.md
new file mode 100644
index 0000000000000..75ff181b6679d
--- /dev/null
+++ b/docs/developers/SAMPLE_CONFIG.md
@@ -0,0 +1,81 @@

# Sample Configuration

The sample config file is generated from the results of the `SampleConfig()` function of each plugin.

You can generate a full sample
config:

```shell
telegraf config
```

You can also generate the config for a particular plugin using the `-usage`
option:

```shell
telegraf --usage influxdb
```

## Style

In the config file we use 2-space indentation. Since the config is
[TOML](https://github.com/toml-lang/toml) the indentation has no meaning.

Documentation is double commented, written in full sentences, and ends with a period.

```toml
  ## This text describes what the exchange_type option does.
  # exchange_type = "topic"
```

Try to give every parameter a default value whenever possible. If a
parameter does not have a default, or must frequently be changed, then leave it
uncommented.

```toml
  ## Brokers are the AMQP brokers to connect to.
  brokers = ["amqp://localhost:5672"]
```

Options where the default value is usually sufficient are normally commented
out. The commented out value is the default.

```toml
  ## What an exchange type is.
  # exchange_type = "topic"
```

If you want to show an example of a possible setting filled out that is
different from the default, show both:

```toml
  ## Static routing key. Used when no routing_tag is set or as a fallback
  ## when the tag specified in routing tag is not found.
  ## example: routing_key = "telegraf"
  # routing_key = ""
```

Unless parameters are closely related, add a space between them. Closely
related parameters usually share a single description.

```toml
  ## If true, queue will be declared as an exclusive queue.
  # queue_exclusive = false

  ## If true, queue will be declared as an auto deleted queue.
  # queue_auto_delete = false

  ## Authentication credentials for the PLAIN auth_method.
  # username = ""
  # password = ""
```

Parameters should usually be describable in a few sentences. If it takes
much more than this, try to provide a shorter explanation and provide a more
complex description in the Configuration section of the plugin's
[README](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/example).

Boolean parameters should be used judiciously. Try to think of
something better, since they don't scale well: settings are often not truly
boolean, and boolean options frequently end up with implicit dependencies (this
option does something only if these other options are also set).
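For example (illustrative option names, not from a real plugin), an enumerable string option usually ages better than a boolean flag:

```toml
  ## Scales poorly: a third behavior would require yet another flag.
  # use_new_format = true

  ## Extensible: new behaviors become new values instead of new options.
  # format = "new"
```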
diff --git a/docs/maintainers/CHANGELOG.md b/docs/maintainers/CHANGELOG.md
new file mode 100644
index 0000000000000..8935ad70ca74e
--- /dev/null
+++ b/docs/maintainers/CHANGELOG.md
@@ -0,0 +1,43 @@

# Changelog

The changelog contains the list of changes by version, in addition to release
notes. The file is updated immediately after adding a change that impacts
users. Changes that don't affect the functionality of Telegraf, such as
refactoring code, are not included.

The changelog entries are added by a maintainer after merging a pull request.
We experimented with requiring the pull request contributor to add the entry,
which had a nice side effect of reducing the number of changelog-only commits
in the log history; however, this had several drawbacks:

- The entry often needed to be reworded.
- Entries frequently caused merge conflicts.
- It required the contributor to know which version a change would be accepted into.
- Merge conflicts made it more time consuming to backport changes.

Changes are added only under the first version a change is released in. For
example, a change backported to 1.7.2 would only appear under 1.7.2 and not in
1.8.0. This may become confusing if we begin supporting more than one
previous version, but works well for now.

## Updating

If the change resulted in deprecation, mention the deprecation in the Release
Notes section of the version. In general, all changes that require or
recommend the user to perform an action when upgrading should be mentioned in
the release notes.

If a new plugin has been added, include it in a section based on the type of
the plugin.

All user facing changes, including those already mentioned in the release
notes or new plugin sections, should be added to either the Features or
Bugfixes section.

Features should generally link to the pull request, since this describes the
actual implementation. Bug fixes should link to the issue instead of the pull
request, since this describes the problem; if a bug has been fixed but does not
have an issue then it is okay to link to the pull request.

It is usually okay to just use the shortlog commit message, but if needed
it can differ or be further clarified in the changelog.
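Putting those rules together, a bugfix entry might look like the following sketch (the issue number, plugin, and text are placeholders, not a real change):

```markdown
### Bugfixes

- [#0000](https://github.com/influxdata/telegraf/issues/0000): Fix crash when parsing an empty response in inputs.example.
```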
diff --git a/docs/maintainers/LABELS.md b/docs/maintainers/LABELS.md
new file mode 100644
index 0000000000000..5b8b8bb216796
--- /dev/null
+++ b/docs/maintainers/LABELS.md
@@ -0,0 +1,69 @@

# Labels

This page describes the meaning of the various
[labels](https://github.com/influxdata/telegraf/labels) we use on the Github
issue tracker.

## Categories

New issues are automatically labeled `feature request`, `bug`, or `support`.
If you are unsure what problem the author is describing, you can use the `need more info` label,
and if the issue duplicates an existing one you can add the `closed/duplicate` label and close the
new issue.

New pull requests are usually labeled one of `enhancement`, `bugfix`, or `new
plugin`.

## Additional Labels

Apply any of the `area/*` labels that match. If an area doesn't exist, new
ones can be added, but **it is not a goal to have an area for all issues.**

If the issue only applies to one platform, you can use a `platform/*` label.
These are only applied to issues specific to a single platform other than Linux.

For bugs you may want to add `panic`, `regression`, or `upstream` to provide
further detail.

Summary of Labels:

| Label | Description | Purpose |
| --- | ----------- | --- |
| `area/*` | Each corresponds to a plugin or group of plugins and identifies the affected plugins | categorization |
| `breaking change` | Improvement to Telegraf that requires breaking changes to the plugin or agent; for minor/major releases | triage |
| `bug` | New issue for an existing component of Telegraf | triage |
| `cloud` | Issues or requests around cloud environments | categorization |
| `dependencies` | Pull requests that update a dependency file | triage |
| `discussion` | Issues open for discussion | community/categorization |
| `documentation` | Issues related to Telegraf documentation and configuration descriptions | categorization |
| `error handling` | Issues related to error handling | categorization |
| `external plugin` | Plugins that would be ideal as external plugins, which would expedite being able to use them with Telegraf | categorization |
| `good first issue` | A smaller issue suited for getting started with Telegraf, Golang, and contributing to OSS | community |
| `help wanted` | Request for community participation, code, contribution | community |
| `need more info` | Issue triaged but outstanding questions remain | community |
| `performance` | Issues or PRs that address performance issues | categorization |
| `platform/*` | Issues that only apply to one platform | categorization |
| `plugin/*` | Requests for new plugins and issues/PRs that are related to plugins | categorization |
| `ready for final review` | Pull request has been reviewed and/or tested by multiple users and is ready for a final review | triage |
| `rfc` | Request for comment - larger topics of discussion that are looking for feedback | community |
| `support` | Telegraf questions; may be directed to the community site or slack | triage |
| `upstream` | Bugs or issues that rely on dependency fixes and we cannot fix independently | triage |
| `waiting for response` | Waiting for a response from the contributor | community/triage |
| `wip` | PR still Work In Progress, not ready for detailed review | triage |

Labels starting with `pm` are not applied by maintainers.
## Closing Issues

We close issues for the following reasons:

| Label | Reason |
| --- | ----------- |
| `closed/as-designed` | The reported behavior is working as designed |
| `closed/duplicate` | This issue or pull request already exists |
| `closed/external-candidate` | The feature request is best implemented by an external plugin |
| `closed/external-issue` | The feature request is best implemented by an external plugin |
| `closed/needs more info` | Did not receive the information we needed within 3 months of the last activity on the issue |
| `closed/not-reproducible` | Given the information we have, we can't reproduce the issue |
| `closed/out-of-scope` | The feature request is out of scope for Telegraf - highly unlikely to be worked on |
| `closed/question` | This issue is a support question, directed to the community site or slack |

diff --git a/docs/maintainers/PULL_REQUESTS.md b/docs/maintainers/PULL_REQUESTS.md
new file mode 100644
index 0000000000000..5a627d4cc29ec
--- /dev/null
+++ b/docs/maintainers/PULL_REQUESTS.md
@@ -0,0 +1,72 @@

# Pull Requests

## Before Review

Ensure that the CLA is signed (the `telegraf-tiger` bot performs this check). The
only exemption would be non-copyrightable changes such as fixing a typo.

Check that all tests are passing. Due to intermittent errors in the CI tests,
it may be necessary to check the cause of test failures, restart failed
tests, and/or create new issues to fix intermittent test failures.

Ensure that the PR is opened against the master branch, as all changes are merged
to master initially. It is possible to change the branch a pull request is
opened against, but it often results in many conflicts; change it before
reviewing and then, if needed, ask the contributor to rebase.

Ensure there are no merge conflicts. If there are conflicts, ask the
contributor to merge or rebase.

## Review

[Review the pull request](https://github.com/influxdata/telegraf/blob/master/docs/developers/REVIEWS.md).

## Merge

Determine what release the change will be applied to. New features should
be added only to master, and will be released in the next minor version (1.x).
Bug fixes can be backported to the current release branch to go out with the
next patch release (1.7.x), unless the bug is too risky to backport or there is
an easy workaround. Set the correct milestone on the pull request and any
associated issue.

All pull requests are merged using the "Squash and Merge" strategy on Github.
This method is used because many pull requests do not have a clean change
history; this method allows us to normalize commit messages as well as
simplifying backporting.

### Rewriting the commit message

After selecting "Squash and Merge" you may need to rewrite the commit message.
Usually the body of the commit message should be cleared as well, unless it
is well written and applies to the entire changeset.

- Use imperative present tense for the first line of the message:
  - Use "Add tests for" (instead of "I added tests for" or "Adding tests for")
- The default merge commit message includes the PR number at the end of the
commit message; keep this in the final message.
- If applicable, mention the plugin in the message.
**Example Enhancement:**

> Add user tag to procstat input (#4386)

**Example Bug Fix:**

> Fix output format of printer processor (#4417)

## After Merge

[Update the Changelog](https://github.com/influxdata/telegraf/blob/master/docs/maintainers/CHANGELOG.md).

If required, backport the patch and the changelog update to the current
release branch. Usually this can be done by cherry picking the commits:

```shell
git cherry-pick -x aaaaaaaa bbbbbbbb
```

Backporting changes to the changelog often pulls in unwanted changes. After
cherry picking commits, double check that only the expected lines are
modified, and if needed clean up the changelog and amend the change. Push the
new master and release branch to Github.

diff --git a/docs/maintainers/RELEASES.md b/docs/maintainers/RELEASES.md
new file mode 100644
index 0000000000000..7eb2522cfd0e8
--- /dev/null
+++ b/docs/maintainers/RELEASES.md
@@ -0,0 +1,107 @@

# Releases

## Release Branch

On master, update `etc/telegraf.conf` and commit:

```sh
./telegraf config > etc/telegraf.conf
```

Create the new release branch:

```sh
git checkout -b release-1.15
```

Push the changes:

```sh
git push origin release-1.15 master
```

Update the next version strings on master:

```sh
git checkout master
echo 1.16.0 > build_version.txt
```

## Release Candidate

Release candidates are created only for new minor releases (ex: 1.15.0). Tags
are created, but some of the other tasks, such as adding a changelog entry, are
skipped. Packages are added to the github release page and posted to the
community forum, but are not posted to package repos or docker hub.

```sh
git checkout release-1.15
git commit --allow-empty -m "Telegraf 1.15.0-rc1"
git tag -s v1.15.0-rc1 -m "Telegraf 1.15.0-rc1"
git push origin release-1.15 v1.15.0-rc1
```

## Release

On master, set the release date in the changelog and cherry-pick the change
back to the release branch:

```sh
git checkout master
vi CHANGELOG.md
git commit -m "Set 1.8.0 release date"
git checkout release-1.8
git cherry-pick -x <commit>
```

Double check that the changelog was applied as desired, or fix it up and
amend the change before pushing.

Tag the release:

```sh
git checkout release-1.8
# This just improves the `git show 1.8.0` output
git commit --allow-empty -m "Telegraf 1.8.0"
git tag -s v1.8.0 -m "Telegraf 1.8.0"
```

Check that the version was set correctly; the tag can always be altered if a
mistake is made, but only before you push it to Github:

```sh
make
./telegraf --version
Telegraf v1.8.0 (git: release-1.8 aaaaaaaa)
```

When you push a branch with a tag to Github, CircleCI will be triggered to
build the packages.

```sh
git push origin master release-1.8 v1.8.0
```

Set the release notes on Github.

Update webpage download links.

Update apt and yum repositories hosted at repos.influxdata.com.

Update the package signatures on S3; these are used primarily by the docker images.

Update the docker image [influxdata/influxdata-docker](https://github.com/influxdata/influxdata-docker):

```sh
cd influxdata-docker
git checkout master
git pull
git checkout -b telegraf-1.8.0
# Update the version in the following files:
#   telegraf/1.8/Dockerfile
#   telegraf/1.8/alpine/Dockerfile
git commit -am "telegraf 1.8.0"
```

Official company post to RSS/community.
+ +Update documentation on docs.influxdata.com diff --git a/etc/telegraf.conf b/etc/telegraf.conf index f67ddfbf19dcd..d40f3cc784139 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -21,80 +21,96 @@ ## Environment variables can be used as tags, and throughout the config file # user = "$USER" - -# Configuration for telegraf agent -[agent] - ## Default data collection interval for all inputs - interval = "10s" - ## Rounds collection interval to 'interval' - ## ie, if interval="10s" then always collect on :00, :10, :20, etc. - round_interval = true - - ## Telegraf will send metrics to outputs in batches of at most - ## metric_batch_size metrics. - ## This controls the size of writes that Telegraf sends to output plugins. - metric_batch_size = 1000 - - ## Maximum number of unwritten metrics per output. Increasing this value - ## allows for longer periods of output downtime without dropping metrics at the - ## cost of higher maximum memory usage. - metric_buffer_limit = 10000 - - ## Collection jitter is used to jitter the collection by a random amount. - ## Each plugin will sleep for a random time within jitter before collecting. - ## This can be used to avoid many plugins querying things like sysfs at the - ## same time, which can have a measurable effect on the system. - collection_jitter = "0s" - - ## Default flushing interval for all outputs. Maximum flush_interval will be - ## flush_interval + flush_jitter - flush_interval = "10s" - ## Jitter the flush interval by a random amount. This is primarily to avoid - ## large write spikes for users running a large number of telegraf instances. - ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s - flush_jitter = "0s" - - ## By default or when set to "0s", precision will be set to the same - ## timestamp order as the collection interval, with the maximum being 1s. - ## ie, when interval = "10s", precision will be "1s" - ## when interval = "250ms", precision will be "1ms" - ## Precision will NOT be used for service inputs. It is up to each individual - ## service input to set the timestamp at the appropriate precision. - ## Valid time units are "ns", "us" (or "µs"), "ms", "s". - precision = "" - - ## Log at debug level. - # debug = false - ## Log only error level messages. - # quiet = false - - ## Log target controls the destination for logs and can be one of "file", - ## "stderr" or, on Windows, "eventlog". When set to "file", the output file - ## is determined by the "logfile" setting. - # logtarget = "file" - - ## Name of the file to be logged to when using the "file" logtarget. If set to - ## the empty string then logs are written to stderr. - # logfile = "" - - ## The logfile will be rotated after the time interval specified. When set - ## to 0 no time based rotation is performed. Logs are rotated only when - ## written to, if there is no log activity rotation may be delayed. - # logfile_rotation_interval = "0d" - - ## The logfile will be rotated when it becomes larger than the specified - ## size. When set to 0 no size based rotation is performed. - # logfile_rotation_max_size = "0MB" - - ## Maximum number of rotated archives to keep, any older logs are deleted. - ## If set to -1, no archives are removed. - # logfile_rotation_max_archives = 5 - - ## Override default hostname, if empty use os.Hostname() - hostname = "" - ## If set to true, do no set the "host" tag in the telegraf agent. 
- omit_hostname = false - +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Collection offset is used to shift the collection by the given amount. + ## This can be be used to avoid many plugins querying constraint devices + ## at the same time by manually scheduling them in time. + # collection_offset = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## Collected metrics are rounded to the precision specified. Precision is + ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s). + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + ## + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s: + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + precision = "0s" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0h" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 + + ## Pick a timezone to use when logging or type 'local' for local time. 
+ ## Example: America/Chicago + # log_with_timezone = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false + + ## Method of translating SNMP objects. Can be "netsnmp" which + ## translates by calling external programs snmptranslate and snmptable, + ## or "gosmi" which translates using the built-in gosmi library. + # snmp_translator = "netsnmp" ############################################################################### # OUTPUT PLUGINS # @@ -171,7 +187,7 @@ ## HTTP Content-Encoding for write request body, can be set to "gzip" to ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -306,12 +322,39 @@ # ## Context Tag Sources add Application Insights context tags to a tag value. # ## # ## For list of allowed context tag keys see: -# ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go +# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go # # [outputs.application_insights.context_tag_sources] # # "ai.cloud.role" = "kubernetes_container_name" # # "ai.cloud.roleInstance" = "kubernetes_pod_name" +# # Sends metrics to Azure Data Explorer +# [[outputs.azure_data_explorer]] +# ## The URI property of the Azure Data Explorer resource on Azure +# ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net +# endpoint_url = "" +# +# ## The Azure Data Explorer database that the metrics will be ingested into. +# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion. +# ## ex: "exampledatabase" +# database = "" +# +# ## Timeout for Azure Data Explorer operations +# # timeout = "20s" +# +# ## Type of metrics grouping used when pushing to Azure Data Explorer. +# ## Default is "TablePerMetric" for one table per different metric. +# ## For more information, please check the plugin README. +# # metrics_grouping_type = "TablePerMetric" +# +# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable"). +# # table_name = "" +# +# ## Creates tables and relevant mapping if set to true(default). +# ## Skips table and mapping creation if set to false, this is useful for running Telegraf with the lowest possible permissions i.e. table ingestor role. +# # create_tables = true + + # # Send aggregate metrics to Azure Monitor # [[outputs.azure_monitor]] # ## Timeout for HTTP writes. @@ -337,12 +380,30 @@ # ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" # # resource_id = "" # -# ## Optionally, if in Azure US Government, China or other sovereign -# ## cloud environment, set appropriate REST endpoint for receiving +# ## Optionally, if in Azure US Government, China, or other sovereign +# ## cloud environment, set the appropriate REST endpoint for receiving # ## metrics. 
(Note: region may be unused in this context) # # endpoint_url = "https://monitoring.core.usgovcloudapi.net" +# # Configuration for Google Cloud BigQuery to send entries +# [[outputs.bigquery]] +# ## Credentials File +# credentials_file = "/path/to/service/account/key.json" +# +# ## Google Cloud Platform Project +# project = "my-gcp-project" +# +# ## The namespace for the metric descriptor +# dataset = "telegraf" +# +# ## Timeout for BigQuery operations. +# # timeout = "5s" +# +# ## Character to replace hyphens on Metric name +# # replace_hyphen_to = "_" + + # # Publish Telegraf metrics to a Google Cloud PubSub topic # [[outputs.cloud_pubsub]] # ## Required. Name of Google Cloud Platform (GCP) Project that owns @@ -401,16 +462,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # #access_key = "" # #secret_key = "" # #token = "" # #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" # #profile = "" # #shared_credential_file = "" # @@ -435,10 +499,70 @@ # # high_resolution_metrics = false +# # Configuration for AWS CloudWatchLogs output. +# [[outputs.cloudwatch_logs]] +# ## The region is the Amazon region that you wish to connect to. +# ## Examples include but are not limited to: +# ## - us-west-1 +# ## - us-west-2 +# ## - us-east-1 +# ## - ap-southeast-1 +# ## - ap-southeast-2 +# ## ... +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Cloud watch log group. Must be created in AWS cloudwatch logs upfront! +# ## For example, you can specify the name of the k8s cluster here to group logs from all cluster in oine place +# log_group = "my-group-name" +# +# ## Log stream in log group +# ## Either log group name or reference to metric attribute, from which it can be parsed: +# ## tag: or field:. If log stream is not exist, it will be created. +# ## Since AWS is not automatically delete logs streams with expired logs entries (i.e. 
empty log stream) +# ## you need to put in place appropriate house-keeping (https://forums.aws.amazon.com/thread.jspa?threadID=178855) +# log_stream = "tag:location" +# +# ## Source of log data - metric name +# ## specify the name of the metric, from which the log data should be retrieved. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_metric_name = "docker_log" +# log_data_metric_name = "docker_log" +# +# ## Specify from which metric attribute the log data should be retrieved: +# ## tag: or field:. +# ## I.e., if you are using docker_log plugin to stream logs from container, then +# ## specify log_data_source = "field:message" +# log_data_source = "field:message" + + # # Configuration for CrateDB to send metrics to. # [[outputs.cratedb]] -# # A github.com/jackc/pgx connection string. -# # See https://godoc.org/github.com/jackc/pgx#ParseDSN +# # A github.com/jackc/pgx/v4 connection string. +# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig # url = "postgres://user:password@localhost/schema?sslmode=disable" # # Timeout for all CrateDB queries. # timeout = "5s" @@ -446,6 +570,8 @@ # table = "metrics" # # If true, and the metrics table does not exist, create it automatically. # table_create = true +# # The character(s) to replace any '.' in an object key with +# key_separator = "_" # # Configuration for DataDog API to send metrics to. @@ -458,6 +584,14 @@ # # ## Write URL override; useful for debugging. # # url = "https://app.datadoghq.com/api/v1/series" +# +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# ## Override the default (none) compression used to send data. +# ## Supports: "zlib", "none" +# # compression = "none" # # Send metrics to nowhere at all @@ -482,39 +616,51 @@ # ## The API token needs data ingest scope permission. When using OneAgent, no API token is required. # api_token = "" # -# ## Optional prefix for metric names (e.g.: "telegraf.") -# prefix = "telegraf." +# ## Optional prefix for metric names (e.g.: "telegraf") +# prefix = "telegraf" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" -# # ## Optional flag for ignoring tls certificate check # # insecure_skip_verify = false # -# # ## Connection timeout, defaults to "5s" if not set. # timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" # # Configuration for Elasticsearch to send metrics to. # [[outputs.elasticsearch]] # ## The full HTTP endpoint URL for your Elasticsearch instance # ## Multiple urls can be specified as part of the same cluster, -# ## this means that only ONE of the urls will be written to each interval. +# ## this means that only ONE of the urls will be written to each interval # urls = [ "http://node1.es.example.com:9200" ] # required. # ## Elasticsearch client timeout, defaults to "5s" if not set. # timeout = "5s" # ## Set to true to ask Elasticsearch a list of all cluster nodes, -# ## thus it is not necessary to list all nodes in the urls config option. 
+# ## thus it is not necessary to list all nodes in the urls config option # enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false # ## Set the interval to check if the Elasticsearch nodes are available # ## Setting to "0s" will disable the health check (not recommended in production) # health_check_interval = "10s" +# ## Set the timeout for periodic health checks. +# # health_check_timeout = "1s" +# ## HTTP basic authentication details. # ## HTTP basic authentication details # # username = "telegraf" # # password = "mypassword" +# ## HTTP bearer token authentication details +# # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" # # ## Index Config # ## The target index for metrics (Elasticsearch will create if it not exists). @@ -551,6 +697,47 @@ # ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string # ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's # force_document_id = false +# +# ## Specifies the handling of NaN and Inf values. +# ## This option can have the following values: +# ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered +# ## drop -- drop fields containing NaNs or infs +# ## replace -- replace with the value in "float_replacement_value" (default: 0.0) +# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number +# # float_handling = "none" +# # float_replacement_value = 0.0 +# +# ## Pipeline Config +# ## To use a ingest pipeline, set this to the name of the pipeline you want to use. +# # use_pipeline = "my_pipeline" +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the pipeline name. If the tag does not exist, +# ## the default pipeline will be used as the pipeline. If no default pipeline is set, +# ## no pipeline is used for the metric. +# # use_pipeline = "{{es_pipeline}}" +# # default_pipeline = "my_pipeline" + + +# # Configuration for Event Hubs output plugin +# [[outputs.event_hubs]] +# ## The full connection string to the Event Hub (required) +# ## The shared access key must have "Send" permissions on the target Event Hub. +# connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName" +# +# ## Client timeout (defaults to 30s) +# # timeout = "30s" +# +# ## Partition key +# ## Metric tag or field name to use for the event partition key. The value of +# ## this tag or field is set as the key for events if it exists. If both, tag +# ## and field, exist the tag is preferred. +# # partition_key = "" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "json" # # Send metrics to command as input over stdin @@ -558,6 +745,12 @@ # ## Command to ingest metrics via stdin. # command = ["tee", "-a", "/dev/null"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Timeout for command to complete. 
# # timeout = "5s" # @@ -570,9 +763,16 @@ # # Run executable as long-running output plugin # [[outputs.execd]] -# ## Program to run as daemon +# ## One program to run as daemon. +# ## NOTE: process and each argument should each be their own string # command = ["my-telegraf-output", "--some-flag", "value"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Delay before the process is restarted after an unexpected termination # restart_delay = "10s" # @@ -590,12 +790,12 @@ # # ## Use batch serialization format instead of line based delimiting. The # ## batch format allows for the production of non line based output formats and -# ## may more efficiently encode metric groups. +# ## may more efficiently encode and write metrics. # # use_batch_format = false # # ## The file will be rotated after the time interval specified. When set # ## to 0 no time based rotation is performed. -# # rotation_interval = "0d" +# # rotation_interval = "0h" # # ## The logfile will be rotated when it becomes larger than the specified # ## size. When set to 0 no size based rotation is performed. @@ -615,7 +815,7 @@ # # Configuration for Graphite server to send metrics to # [[outputs.graphite]] # ## TCP endpoint for your graphite instance. -# ## If multiple endpoints are configured, output will be load balanced. +# ## If multiple endpoints are configured, the output will be load balanced. # ## Only one of the endpoints will be written to with each iteration. # servers = ["localhost:2003"] # ## Prefix metrics name @@ -627,6 +827,11 @@ # ## Enable Graphite tags support # # graphite_tag_support = false # +# ## Define how metric names and tags are sanitized; options are "strict", or "compatible" +# ## strict - Default method, and backwards compatible with previous versionf of Telegraf +# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec +# # graphite_tag_sanitize_mode = "strict" +# # ## Character for separating metric name and field for Graphite tags # # graphite_separator = "." # @@ -653,13 +858,56 @@ # # Send telegraf metrics to graylog # [[outputs.graylog]] -# ## UDP endpoint for your graylog instance. -# servers = ["127.0.0.1:12201"] +# ## Endpoints for your graylog instances. +# servers = ["udp://127.0.0.1:12201"] +# +# ## Connection timeout. +# # timeout = "5s" # # ## The field to use as the GELF short_message, if unset the static string # ## "telegraf" will be used. # ## example: short_message_field = "message" # # short_message_field = "" +# +# ## According to GELF payload specification, additional fields names must be prefixed +# ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. +# ## Set to true for backward compatibility. +# # name_field_no_prefix = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to GroundWork Monitor +# [[outputs.groundwork]] +# ## URL of your groundwork instance. +# url = "https://groundwork.example.com" +# +# ## Agent uuid for GroundWork API Server. +# agent_id = "" +# +# ## Username and password to access GroundWork API. 
+# username = ""
+# password = ""
+#
+# ## Default application type to use in GroundWork client
+# # default_app_type = "TELEGRAF"
+#
+# ## Default display name for the host with services (metrics).
+# # default_host = "telegraf"
+#
+# ## Default service state.
+# # default_service_state = "SERVICE_OK"
+#
+# ## The name of the tag that contains the hostname.
+# # resource_tag = "host"
+#
+# ## The name of the tag that contains the host group name.
+# # group_tag = "group"


# # Configurable HTTP health check resource based on metrics
@@ -723,6 +971,9 @@
# # token_url = "https://indentityprovider/oauth2/v1/token"
# # scopes = ["urn:opc:idm:__myscopes__"]
#
+# ## Google API Auth
+# # google_application_credentials = "/etc/telegraf/example_secret.json"
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -730,12 +981,27 @@
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}'
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie
+# # cookie_auth_renewal = "5m"
+#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
#
+# ## Use batch serialization format (default) instead of line based format.
+# ## Batch format is more efficient and should be used unless line based
+# ## format is really needed.
+# # use_batch_format = true
+#
# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "identity"
@@ -744,9 +1010,47 @@
# # [outputs.http.headers]
# # # Should be set manually to "application/json" for json data_format
# # Content-Type = "text/plain; charset=utf-8"
+#
+# ## MaxIdleConns controls the maximum number of idle (keep-alive)
+# ## connections across all hosts. Zero means no limit.
+# # max_idle_conn = 0
+#
+# ## MaxIdleConnsPerHost, if non-zero, controls the maximum idle
+# ## (keep-alive) connections to keep per-host. If zero,
+# ## DefaultMaxIdleConnsPerHost is used (2).
+# # max_idle_conn_per_host = 2
+#
+# ## Idle (keep-alive) connection timeout.
+# ## Maximum amount of time before idle connection is closed.
+# ## Zero means no limit.
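+# ## Illustrative sketch (values are assumptions, not upstream defaults):
+# ## closing idle connections after 90 seconds would look like
+# ## idle_conn_timeout = "90s", while 0 keeps them open indefinitely.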
+# # idle_conn_timeout = 0
+#
+# ## Amazon Region
+# #region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried
+# # non_retryable_statuscodes = [409, 413]


-# # Configuration for sending metrics to InfluxDB
+# # Configuration for sending metrics to InfluxDB 2.0
# [[outputs.influxdb_v2]]
# ## The URLs of the InfluxDB cluster nodes.
# ##
@@ -758,7 +1062,7 @@
# ## Token for authentication.
# token = ""
#
-# ## Organization is the name of the organization you wish to write to; must exist.
+# ## Organization is the name of the organization you wish to write to.
# organization = ""
#
# ## Destination bucket to write into.
@@ -802,7 +1106,7 @@
# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
# ## Project API Token (required)
-# api_token = "API Token" # required
+# api_token = "API Token" # required
# ## Prefix the metrics with a given name
# prefix = ""
# ## Stats output template (Graphite formatting)
@@ -810,7 +1114,7 @@
# template = "host.tags.measurement.field"
# ## Timeout in seconds to connect
# timeout = "2s"
-# ## Display Communication to Instrumental
+# ## If true, print communication with Instrumental
# debug = false


@@ -832,7 +1136,7 @@
# # client_id = "Telegraf"
#
# ## Set the minimal supported Kafka version. Setting this enables the use of new
-# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## Kafka features and APIs. Of particular interest, lz4 compression
# ## requires at least version 0.10.0.0.
# ## ex: version = "1.1.0"
# # version = ""
@@ -883,13 +1187,18 @@
# ## routing_key = "telegraf"
# # routing_key = ""
#
-# ## CompressionCodec represents the various compression codecs recognized by
+# ## Compression codec represents the various compression codecs recognized by
# ## Kafka in messages.
-# ## 0 : No compression
-# ## 1 : Gzip compression
-# ## 2 : Snappy compression
-# ## 3 : LZ4 compression
-# # compression_codec = 0
+# ## 0 : None
+# ## 1 : Gzip
+# ## 2 : Snappy
+# ## 3 : LZ4
+# ## 4 : ZSTD
+# # compression_codec = 0
+#
+# ## Idempotent Writes
+# ## If enabled, exactly one copy of each message is written.
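+# ## Illustrative note: setting idempotent_writes = true is only useful when
+# ## the brokers support idempotent producers (an assumption about your cluster).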
+# # idempotent_writes = false
#
# ## RequiredAcks is used in Produce Requests to tell the broker how many
# ## replica acknowledgements it must see before responding
@@ -916,20 +1225,45 @@
# # max_message_bytes = 1000000
#
# ## Optional TLS Config
-# # enable_tls = true
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
+# ## Optional SOCKS5 proxy to use when connecting to brokers
+# # socks5_enabled = true
+# # socks5_address = "127.0.0.1:1080"
+# # socks5_username = "alice"
+# # socks5_password = "pass123"
+#
# ## Optional SASL Config
# # sasl_username = "kafka"
# # sasl_password = "secret"
#
+# ## Optional SASL:
+# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
+# ## (defaults to PLAIN)
+# # sasl_mechanism = ""
+#
+# ## used if sasl_mechanism is GSSAPI (experimental)
+# # sasl_gssapi_service_name = ""
+# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
+# # sasl_gssapi_auth_type = "KRB5_USER_AUTH"
+# # sasl_gssapi_kerberos_config_path = "/"
+# # sasl_gssapi_realm = "realm"
+# # sasl_gssapi_key_tab_path = ""
+# # sasl_gssapi_disable_pafxfast = false
+#
+# ## used if sasl_mechanism is OAUTHBEARER (experimental)
+# # sasl_access_token = ""
+#
# ## SASL protocol version. When connecting to Azure EventHub set to 0.
# # sasl_version = 1
#
+# # Disable Kafka metadata full fetch
+# # metadata_full = false
+#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -944,16 +1278,19 @@
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
-# ## 1) Assumed credentials via STS if role_arn is specified
-# ## 2) explicit credentials from 'access_key' and 'secret_key'
-# ## 3) shared profile from 'profile'
-# ## 4) environment variables
-# ## 5) shared credentials file
-# ## 6) EC2 Instance Profile
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
# #profile = ""
# #shared_credential_file = ""
#
@@ -965,12 +1302,7 @@
#
# ## Kinesis StreamName must exist prior to starting telegraf.
# streamname = "StreamName"
-# ## DEPRECATED: PartitionKey as used for sharding data.
-# partitionkey = "PartitionKey"
-# ## DEPRECATED: If set the partitionKey will be a random UUID on every put.
-# ## This allows for scaling across multiple shards in a stream.
-# ## This will cause issues with ordering.
-# use_random_partitionkey = false
+#
# ## The partition key can be calculated using one of several methods:
# ##
# ## Use a static value for all writes:
@@ -1020,29 +1352,125 @@
# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
# ## This template is used in librato's source (not metric's name)
# template = "host"
+
+
+# # A plugin that can send metrics over HTTPS to Logz.io
+# [[outputs.logzio]]
+# ## Connection timeout, defaults to "5s" if not set.
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Logz.io account token
+# token = "your logz.io token" # required
+#
+# ## Use your listener URL for your Logz.io account region.
+# # url = "https://listener.logz.io:8071"
+
+
+# # A plugin that can transmit logs to Loki
+# [[outputs.loki]]
+# ## The domain of Loki
+# domain = "https://loki.domain.tld"
+#
+# ## Endpoint to write api
+# # endpoint = "/loki/api/v1/push"
+#
+# ## Connection timeout, defaults to "5s" if not set.
+# # timeout = "5s"
+#
+# ## Basic auth credentials
+# # username = "loki"
+# # password = "pass"
+#
+# ## Additional HTTP headers
+# # http_headers = {"X-Scope-OrgID" = "1"}
+#
+# ## If the request must be gzip encoded
+# # gzip_request = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+
+
+# # A plugin that can transmit metrics to MongoDB
+# [[outputs.mongodb]]
+# # connection string examples for mongodb
+# dsn = "mongodb://localhost:27017"
+# # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1"
+#
+# # overrides serverSelectionTimeoutMS in dsn if set
+# # timeout = "30s"
+#
+# # default authentication, optional
+# # authentication = "NONE"
+#
+# # for SCRAM-SHA-256 authentication
+# # authentication = "SCRAM"
+# # username = "root"
+# # password = "***"
#
+# # for x509 certificate authentication
+# # authentication = "X509"
+# # tls_ca = "ca.pem"
+# # tls_key = "client.pem"
+# # # tls_key_pwd = "changeme" # required for encrypted tls_key
+# # insecure_skip_verify = false
+#
+# # database to store measurements and time series collections
+# # database = "telegraf"
+#
+# # granularity can be seconds, minutes, or hours.
+# # configuring this value will be based on your input collection frequency.
+# # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection
+# # granularity = "seconds"
+#
+# # optionally set a TTL to automatically expire documents from the measurement collections.
+# # ttl = "360h"


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
-# servers = ["localhost:1883"] # required.
+# ## MQTT Brokers
+# ## The list of brokers should only include the hostname or IP address and the
+# ## port to the broker. This should follow the format '{host}:{port}'. For
+# ## example, "localhost:1883" or "127.0.0.1:8883".
+# servers = ["localhost:1883"]
#
-# ## MQTT outputs send metrics to this topic format
-# ## "<topic_prefix>/<hostname>/<pluginname>"
-# ## ex: prefix/web01.example.com/mem
+# ## MQTT Topic for Producer Messages
+# ## MQTT outputs send metrics to this topic format:
+# ## <topic_prefix>/<hostname>/<pluginname> (e.g. prefix/web01.example.com/mem)
# topic_prefix = "telegraf"
#
# ## QoS policy for messages
+# ## The mqtt QoS policy for sending messages.
+# ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm
# ## 0 = at most once
# ## 1 = at least once
# ## 2 = exactly once
# # qos = 2
#
+# ## Keep Alive
+# ## Defines the maximum length of time that the broker and client may not
+# ## communicate. Defaults to 0 which turns the feature off.
+# ##
+# ## For mosquitto v2.0.12 and later there is a bug
+# ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires
+# ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30.
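+# ## Illustrative sketch: against an affected mosquitto broker one might use
+# ## a non-zero value such as keep_alive = "30s" (the value is an assumption).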
+# # keep_alive = 0
+#
# ## username and password to connect MQTT server.
# # username = "telegraf"
# # password = "metricsmetricsmetricsmetrics"
#
-# ## client ID, if not set a random ID is generated
+# ## client ID
+# ## The unique client id to connect MQTT server. If this parameter is not set
+# ## then a random ID is generated.
# # client_id = ""
#
# ## Timeout for write operations. default: 5s
@@ -1052,10 +1480,11 @@
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
+#
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
-# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
+# ## When true, metrics will be sent in one MQTT message per flush. Otherwise,
# ## metrics are written one metric per MQTT message.
# # batch = false
#
@@ -1063,7 +1492,6 @@
# ## actually reads it
# # retain = false
#
-# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
@@ -1075,6 +1503,9 @@
# ## URLs of NATS servers
# servers = ["nats://localhost:4222"]
#
+# ## Optional client name
+# # name = ""
+#
# ## Optional credentials
# # username = ""
# # password = ""
@@ -1104,14 +1535,26 @@

# # Send metrics to New Relic metrics endpoint
# [[outputs.newrelic]]
-# ## New Relic Insights API key
-# insights_key = "insights api key"
+# ## The 'insights_key' parameter requires a NR license key.
+# ## New Relic recommends you create one
+# ## with a convenient name such as TELEGRAF_INSERT_KEY.
+# ## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key
+# # insights_key = "New Relic License Key Here"
#
# ## Prefix to add to metric name for easy identification.
+# ## This is very useful if your metric names are ambiguous.
# # metric_prefix = ""
#
# ## Timeout for writes to the New Relic API.
# # timeout = "15s"
+#
+# ## HTTP Proxy override. If unset use values from the standard
+# ## proxy environment variables to determine proxy, if any.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## Metric URL override to enable geographic location endpoints.
+# # If not set, the default metric API endpoint is used.
+# # metric_url = "https://metric-api.newrelic.com/metric/v1"


# # Send telegraf measurements to NSQD
@@ -1128,6 +1571,41 @@
# data_format = "influx"


+# # Send OpenTelemetry metrics over gRPC
+# [[outputs.opentelemetry]]
+# ## Override the default (localhost:4317) OpenTelemetry gRPC service
+# ## address:port
+# # service_address = "localhost:4317"
+#
+# ## Override the default (5s) request timeout
+# # timeout = "5s"
+#
+# ## Optional TLS Config.
+# ##
+# ## Root certificates for verifying server certificates encoded in PEM format.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# ## The public and private keypairs for the client encoded in PEM format.
+# ## May contain intermediate certificates.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS, but skip TLS chain and host verification.
+# # insecure_skip_verify = false
+# ## Send the specified TLS server name via SNI.
+# # tls_server_name = "foo.example.com"
+#
+# ## Override the default (gzip) compression used to send data.
+# ## Supports: "gzip", "none"
+# # compression = "gzip"
+#
+# ## Additional OpenTelemetry resource attributes
+# # [outputs.opentelemetry.attributes]
+# # "service.name" = "demo"
+#
+# ## Additional gRPC request metadata
+# # [outputs.opentelemetry.headers]
+# # key1 = "value1"


# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
# ## prefix for metrics keys
@@ -1158,15 +1636,12 @@

# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
-# ## Address to listen on
+# ## Address to listen on.
# listen = ":9273"
#
-# ## Metric version controls the mapping from Telegraf metrics into
-# ## Prometheus format. When using the prometheus input, use the same value in
-# ## both plugins to ensure metrics are round-tripped without modification.
-# ##
-# ## example: metric_version = 1; deprecated in 1.13
-# ## metric_version = 2; recommended version
+# ## Metric version controls the mapping from Telegraf metrics into the Prometheus format.
+# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details.
+# ## Valid options: 1, 2
# # metric_version = 1
#
# ## Use HTTP Basic Authentication.
@@ -1203,7 +1678,7 @@
# # export_timestamp = false


-# # Configuration for the Riemann server to send metrics to
+# # Configuration for Riemann to send metrics to
# [[outputs.riemann]]
# ## The full TCP or UDP URL of the Riemann server
# url = "tcp://localhost:5555"
@@ -1237,6 +1712,7 @@
# # timeout = "5s"


+# ## DEPRECATED: The 'riemann_legacy' plugin is deprecated in version 1.3.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878).
# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann_legacy]]
# ## URL of server
@@ -1247,6 +1723,116 @@
# separator = " "


+# # Send aggregate metrics to Sensu Monitor
+# [[outputs.sensu]]
+# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to
+# ## (protocol, host, and port only). The output plugin will automatically
+# ## append the corresponding backend API path
+# ## (/api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name).
+# ##
+# ## Backend Events API reference:
+# ## https://docs.sensu.io/sensu-go/latest/api/events/
+# ##
+# ## AGENT API URL is the Sensu Agent API root URL to send metrics to
+# ## (protocol, host, and port only). The output plugin will automatically
+# ## append the corresponding agent API path (/events).
+# ##
+# ## Agent API Events API reference:
+# ## https://docs.sensu.io/sensu-go/latest/api/events/
+# ##
+# ## NOTE: if backend_api_url and agent_api_url and api_key are set, the output
+# ## plugin will use backend_api_url. If backend_api_url and agent_api_url are
+# ## not provided, the output plugin will default to use an agent_api_url of
+# ## http://127.0.0.1:3031
+# ##
+# # backend_api_url = "http://127.0.0.1:8080"
+# # agent_api_url = "http://127.0.0.1:3031"
+#
+# ## API KEY is the Sensu Backend API token
+# ## Generate a new API token via:
+# ##
+# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities
+# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf
+# ## $ sensuctl user create telegraf --group telegraf --password REDACTED
+# ## $ sensuctl api-key grant telegraf
+# ##
+# ## For more information on Sensu RBAC profiles & API tokens, please visit:
+# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/
+# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/
+# ##
+# # api_key = "${SENSU_API_KEY}"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Timeout for HTTP message
+# # timeout = "5s"
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Sensu Event details
+# ##
+# ## Below are the event details to be sent to Sensu. The main portions of the
+# ## event are the check, entity, and metrics specifications. For more information
+# ## on Sensu events and its components, please visit:
+# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events
+# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks
+# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities
+# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics
+# ##
+# ## Check specification
+# ## The check name is the name to give the Sensu check associated with the event
+# ## created. This maps to check.metadata.name in the event.
+# [outputs.sensu.check]
+# name = "telegraf"
+#
+# ## Entity specification
+# ## Configure the entity name and namespace, if necessary. This will be part of
+# ## the entity.metadata in the event.
+# ##
+# ## NOTE: if the output plugin is configured to send events to a
+# ## backend_api_url and entity_name is not set, the value returned by
+# ## os.Hostname() will be used; if the output plugin is configured to send
+# ## events to an agent_api_url, entity_name and entity_namespace are not used.
+# # [outputs.sensu.entity]
+# # name = "server-01"
+# # namespace = "default"
+#
+# ## Metrics specification
+# ## Configure the tags for the metrics that are sent as part of the Sensu event
+# # [outputs.sensu.tags]
+# # source = "telegraf"
+#
+# ## Configure the handler(s) for processing the provided metrics
+# # [outputs.sensu.metrics]
+# # handlers = ["influxdb","elasticsearch"]


+# # Send metrics and events to SignalFx
+# [[outputs.signalfx]]
+# ## SignalFx Org Access Token
+# access_token = "my-secret-token"
+#
+# ## The SignalFx realm that your organization resides in
+# signalfx_realm = "us9" # Required if ingest_url is not set
+#
+# ## You can optionally provide a custom ingest url instead of the
+# ## signalfx_realm option above if you are using a gateway or proxy
+# ## instance. This option takes precedence over signalfx_realm.
+# ingest_url = "https://my-custom-ingest/"
+#
+# ## Event typed metrics are omitted by default.
+# ## If you require an event typed metric you must specify the
+# ## metric name in the following list.
+# included_event_names = ["plugin.metric_name"]


# # Generic socket writer capable of handling multiple socket types.
# [[outputs.socket_writer]]
# ## URL to connect to
@@ -1274,18 +1860,73 @@
# ## Defaults to the OS configuration.
# # keep_alive_period = "5m"
#
-# ## Content encoding for packet-based connections (i.e. UDP, unixgram).
-# ## Can be set to "gzip" or to "identity" to apply no encoding.
+# ## Content encoding for message payloads, can be set to "gzip" or to
+# ## "identity" to apply no encoding.
# ##
# # content_encoding = "identity"
#
# ## Data format to generate.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
-# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"


+# # Save metrics to an SQL Database
+# [[outputs.sql]]
+# ## Database driver
+# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres),
+# ## sqlite (SQLite3), snowflake (snowflake.com), clickhouse (ClickHouse)
+# # driver = ""
+#
+# ## Data source name
+# ## The format of the data source name is different for each database driver.
+# ## See the plugin readme for details.
+# # data_source_name = ""
+#
+# ## Timestamp column name
+# # timestamp_column = "timestamp"
+#
+# ## Table creation template
+# ## Available template variables:
+# ## {TABLE} - table name as a quoted identifier
+# ## {TABLELITERAL} - table name as a quoted string literal
+# ## {COLUMNS} - column definitions (list of quoted identifiers and types)
+# # table_template = "CREATE TABLE {TABLE}({COLUMNS})"
+#
+# ## Table existence check template
+# ## Available template variables:
+# ## {TABLE} - table name as a quoted identifier
+# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1"
+#
+# ## Initialization SQL
+# # init_sql = ""
+#
+# ## Metric type to SQL type conversion
+# ## The values on the left are the data types Telegraf has and the values on
+# ## the right are the data types Telegraf will use when sending to a database.
+# ##
+# ## The database values used must be data types the destination database
+# ## understands. It is up to the user to ensure that the selected data type is
+# ## available in the database they are using. Refer to your database
+# ## documentation for what data types are available and supported.
+# #[outputs.sql.convert]
+# # integer = "INT"
+# # real = "DOUBLE"
+# # text = "TEXT"
+# # timestamp = "TIMESTAMP"
+# # defaultvalue = "TEXT"
+# # unsigned = "UNSIGNED"
+# # bool = "BOOL"
+#
+# ## This setting controls the behavior of the unsigned value. By default the
+# ## setting will take the integer value and append the unsigned value to it. The other
+# ## option is "literal", which will use the actual value the user provides to
+# ## the unsigned option. This is useful for a database like ClickHouse where
+# ## the unsigned value should use a value like "uint64".
+# # conversion_style = "unsigned_suffix"


# # Configuration for Google Cloud Stackdriver to send metrics to
# [[outputs.stackdriver]]
# ## GCP Project
@@ -1304,7 +1945,7 @@
# # location = "eu-north0"


-# # A plugin that can transmit metrics to Sumo Logic HTTP Source
+# # A plugin that can send metrics to Sumo Logic HTTP metric collector.
# [[outputs.sumologic]]
# ## Unique URL generated for your HTTP Metrics Source.
# ## This is the address to send metrics to.
@@ -1435,6 +2076,125 @@
# # default_appname = "Telegraf"


+# # Configuration for sending metrics to Amazon Timestream.
+# [[outputs.timestream]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Timestream database where the metrics will be inserted.
+# ## The database must exist prior to starting Telegraf.
+# database_name = "yourDatabaseNameHere"
+#
+# ## Specifies if the plugin should describe the Timestream database upon starting
+# ## to validate that it has the necessary permissions, connection, etc., as a safety check.
+# ## If the describe operation fails, the plugin will not start
+# ## and therefore the Telegraf agent will not start.
+# describe_database_on_start = false
+#
+# ## The mapping mode specifies how Telegraf records are represented in Timestream.
+# ## Valid values are: single-table, multi-table.
+# ## For example, consider the following data in line protocol format:
+# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
+# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200
+# ## where weather and airquality are the measurement names, location and season are tags,
+# ## and temperature, humidity, no2, pm25 are fields.
+# ## In multi-table mode:
+# ## - first line will be ingested to table named weather
+# ## - second line will be ingested to table named airquality
+# ## - the tags will be represented as dimensions
+# ## - first table (weather) will have two records:
+# ## one with measurement name equals to temperature,
+# ## another with measurement name equals to humidity
+# ## - second table (airquality) will have two records:
+# ## one with measurement name equals to no2,
+# ## another with measurement name equals to pm25
+# ## - the Timestream tables from the example will look like this:
+# ## TABLE "weather":
+# ## time | location | season | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
+# ## TABLE "airquality":
+# ## time | location | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-west | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | pm25 | 16
+# ## In single-table mode:
+# ## - the data will be ingested to a single table, whose name will be valueOf(single_table_name)
+# ## - the measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
+# ## - location and season will be represented as dimensions
+# ## - temperature, humidity, no2, pm25 will be represented as measurement names
+# ## - the Timestream table from the example will look like this:
+# ## Assuming:
+# ## - single_table_name = "my_readings"
+# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+# ## TABLE "my_readings":
+# ## time | location | season | namespace | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
+# ## In most cases, using multi-table mapping mode is recommended.
+# ## However, you can consider using single-table in situations when you have thousands of measurement names.
+# mapping_mode = "multi-table"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Specifies the Timestream table where the metrics will be uploaded.
+# # single_table_name = "yourTableNameHere"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Describes what will be the Timestream dimension name for the Telegraf
+# ## measurement name.
+# # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+#
+# ## Specifies if the plugin should create the table, if the table does not exist.
+# ## The plugin writes the data without prior checking if the table exists.
+# ## When the table does not exist, the error returned from Timestream will cause
+# ## the plugin to create the table, if this parameter is set to true.
+# create_table_if_not_exists = true
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table magnetic store retention period in days.
+# ## Check Timestream documentation for more details.
+# create_table_magnetic_store_retention_period_in_days = 365
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table memory store retention period in hours.
+# ## Check Timestream documentation for more details.
+# create_table_memory_store_retention_period_in_hours = 24
+#
+# ## Only valid and optional if create_table_if_not_exists = true
+# ## Specifies the Timestream table tags.
+# ## Check Timestream documentation for more details.
+# # create_table_tags = { "foo" = "bar", "environment" = "dev"}
+#
+# ## Specify the maximum number of parallel goroutines to ingest/write data.
+# ## If not specified, defaults to 1 goroutine.
+# max_write_go_routines = 25


# # Write metrics to Warp 10
# [[outputs.warp10]]
# # Prefix to add to the measurement.
@@ -1465,13 +2225,16 @@

# # Configuration for Wavefront server to send metrics to
# [[outputs.wavefront]]
-# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy
-# ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878
+# ## Url for Wavefront Direct Ingestion. For Wavefront Proxy Ingestion, see
+# ## the 'host' and 'port' options below.
# url = "https://metrics.wavefront.com"
#
# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
# #token = "DUMMY_TOKEN"
#
+# ## Maximum number of metrics to send per batch for Direct Ingestion. Ignored
+# ## unless 'url' is set. This value should be higher than `metric_batch_size`.
+# ## Default is 10,000. Values higher than 40,000 are not recommended.
+# # http_maximum_batch_size = 10000
+#
# ## DNS name of the wavefront proxy server. Do not use if url is specified
# #host = "wavefront.example.com"
#
@@ -1509,28 +2272,129 @@
# ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility.
# #truncate_tags = false
#
-# ## Define a mapping, namespaced by metric prefix, from string values to numeric values
-# ## deprecated in 1.9; use the enum processor plugin
-# #[[outputs.wavefront.string_to_number.elasticsearch]]
-# # green = 1.0
-# # yellow = 0.5
-# # red = 0.0
-
-
-###############################################################################
-#                            PROCESSOR PLUGINS                                #
-###############################################################################
+# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics
+# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending
+# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in
+# ## Telegraf.
+# #immediate_flush = true


-# # Clone metrics and apply modifications.
-# [[processors.clone]]
-# ## All modifications on inputs and aggregators can be overridden:
-# # name_override = "new_name"
-# # name_prefix = "new_name_prefix"
-# # name_suffix = "new_name_suffix"
+# # A plugin that can transmit metrics over WebSocket.
+# [[outputs.websocket]]
+# ## URL is the address to send metrics to. Make sure ws or wss scheme is used.
+# url = "ws://127.0.0.1:3000/telegraf"
#
-# ## Tags to be added (all values must be strings)
-# # [processors.clone.tags]
+# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero).
+# # connect_timeout = "30s"
+# # write_timeout = "30s"
+# # read_timeout = "30s"
+#
+# ## Optionally turn on using text data frames (binary by default).
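+# ## Illustrative note: use_text_frames = true would be needed for servers
+# ## that reject binary frames (whether yours does is an assumption).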
+# # use_text_frames = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional SOCKS5 proxy to use
+# # socks5_enabled = true
+# # socks5_address = "127.0.0.1:1080"
+# # socks5_username = "alice"
+# # socks5_password = "pass123"
+#
+# ## Optional HTTP proxy to use
+# # use_system_proxy = false
+# # http_proxy_url = "http://localhost:8888"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## Additional HTTP Upgrade headers
+# # [outputs.websocket.headers]
+# # Authorization = "Bearer <TOKEN>"


+# # Send aggregated metrics to Yandex.Cloud Monitoring
+# [[outputs.yandex_cloud_monitoring]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed
+# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
+#
+# ## All user metrics should be sent with "custom" service specified. Normally should not be changed
+# # service = "custom"
+
+
+###############################################################################
+#                            PROCESSOR PLUGINS                                #
+###############################################################################
+
+
+# # Attach AWS EC2 metadata to metrics
+# [[processors.aws_ec2]]
+# ## Instance identity document tags to attach to metrics.
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
+# ##
+# ## Available tags:
+# ## * accountId
+# ## * architecture
+# ## * availabilityZone
+# ## * billingProducts
+# ## * imageId
+# ## * instanceId
+# ## * instanceType
+# ## * kernelId
+# ## * pendingTime
+# ## * privateIp
+# ## * ramdiskId
+# ## * region
+# ## * version
+# imds_tags = []
+#
+# ## EC2 instance tags retrieved with DescribeTags action.
+# ## In case tag is empty upon retrieval it's omitted when tagging metrics.
+# ## Note that in order for this to work, role attached to EC2 instance or AWS
+# ## credentials available from the environment must have a policy attached, that
+# ## allows ec2:DescribeTags.
+# ##
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
+# ec2_tags = []
+#
+# ## Timeout for http requests made against the aws ec2 metadata endpoint.
+# timeout = "10s"
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
+# ## at the same time.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_calls = 10


+# # Clone metrics and apply modifications.
+# [[processors.clone]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.clone.tags]
# # additional_tag = "tag_value"
@@ -1566,28 +2430,28 @@

# # Dates measurements, tags, and fields that pass through this filter.
# [[processors.date]]
-# ## New tag to create
-# tag_key = "month"
+# ## New tag to create
+# tag_key = "month"
#
-# ## New field to create (cannot set both field_key and tag_key)
-# # field_key = "month"
+# ## New field to create (cannot set both field_key and tag_key)
+# # field_key = "month"
#
-# ## Date format string, must be a representation of the Go "reference time"
-# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
-# date_format = "Jan"
+# ## Date format string, must be a representation of the Go "reference time"
+# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+# date_format = "Jan"
#
-# ## If destination is a field, date format can also be one of
-# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
-# # date_format = "unix"
+# ## If destination is a field, date format can also be one of
+# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+# # date_format = "unix"
#
-# ## Offset duration added to the date string when writing the new tag.
-# # date_offset = "0s"
+# ## Offset duration added to the date string when writing the new tag.
+# # date_offset = "0s"
#
-# ## Timezone to use when creating the tag or field using a reference time
-# ## string. This can be set to one of "UTC", "Local", or to a location name
-# ## in the IANA Time Zone database.
-# ## example: timezone = "America/Los_Angeles"
-# # timezone = "UTC"
+# ## Timezone to use when creating the tag or field using a reference time
+# ## string. This can be set to one of "UTC", "Local", or to a location name
+# ## in the IANA Time Zone database.
+# ## example: timezone = "America/Los_Angeles"
+# # timezone = "UTC"


# # Filter metrics with repeating field values
@@ -1596,7 +2460,7 @@
# dedup_interval = "600s"


-# # Defaults sets default value(s) for specified fields that are not set on incoming metrics.
+# # Set default fields on your metric(s) when they are nil or empty
# [[processors.defaults]]
# ## Ensures a set of fields always exists on your metric(s) with their
# ## respective default value.
@@ -1607,19 +2471,19 @@
# ## or it is not nil but its value is an empty string or is a string
# ## of one or more spaces.
# ## <target-field> = <value>
-# # [processors.defaults.fields]
-# # field_1 = "bar"
-# # time_idle = 0
-# # is_error = true
+# [processors.defaults.fields]
+# field_1 = "bar"
+# time_idle = 0
+# is_error = true


# # Map enum values according to given table.
# [[processors.enum]]
# [[processors.enum.mapping]]
-# ## Name of the field to map
+# ## Name of the field to map. Globs accepted.
# field = "status"
#
-# ## Name of the tag to map
+# ## Name of the tag to map. Globs accepted.
# # tag = "status"
#
# ## Destination tag or field to be used for the mapped value. By default the
@@ -1627,8 +2491,8 @@
# dest = "status_code"
#
# ## Default value to be used for all values not contained in the mapping
-# ## table. When unset, the unmodified value for the field will be used if no
-# ## match is found.
+# ## table. When unset and no match is found, the original field will remain
+# ## unmodified and the destination tag or field will not be created.
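+# ## Illustrative sketch: with the status mapping below, an unmapped value
+# ## could be flagged by setting default = -1 (the sentinel value is an assumption).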
# # default = 0
#
# ## Table of mappings
@@ -1640,12 +2504,19 @@

# # Run executable as long-running processor plugin
# [[processors.execd]]
-# ## Program to run as daemon
-# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
-# command = ["cat"]
+# ## One program to run as daemon.
+# ## NOTE: process and each argument should each be their own string
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Environment variables
+# ## Array of "key=value" pairs to pass as environment variables
+# ## e.g. "KEY=value", "USERNAME=John Doe",
+# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
+# # environment = []
#
# ## Delay before the process is restarted after an unexpected termination
-# restart_delay = "10s"
+# # restart_delay = "10s"


# # Performs file path manipulations on tags and fields
@@ -1740,6 +2611,29 @@
# # cache_ttl = "8h"


+# # Adds noise to numerical fields
+# [[processors.noise]]
+# ## Specifies the type of the random distribution.
+# ## Can be "laplacian", "gaussian" or "uniform".
+# # type = "laplacian"
+#
+# ## Center of the distribution.
+# ## Only used for Laplacian and Gaussian distributions.
+# # mu = 0.0
+#
+# ## Scale parameter for the Laplacian or Gaussian distribution
+# # scale = 1.0
+#
+# ## Upper and lower bound of the Uniform distribution
+# # min = -1.0
+# # max = 1.0
+#
+# ## Apply the noise only to numeric fields matching the filter criteria below.
+# ## Excludes takes precedence over includes.
+# # include_fields = []
+# # exclude_fields = []


# # Apply metric modifications using override semantics.
# [[processors.override]]
# ## All modifications on inputs and aggregators can be overridden:
@@ -1755,7 +2649,7 @@

# # Parse a value in a specified field/tag(s) and add the result in a new metric
# [[processors.parser]]
# ## The name of the fields whose value will be parsed.
-# parse_fields = []
+# parse_fields = ["message"]
#
# ## If true, incoming metrics are not emitted.
# drop_original = false
@@ -1779,57 +2673,114 @@
# value_key = "value"


-# # Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file
-# [[processors.port_name]]
+# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
# [[processors.port_name]]
# ## Name of tag holding the port number
# # tag = "port"
+# ## Or name of the field holding the port number
+# # field = "port"
#
-# ## Name of output tag where service name will be added
+# ## Name of output tag or field (depending on the source) where service name will be added
# # dest = "service"
#
# ## Default tcp or udp
# # default_protocol = "tcp"
+#
+# ## Tag containing the protocol (tcp or udp, case-insensitive)
+# # protocol_tag = "proto"
+#
+# ## Field containing the protocol (tcp or udp, case-insensitive)
+# # protocol_field = "proto"


# # Print all metrics that pass through this filter.
# [[processors.printer]]


-# # Transforms tag and field values with regex pattern
+# # Transforms tag and field values as well as measurement, tag and field names with regex pattern
# [[processors.regex]]
-# ## Tag and field conversions defined in a separate sub-tables
-# # [[processors.regex.tags]]
-# # ## Tag to change
-# # key = "resp_code"
-# # ## Regular expression to match on a tag value
-# # pattern = "^(\\d)\\d\\d$"
+# namepass = ["nginx_requests"]
+#
+# # Tag and field conversions defined in a separate sub-tables
+# [[processors.regex.tags]]
+# ## Tag to change, "*" will change every tag
+# key = "resp_code"
+# ## Regular expression to match on a tag value
+# pattern = "^(\\d)\\d\\d$"
+# ## Matches of the pattern will be replaced with this string. Use ${1}
+# ## notation to use the text of the first submatch.
+# replacement = "${1}xx"
+#
+# [[processors.regex.fields]]
+# ## Field to change
+# key = "request"
+# ## All the power of the Go regular expressions available here
+# ## For example, named subgroups
+# pattern = "^/api(?P<method>/[\\w/]+)\\S*"
+# replacement = "${method}"
+# ## If result_key is present, a new field will be created
+# ## instead of changing existing field
+# result_key = "method"
+#
+# # Multiple conversions may be applied for one field sequentially
+# # Let's extract one more value
+# [[processors.regex.fields]]
+# key = "request"
+# pattern = ".*category=(\\w+).*"
+# replacement = "${1}"
+# result_key = "search_category"
+#
+# # Rename metric fields
+# [[processors.regex.field_rename]]
+# ## Regular expression to match on a field name
+# pattern = "^search_(\\w+)d$"
+# ## Matches of the pattern will be replaced with this string. Use ${1}
+# ## notation to use the text of the first submatch.
+# replacement = "${1}"
+# ## If the new field name already exists, you can either "overwrite" the
+# ## existing one with the value of the renamed field OR you can "keep"
+# ## both the existing and source field.
+# # result_key = "keep"
+#
+# # Rename metric tags
+# # [[processors.regex.tag_rename]]
+# # ## Regular expression to match on a tag name
+# # pattern = "^search_(\\w+)d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}"
+# # ## If the new tag name already exists, you can either "overwrite" the
+# # ## existing one with the value of the renamed tag OR you can "keep"
+# # ## both the existing and source tag.
+# # # result_key = "keep"
+#
+# # Rename metrics
+# # [[processors.regex.metric_rename]]
+# # ## Regular expression to match on a metric name
+# # pattern = "^search_(\\w+)d$"
# # ## Matches of the pattern will be replaced with this string. Use ${1}
# # ## notation to use the text of the first submatch.
-# # replacement = "${1}xx"
-#
-# # [[processors.regex.fields]]
-# # ## Field to change
-# # key = "request"
-# # ## All the power of the Go regular expressions available here
-# # ## For example, named subgroups
-# # pattern = "^/api(?P<method>/[\\w/]+)\\S*"
-# # replacement = "${method}"
-# # ## If result_key is present, a new field will be created
-# # ## instead of changing existing field
-# # result_key = "method"
-#
-# ## Multiple conversions may be applied for one field sequentially
-# ## Let's extract one more value
-# # [[processors.regex.fields]]
-# # key = "request"
-# # pattern = ".*category=(\\w+).*"
# # replacement = "${1}"
-# # result_key = "search_category"


# # Rename measurements, tags, and fields that pass through this filter.
# [[processors.rename]]
+# ## Specify one sub-table per rename operation.
+# [[processors.rename.replace]]
+# measurement = "network_interface_throughput"
+# dest = "throughput"
+#
+# [[processors.rename.replace]]
+# tag = "hostname"
+# dest = "host"
+#
+# [[processors.rename.replace]]
+# field = "lower"
+# dest = "min"
+#
+# [[processors.rename.replace]]
+# field = "upper"
+# dest = "max"


# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
@@ -1899,28 +2850,35 @@
# ## The Starlark source can be set as a string in this configuration file, or
# ## by referencing a file containing the script. Only one source or script
# ## should be set at once.
-# ##
+#
# ## Source of the Starlark script.
# source = '''
# def apply(metric):
-# return metric
+#   return metric
# '''
#
# ## File containing a Starlark script.
# # script = "/usr/local/bin/myscript.star"
+#
+# ## The constants of the Starlark script.
+# # [processors.starlark.constants]
+# # max_size = 10
+# # threshold = 0.75
+# # default_name = "Julia"
+# # debug_mode = true


# # Perform string processing on tags, fields, and measurements
# [[processors.strings]]
-# ## Convert a tag value to uppercase
-# # [[processors.strings.uppercase]]
-# # tag = "method"
-#
# ## Convert a field value to lowercase and store in a new field
# # [[processors.strings.lowercase]]
# # field = "uri_stem"
# # dest = "uri_stem_normalised"
#
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
# ## Convert a field value to titlecase
# # [[processors.strings.titlecase]]
# # field = "status"
@@ -1963,15 +2921,21 @@
# ## Decode a base64 encoded utf-8 string
# # [[processors.strings.base64decode]]
# # field = "message"
+#
+# ## Sanitize a string to ensure it is a valid utf-8 string
+# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
+# # [[processors.strings.valid_utf8]]
+# # field = "message"
+# # replacement = ""


# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
# [[processors.tag_limit]]
# ## Maximum number of tags to preserve
-# limit = 10
+# limit = 3
#
# ## List of tags to preferentially preserve
-# keep = ["foo", "bar", "baz"]
+# keep = ["environment", "region"]


# # Uses a Go template to create a new tag
@@ -1990,7 +2954,10 @@
# ## How many seconds between aggregations
# # period = 10
#
-# ## How many top metrics to return
+# ## How many top buckets to return per field
+# ## Every field specified to aggregate over will return k number of results.
+# ## For example, 1 field with k of 10 will return 10 buckets, while 2 fields
+# ## with k of 3 will return 6 buckets.
# # k = 10
#
# ## Over which tags should the aggregation be done. Globs can be specified, in
@@ -1998,10 +2965,14 @@
# ## empty list no aggregation over tags is done
# # group_by = ['*']
#
-# ## Over which fields are the top k are calculated
+# ## The field(s) to aggregate
+# ## Each field defined is used to create an independent aggregation. Each
+# ## aggregation will return k buckets. If a metric does not have a defined
+# ## field the metric will be dropped from the aggregation. Consider using
+# ## the defaults processor plugin to ensure fields are set if required.
# # fields = ["value"]
#
-# ## What aggregation to use. Options: sum, mean, min, max
+# ## What aggregation function to use. Options: sum, mean, min, max
# # aggregation = "mean"
#
# ## Instead of the top k largest metrics, return the bottom k lowest metrics
@@ -2058,7 +3029,25 @@
# drop_original = false
#
# ## Configures which basic stats to push as fields
-# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"]
+# # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","stdev","s2","sum","interval"]


+# # Calculates a derivative for every field.
+# [[aggregators.derivative]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+# ##
+# ## Suffix to append for the resulting derivative field.
+# # suffix = "_rate"
+# ##
+# ## Field to use for the quotient when computing the derivative.
+# ## When using a field as the derivation parameter the name of that field will
+# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
+# ## By default the timestamps of the metrics are used and the suffix is omitted.
+# # variable = ""
+# ##
+# ## Maximum number of roll-overs in case only one measurement is found during a period.
+# # max_roll_over = 10


# # Report the final metric of a series
@@ -2073,7 +3062,7 @@
# series_timeout = "5m"


-# # Create aggregate histograms.
+# # Configuration for aggregate histogram metrics
# [[aggregators.histogram]]
# ## The period in which to flush the aggregator.
# period = "30s"
@@ -2090,6 +3079,14 @@
# ## Defaults to true.
# cumulative = true
#
+# ## Expiration interval for each histogram. The histogram will be expired if
+# ## there are no changes in any buckets for this time interval. 0 == no expiration.
+# # expiration_interval = "0m"
+#
+# ## If true, aggregated histograms are pushed to the output only if they were
+# ## updated since the previous push. Defaults to false.
+# # push_only_on_update = false
+#
# ## Example config that aggregates all fields of the metric.
# # [[aggregators.histogram.config]]
# # ## Right borders of buckets (with +Inf implicitly added).
@@ -2124,6 +3121,65 @@
# drop_original = false


+# # Keep the aggregate quantiles of each metric passing through.
+# [[aggregators.quantile]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Quantiles to output in the range [0,1]
+# # quantiles = [0.25, 0.5, 0.75]
+#
+# ## Type of aggregation algorithm
+# ## Supported are:
+# ## "t-digest" -- approximation using centroids, can cope with a large number of samples
+# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
+# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
+# ## NOTE: Do not use the "exact" algorithms with a large number of samples,
+# ## as this will impair performance and memory consumption!
+# # algorithm = "t-digest"
+#
+# ## Compression for approximation (t-digest). The value needs to be
+# ## greater or equal to 1.0. Smaller values will result in better
+# ## performance but less accuracy.
+# # compression = 100.0


+# # Aggregate metrics using a Starlark script
+# [[aggregators.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+# ##
+# ## Source of the Starlark script.
+# source = '''
+# state = {}
+#
+# def add(metric):
+#   state["last"] = metric
+#
+# def push():
+#   return state.get("last")
+#
+# def reset():
+#   state.clear()
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+#
+# ## The constants of the Starlark script.
+# # [aggregators.starlark.constants]
+# # max_size = 10
+# # threshold = 0.75
+# # default_name = "Julia"
+# # debug_mode = true


# # Count the occurrence of values in fields.
# [[aggregators.valuecounter]]
# ## General Aggregator Arguments:
@@ -2133,7 +3189,7 @@
# ## aggregator and will not get sent to the output plugins.
# drop_original = false
# ## The fields for which the values will be counted
-# fields = []
+# fields = ["status"]


###############################################################################
#                              INPUT PLUGINS                                  #
###############################################################################


@@ -2147,10 +3203,12 @@
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
-  ## If true, collect raw CPU time metrics.
+  ## If true, collect raw CPU time metrics
  collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
+  ## If true, compute and report the sum of all non-idle CPU states
  report_active = false
+  ## If true and the info is available then add core_id and physical_id tags
+  core_tags = false


# Read metrics about disk usage by mount point
@@ -2162,6 +3220,11 @@
  ## Ignore mount points by filesystem type.
  ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]

+  ## Ignore mount points by mount options.
+  ## The 'mount' command reports options of all mounts in parentheses.
+  ## Bind mounts can be ignored with the special 'bind' option.
+  # ignore_mount_opts = []


# Read metrics about disk IO by device
[[inputs.diskio]]
@@ -2214,8 +3277,7 @@

# Read metrics about system load & uptime
[[inputs.system]]
-  ## Uncomment to remove deprecated metrics.
-  # fielddrop = ["uptime_format"]
+  # no configuration


# # Gather ActiveMQ metrics
@@ -2225,7 +3287,7 @@
#
# ## Required ActiveMQ Endpoint
# ## deprecated in 1.11; use the url option
-# # server = "127.0.0.1"
+# # server = "192.168.50.10"
# # port = 8161
#
# ## Credentials for basic HTTP authentication
@@ -2261,6 +3323,7 @@
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
+# # tls_name = "tlsname"
# ## If false, skip chain & host verification
# # insecure_skip_verify = true
#
@@ -2270,7 +3333,7 @@
# # disable_query_namespaces = true # default false
# # namespaces = ["namespace1", "namespace2"]
#
-# # Enable set level telmetry
+# # Enable set level telemetry
# # query_sets = true # default: false
# # Add namespace set combinations to limit sets executed on
# # Leave blank to do all sets
@@ -2283,9 +3346,20 @@
# # by default, aerospike produces a 100 bucket histogram
# # this is not great for most graphing tools, this will allow
# # the ability to squash this to a smaller number of buckets
+# # To have a balanced histogram, the number of buckets chosen
+# # should divide evenly into 100.
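+# # Illustrative sketch: num_histogram_buckets = 20 would merge every five of
+# # the original 100 buckets into one (the value 20 is an assumption).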
# # num_histogram_buckets = 100 # default: 10 +# # Query statistics from AMD Graphics cards using rocm-smi binary +# [[inputs.amd_rocm_smi]] +# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/opt/rocm/bin/rocm-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + # # Read Apache status information (mod_status) # [[inputs.apache]] # ## An array of URLs to gather from, must be directed at the machine @@ -2362,7 +3436,7 @@ # ## If not specified, then default is: # bcachePath = "/sys/fs/bcache" # -# ## By default, telegraf gather stats for all bcache devices +# ## By default, Telegraf gather stats for all bcache devices # ## Setting devices will restrict the stats to the specified # ## bcache devices. # bcacheDevs = ["bcache0"] @@ -2378,6 +3452,41 @@ # tubes = ["notifications"] +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". +# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read BIND nameserver XML statistics # [[inputs.bind]] # ## An array of BIND XML statistics URI to gather stats. @@ -2385,6 +3494,9 @@ # # urls = ["http://localhost:8053/xml/v3"] # # gather_memory_contexts = false # # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" # # Collect bond interface status, slaves statuses and failures count @@ -2393,10 +3505,18 @@ # ## If not specified, then default is /proc # # host_proc = "/proc" # +# ## Sets 'sys' directory path +# ## If not specified, then default is /sys +# # host_sys = "/sys" +# # ## By default, telegraf gather stats for all bond interfaces # ## Setting interfaces will restrict the stats to the specified # ## bond interfaces. # # bond_interfaces = ["bond0"] +# +# ## Tries to collect additional bond details from /sys/class/net/{bond} +# ## currently only useful for LACP (mode 4) bonds +# # collect_sys_details = false # # Collect Kafka topics and consumers status from Burrow HTTP API. @@ -2465,7 +3585,14 @@ # ## suffix used to identify socket files # socket_suffix = "asok" # -# ## Ceph user to authenticate as +# ## Ceph user to authenticate as, ceph will search for the corresponding keyring +# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the +# ## client section of ceph.conf for example: +# ## +# ## [client.telegraf] +# ## keyring = /etc/ceph/client.telegraf.keyring +# ## +# ## Consult the ceph documentation for more detail on keyring generation. 
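+# ## As a sketch (verify the caps your Ceph release requires), a dedicated
+# ## keyring for this plugin could be generated with:
+# ##     ceph auth get-or-create client.telegraf mon 'allow r' \
+# ##       -o /etc/ceph/client.telegraf.keyring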
# ceph_user = "client.admin" # # ## Ceph configuration to use to locate the cluster @@ -2474,7 +3601,8 @@ # ## Whether to gather statistics via the admin socket # gather_admin_socket_stats = true # -# ## Whether to gather statistics via ceph commands +# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config +# ## to be specified # gather_cluster_stats = false @@ -2507,16 +3635,19 @@ # # ## Amazon Credentials # ## Credentials are loaded in the following order -# ## 1) Assumed credentials via STS if role_arn is specified -# ## 2) explicit credentials from 'access_key' and 'secret_key' -# ## 3) shared profile from 'profile' -# ## 4) environment variables -# ## 5) shared credentials file -# ## 6) EC2 Instance Profile +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile # # access_key = "" # # secret_key = "" # # token = "" # # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" # # profile = "" # # shared_credential_file = "" # @@ -2526,6 +3657,10 @@ # ## ex: endpoint_url = "http://localhost:8000" # # endpoint_url = "" # +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# # # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # # metrics are made available to the 1 minute period. Some are collected at # # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -2553,8 +3688,10 @@ # ## Configure the TTL for the internal cache of metrics. # # cache_ttl = "1h" # -# ## Metric Statistic Namespace (required) -# namespace = "AWS/ELB" +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# # A single metric statistic namespace that will be appended to namespaces on startup +# # namespace = "AWS/ELB" # # ## Maximum requests per second. Note that the global default AWS rate limit is # ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a @@ -2583,6 +3720,7 @@ # # # # ## Dimension filters for Metric. All dimensions defined for the metric names # # ## must be specified in order to retrieve the metric statistics. +# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. # # [[inputs.cloudwatch.metrics.dimensions]] # # name = "LoadBalancerName" # # value = "p-example" @@ -2590,18 +3728,18 @@ # # Collects conntrack stats from the configured directories and files. # [[inputs.conntrack]] -# ## The following defaults would work with multiple versions of conntrack. -# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across -# ## kernel versions, as are the directory locations. +# ## The following defaults would work with multiple versions of conntrack. +# ## Note the nf_ and ip_ filename prefixes are mutually exclusive across +# ## kernel versions, as are the directory locations. # -# ## Superset of filenames to look for within the conntrack dirs. -# ## Missing files will be ignored. -# files = ["ip_conntrack_count","ip_conntrack_max", -# "nf_conntrack_count","nf_conntrack_max"] +# ## Superset of filenames to look for within the conntrack dirs. +# ## Missing files will be ignored. 
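+# ## On current kernels these names typically resolve to files such as
+# ## /proc/sys/net/netfilter/nf_conntrack_count; the ip_* variants only
+# ## exist on older kernels that still use the legacy ip_conntrack module.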
+# files = ["ip_conntrack_count","ip_conntrack_max", +# "nf_conntrack_count","nf_conntrack_max"] # -# ## Directories to search within for the conntrack files above. -# ## Missing directories will be ignored. -# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] +# ## Directories to search within for the conntrack files above. +# ## Missing directories will be ignored. +# dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] # # Gather health check statuses from services registered in Consul @@ -2613,9 +3751,10 @@ # # scheme = "http" # # ## Metric version controls the mapping from Consul metrics into -# ## Telegraf metrics. +# ## Telegraf metrics. Version 2 moved all fields with string values +# ## to tags. # ## -# ## example: metric_version = 1; deprecated in 1.15 +# ## example: metric_version = 1; deprecated in 1.16 # ## metric_version = 2; recommended version # # metric_version = 1 # @@ -2642,7 +3781,28 @@ # # tag_delimiter = ":" -# # Read metrics from one or many couchbase clusters +# # Read metrics from the Consul Agent API +# [[inputs.consul_agent]] +# ## URL for the Consul agent +# # url = "http://127.0.0.1:8500" +# +# ## Use auth token for authorization. +# ## If both are set, an error is thrown. +# ## If both are empty, no token will be used. +# # token_file = "/path/to/auth/token" +# ## OR +# # token = "a1234567-40c7-9048-7bae-378687048181" +# +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # Read per-node and per-bucket metrics from Couchbase # [[inputs.couchbase]] # ## specify servers via a url matching: # ## [protocol://][:password]@address[:port] @@ -2654,6 +3814,25 @@ # ## If no protocol is specified, HTTP is used. # ## If no port is specified, 8091 is used. # servers = ["http://localhost:8091"] +# +# ## Filter bucket fields to include only here. +# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false +# +# ## Whether to collect cluster-wide bucket statistics +# ## It is recommended to disable this in favor of node_stats +# ## to get a better view of the cluster. +# cluster_bucket_stats = true +# +# ## Whether to collect bucket stats for each individual node +# node_bucket_stats = false # # Read CouchDB Stats from one or more servers @@ -2667,10 +3846,22 @@ # # basic_password = "p@ssw0rd" +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + # # Input plugin for DC/OS metrics # [[inputs.dcos]] # ## The DC/OS cluster URL. -# cluster_url = "https://dcos-ee-master-1" +# cluster_url = "https://dcos-master-1" # # ## The ID of the service account. # service_account_id = "telegraf" @@ -2756,16 +3947,19 @@ # endpoint = "unix:///var/run/docker.sock" # # ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# ## Note: configure this in one of the manager nodes in a Swarm cluster. 
+# ## configuring in multiple Swarm managers results in duplication of metrics.
# gather_services = false
#
-# ## Only collect metrics for these containers, collect all if empty
+# ## Only collect metrics for these containers. Values will be appended to
+# ## container_name_include.
+# ## Deprecated (1.4.0), use container_name_include
# container_names = []
#
# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
# source_tag = false
#
-# ## Containers to include and exclude. Globs accepted.
-# ## Note that an empty array for both will include all containers
+# ## Containers to include and exclude. Collect all if empty. Globs accepted.
# container_name_include = []
# container_name_exclude = []
#
@@ -2779,21 +3973,38 @@
# ## Timeout for docker list, info, and stats commands
# timeout = "5s"
#
-# ## Whether to report for each container per-device blkio (8:0, 8:1...) and
-# ## network (eth0, eth1, ...) stats or not
+# ## Whether to report for each container per-device blkio (8:0, 8:1...),
+# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
+# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
+# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting
+# ## is honored.
# perdevice = true
#
-# ## Whether to report for each container total blkio and network stats or not
+# ## Specifies for which classes a per-device metric should be issued
+# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
+# ## Please note that this setting has no effect if 'perdevice' is set to 'true'
+# # perdevice_include = ["cpu"]
+#
+# ## Whether to report for each container total blkio and network stats or not.
+# ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+# ## Default value is 'false' for backwards compatibility, please set it to 'true' so that 'total_include' setting
+# ## is honored.
# total = false
#
-# ## Which environment variables should we use as a tag
-# ##tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+# ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+# ## Possible values are 'cpu', 'blkio' and 'network'
+# ## Total 'cpu' is reported directly by Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+# ## Please note that this setting has no effect if 'total' is set to 'false'
+# # total_include = ["cpu", "blkio", "network"]
#
# ## docker labels to include and exclude as tags. Globs accepted.
# ## Note that an empty array for both will include all labels as tags
# docker_label_include = []
# docker_label_exclude = []
#
+# ## Which environment variables should we use as a tag
+# tag_env = ["JAVA_HOME", "HEAP_SIZE"]
+#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
@@ -2802,11 +4013,14 @@
# # insecure_skip_verify = false


-# # Read statistics from one or many dovecot servers
+# # Read metrics about dovecot servers
# [[inputs.dovecot]]
# ## specify dovecot servers via an address:port list
# ## e.g.
# ## localhost:24242
+# ## or as a UDS socket
+# ## e.g.
+# ## /var/run/dovecot/old-stats
# ##
# ## If no servers are specified, then localhost is used as the host.
# servers = ["localhost:24242"]
@@ -2819,7 +4033,41 @@
# filters = [""]


-# # Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints.
+# # Reads metrics from DPDK applications using v2 telemetry interface. +# [[inputs.dpdk]] +# ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. +# # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" +# +# ## Duration that defines how long the connected socket client will wait for a response before terminating connection. +# ## This includes both writing to and reading from socket. Since it's local socket access +# ## to a fast packet processing application, the timeout should be sufficient for most users. +# ## Setting the value to 0 disables the timeout (not recommended) +# # socket_access_timeout = "200ms" +# +# ## Enables telemetry data collection for selected device types. +# ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). +# ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). +# # device_types = ["ethdev"] +# +# ## List of custom, application-specific telemetry commands to query +# ## The list of available commands depend on the application deployed. Applications can register their own commands +# ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands +# ## For e.g. L3 Forwarding with Power Management Sample Application this could be: +# ## additional_commands = ["/l3fwd-power/stats"] +# # additional_commands = [] +# +# ## Allows turning off collecting data for individual "ethdev" commands. +# ## Remove "/ethdev/link_status" from list to start getting link status metrics. +# [inputs.dpdk.ethdev] +# exclude_commands = ["/ethdev/link_status"] +# +# ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify +# ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. +# ## [inputs.dpdk.tags] +# ## dpdk_instance = "my-fwd-app" + + +# # Read metrics about ECS containers # [[inputs.ecs]] # ## ECS metadata url. # ## Metadata v2 API is used if set explicitly. Otherwise, @@ -2850,8 +4098,8 @@ # # Read stats from one or more Elasticsearch servers or clusters # [[inputs.elasticsearch]] # ## specify a list of one or more Elasticsearch servers -# # you can add username and password to your url to use basic authentication: -# # servers = ["http://user:pass@localhost:9200"] +# ## you can add username and password to your url to use basic authentication: +# ## servers = ["http://user:pass@localhost:9200"] # servers = ["http://localhost:9200"] # # ## Timeout for HTTP requests to the elastic search server(s) @@ -2862,25 +4110,27 @@ # ## of the cluster. # local = true # -# ## Set cluster_health to true when you want to also obtain cluster health stats +# ## Set cluster_health to true when you want to obtain cluster health stats # cluster_health = false # -# ## Adjust cluster_health_level when you want to also obtain detailed health stats +# ## Adjust cluster_health_level when you want to obtain detailed health stats # ## The options are # ## - indices (default) # ## - cluster # # cluster_health_level = "indices" # -# ## Set cluster_stats to true when you want to also obtain cluster stats. +# ## Set cluster_stats to true when you want to obtain cluster stats. # cluster_stats = false # # ## Only gather cluster_stats from the master node. To work this require local = true # cluster_stats_only_from_master = true # # ## Indices to collect; can be one or more indices names or _all +# ## Use of wildcards is allowed. 
Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
# indices_include = ["_all"]
#
# ## One of "shards", "cluster", "indices"
+# ## Currently only "shards" is implemented
# indices_level = "shards"
#
# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
@@ -2898,6 +4148,85 @@
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
+#
+# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
+# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and
+# ## sort them by the date or number after the wildcard. Metrics are then gathered for only the 'num_most_recent_indices' amount of most
+# ## recent indices.
+# # num_most_recent_indices = 0


+# # Derive metrics from aggregating Elasticsearch query results
+# [[inputs.elasticsearch_query]]
+# ## The full HTTP endpoint URL for your Elasticsearch instance
+# ## Multiple urls can be specified as part of the same cluster,
+# ## this means that only ONE of the urls will be queried each interval.
+# urls = [ "http://node1.es.example.com:9200" ] # required.
+#
+# ## Elasticsearch client timeout, defaults to "5s".
+# # timeout = "5s"
+#
+# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+# ## thus it is not necessary to list all nodes in the urls config option
+# # enable_sniffer = false
+#
+# ## Set the interval to check if the Elasticsearch nodes are available
+# ## This option is only used if enable_sniffer is also set (0s to disable it)
+# # health_check_interval = "10s"
+#
+# ## HTTP basic authentication details (eg. when using x-pack)
+# # username = "telegraf"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# [[inputs.elasticsearch_query.aggregation]]
+# ## measurement name for the results of the aggregation query
+# measurement_name = "measurement"
+#
+# ## Elasticsearch indexes to query (accept wildcards).
+# index = "index-*"
+#
+# ## The date/time field in the Elasticsearch index (mandatory).
+# date_field = "@timestamp"
+#
+# ## If the field used for the date/time field in Elasticsearch is also using
+# ## a custom date/time format it may be required to provide the format to
+# ## correctly parse the field.
+# ##
+# ## If using one of the built in elasticsearch formats this is not required.
+# # date_field_custom_format = ""
+#
+# ## Time window to query (eg. "1m" to query documents from last minute).
+# ## Normally should be set to same as collection interval
+# query_period = "1m"
+#
+# ## Lucene query to filter results
+# # filter_query = "*"
+#
+# ## Fields to aggregate values (must be numeric fields)
+# # metric_fields = ["metric"]
+#
+# ## Aggregation function to use on the metric fields
+# ## Must be set if 'metric_fields' is set
+# ## Valid values are: avg, sum, min, max
+# # metric_function = "avg"
+#
+# ## Fields to be used as tags
+# ## Must be text, non-analyzed fields.
Metric aggregations are performed per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" # # Returns ethtool statistics for given interfaces @@ -2907,6 +4236,15 @@ # # ## List of interfaces to ignore when pulling metrics. # # interface_exclude = ["eth1"] +# +# ## Some drivers declare statistics with extra whitespace, different spacing, +# ## and mix cases. This list, when enabled, can be used to clean the keys. +# ## Here are the current possible normalizations: +# ## * snakecase: converts fooBarBaz to foo_bar_baz +# ## * trim: removes leading and trailing whitespace +# ## * lower: changes all capitalized letters to lowercase +# ## * underscore: replaces spaces with underscores +# # normalize_keys = ["snakecase", "trim", "lower", "underscore"] # # Read metrics from one or more commands that can output to stdout @@ -2918,6 +4256,12 @@ # "/tmp/collect_*.sh" # ] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Timeout for each command to complete. # timeout = "5s" # @@ -2957,10 +4301,6 @@ # ## as well as ** to match recursive files and directories. # files = ["/tmp/metrics.out"] # -# ## Name a tag containing the name of the file the data was parsed from. Leave empty -# ## to disable. -# # file_tag = "" -# # ## Character encoding to use when interpreting the file contents. Invalid # ## characters are replaced using the unicode replacement character. When set # ## to the empty string the data is not decoded to text. @@ -2970,32 +4310,35 @@ # ## character_encoding = "" # # character_encoding = "" # -# ## The dataformat to be read from files +# ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" +# +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. Cautious when file name variation is high, this can increase the cardinality +# ## significantly. Read more about cardinality here: +# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality +# # file_tag = "" # # Count files in a directory # [[inputs.filecount]] -# ## Directory to gather stats about. -# ## deprecated in 1.9; use the directories option -# # directory = "/var/cache/apt/archives" -# # ## Directories to gather stats about. # ## This accept standard unit glob matching rules, but with the addition of # ## ** as a "super asterisk". ie: # ## /var/log/** -> recursively find all directories in /var/log and count files in each directories # ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories # ## /var/log -> count all files in /var/log and all of its subdirectories -# directories = ["/var/cache/apt/archives"] +# directories = ["/var/cache/apt", "/tmp"] # # ## Only count files that match the name pattern. Defaults to "*". -# name = "*.deb" +# name = "*" # # ## Count files in subdirectories. Defaults to true. -# recursive = false +# recursive = true # # ## Only count regular files. Defaults to true. 
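+# ## (i.e. directories, symlinks, sockets and device nodes are not counted)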
# regular_only = true @@ -3019,14 +4362,8 @@ # [[inputs.filestat]] # ## Files to gather stats about. # ## These accept standard unix glob matching rules, but with the addition of -# ## ** as a "super asterisk". ie: -# ## "/var/log/**.log" -> recursively find all .log files in /var/log -# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log -# ## "/var/log/apache.log" -> just tail the apache log file -# ## -# ## See https://github.com/gobwas/glob for more examples -# ## -# files = ["/var/log/**.log"] +# ## ** as a "super asterisk". See https://github.com/gobwas/glob. +# files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"] # # ## If true, read the entire file and calculate an md5 checksum. # md5 = false @@ -3055,17 +4392,17 @@ # # ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) # exclude = [ -# "monitor_agent", -# "dummy", +# "monitor_agent", +# "dummy", # ] # # Gather repository information from GitHub hosted repositories. # [[inputs.github]] -# ## List of repositories to monitor. +# ## List of repositories to monitor # repositories = [ -# "influxdata/telegraf", -# "influxdata/influxdb" +# "influxdata/telegraf", +# "influxdata/influxdb" # ] # # ## Github API access token. Unauthenticated requests are limited to 60 per hour. @@ -3076,28 +4413,39 @@ # # ## Timeout for HTTP requests. # # http_timeout = "5s" +# +# ## List of additional fields to query. +# ## NOTE: Getting those fields might involve issuing additional API-calls, so please +# ## make sure you do not exceed the rate-limit of GitHub. +# ## +# ## Available fields are: +# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) +# # additional_fields = [] # # Read flattened metrics from one or more GrayLog HTTP endpoints # [[inputs.graylog]] # ## API endpoint, currently supported API: # ## -# ## - multiple (Ex http://:12900/system/metrics/multiple) -# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## - multiple (e.g. http://:9000/api/system/metrics/multiple) +# ## - namespace (e.g. http://:9000/api/system/metrics/namespace/{namespace}) # ## # ## For namespace endpoint, the metrics array will be ignored for that call. # ## Endpoint can contain namespace and multiple type calls. # ## -# ## Please check http://[graylog-server-ip]:12900/api-browser for full list +# ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list # ## of endpoints # servers = [ -# "http://[graylog-server-ip]:12900/system/metrics/multiple", +# "http://[graylog-server-ip]:9000/api/system/metrics/multiple", # ] # +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# # ## Metrics list # ## List of metrics can be found on Graylog webservice documentation. -# ## Or by hitting the the web service api at: -# ## http://[graylog-host]:12900/system/metrics +# ## Or by hitting the web service api at: +# ## http://[graylog-host]:9000/api/system/metrics # metrics = [ # "jvm.cl.loaded", # "jvm.memory.pools.Metaspace.committed" @@ -3115,20 +4463,20 @@ # # insecure_skip_verify = false -# # Read metrics of haproxy, via socket or csv stats page +# # Read metrics of HAProxy, via socket or HTTP stats page # [[inputs.haproxy]] # ## An array of address to gather stats about. Specify an ip on hostname # ## with optional port. ie localhost, 10.10.3.33:1936, etc. 
# ## Make sure you specify the complete path to the stats endpoint # ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats # -# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats -# servers = ["http://myhaproxy.com:1936/haproxy?stats"] -# # ## Credentials for basic HTTP authentication # # username = "admin" # # password = "admin" # +# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] +# # ## You can also use local socket with standard wildcard globbing. # ## Server address not starting with 'http' will be treated as a possible # ## socket, so both examples below are valid. @@ -3173,6 +4521,13 @@ # ## Optional HTTP headers # # headers = {"X-Special-Header" = "Special-Value"} # +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" +# # ## Optional file with Bearer token # ## file content is added as an Authorization header # # bearer_token = "/path/to/file" @@ -3181,12 +4536,14 @@ # # username = "username" # # password = "pa$$word" # -# ## HTTP entity-body to send with POST/PUT requests. -# # body = "" +# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] # -# ## HTTP Content-Encoding for write request body, can be set to "gzip" to -# ## compress body or "identity" to apply no encoding. -# # content_encoding = "identity" +# ## HTTP Proxy support +# # http_proxy_url = "" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" @@ -3195,6 +4552,16 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Optional Cookie authentication +# # cookie_auth_url = "https://localhost/authMe" +# # cookie_auth_method = "POST" +# # cookie_auth_username = "username" +# # cookie_auth_password = "pa$$word" +# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' +# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' +# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie +# # cookie_auth_renewal = "5m" +# # ## Amount of time allowed to complete the HTTP request # # timeout = "5s" # @@ -3206,14 +4573,11 @@ # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # # data_format = "influx" +# # # HTTP/HTTPS request given an address a method and a timeout # [[inputs.http_response]] -# ## Deprecated in 1.12, use 'urls' -# ## Server address (default http://localhost) -# # address = "http://localhost" -# # ## List of urls to query. # # urls = ["http://localhost"] # @@ -3256,12 +4620,20 @@ # # response_string_match = "ok" # # response_string_match = "\".*_status\".?:.?\"up\"" # +# ## Expected response status code. +# ## The status code of the response is compared to this value. If they match, the field +# ## "response_status_code_match" will be 1, otherwise it will be 0. If the +# ## expected status code is 0, the check is disabled and the field won't be added. 
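+# ## For example, response_status_code = 200 sets response_status_code_match
+# ## to 1 when the server answers with HTTP 200, and to 0 for any other code.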
+# # response_status_code = 0 +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# ## Use the given name as the SNI server name on each URL +# # tls_server_name = "" # # ## HTTP Request Headers (all values must be strings) # # [inputs.http_response.headers] @@ -3276,13 +4648,14 @@ # # interface = "eth0" +# ## DEPRECATED: The 'httpjson' plugin is deprecated in version 1.6.0, use 'inputs.http' instead. # # Read flattened metrics from one or more JSON HTTP endpoints # [[inputs.httpjson]] # ## NOTE This plugin only reads numerical measurements, strings and booleans # ## will be ignored. # # ## Name for the service being polled. Will be appended to the name of the -# ## measurement e.g. httpjson_webserver_stats +# ## measurement e.g. "httpjson_webserver_stats". # ## # ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. # name = "webserver_stats" @@ -3298,7 +4671,7 @@ # ## HTTP method to use: GET or POST (case-sensitive) # method = "GET" # -# ## List of tag names to extract from top-level of JSON server response +# ## Tags to extract from top-level of JSON server response. # # tag_keys = [ # # "my_tag_1", # # "my_tag_2" @@ -3311,19 +4684,28 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## HTTP parameters (all values must be strings). For "GET" requests, data +# ## HTTP Request Parameters (all values must be strings). For "GET" requests, data # ## will be included in the query. For "POST" requests, data will be included # ## in the request body as "x-www-form-urlencoded". # # [inputs.httpjson.parameters] # # event_type = "cpu_spike" # # threshold = "0.75" # -# ## HTTP Headers (all values must be strings) +# ## HTTP Request Headers (all values must be strings). # # [inputs.httpjson.headers] # # X-Auth-Token = "my-xauth-token" # # apiVersion = "v1" +# # Gathers huge pages measurements. +# [[inputs.hugepages]] +# ## Supported huge page types: +# ## - "root" - based on root huge page control directory: /sys/kernel/mm/hugepages +# ## - "per_node" - based on per NUMA node directories: /sys/devices/system/node/node[0-9]*/hugepages +# ## - "meminfo" - based on /proc/meminfo file +# # types = ["root", "per_node"] + + # # Gather Icinga2 status # [[inputs.icinga2]] # ## Required Icinga2 server address @@ -3379,12 +4761,44 @@ # timeout = "5s" +# # Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization. +# [[inputs.intel_powerstat]] +# ## The user can choose which package metrics are monitored by the plugin with the package_metrics setting: +# ## - The default, will collect "current_power_consumption", "current_dram_power_consumption" and "thermal_design_power" +# ## - Setting this value to an empty array means no package metrics will be collected +# ## - Finally, a user can specify individual metrics to capture from the supported options list +# ## Supported options: +# ## "current_power_consumption", "current_dram_power_consumption", "thermal_design_power", "max_turbo_frequency", "uncore_frequency" +# # package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power"] +# +# ## The user can choose which per-CPU metrics are monitored by the plugin in cpu_metrics array. 
+# ## Empty or missing array means no per-CPU specific metrics will be collected by the plugin.
+# ## Supported options:
+# ## "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", "cpu_busy_frequency"
+# ## ATTENTION: cpu_busy_cycles option is DEPRECATED - superseded by cpu_c0_state_residency
+# # cpu_metrics = []
+
+
# # Collect statistics about itself
# [[inputs.internal]]
# ## If true, collect telegraf memory stats.
# # collect_memstats = true


+# # Monitors internet speed using speedtest.net service
+# [[inputs.internet_speed]]
+# ## This plugin downloads many MB of data each time it is run. As such
+# ## consider setting a higher interval for this plugin to reduce the
+# ## demand on your internet connection.
+# # interval = "60m"
+#
+# ## Sets whether to run the file download test
+# # enable_file_download = false
+#
+# ## Caches the closest server location
+# # cache = false


# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
# [[inputs.interrupts]]
# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
@@ -3426,22 +4840,37 @@
# ## gaps or overlap in pulled data
# interval = "30s"
#
-# ## Timeout for the ipmitool command to complete
+# ## Timeout for the ipmitool command to complete. Default is 20 seconds.
# timeout = "20s"
#
# ## Schema Version: (Optional, defaults to version 1)
# metric_version = 2
+#
+# ## Optionally provide the hex key for the IPMI connection.
+# # hex_key = ""
+#
+# ## If ipmitool should use a cache
+# ## ipmitool can run about 2 to 10 times faster with the cache enabled (measured on HP G10 servers running Ubuntu 20.04),
+# ## but the cache file may not work well for you if some sensors come up late
+# # use_cache = false
+#
+# ## Path to the ipmitool cache file (defaults to OS temp dir)
+# ## The provided path must exist and must be writable
+# # cache_path = ""


# # Gather packets and bytes counters from Linux ipsets
-# [[inputs.ipset]]
-# ## By default, we only show sets which have already matched at least 1 packet.
-# ## set include_unmatched_sets = true to gather them all.
-# include_unmatched_sets = false
-# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
-# use_sudo = false
-# ## The default timeout of 1s for ipset execution can be overridden here:
-# # timeout = "1s"
+# [[inputs.ipset]]
+# ## By default, we only show sets which have already matched at least 1 packet.
+# ## Set include_unmatched_sets = true to gather them all.
+# include_unmatched_sets = false
+# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+# ## You can avoid using sudo or root by setting appropriate privileges for
+# ## the telegraf.service systemd service.
+# use_sudo = false
+# ## The default timeout of 1s for ipset execution can be overridden here:
+# # timeout = "1s"
+
#


# # Gather packets and bytes throughput from iptables
@@ -3502,26 +4931,27 @@
# ## empty will use default value 10
# # max_subjob_per_layer = 10
#
-# ## Jobs to exclude from gathering
-# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
+# ## Jobs to include or exclude from gathering
+# ## When using both lists, job_exclude has priority.
+# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] +# # job_include = [ "*" ] +# # job_exclude = [ ] # -# ## Nodes to exclude from gathering -# # node_exclude = [ "node1", "node2" ] +# ## Nodes to include or exclude from gathering +# ## When using both lists, node_exclude has priority. +# # node_include = [ "*" ] +# # node_exclude = [ ] # # ## Worker pool for jenkins plugin only # ## Empty this field will use default value 5 # # max_connections = 5 +# ## DEPRECATED: The 'jolokia' plugin is deprecated in version 1.5.0, use 'inputs.jolokia2' instead. # # Read JMX metrics through Jolokia # [[inputs.jolokia]] -# # DEPRECATED: the jolokia plugin has been deprecated in favor of the -# # jolokia2 plugin -# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 -# # ## This is the context root used to compose the jolokia url # ## NOTE that Jolokia requires a trailing slash at the end of the context root -# ## NOTE that your jolokia security policy must allow for POST requests. # context = "/jolokia/" # # ## This specifies the mode used @@ -3544,13 +4974,6 @@ # ## Includes connection time, any redirects, and reading the response body. # # client_timeout = "4s" # -# ## Attribute delimiter -# ## -# ## When multiple attributes are returned for a single -# ## [inputs.jolokia.metrics], the field name is a concatenation of the metric -# ## name, and the attribute name, separated by the given delimiter. -# # delimiter = "_" -# # ## List of servers exposing jolokia read service # [[inputs.jolokia.servers]] # name = "as-server-01" @@ -3718,11 +5141,19 @@ # # selector_exclude = ["*"] # # ## Optional TLS Config +# ## Trusted root certificates for server # # tls_ca = "/path/to/cafile" +# ## Used for TLS client certificate authentication # # tls_cert = "/path/to/certfile" +# ## Used for TLS client certificate authentication # # tls_key = "/path/to/keyfile" +# ## Send the specified TLS server name via SNI +# # tls_server_name = "kubernetes.example.com" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false +# +# ## Uncomment to remove deprecated metrics. +# # fielddrop = ["terminated_reason"] # # Read metrics from the kubernetes kubelet api @@ -3757,7 +5188,7 @@ # [[inputs.leofs]] # ## An array of URLs of the form: # ## host [ ":" port] -# servers = ["127.0.0.1:4020"] +# servers = ["127.0.0.1:4010"] # # Provides Linux sysctl fs metrics @@ -3807,21 +5238,31 @@ # # "/proc/fs/lustre/obdfilter/*/stats", # # "/proc/fs/lustre/osd-ldiskfs/*/stats", # # "/proc/fs/lustre/obdfilter/*/job_stats", +# # "/proc/fs/lustre/obdfilter/*/exports/*/stats", # # ] # # mds_procfiles = [ # # "/proc/fs/lustre/mdt/*/md_stats", # # "/proc/fs/lustre/mdt/*/job_stats", +# # "/proc/fs/lustre/mdt/*/exports/*/stats", # # ] +# # Read metrics about LVM physical volumes, volume groups, logical volumes. +# [[inputs.lvm]] +# ## Use sudo to run LVM commands +# use_sudo = false + + # # Gathers metrics from the /3.0/reports MailChimp API # [[inputs.mailchimp]] # ## MailChimp API key # ## get from https://admin.mailchimp.com/account/api/ # api_key = "" # required +# # ## Reports for campaigns sent more than days_old ago will not be collected. -# ## 0 means collect all. +# ## 0 means collect all and is the default value. 
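+# ## e.g. days_old = 7 collects reports only for campaigns sent within
+# ## the last week.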
# days_old = 0 +# # ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old # # campaign_id = "" @@ -3846,22 +5287,38 @@ # # insecure_skip_verify = false -# # Read metrics from one or many mcrouter servers +# # Read metrics from one or many mcrouter servers. # [[inputs.mcrouter]] # ## An array of address to gather stats about. Specify an ip or hostname # ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. -# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] # -# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". # # timeout = "5s" -# # Read metrics from one or many memcached servers +# # Get kernel statistics from /proc/mdstat +# [[inputs.mdstat]] +# ## Sets file path +# ## If not specified, then default is /proc/mdstat +# # file_name = "/proc/mdstat" + + +# # Read metrics from one or many memcached servers. # [[inputs.memcached]] -# ## An array of address to gather stats about. Specify an ip on hostname -# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# # An array of address to gather stats about. Specify an ip on hostname +# # with optional port. ie localhost, 10.0.0.1:11211, etc. # servers = ["localhost:11211"] +# # An array of unix memcached sockets to gather stats about. # # unix_sockets = ["/var/run/memcached.sock"] +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true # # Telegraf plugin for gathering metrics from N Mesos masters @@ -3923,11 +5380,43 @@ # # tagdrop = ["server"] +# # Generate metrics for test and demonstration purposes +# [[inputs.mock]] +# ## Set the metric name to use for reporting +# metric_name = "mock" +# +# ## Optional string key-value pairs of tags to add to all metrics +# # [inputs.mock.tags] +# # "key" = "value" +# +# ## One or more mock data fields *must* be defined. +# ## +# ## [[inputs.mock.constant]] +# ## name = "constant" +# ## value = value_of_any_type +# ## [[inputs.mock.random]] +# ## name = "rand" +# ## min = 1.0 +# ## max = 6.0 +# ## [[inputs.mock.sine_wave]] +# ## name = "wave" +# ## amplitude = 1.0 +# ## period = 0.5 +# ## [[inputs.mock.step]] +# ## name = "plus_one" +# ## start = 0.0 +# ## step = 1.0 +# ## [[inputs.mock.stock]] +# ## name = "abc" +# ## price = 50.00 +# ## volatility = 0.2 + + # # Retrieve data from MODBUS slave devices # [[inputs.modbus]] # ## Connection Configuration # ## -# ## The plugin supports connections to PLCs via MODBUS/TCP or +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or # ## via serial line communication in binary (RTU) or readable (ASCII) encoding # ## # ## Device name @@ -3954,8 +5443,22 @@ # # data_bits = 8 # # parity = "N" # # stop_bits = 1 +# +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" # # transmission_mode = "RTU" # +# ## Trace the connection to the modbus device as debug messages +# ## Note: You have to enable telegraf's debug mode to see those messages! 
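+# ## (enable debug mode by running telegraf with --debug or by setting
+# ## debug = true in the [agent] section of this file)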
+# # debug_connection = false
+#
+# ## Define the configuration schema
+# ## |---register -- define fields per register type in the original style (only supports one slave ID)
+# ## |---request -- define fields on a per-request basis
+# configuration_type = "register"
+#
+# ## --- "register" configuration style ---
#
# ## Measurements
# ##
@@ -3985,10 +5488,11 @@
# ## |---BA, DCBA - Little Endian
# ## |---BADC - Mid-Big Endian
# ## |---CDAB - Mid-Little Endian
-# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation)
-# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input)
-# ## scale - the final numeric variable representation
-# ## address - variable address
+# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64,
+# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation)
+# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input)
+# ## scale - the final numeric variable representation
+# ## address - variable address
#
# holding_registers = [
# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
@@ -4003,6 +5507,110 @@
# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
# ]
+#
+#
+# ## --- "request" configuration style ---
+#
+# ## Per request definition
+# ##
+#
+# ## Define a request sent to the device
+# ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection.
+# [[inputs.modbus.request]]
+# ## ID of the modbus slave device to query.
+# ## If you need to query multiple slave-devices, create several "request" definitions.
+# slave_id = 1
+#
+# ## Byte order of the data.
+# ## |---ABCD -- Big Endian (Motorola)
+# ## |---DCBA -- Little Endian (Intel)
+# ## |---BADC -- Big Endian with byte swap
+# ## |---CDAB -- Little Endian with byte swap
+# byte_order = "ABCD"
+#
+# ## Type of the register for the request
+# ## Can be "coil", "discrete", "holding" or "input"
+# register = "coil"
+#
+# ## Name of the measurement.
+# ## Can be overridden by the individual field definitions. Defaults to "modbus"
+# # measurement = "modbus"
+#
+# ## Field definitions
+# ## Analog Variables, Input Registers and Holding Registers
+# ## address - address of the register to query. For coil and discrete inputs this is the bit address.
+# ## name *1 - field name
+# ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and
+# ## FLOAT32, FLOAT64 (IEEE 754 binary representation)
+# ## scale *1,2 - (optional) factor to scale the variable with
+# ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if
+# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc).
+# ## measurement *1 - (optional) measurement name, defaults to the setting of the request
+# ## omit - (optional) omit this field. Useful to leave out single values when querying many registers
+# ## with a single request. Defaults to "false".
+# ##
+# ## *1: Those fields are ignored if field is omitted ("omit"=true)
+# ##
+# ## *2: These fields are ignored for both "coil" and "discrete"-input type of registers. For those register types
+# ## the fields are output as zero or one in UINT64 format by default.
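+# ## Three example requests follow: discrete bits, scaled holding registers,
+# ## and input registers with explicit output-type conversions.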
+#
+# ## Coil / discrete input example
+# fields = [
+# { address=0, name="motor1_run"},
+# { address=1, name="jog", measurement="motor"},
+# { address=2, name="motor1_stop", omit=true},
+# { address=3, name="motor1_overheating"},
+# ]
+#
+# [[inputs.modbus.request.tags]]
+# machine = "impresser"
+# location = "main building"
+#
+# [[inputs.modbus.request]]
+# ## Holding example
+# ## All of those examples will result in FLOAT64 field outputs
+# slave_id = 1
+# byte_order = "DCBA"
+# register = "holding"
+# fields = [
+# { address=0, name="voltage", type="INT16", scale=0.1 },
+# { address=1, name="current", type="INT32", scale=0.001 },
+# { address=3, name="power", type="UINT32", omit=true },
+# { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" },
+# { address=7, name="frequency", type="UINT32", scale=0.1 },
+# { address=8, name="power_factor", type="INT64", scale=0.01 },
+# ]
+#
+# [[inputs.modbus.request.tags]]
+# machine = "impresser"
+# location = "main building"
+#
+# [[inputs.modbus.request]]
+# ## Input example with type conversions
+# slave_id = 1
+# byte_order = "ABCD"
+# register = "input"
+# fields = [
+# { address=0, name="rpm", type="INT16" }, # will result in INT64 field
+# { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field
+# { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field
+# { address=4, name="hours", type="UINT32" }, # will result in UINT64 field
+# ]
+#
+# [[inputs.modbus.request.tags]]
+# machine = "impresser"
+# location = "main building"
+#
+#
+#
+# ## Enable workarounds required by some devices to work correctly
+# # [inputs.modbus.workarounds]
+# ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices.
+# # pause_between_requests = "0ms"
+# ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain
+# ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices)
+# ## from multiple instances you might want to only stay connected during gather and disconnect afterwards.
+# # close_connection_after_gather = false


# # Read metrics from one or many MongoDB servers
@@ -4012,9 +5620,13 @@
# ## For example:
# ## mongodb://user:auth_key@10.10.3.30:27017,
# ## mongodb://10.10.3.33:18832,
-# servers = ["mongodb://127.0.0.1:27017"]
+# ##
+# ## If connecting to a cluster, users must include the "?connect=direct" in
+# ## the URL to ensure that the connection goes directly to the specified node
+# ## and that connections are not all passed to the master node.
+# servers = ["mongodb://127.0.0.1:27017/?connect=direct"]
#
-# ## When true, collect cluster status
+# ## When true, collect cluster status.
# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
# ## may have an impact on performance.
# # gather_cluster_status = true
@@ -4025,6 +5637,10 @@
# ## When true, collect per collection stats
# # gather_col_stats = false
#
+# ## When true, collect usage statistics for each collection
+# ## (insert, update, queries, remove, getmore, commands etc...).
+# # gather_top_stat = false
+#
# ## List of db where collections stats are collected
# ## If empty, all db are concerned
# # col_stats_dbs = ["local"]
@@ -4063,7 +5679,7 @@
# ## Omit this option to use absolute paths.
# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
#
-# ## If true, Telegraf discard all data when a single file can't be read.
+# ## If true, discard all data when a single file can't be read.
# ## Else, Telegraf omits the field generated from this file.
# # fail_early = true
#
@@ -4125,13 +5741,19 @@
# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
# # gather_innodb_metrics = false
#
+# ## gather metrics from all channels from SHOW SLAVE STATUS command output
+# # gather_all_slave_channels = false
+#
# ## gather metrics from SHOW SLAVE STATUS command output
# # gather_slave_status = false
#
+# ## use SHOW ALL SLAVES STATUS command output for MariaDB
+# # mariadb_dialect = false
+#
# ## gather metrics from SHOW BINARY LOGS command output
# # gather_binary_logs = false
#
-# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES
+# ## gather metrics from SHOW GLOBAL VARIABLES command output
# # gather_global_variables = true
#
# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
@@ -4150,6 +5772,15 @@
# # gather_file_events_stats = false
#
# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
+# # gather_perf_events_statements = false
+# #
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
+# # gather_perf_sum_per_acc_per_event = false
+# #
+# ## list of events to be gathered for gather_perf_sum_per_acc_per_event
+# ## in case of empty list all events will be gathered
+# # perf_summary_events = []
+# #
+# #
# # gather_perf_events_statements = false
#
# ## the limits for metrics form perf_events_statements
@@ -4191,15 +5822,17 @@
#
# ## The response_timeout specifies how long to wait for a reply from the Apex.
# #response_timeout = "5s"
+#


-# # Read metrics about network interface usage
+# # Gather metrics about network interfaces
# [[inputs.net]]
# ## By default, telegraf gathers stats from any up interface (excluding loopback)
# ## Setting interfaces will tell it to gather these explicit interfaces,
-# ## regardless of status.
+# ## regardless of status. When specifying an interface, glob-style
+# ## patterns are also supported.
# ##
-# # interfaces = ["eth0"]
+# # interfaces = ["eth*", "enp0s[0-1]", "lo"]
# ##
# ## On linux systems telegraf also collects protocol stats.
# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
@@ -4231,7 +5864,7 @@
# ## expected string in answer
# # expect = "ssh"
#
-# ## Uncomment to remove deprecated fields
+# ## Uncomment to remove deprecated fields; recommended for new deploys
# # fielddrop = ["result_type", "string_found"]


@@ -4240,25 +5873,54 @@
# # no configuration


+# # Read per-mount NFS client metrics from /proc/self/mountstats
+# [[inputs.nfsclient]]
+# ## Read more low-level metrics (optional, defaults to false)
+# # fullstat = false
+#
+# ## List of mounts to explicitly include or exclude (optional)
+# ## The pattern (Go regexp) is matched against the mount point (not the
+# ## device being mounted). If include_mounts is set, all mounts are ignored
+# ## unless present in the list. If a mount is listed in both include_mounts
+# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
+# # include_mounts = []
+# # exclude_mounts = []
+#
+# ## List of operations to include or exclude from collecting. This applies
+# ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
+# ## the default is to collect everything; when include_operations is set, only
+# ## those OPs are collected; when exclude_operations is set, all are collected
+# ## except those listed. If include and exclude are set, the OP is excluded.
+# ## See /proc/self/mountstats for a list of valid operations; note that
+# ## NFSv3 and NFSv4 have different lists. While it is not possible to
+# ## have different include/exclude lists for NFSv3/4, unused elements
+# ## in the list should be okay. It is possible to have different lists
+# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
+# ## with their own lists. See "include_mounts" above, and be careful of
+# ## duplicate metrics.
+# # include_operations = []
+# # exclude_operations = []


# # Read Nginx's basic status information (ngx_http_stub_status_module)
# [[inputs.nginx]]
-# # An array of Nginx stub_status URI to gather stats.
+# ## An array of Nginx stub_status URI to gather stats.
# urls = ["http://localhost/server_status"]
#
# ## Optional TLS Config
-# tls_ca = "/etc/telegraf/ca.pem"
-# tls_cert = "/etc/telegraf/cert.cer"
-# tls_key = "/etc/telegraf/key.key"
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
-# insecure_skip_verify = false
+# # insecure_skip_verify = false
#
-# # HTTP response timeout (default: 5s)
+# ## HTTP response timeout (default: 5s)
# response_timeout = "5s"


-# # Read Nginx Plus' full status information (ngx_http_status_module)
+# # Read Nginx Plus' advanced status information
# [[inputs.nginx_plus]]
-# ## An array of ngx_http_status_module or status URI to gather stats.
+# ## An array of Nginx status URIs to gather stats.
# urls = ["http://localhost/status"]
#
# # HTTP response timeout (default: 5s)
@@ -4272,11 +5934,10 @@
# # insecure_skip_verify = false


-# # Read Nginx Plus Api documentation
+# # Read Nginx Plus API advanced status information
# [[inputs.nginx_plus_api]]
-# ## An array of API URI to gather stats.
+# ## An array of Nginx API URIs to gather stats.
# urls = ["http://localhost/api"]
-#
# # Nginx API version, default: 3
# # api_version = 3
#
@@ -4353,7 +6014,21 @@
# # insecure_skip_verify = false


-# # A plugin to collect stats from the NSD authoritative DNS name server
+# # Read metrics from the Nomad API
+# [[inputs.nomad]]
+# ## URL for the Nomad agent
+# # url = "http://127.0.0.1:4646"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile


+# # A plugin to collect stats from the NSD authoritative DNS name server
# [[inputs.nsd]]
# ## Address of server to connect to, optionally ':port'. Defaults to the
# ## address in the nsd config file.
@@ -4405,7 +6080,9 @@

# # Pulls statistics from nvidia GPUs attached to the host
# [[inputs.nvidia_smi]]
-# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath
+# ## Optional: path to nvidia-smi binary, defaults to "/usr/bin/nvidia-smi"
+# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value),
+# ## if it is not found, we will try to locate it on PATH (exec.LookPath); if it is still not found, an error will be returned
# # bin_path = "/usr/bin/nvidia-smi"
#
# ## Optional: timeout for GPU polling
@@ -4414,9 +6091,8 @@

# # Retrieve data from OPCUA devices
# [[inputs.opcua]]
-# [[inputs.opcua]]
-# ## Device name
-# # name = "localhost"
+# ## Metric name
+# # name = "opcua"
# #
# ## OPC UA Endpoint URL
# # endpoint = "opc.tcp://localhost:4840"
@@ -4452,19 +6128,54 @@
# ## Password.
# # password = ""
# #
+# ## Option to select the metric timestamp to use. Valid options are:
+# ## "gather" -- uses the time of receiving the data in telegraf
+# ## "server" -- uses the timestamp provided by the server
+# ## "source" -- uses the timestamp provided by the source
+# # timestamp = "gather"
+# #
# ## Node ID configuration
-# ## name - the variable name
-# ## namespace - integer value 0 thru 3
-# ## identifier_type - s=string, i=numeric, g=guid, b=opaque
-# ## identifier - tag as shown in opcua browser
-# ## data_type - boolean, byte, short, int, uint, uint16, int16,
-# ## uint32, int32, float, double, string, datetime, number
+# ## name - field name to use in the output
+# ## namespace - OPC UA namespace of the node (integer value 0 thru 3)
+# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque)
+# ## identifier - OPC UA ID (tag as shown in opcua browser)
+# ## tags - extra tags to be added to the output metric (optional)
# ## Example:
-# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"}
-# nodes = [
-# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
-# {name="", namespace="", identifier_type="", identifier="", data_type="", description=""},
-# ]
+# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2"]]}
+# # nodes = [
+# # {name="", namespace="", identifier_type="", identifier=""},
+# # {name="", namespace="", identifier_type="", identifier=""},
+# #]
+# #
+# ## Node Group
+# ## Sets defaults for OPC UA namespace and ID type so they aren't required in
+# ## every node. A group can also have a metric name that overrides the main
+# ## plugin metric name.
+# ##
+# ## Multiple node groups are allowed
+# #[[inputs.opcua.group]]
+# ## Group Metric name. Overrides the top level name. If unset, the
+# ## top level name is used.
+# # name =
+# #
+# ## Group default namespace. If a node in the group doesn't set its
+# ## namespace, this is used.
+# # namespace =
+# #
+# ## Group default identifier type. If a node in the group doesn't set its
+# ## identifier type, this is used.
+# # identifier_type =
+# #
+# ## Node ID Configuration. Array of nodes with the same settings as above.
+# # nodes = [
+# # {name="", namespace="", identifier_type="", identifier=""},
+# # {name="", namespace="", identifier_type="", identifier=""},
+# #]
+#
+# ## Enable workarounds required by some devices to work correctly
+# # [inputs.opcua.workarounds]
+# ## Set additional valid status codes, StatusOK (0x0) is always considered valid
+# # additional_valid_status_codes = ["0xC0"]


# # OpenLDAP cn=Monitor plugin
@@ -4487,8 +6198,8 @@
# bind_dn = ""
# bind_password = ""
#
-# # Reverse metric names so they sort more naturally. Recommended.
-# # This defaults to false if unset, but is set to true when generating a new config
+# # Reverse metric names so they sort more naturally
+# # Defaults to false if unset, but is set to true when generating a new config
# reverse_metric_names = true
@@ -4504,16 +6215,69 @@
# # timeout = "5ms"

-# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver
-# [[inputs.opensmtpd]]
-# ## If running as a restricted user you can prepend sudo for additional access:
-# #use_sudo = false
+# # A plugin to collect stats from OpenSMTPD - a free SMTP server implementation
+# [[inputs.opensmtpd]]
+# ## If running as a restricted user you can prepend sudo for additional access:
+# #use_sudo = false
+#
+# ## The default location of the smtpctl binary can be overridden with:
+# binary = "/usr/sbin/smtpctl"
+#
+# # The default timeout of 1s can be overridden with:
+# #timeout = "1s"


+# # Collects performance metrics from OpenStack services
+# [[inputs.openstack]]
+# ## The recommended interval to poll is '30m'
+#
+# ## The identity endpoint to authenticate against and get the service catalog from.
+# authentication_endpoint = "https://my.openstack.cloud:5000"
+#
+# ## The domain to authenticate against when using a V3 identity endpoint.
+# # domain = "default"
+#
+# ## The project to authenticate as.
+# # project = "admin"
+#
+# ## User authentication credentials. Must have admin rights.
+# username = "admin"
+# password = "password"
+#
+# ## Available services are:
+# ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services",
+# ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes"
+# # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"]
+#
+# ## Collect Server Diagnostics
+# # server_diagnotics = false
+#
+# ## Output secrets (such as adminPass (for server) and UserID (for volume)).
+# # output_secrets = false
+#
+# ## Amount of time allowed to complete the HTTP(s) request.
+# # timeout = "5s"
+#
+# ## HTTP Proxy support
+# # http_proxy_url = ""
#
-# ## The default location of the smtpctl binary can be overridden with:
-# binary = "/usr/sbin/smtpctl"
+# ## Optional TLS Config
+# # tls_ca = /path/to/cafile
+# # tls_cert = /path/to/certfile
+# # tls_key = /path/to/keyfile
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Options for tags received from Openstack
+# # tag_prefix = "openstack_tag_"
+# # tag_value = "true"
+#
+# ## Timestamp format for timestamp data received from Openstack.
+# ## If false, the format is unix nanoseconds.
+# # human_readable_timestamps = false
#
-# ## The default timeout of 1000ms can be overridden with (in milliseconds):
-# timeout = 1000
+# ## Measure Openstack call duration
+# # measure_openstack_requests = false


# # Read current weather and forecasts data from openweathermap.org
@@ -4543,7 +6307,7 @@
# ## "metric", "imperial", or "standard".
# # units = "metric"
#
-# ## Query interval; OpenWeatherMap updates their weather data every 10
+# ## Query interval; OpenWeatherMap weather data is updated every 10
# ## minutes.
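# ## Since the upstream data only changes every 10 minutes, polling more
# ## often than the interval below would mostly re-fetch unchanged data.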
# interval = "10m" @@ -4584,6 +6348,8 @@ # ## "/var/run/php5-fpm.sock" # ## or using a custom fpm status path: # ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## glob patterns are also supported: +# ## "/var/run/php*.sock" # ## # ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: # ## "fcgi://10.0.0.12:9000/status" @@ -4638,6 +6404,9 @@ # ## option of the ping command. # # interface = "" # +# ## Percentiles to calculate. This only works with the native method. +# # percentiles = [50, 95, 99] +# # ## Specify the ping executable binary. # # binary = "ping" # @@ -4648,6 +6417,10 @@ # # ## Use only IPv6 addresses when resolving a hostname. # # ipv6 = false +# +# ## Number of data bytes to be sent. Corresponds to the "-s" +# ## option of the ping command. This only works with the native method. +# # size = 56 # # Measure postfix queue statistics @@ -4659,8 +6432,10 @@ # # Read metrics from one or many PowerDNS servers # [[inputs.powerdns]] -# ## An array of sockets to gather stats about. -# ## Specify a path to unix socket. +# # An array of sockets to gather stats about. +# # Specify a path to unix socket. +# # +# # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path. # unix_sockets = ["/var/run/pdns.controlsocket"] @@ -4686,9 +6461,10 @@ # # pattern = "nginx" # ## user as argument for pgrep (ie, pgrep -u ) # # user = "nginx" -# ## Systemd unit name +# ## Systemd unit name, supports globs when include_systemd_children is set to true # # systemd_unit = "nginx.service" -# ## CGroup name or path +# # include_systemd_children = false +# ## CGroup name or path, supports globs # # cgroup = "systemd/system.slice/nginx.service" # # ## Windows service name @@ -4704,6 +6480,9 @@ # ## When true add the full cmdline as a tag. # # cmdline_tag = false # +# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. +# # mode = "irix" +# # ## Add the PID as a tag instead of as a field. When collecting multiple # ## processes with otherwise matching tags this setting should be enabled to # ## ensure each process has a unique identity. @@ -4724,6 +6503,8 @@ # ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. # base_url = "https://localhost:8006/api2/json" # api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" @@ -4775,24 +6556,28 @@ # # ## A list of queues to gather as the rabbitmq_queue measurement. If not # ## specified, metrics for all queues are gathered. +# ## Deprecated in 1.6: Use queue_name_include instead. # # queues = ["telegraf"] # # ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not # ## specified, metrics for all exchanges are gathered. # # exchanges = ["telegraf"] # +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# # ## Queues to include and exclude. Globs accepted. # ## Note that an empty array for both will include all queues -# queue_name_include = [] -# queue_name_exclude = [] -# -# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. 
-# ## If neither are specified, metrics for all federation upstreams are gathered.
-# ## Federation link metrics will only be gathered for queues and exchanges
-# ## whose non-federation metrics will be collected (e.g a queue excluded
-# ## by the 'queue_name_exclude' option will also be excluded from federation).
-# ## Globs accepted.
-# # federation_upstream_include = ["dataCentre-*"]
+# # queue_name_include = []
+# # queue_name_exclude = []
+#
+# ## Federation upstreams to include and exclude specified as an array of glob
+# ## pattern strings. Federation links can also be limited by the queue and
+# ## exchange filters.
+# # federation_upstream_include = []
# # federation_upstream_exclude = []
@@ -4802,24 +6587,55 @@
# urls = ["http://localhost:8080/_raindrops"]

-# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required).
-# [[inputs.ras]]
-# ## Optional path to RASDaemon sqlite3 database.
-# ## Default: /var/lib/rasdaemon/ras-mc_event.db
-# # db_path = ""
+# # Reads metrics from RavenDB servers via the Monitoring Endpoints
+# [[inputs.ravendb]]
+# ## Node URL and port that RavenDB is listening on. By default,
+# ## attempts to connect securely over HTTPS; however, if the user
+# ## is running a local insecure development cluster, HTTP can be used
+# ## via a URL like "http://localhost:8080"
+# url = "https://localhost:4433"
+#
+# ## RavenDB X509 client certificate setup
+# # tls_cert = "/etc/telegraf/raven.crt"
+# # tls_key = "/etc/telegraf/raven.key"
+#
+# ## Optional request timeout
+# ##
+# ## Timeout specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request, and
+# ## the time limit for requests made by this client
+# # timeout = "5s"
+#
+# ## List of statistics which are collected
+# # At least one is required
+# # Allowed values: server, databases, indexes, collections
+# #
+# # stats_include = ["server", "databases", "indexes", "collections"]
+#
+# ## List of databases for which database stats are collected
+# ## If empty, all databases are included
+# # db_stats_dbs = []
+#
+# ## List of databases for which index stats are collected
+# ## If empty, indexes from all databases are included
+# # index_stats_dbs = []
+#
+# ## List of databases for which collection stats are collected
+# ## If empty, collections from all databases are included
+# # collection_stats_dbs = []


# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs
# [[inputs.redfish]]
-# ## Server url
+# ## Redfish API Base URL.
# address = "https://127.0.0.1:5000"
#
-# ## Username, Password for hardware server
+# ## Credentials for the Redfish API.
# username = "root"
# password = "password123456"
#
-# ## ComputerSystemId
-# computer_system_id="2M220100SL"
+# ## System Id to collect data for in Redfish APIs.
+# computer_system_id="System.Embedded.1"
#
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"
@@ -4832,27 +6648,18 @@
# # insecure_skip_verify = false

-# # Read metrics from one or many redis servers
-# [[inputs.redis]]
+# # Read metrics from one or many redis-sentinel servers
+# [[inputs.redis_sentinel]]
# ## specify servers via a url matching:
# ## [protocol://][:password]@address[:port]
# ## e.g.
-# ## tcp://localhost:6379
+# ## tcp://localhost:26379
# ## tcp://:password@192.168.99.100
-# ## unix:///var/run/redis.sock
+# ## unix:///var/run/redis-sentinel.sock
# ##
# ## If no servers are specified, then localhost is used as the host.
-# ## If no port is specified, 6379 is used
-# servers = ["tcp://localhost:6379"]
-#
-# ## Optional. Specify redis commands to retrieve values
-# # [[inputs.redis.commands]]
-# # command = ["get", "sample-key"]
-# # field = "sample-key-value"
-# # type = "string"
-#
-# ## specify server password
-# # password = "s#cr@t%"
+# ## If no port is specified, 26379 is used
+# # servers = ["tcp://localhost:26379"]
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
@@ -4870,11 +6677,11 @@
# ## rethinkdb://10.10.3.33:18832,
# ## 10.0.0.1:10000, etc.
# servers = ["127.0.0.1:28015"]
-# ##
+#
# ## If you use actual rethinkdb of > 2.3.0 with username/password authorization,
# ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
-# ##
+#
# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
# ## has to be named "rethinkdb".
# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"]
@@ -4916,54 +6723,73 @@
# # timeout = "5s"

+# # Get slab statistics from procfs
+# [[inputs.slab]]
+# # no configuration - please see the plugin's README for steps to configure
+# # sudo properly
+
+
# # Read metrics from storage devices supporting S.M.A.R.T.
# [[inputs.smart]]
-# ## Optionally specify the path to the smartctl executable
-# # path_smartctl = "/usr/bin/smartctl"
-#
-# ## Optionally specify the path to the nvme-cli executable
-# # path_nvme = "/usr/bin/nvme"
-#
-# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
-# ## ["auto-on"] - automatically find and enable additional vendor specific disk info
-# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info
-# # enable_extensions = ["auto-on"]
-#
-# ## On most platforms used cli utilities requires root access.
-# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli.
-# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli
-# ## without a password.
-# # use_sudo = false
-#
-# ## Skip checking disks in this power mode. Defaults to
-# ## "standby" to not wake up disks that have stopped rotating.
-# ## See --nocheck in the man pages for smartctl.
-# ## smartctl version 5.41 and 5.42 have faulty detection of
-# ## power mode and might require changing this value to
-# ## "never" depending on your disks.
-# # nocheck = "standby"
-#
-# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed
-# ## information from each drive into the 'smart_attribute' measurement.
-# # attributes = false
-#
-# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed.
-# # excludes = [ "/dev/pass6" ]
-#
-# ## Optionally specify devices and device type, if unset
-# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done
-# ## and all found will be included except for the excluded in excludes.
-# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"]
-#
-# ## Timeout for the cli command to complete.
-# # timeout = "30s"
+# ## Optionally specify the path to the smartctl executable
+# # path_smartctl = "/usr/bin/smartctl"
+#
+# ## Optionally specify the path to the nvme-cli executable
+# # path_nvme = "/usr/bin/nvme"
+#
+# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
+# ## ["auto-on"] - automatically find and enable additional vendor specific disk info
+# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enables additional Intel-specific disk info
"Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] +# +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# +# ## Timeout for the cli command to complete. +# # timeout = "30s" +# +# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. +# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. +# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of +# ## SMART data - one individual array drive at the time. In such case please set this configuration option +# ## to "sequential" to get readings for all drives. +# ## valid options: concurrent, sequential +# # read_method = "concurrent" # # Retrieves SNMP values from remote agents # [[inputs.snmp]] # ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. +# ## default is udp +# ## port: optional # ## example: agents = ["udp://127.0.0.1:161"] # ## agents = ["tcp://127.0.0.1:161"] +# ## agents = ["udp4://v4only-snmp-agent"] # agents = ["udp://127.0.0.1:161"] # # ## Timeout for each request. @@ -4972,12 +6798,17 @@ # ## SNMP version; can be 1, 2, or 3. # # version = 2 # -# ## Agent host tag; the tag used to reference the source host -# # agent_host_tag = "agent_host" +# ## Path to mib files +# ## Used by the gosmi translator. +# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable +# # path = ["/usr/share/snmp/mibs"] # # ## SNMP community string. # # community = "public" # +# ## Agent host tag +# # agent_host_tag = "agent_host" +# # ## Number of retries to attempt. # # retries = 3 # @@ -4988,7 +6819,7 @@ # ## # ## Security Name. # # sec_name = "myuser" -# ## Authentication protocol; one of "MD5", "SHA", or "". +# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". # # auth_protocol = "MD5" # ## Authentication password. # # auth_password = "pass" @@ -4996,7 +6827,9 @@ # # sec_level = "authNoPriv" # ## Context Name. # # context_name = "" -# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". 
+# ### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools +# ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) # # priv_protocol = "" # ## Privacy password used for encrypted messages. # # priv_password = "" @@ -5004,9 +6837,27 @@ # ## Add fields and tables defining the variables you wish to collect. This # ## example collects the system uptime and interface variables. Reference the # ## full plugin documentation for configuration details. +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysUpTime.0" +# name = "uptime" +# +# [[inputs.snmp.field]] +# oid = "RFC1213-MIB::sysName.0" +# name = "source" +# is_tag = true +# +# [[inputs.snmp.table]] +# oid = "IF-MIB::ifTable" +# name = "interface" +# inherit_tags = ["source"] +# +# [[inputs.snmp.table.field]] +# oid = "IF-MIB::ifDescr" +# name = "ifDescr" +# is_tag = true -# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# ## DEPRECATED: The 'snmp_legacy' plugin is deprecated in version 1.0.0, use 'inputs.snmp' instead. # [[inputs.snmp_legacy]] # ## Use 'oids.txt' file to translate oids to names # ## To generate 'oids.txt' you need to run: @@ -5029,7 +6880,6 @@ # collect = ["mybulk", "sysservices", "sysdescr"] # # Simple list of OIDs to get, in addition to "collect" # get_oids = [] -# # [[inputs.snmp.host]] # address = "192.168.2.3:161" # community = "public" @@ -5041,31 +6891,25 @@ # "ifNumber", # ".1.3.6.1.2.1.1.3.0", # ] -# # [[inputs.snmp.get]] # name = "ifnumber" # oid = "ifNumber" -# # [[inputs.snmp.get]] # name = "interface_speed" # oid = "ifSpeed" # instance = "0" -# # [[inputs.snmp.get]] # name = "sysuptime" # oid = ".1.3.6.1.2.1.1.3.0" # unit = "second" -# # [[inputs.snmp.bulk]] # name = "mybulk" # max_repetition = 127 # oid = ".1.3.6.1.2.1.1" -# # [[inputs.snmp.bulk]] # name = "ifoutoctets" # max_repetition = 127 # oid = "ifOutOctets" -# # [[inputs.snmp.host]] # address = "192.168.2.13:161" # #address = "127.0.0.1:161" @@ -5078,19 +6922,16 @@ # [[inputs.snmp.host.table]] # name = "iftable3" # include_instances = ["enp5s0", "eth1"] -# # # SNMP TABLEs # # table without mapping neither subtables # [[inputs.snmp.table]] # name = "iftable1" # oid = ".1.3.6.1.2.1.31.1.1.1" -# # # table without mapping but with subtables # [[inputs.snmp.table]] # name = "iftable2" # oid = ".1.3.6.1.2.1.31.1.1.1" # sub_tables = [".1.3.6.1.2.1.2.2.1.13"] -# # # table with mapping but without subtables # [[inputs.snmp.table]] # name = "iftable3" @@ -5098,7 +6939,6 @@ # # if empty. get all instances # mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" # # if empty, get all subtables -# # # table with both mapping and subtables # [[inputs.snmp.table]] # name = "iftable4" @@ -5110,86 +6950,28 @@ # sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] +# # Gather indicators from established connections, using iproute2's ss command. 
+# [[inputs.socketstat]] +# ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets +# ## Specify here the types you want to gather +# socket_types = [ "tcp", "udp" ] +# ## The default timeout of 1s for ss execution can be overridden here: +# # timeout = "1s" + + # # Read stats from one or more Solr servers or cores # [[inputs.solr]] # ## specify a list of one or more Solr servers # servers = ["http://localhost:8983"] -# +# ## # ## specify a list of one or more Solr cores (default - all) # # cores = ["main"] -# +# ## # ## Optional HTTP Basic Auth Credentials # # username = "username" # # password = "pa$$word" -# # Read metrics from Microsoft SQL Server -# [[inputs.sqlserver]] -# ## Specify instances to monitor with a list of connection strings. -# ## All connection parameters are optional. -# ## By default, the host is localhost, listening on default port, TCP 1433. -# ## for Windows, the user is the currently running AD user (SSO). -# ## See https://github.com/denisenkom/go-mssqldb for detailed connection -# ## parameters, in particular, tls connections can be created like so: -# ## "encrypt=true;certificate=;hostNameInCertificate=" -# # servers = [ -# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# # ] -# -# ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 -# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -# ## Possible values for database_type are -# ## "AzureSQLDB" -# ## "SQLServer" -# ## "AzureSQLManagedInstance" -# # database_type = "AzureSQLDB" -# -# -# ## Optional parameter, setting this to 2 will use a new version -# ## of the collection queries that break compatibility with the original -# ## dashboards. -# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB -# query_version = 2 -# -# ## If you are using AzureDB, setting this to true will gather resource utilization metrics -# # azuredb = false -# -# ## Possible queries -# ## Version 2: -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - DatabaseIO -# ## - ServerProperties -# ## - MemoryClerk -# ## - Schedulers -# ## - SqlRequests -# ## - VolumeSpace -# ## - Cpu -# -# ## Version 1: -# ## - PerformanceCounters -# ## - WaitStatsCategorized -# ## - CPUHistory -# ## - DatabaseIO -# ## - DatabaseSize -# ## - DatabaseStats -# ## - DatabaseProperties -# ## - MemoryClerk -# ## - VolumeSpace -# ## - PerformanceMetrics -# -# -# ## Queries enabled by default for specific Database Type -# ## database_type = AzureSQLDB -# ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO -# -# ## A list of queries to include. If not specified, all the above listed queries are used. -# # include_query = [] -# -# ## A list of queries to explicitly ignore. -# exclude_query = [ 'Schedulers' , 'SqlRequests'] - - # # Gather timeseries from Google Cloud Platform v3 monitoring API # [[inputs.stackdriver]] # ## GCP Project @@ -5203,8 +6985,8 @@ # ## Exclude timeseries that start with the given metric type. # # metric_type_prefix_exclude = [] # -# ## Many metrics are updated once per minute; it is recommended to override -# ## the agent level interval with a value of 1m or greater. +# ## Most metrics are updated no more than once per minute; it is recommended +# ## to override the agent level interval with a value of 1m or greater. 
# interval = "1m" # # ## Maximum number of API calls to make per second. The quota for accounts @@ -5240,9 +7022,9 @@ # ## For a list of aligner strings see: # ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner # # distribution_aggregation_aligners = [ -# # "ALIGN_PERCENTILE_99", -# # "ALIGN_PERCENTILE_95", -# # "ALIGN_PERCENTILE_50", +# # "ALIGN_PERCENTILE_99", +# # "ALIGN_PERCENTILE_95", +# # "ALIGN_PERCENTILE_50", # # ] # # ## Filters can be added to reduce the number of time series matched. All @@ -5266,8 +7048,8 @@ # ## Metric labels refine the time series selection with the following expression: # ## metric.labels. = # # [[inputs.stackdriver.filter.metric_labels]] -# # key = "device_name" -# # value = 'one_of("sda", "sdb")' +# # key = "device_name" +# # value = 'one_of("sda", "sdb")' # # Get synproxy counter statistics from procfs @@ -5301,12 +7083,10 @@ # ## If Group is true, corresponding metrics are grouped to a single measurement. # # group = true # -# ## Options for the sadf command. The values on the left represent the sadf -# ## options and the values on the right their description (which are used for -# ## grouping and prefixing metrics). +# ## Options for the sadf command. The values on the left represent the sadf options and +# ## the values on the right their description (wich are used for grouping and prefixing metrics). # ## -# ## Run 'sar -h' or 'man sar' to find out the supported options for your -# ## sysstat version. +# ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version. # [inputs.sysstat.options] # -C = "cpu" # -B = "paging" @@ -5322,12 +7102,11 @@ # -v = "inode" # -W = "swap" # -w = "task" -# # -H = "hugepages" # only available for newer linux distributions -# # "-I ALL" = "interrupts" # requires INT activity +# # -H = "hugepages" # only available for newer linux distributions +# # "-I ALL" = "interrupts" # requires INT activity # -# ## Device tags can be used to add additional tags for devices. -# ## For example the configuration below adds a tag vg with value rootvg for -# ## all metrics with sda devices. +# ## Device tags can be used to add additional tags for devices. For example the configuration below +# ## adds a tag vg with value rootvg for all metrics with sda devices. # # [[inputs.sysstat.device_tags.sda]] # # vg = "rootvg" @@ -5341,6 +7120,13 @@ # ## values are "socket", "target", "device", "mount", "automount", "swap", # ## "timer", "path", "slice" and "scope ": # # unittype = "service" +# # +# ## Filter for a specific pattern, default is "" (i.e. all), other possible +# ## values are valid pattern for systemctl, e.g. "a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" # # Reads metrics from a Teamspeak 3 Server via ServerQuery @@ -5351,6 +7137,8 @@ # username = "serverqueryuser" # ## Password for ServerQuery # password = "secret" +# ## Nickname of the ServerQuery client +# nickname = "telegraf" # ## Array of virtual servers # # virtual_servers = [1] @@ -5362,16 +7150,16 @@ # # Read Tengine's basic status information (ngx_http_reqstat_module) # [[inputs.tengine]] -# # An array of Tengine reqstat module URI to gather stats. +# ## An array of Tengine reqstat module URI to gather stats. 
# urls = ["http://127.0.0.1/us"] # -# # HTTP response timeout (default: 5s) +# ## HTTP response timeout (default: 5s) # # response_timeout = "5s" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.cer" -# # tls_key = "/etc/telegraf/key.key" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false @@ -5438,7 +7226,7 @@ # # Read uWSGI metrics. # [[inputs.uwsgi]] -# ## List with urls of uWSGI Stats servers. URL must match pattern: +# ## List with urls of uWSGI Stats servers. Url must match pattern: # ## scheme://address[:port] # ## # ## For example: @@ -5457,6 +7245,24 @@ # ## The default location of the varnishstat binary can be overridden with: # binary = "/usr/bin/varnishstat" # +# ## Additional custom arguments for the varnishstat command +# # binary_args = ["-f", "MAIN.*"] +# +# ## The default location of the varnishadm binary can be overridden with: +# adm_binary = "/usr/bin/varnishadm" +# +# ## Custom arguments for the varnishadm command +# # adm_binary_args = [""] +# +# ## Metric version defaults to metric_version=1, use metric_version=2 for removal of nonactive vcls +# ## Varnish 6.0.2 and newer is required for metric_version=2. +# metric_version = 1 +# +# ## Additional regexps to override builtin conversion of varnish metrics into telegraf metrics. +# ## Regexp group "_vcl" is used for extracting the VCL name. Metrics that contain nonactive VCL's are skipped. +# ## Regexp group "_field" overrides the field name. Other named regexp groups are used as tags. +# # regexps = ['^XCNT\.(?P<_vcl>[\w\-]*)(\.)*(?P[\w\-.+]*)\.(?P<_field>[\w\-.+]*)\.val'] +# # ## By default, telegraf gather stats for 3 metric points. # ## Setting stats will override the defaults shown below. # ## Glob matching can be used, ie, stats = ["MAIN.*"] @@ -5471,6 +7277,27 @@ # # timeout = "1s" +# # Read metrics from the Vault API +# [[inputs.vault]] +# ## URL for the Vault agent +# # url = "http://127.0.0.1:8200" +# +# ## Use Vault token for authorization. +# ## Vault token configuration is mandatory. +# ## If both are empty or both are set, an error is thrown. +# # token_file = "/path/to/auth/token" +# ## OR +# token = "s.CDDrgg5zPv5ssI0Z2P4qxJj2" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + # # Collect Wireguard server interface and peer statistics # [[inputs.wireguard]] # ## Optional list of Wireguard device/interface names to query. @@ -5487,36 +7314,74 @@ # # Reads metrics from a SSL certificate # [[inputs.x509_cert]] -# ## List certificate sources -# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] +# ## List certificate sources, support wildcard expands for files +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "smtp://mail.localhost:25", "udp://127.0.0.1:4433", +# "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] # # ## Timeout for SSL connection # # timeout = "5s" # -# ## Pass a different name into the TLS request (Server Name Indication) +# ## Pass a different name into the TLS request (Server Name Indication). +# ## This is synonymous with tls_server_name, and only one of the two +# ## options may be specified at one time. 
# ## example: server_name = "myhost.example.org" -# # server_name = "" +# # server_name = "myhost.example.org" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" +# # tls_server_name = "myhost.example.org" +# +# ## Set the proxy URL +# # use_proxy = true +# # proxy_url = "http://localhost:8888" -# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools +# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API +# [[inputs.xtremio]] +# ## XtremIO User Interface Endpoint +# url = "https://xtremio.example.com/" # required +# +# ## Credentials +# username = "user1" +# password = "pass123" +# +# ## Metrics to collect from the XtremIO +# # collectors = ["bbus","clusters","ssds","volumes","xms"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets # [[inputs.zfs]] # ## ZFS kstat path. Ignored on FreeBSD # ## If not specified, then default is: # # kstatPath = "/proc/spl/kstat/zfs" # # ## By default, telegraf gather all zfs stats -# ## If not specified, then default is: +# ## Override the stats list using the kstatMetrics array: +# ## For FreeBSD, the default is: # # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] # ## For Linux, the default is: # # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", -# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# # ## By default, don't gather zpool stats # # poolMetrics = false +# +# ## By default, don't gather dataset stats +# ## On FreeBSD, if the user has enabled listsnapshots in the pool property, +# ## telegraf may not be able to correctly parse the output. +# # datasetMetrics = false # # Reads 'mntr' stats from one or many zookeeper servers @@ -5545,38 +7410,110 @@ ############################################################################### -# # Intel Resource Director Technology plugin -# [[inputs.IntelRDT]] -# ## Optionally set sampling interval to Nx100ms. -# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. -# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. -# # sampling_interval = "10" -# -# ## Optionally specify the path to pqos executable. -# ## If not provided, auto discovery will be performed. -# # pqos_path = "/usr/local/bin/pqos" -# -# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. -# ## If not provided, default value is false. -# # shortened_metrics = false -# -# ## Specify the list of groups of CPU core(s) to be provided as pqos input. -# ## Mandatory if processes aren't set and forbidden if processes are specified. -# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] -# # cores = ["0-3"] -# -# ## Specify the list of processes for which Metrics will be collected. -# ## Mandatory if cores aren't set and forbidden if cores are specified. -# ## e.g. 
["qemu", "pmd"] -# # processes = ["process"] +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify the ali cloud region list to be queried for metrics and objects discovery +# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here +# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, +# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich +# ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then +# ## it will be reported on the start - for example for 'acs_cdn' project: +# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) +# ## Currently, discovery supported for the following projects: +# ## - acs_ecs_dashboard +# ## - acs_rds_dashboard +# ## - acs_slb_dashboard +# ## - acs_vpc_eip +# regions = ["cn-hongkong"] +# +# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. +# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Aliyun OpenAPI +# # and will not be collected by Telegraf. +# # +# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via AliyunCMS API) +# delay = "1m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Metric Statistic Project (required) +# project = "acs_slb_dashboard" +# +# ## Maximum requests per second, default value is 200 +# ratelimit = 200 +# +# ## How often the discovery API call executed (default 1m) +# #discovery_interval = "1m" +# +# ## Metrics to Pull (Required) +# [[inputs.aliyuncms.metrics]] +# ## Metrics names to be requested, +# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq +# names = ["InstanceActiveConnection", "InstanceNewConnection"] +# +# ## Dimension filters for Metric (these are optional). +# ## This allows to get additional metric dimension. 
+# ## the data can be aggregated - it depends on the particular metric; you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# ##
+# ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled)
+# ## Values specified here would be added into the list of discovered objects.
+# ## You can specify either single dimension:
+# #dimensions = '{"instanceId": "p-example"}'
+#
+# ## Or you can specify several dimensions at once:
+# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+#
+# ## Enrichment tags, can be added from discovery (if supported)
+# ## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/)>
+# ## To figure out which fields are available, consult the Describe<ObjectType> API per project.
+# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+# #tag_query_path = [
+# # "address:Address",
+# # "name:LoadBalancerName",
+# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+# # ]
+# ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId.
+#
+# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metrics without discovery
+# ## data will be emitted, otherwise dropped. This can be of help when debugging dimension filters, or with partial coverage
+# ## of discovery scope vs monitoring scope
+# #allow_dps_without_discovery = false


# # AMQP consumer plugin
# [[inputs.amqp_consumer]]
-# ## Broker to consume from.
-# ## deprecated in 1.7; use the brokers option
-# # url = "amqp://localhost:5672/influxdb"
-#
# ## Brokers to consume from. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
@@ -5651,13 +7588,9 @@
# data_format = "influx"


+# ## DEPRECATED: The 'cassandra' plugin is deprecated in version 1.7.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead.
# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
-# ## DEPRECATED: The cassandra plugin has been deprecated. Please use the
-# ## jolokia2 plugin instead.
-# ##
-# ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2
-#
# context = "/jolokia/read"
# ## List of cassandra servers exposing jolokia read service
# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
@@ -5682,6 +7615,9 @@
# ## Address and port to host telemetry listener
# service_address = ":57000"
#
+# ## Grpc Maximum Message Size, default is 4MB; increase if needed.
+# max_msg_size = 4000000
+#
# ## Enable TLS; grpc transport only.
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
@@ -5696,12 +7632,26 @@
# ## Define aliases to map telemetry encoding paths to simple measurement names
# [inputs.cisco_telemetry_mdt.aliases]
# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+# ## Define Property Xformation, please refer to the README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details.
+# [inputs.cisco_telemetry_mdt.dmes]
+# # Global Property Xformation.
+# # prop1 = "uint64 to int" +# # prop2 = "uint64 to string" +# # prop3 = "string to uint64" +# # prop4 = "string to int64" +# # prop5 = "string to float64" +# # auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 +# # Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" +# # Per Path configuration is better as it avoid property collision issue of types. +# # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' # # Read metrics from one or many ClickHouse servers # [[inputs.clickhouse]] # ## Username for authorization on ClickHouse server -# ## example: username = "default"" +# ## example: username = "default" # username = "default" # # ## Password for authorization on ClickHouse server @@ -5797,7 +7747,7 @@ # # max_message_len = 1000000 # # ## Optional. Maximum messages to read from PubSub that have not been written -# ## to an output. Defaults to 1000. +# ## to an output. Defaults to %d. # ## For best throughput set based on the number of metrics within # ## each message and the size of the output's metric_batch_size. # ## @@ -5836,7 +7786,8 @@ # # max_receiver_go_routines = 0 # # ## Optional. If true, Telegraf will attempt to base64 decode the -# ## PubSub message data before parsing +# ## PubSub message data before parsing. Many GCP services that +# ## output JSON to Google PubSub base64-encode the JSON payload. # # base64_data = false @@ -5891,6 +7842,88 @@ # data_format = "influx" +# # AWS Metric Streams listener +# [[inputs.cloudwatch_metric_streams]] +# ## Address and port to host HTTP listener on +# service_address = ":443" +# +# ## Paths to listen to. +# # paths = ["/telegraf"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Optional access key for Firehose security. +# # access_key = "test-key" +# +# ## An optional flag to keep Metric Streams metrics compatible with CloudWatch's API naming +# # api_compatability = false +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # Ingests files in a directory and then moves them to a target directory. +# [[inputs.directory_monitor]] +# ## The directory to monitor and read files from. +# directory = "" +# # +# ## The directory to move finished files to. +# finished_directory = "" +# # +# ## The directory to move files to upon file error. +# ## If not provided, erroring files will stay in the monitored directory. 
+# # error_directory = ""
+# #
+# ## The amount of time a file is allowed to sit in the directory before it is picked up.
+# ## This time can generally be low but if you choose to have a very large file written to the directory and it's potentially slow,
+# ## set this higher so that the plugin will wait until the file is fully copied to the directory.
+# # directory_duration_threshold = "50ms"
+# #
+# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+# # files_to_monitor = ["^.*\.csv"]
+# #
+# ## A list of files to ignore, if necessary. Supports regex.
+# # files_to_ignore = [".DS_Store"]
+# #
+# ## Maximum lines of the file to process that have not yet been written by the
+# ## output. For best throughput set to the size of the output's metric_buffer_limit.
+# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
+# # max_buffered_metrics = 10000
+# #
+# ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary.
+# # file_queue_size = 100000
+# #
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable. Be cautious when file name variation is high, as this can increase the cardinality
+# ## significantly. Read more about cardinality here:
+# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
+# # file_tag = ""
+# #
+# ## Specify if the file can be read completely at once or if it needs to be read line by line (default).
+# ## Possible values: "line-by-line", "at-once"
+# # parse_method = "line-by-line"
+# #
+# ## The data format to be read from the files.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
# # Read logging output from the Docker engine
# [[inputs.docker_log]]
# ## Docker Endpoint
@@ -5937,8 +7970,6 @@
# ## This requires one of the following sets of environment variables to be set:
# ##
# ## 1) Expected Environment Variables:
-# ## - "EVENTHUB_NAMESPACE"
-# ## - "EVENTHUB_NAME"
# ## - "EVENTHUB_CONNECTION_STRING"
# ##
# ## 2) Expected Environment Variables:
@@ -5947,8 +7978,17 @@
# ## - "EVENTHUB_KEY_NAME"
# ## - "EVENTHUB_KEY_VALUE"
#
+# ## 3) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "AZURE_TENANT_ID"
+# ## - "AZURE_CLIENT_ID"
+# ## - "AZURE_CLIENT_SECRET"
+#
# ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
# ## This can either be the associated environment variable or hard coded directly.
+# ## If this option is uncommented, environment variables will be ignored.
+# ## Connection string should contain EventHubName (EntityPath)
# # connection_string = ""
#
# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
@@ -6012,15 +8052,22 @@
# # Run executable as long-running input plugin
# [[inputs.execd]]
-# ## Program to run as daemon
+# ## One program to run as daemon.
+# ## NOTE: process and each argument should each be their own string # command = ["telegraf-smartctl", "-d", "/dev/sda"] # +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# # ## Define how the process is signaled on each collection interval. # ## Valid values are: -# ## "none" : Do not signal anything. -# ## The process must output metrics by itself. -# ## "STDIN" : Send a newline on STDIN. -# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "none" : Do not signal anything. (Recommended for service inputs) +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended) # ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. # ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. # signal = "none" @@ -6037,65 +8084,78 @@ # # gNMI telemetry input plugin # [[inputs.gnmi]] -# ## Address and port of the gNMI GRPC server -# addresses = ["10.49.234.114:57777"] +# ## Address and port of the gNMI GRPC server +# addresses = ["10.49.234.114:57777"] # -# ## define credentials -# username = "cisco" -# password = "cisco" +# ## define credentials +# username = "cisco" +# password = "cisco" # -# ## gNMI encoding requested (one of: "proto", "json", "json_ietf") -# # encoding = "proto" +# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") +# # encoding = "proto" # -# ## redial in case of failures after -# redial = "10s" +# ## redial in case of failures after +# redial = "10s" # -# ## enable client-side TLS and define CA to authenticate the device -# # enable_tls = true -# # tls_ca = "/etc/telegraf/ca.pem" -# # insecure_skip_verify = true +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true # -# ## define client-side TLS certificate & key to authenticate to the device -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # -# ## gNMI subscription prefix (optional, can usually be left empty) -# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths -# # origin = "" -# # prefix = "" -# # target = "" +# ## gNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" # -# ## Define additional aliases to map telemetry encoding paths to simple measurement names -# #[inputs.gnmi.aliases] -# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# # [inputs.gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" # -# [[inputs.gnmi.subscription]] -# ## Name of the measurement that will be emitted -# name = "ifcounters" +# [[inputs.gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" # -# ## Origin and path of the subscription -# ## See: 
https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
-# ##
-# ## origin usually refers to a (YANG) data model implemented by the device
-# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
-# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
-# origin = "openconfig-interfaces"
-# path = "/interfaces/interface/state/counters"
+# ## Origin and path of the subscription
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# ##
+# ## origin usually refers to a (YANG) data model implemented by the device
+# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
+# origin = "openconfig-interfaces"
+# path = "/interfaces/interface/state/counters"
+#
+# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
+# subscription_mode = "sample"
+# sample_interval = "10s"
+#
+# ## Suppress redundant transmissions when measured values are unchanged
+# # suppress_redundant = false
#
-# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
-# subscription_mode = "sample"
-# sample_interval = "10s"
+# ## If suppression is enabled, send updates at least every X seconds anyway
+# # heartbeat_interval = "60s"
#
-# ## Suppress redundant transmissions when measured values are unchanged
-# # suppress_redundant = false
+# #[[inputs.gnmi.subscription]]
+# # name = "descr"
+# # origin = "openconfig-interfaces"
+# # path = "/interfaces/interface/state/description"
+# # subscription_mode = "on_change"
#
-# ## If suppression is enabled, send updates at least every X seconds anyway
-# # heartbeat_interval = "60s"
+# ## If tag_only is set, the subscription in question will be utilized to maintain a map of
+# ## tags to apply to other measurements emitted by the plugin, by matching path keys
+# ## All fields from the tag-only subscription will be applied as tags to other readings,
+# ## in the format <name>_<fieldname>.
+# # tag_only = true


+# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead.
# # Accept metrics over InfluxDB 1.x HTTP API
-# [[inputs.http_listener]]
-# ## Address and port to host InfluxDB listener on
+# [[inputs.influxdb_listener]]
+# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
@@ -6105,17 +8165,11 @@
#
# ## Maximum allowed HTTP request body size in bytes.
# ## 0 means to use the default of 32MiB.
-# max_body_size = "32MiB"
+# max_body_size = 0
#
-# ## Optional tag name used to store the database.
-# ## If the write has a database in the query string then it will be kept in this tag name.
-# ## This tag can be used in downstream outputs.
-# ## The default value of nothing means it will be off and the database will not be recorded.
-# # database_tag = ""
-#
-# ## If set the retention policy specified in the write query will be added as
-# ## the value of this tag name.
-# # retention_policy_tag = ""
+# ## Maximum line size allowed to be sent in bytes.
+# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead.
# # Accept metrics over InfluxDB 1.x HTTP API
-# [[inputs.http_listener]]
-# ## Address and port to host InfluxDB listener on
+# [[inputs.influxdb_listener]]
+# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
@@ -6105,17 +8165,11 @@
#
# ## Maximum allowed HTTP request body size in bytes.
# ## 0 means to use the default of 32MiB.
-# max_body_size = "32MiB"
+# max_body_size = 0
#
-# ## Optional tag name used to store the database.
-# ## If the write has a database in the query string then it will be kept in this tag name.
-# ## This tag can be used in downstream outputs.
-# ## The default value of nothing means it will be off and the database will not be recorded.
-# # database_tag = ""
-#
-# ## If set the retention policy specified in the write query will be added as
-# ## the value of this tag name.
-# # retention_policy_tag = ""
+# ## Maximum line size allowed to be sent in bytes.
+# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
+# # max_line_size = 0
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
@@ -6125,10 +8179,27 @@
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
#
+# ## Optional tag name used to store the database name.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# ## If you have a tag that is the same as the one specified below, and supply a database,
+# ## the tag will be overwritten with the database supplied.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
# ## Optional username and password to accept for HTTP basic authentication.
# ## You probably want to make sure you have TLS configured above for this.
# # basic_username = "foobar"
# # basic_password = "barfoo"
+#
+# ## Influx line protocol parser
+# ## 'internal' is the default. 'upstream' is a newer parser that is faster
+# ## and more memory efficient.
+# # parser_type = "internal"
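
As a minimal sketch of the listener options above, a configuration that records the written database in a tag and opts into the faster parser could look like this (the tag name "database" is an arbitrary choice):

[[inputs.influxdb_listener]]
  service_address = ":8186"
  database_tag = "database"
  parser_type = "upstream"
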
# # Generic HTTP write listener
@@ -6136,8 +8207,11 @@
# ## Address and port to host HTTP listener on
# service_address = ":8080"
#
-# ## Path to listen to.
-# # path = "/telegraf"
+# ## Paths to listen to.
+# # paths = ["/telegraf"]
+#
+# ## Save path as http_listener_v2_path tag if set to true
+# # path_tag = false
#
# ## HTTP methods to accept.
# # methods = ["POST", "PUT"]
@@ -6148,7 +8222,7 @@
# # write_timeout = "10s"
#
# ## Maximum allowed http request body size in bytes.
-# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes)
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
# # max_body_size = "500MB"
#
# ## Part of the request to consume. Available options are "body" and
@@ -6182,7 +8256,7 @@
# # Accept metrics over InfluxDB 1.x HTTP API
# [[inputs.influxdb_listener]]
-# ## Address and port to host InfluxDB listener on
+# ## Address and port to host HTTP listener on
# service_address = ":8186"
#
# ## maximum duration before timing out read of the request
@@ -6192,17 +8266,11 @@
#
# ## Maximum allowed HTTP request body size in bytes.
# ## 0 means to use the default of 32MiB.
-# max_body_size = "32MiB"
-#
-# ## Optional tag name used to store the database.
-# ## If the write has a database in the query string then it will be kept in this tag name.
-# ## This tag can be used in downstream outputs.
-# ## The default value of nothing means it will be off and the database will not be recorded.
-# # database_tag = ""
+# max_body_size = 0
#
-# ## If set the retention policy specified in the write query will be added as
-# ## the value of this tag name.
-# # retention_policy_tag = ""
+# ## Maximum line size allowed to be sent in bytes.
+# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
+# # max_line_size = 0
#
# ## Set one or more allowed client CA certificate file names to
# ## enable mutually authenticated TLS connections
@@ -6212,10 +8280,27 @@
# tls_cert = "/etc/telegraf/cert.pem"
# tls_key = "/etc/telegraf/key.pem"
#
+# ## Optional tag name used to store the database name.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## The default value of nothing means it will be off and the database will not be recorded.
+# ## If you have a tag that is the same as the one specified below, and supply a database,
+# ## the tag will be overwritten with the database supplied.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
# ## Optional username and password to accept for HTTP basic authentication.
# ## You probably want to make sure you have TLS configured above for this.
# # basic_username = "foobar"
# # basic_password = "barfoo"
+#
+# ## Influx line protocol parser
+# ## 'internal' is the default. 'upstream' is a newer parser that is faster
+# ## and more memory efficient.
+# # parser_type = "internal"
#
# # Accept metrics over InfluxDB 2.x HTTP API
@@ -6245,9 +8330,93 @@
# ## Optional token to accept for HTTP authentication.
# ## You probably want to make sure you have TLS configured above for this.
# # token = "some-long-shared-secret-token"
+#
+# ## Influx line protocol parser
+# ## 'internal' is the default. 'upstream' is a newer parser that is faster
+# ## and more memory efficient.
+# # parser_type = "internal"
+
+
+# # Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem
+# [[inputs.intel_pmu]]
+# ## List of filesystem locations of JSON files that contain PMU event definitions.
+# event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"]
+#
+# ## List of core events measurement entities. There can be more than one core_events section.
+# [[inputs.intel_pmu.core_events]]
+# ## List of events to be counted. Event names shall match names from event_definitions files.
+# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers.
+# ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones.
+# events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"]
+#
+# ## Limits the counting of events to core numbers specified.
+# ## If absent, events are counted on all cores.
+# ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element.
+# ## example: cores = ["0,2", "4", "12-16"]
+# cores = ["0"]
+#
+# ## Indicator that plugin shall attempt to run core_events.events as a single perf group.
+# ## If absent or set to false, each event is counted individually. Defaults to false.
+# ## This limits the number of events that can be measured to a maximum of available hardware counters per core.
+# ## Could vary depending on type of event, use of fixed counters.
+# # perf_group = false
+#
+# ## Optionally set a custom tag value that will be added to every measurement within this events group.
+# ## Can be applied to any group of events, unrelated to perf_group setting.
+# # events_tag = ""
+#
+# ## List of uncore event measurement entities. There can be more than one uncore_events section.
+# [[inputs.intel_pmu.uncore_events]]
+# ## List of events to be counted. Event names shall match names from event_definitions files.
+# ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers.
+# ## If absent, all uncore events from provided event_definitions are counted skipping unresolvable ones.
+# events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] +# +# ## Limits the counting of events to specified sockets. +# ## If absent, events are counted on all sockets. +# ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. +# ## example: sockets = ["0-2"] +# sockets = ["0"] +# +# ## Indicator that plugin shall provide an aggregated value for multiple units of same type distributed in an uncore. +# ## If absent or set to false, events for each unit are exposed as separate metric. Defaults to false. +# # aggregate_uncore_units = false +# +# ## Optionally set a custom tag value that will be added to every measurement within this events group. +# # events_tag = "" + + +# # Read Intel RDT metrics +# [[inputs.intel_rdt]] +# ## Optionally set sampling interval to Nx100ms. +# ## This value is propagated to pqos tool. Interval format is defined by pqos itself. +# ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. +# # sampling_interval = "10" +# +# ## Optionally specify the path to pqos executable. +# ## If not provided, auto discovery will be performed. +# # pqos_path = "/usr/local/bin/pqos" +# +# ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. +# ## If not provided, default value is false. +# # shortened_metrics = false +# +# ## Specify the list of groups of CPU core(s) to be provided as pqos input. +# ## Mandatory if processes aren't set and forbidden if processes are specified. +# ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] +# # cores = ["0-3"] +# +# ## Specify the list of processes for which Metrics will be collected. +# ## Mandatory if cores aren't set and forbidden if cores are specified. +# ## e.g. ["qemu", "pmd"] +# # processes = ["process"] +# +# ## Specify if the pqos process should be called with sudo. +# ## Mandatory if the telegraf process does not run as root. +# # use_sudo = false -# # Read JTI OpenConfig Telemetry from listed sensors +# # Subscribe and receive OpenConfig Telemetry data using JTI # [[inputs.jti_openconfig_telemetry]] # ## List of device addresses to collect telemetry from # servers = ["localhost:1883"] @@ -6318,7 +8487,6 @@ # # version = "" # # ## Optional TLS Config -# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -6326,16 +8494,44 @@ # # insecure_skip_verify = false # # ## SASL authentication credentials. These settings should typically be used -# ## with TLS encryption enabled using the "enable_tls" option. +# ## with TLS encryption enabled # # sasl_username = "kafka" # # sasl_password = "secret" # +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# # ## SASL protocol version. When connecting to Azure EventHub set to 0. # # sasl_version = 1 # +# # Disable Kafka metadata full fetch +# # metadata_full = false +# # ## Name of the consumer group. 
# ## Name of the consumer group.
# # consumer_group = "telegraf_metrics_consumers"
#
+# ## Compression codec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : None
+# ## 1 : Gzip
+# ## 2 : Snappy
+# ## 3 : LZ4
+# ## 4 : ZSTD
+# # compression_codec = 0
# ## Initial offset position; one of "oldest" or "newest".
# # offset = "oldest"
#
@@ -6356,6 +8552,15 @@
# ## waiting until the next flush_interval.
# # max_undelivered_messages = 1000
#
+# ## Maximum amount of time the consumer should take to process messages. If
+# ## the debug log prints messages from sarama about 'abandoning subscription
+# ## to [topic] because consuming was taking too long', increase this value to
+# ## longer than the time taken by the output plugin(s).
+# ##
+# ## Note that the effective timeout could be between 'max_processing_time' and
+# ## '2 * max_processing_time'.
+# # max_processing_time = "100ms"
+#
# ## Data format to consume.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
@@ -6363,6 +8568,7 @@
# data_format = "influx"
+# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+.
# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer_legacy]]
# ## topic(s) to consume
@@ -6398,16 +8604,19 @@
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
-# ## 1) Assumed credentials via STS if role_arn is specified
-# ## 2) explicit credentials from 'access_key' and 'secret_key'
-# ## 3) shared profile from 'profile'
-# ## 4) environment variables
-# ## 5) shared credentials file
-# ## 6) EC2 Instance Profile
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
# # access_key = ""
# # secret_key = ""
# # token = ""
# # role_arn = ""
+# # web_identity_token_file = ""
+# # role_session_name = ""
# # profile = ""
# # shared_credential_file = ""
#
@@ -6439,23 +8648,58 @@
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
+# ##
+# ## The content encoding of the data from kinesis
+# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip"
+# ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws
+# ## also base64 encodes the zip byte data before pushing to the stream; the base64 decoding
+# ## is done automatically by the golang sdk, as data is read from kinesis)
+# ##
+# # content_encoding = "identity"
+#
# ## Optional
# ## Configuration for a dynamodb checkpoint
# [inputs.kinesis_consumer.checkpoint_dynamodb]
-# ## unique name for this consumer
-# app_name = "default"
-# table_name = "default"
+# ## unique name for this consumer
+# app_name = "default"
+# table_name = "default"
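
For the CloudWatch Logs case described above, a sketch with gzip decoding enabled; the region and stream name are hypothetical:

[[inputs.kinesis_consumer]]
  region = "us-west-2"
  streamname = "cloudwatch-logs-stream"
  ## CloudWatch log data arrives gzip-compressed
  content_encoding = "gzip"
  data_format = "json"
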
+
+
+# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
+# [[inputs.knx_listener]]
+# ## Type of KNX-IP interface.
+# ## Can be either "tunnel" or "router".
+# # service_type = "tunnel"
+#
+# ## Address of the KNX-IP interface.
+# service_address = "localhost:3671"
+#
+# ## Measurement definition(s)
+# # [[inputs.knx_listener.measurement]]
+# # ## Name of the measurement
+# # name = "temperature"
+# # ## Datapoint-Type (DPT) of the KNX messages
+# # dpt = "9.001"
+# # ## List of Group-Addresses (GAs) assigned to the measurement
+# # addresses = ["5/5/1"]
+#
+# # [[inputs.knx_listener.measurement]]
+# # name = "illumination"
+# # dpt = "9.004"
+# # addresses = ["5/5/3"]
# # Read metrics off Arista LANZ, via socket
# [[inputs.lanz]]
# ## URL to Arista LANZ endpoint
# servers = [
-# "tcp://127.0.0.1:50001"
+# "tcp://switch1.int.example.com:50001",
+# "tcp://switch2.int.example.com:50001",
# ]
-# # Stream and parse log file(s).
+# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead.
+# # Stream and parse log file(s).
# [[inputs.logparser]]
# ## Log files to parse.
# ## These accept standard unix glob matching rules, but with the addition of
@@ -6504,15 +8748,15 @@
# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
# # timezone = "Canada/Eastern"
#
-# ## When set to "disable", timestamp will not incremented if there is a
-# ## duplicate.
+# ## When set to "disable", timestamp will not be incremented if there is a
+# ## duplicate.
# # unique_timestamp = "auto"
# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
# ## Broker URLs for the MQTT server or cluster. To connect to multiple
-# ## clusters or standalone servers, use a seperate plugin instance.
+# ## clusters or standalone servers, use a separate plugin instance.
# ## example: servers = ["tcp://localhost:1883"]
# ## servers = ["ssl://localhost:1883"]
# ## servers = ["ws://localhost:1883"]
@@ -6577,6 +8821,17 @@
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
+#
+# ## Enable extracting tag values from MQTT topics
+# ## _ denotes an ignored entry in the topic path
+# # [[inputs.mqtt_consumer.topic_parsing]]
+# # topic = ""
+# # measurement = ""
+# # tags = ""
+# # fields = ""
+# ## Value supported is int, float, uint
+# # [[inputs.mqtt_consumer.topic.types]]
+# # key = type
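
To illustrate topic parsing, a sketch assuming a hypothetical sensors/<device>/temperature topic layout; the second path element is extracted as a "device" tag:

[[inputs.mqtt_consumer]]
  servers = ["tcp://localhost:1883"]
  topics = ["sensors/+/temperature"]
  data_format = "value"
  data_type = "float"

  [[inputs.mqtt_consumer.topic_parsing]]
    topic = "sensors/+/temperature"
    tags = "_/device/_"
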
# # Read metrics from NATS subject(s)
@@ -6629,7 +8884,7 @@
# data_format = "influx"
-# # Read NSQ topic for metrics.
+# # Read metrics from NSQD topic(s)
# [[inputs.nsq_consumer]]
# ## Server option still works but is deprecated, we just prepend it to the nsqd array.
# # server = "localhost:4150"
@@ -6660,13 +8915,39 @@
# data_format = "influx"
+# # Receive OpenTelemetry traces, metrics, and logs over gRPC
+# [[inputs.opentelemetry]]
+# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service
+# ## address:port
+# # service_address = "0.0.0.0:4317"
+#
+# ## Override the default (5s) new connection timeout
+# # timeout = "5s"
+#
+# ## Override the default (prometheus-v1) metrics schema.
+# ## Supports: "prometheus-v1", "prometheus-v2"
+# ## For more information about the alternatives, read the Prometheus input
+# ## plugin notes.
+# # metrics_schema = "prometheus-v1"
+#
+# ## Optional TLS Config.
+# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
+# ##
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Add service certificate and key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+
+
# # Read metrics from one or many pgbouncer servers
# [[inputs.pgbouncer]]
# ## specify address via a url matching:
-# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+# ## postgres://[pqgotest[:password]]@host:port[/dbname]\
# ## ?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
-# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
# ##
# ## All connection parameters are optional.
# ##
@@ -6676,8 +8957,7 @@
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
# ## specify address via a url matching:
-# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
-# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
# ## or a simple string:
# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
# ##
@@ -6697,7 +8977,7 @@
# ## connection configuration.
# ## maxlifetime - specify the maximum lifetime of a connection.
# ## default is forever (0s)
-# max_lifetime = "0s"
+# # max_lifetime = "0s"
#
# ## A list of databases to explicitly ignore. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'databases' option.
@@ -6706,73 +8986,67 @@
# ## A list of databases to pull metrics about. If not specified, metrics for all
# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
# # databases = ["app_production", "testing"]
+#
+# ## Whether to use prepared statements when connecting to the database.
+# ## This should be set to false when connecting through a PgBouncer instance
+# ## with pool_mode set to transaction.
+# prepared_statements = true
#
# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
-# ## specify address via a url matching:
-# ## postgres://[pqgotest[:password]]@localhost[/dbname]\
-# ## ?sslmode=[disable|verify-ca|verify-full]
-# ## or a simple string:
-# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# # specify address via a url matching:
+# # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=...
+# # or a simple string:
+# # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
# #
-# ## All connection parameters are optional.
#
-# ## Without the dbname parameter, the driver will default to a database
-# ## with the same name as the user. This dbname is just for instantiating a
-# ## connection with the server and doesn't restrict the databases we are trying
-# ## to grab metrics for.
+# # All connection parameters are optional.
+# # Without the dbname parameter, the driver will default to a database
+# # with the same name as the user. This dbname is just for instantiating a
+# # connection with the server and doesn't restrict the databases we are trying
+# # to grab metrics for.
# #
# address = "host=localhost user=postgres sslmode=disable"
#
-# ## connection configuration.
-# ## maxlifetime - specify the maximum lifetime of a connection.
-# ## default is forever (0s)
-# max_lifetime = "0s"
+# ## A list of databases to pull metrics about.
+# ## deprecated in 1.22.3; use the sqlquery option to specify database to use
+# # databases = ["app_production", "testing"]
#
-# ## A list of databases to pull metrics about. If not specified, metrics for all
-# ## databases are gathered.
-# ## databases = ["app_production", "testing"]
+# ## Whether to use prepared statements when connecting to the database.
+# ## This should be set to false when connecting through a PgBouncer instance
+# ## with pool_mode set to transaction.
+# prepared_statements = true
+#
+# # Define the toml config where the sql queries are stored
+# # The script option can be used to specify the .sql file path.
+# # If script and sqlquery options are specified at the same time, sqlquery will be used
# #
-# ## A custom name for the database that will be used as the "server" tag in the
-# ## measurement output. If not specified, a default one generated from
-# ## the connection address is used.
-# # outputaddress = "db01"
+# # the tagvalue field is used to define custom tags (separated by commas).
+# # the query is expected to return columns which match the names of the
+# # defined tags. The values in these columns must be of a string-type,
+# # a number-type or a blob-type.
+# #
+# # The timestamp field is used to override the data points timestamp value. By
+# # default, all rows are inserted with the current time. By setting a timestamp column,
+# # the row will be inserted with that column's value.
# #
-# ## Define the toml config where the sql queries are stored
-# ## New queries can be added, if the withdbname is set to true and there is no
-# ## databases defined in the 'databases field', the sql query is ended by a
-# ## 'is not null' in order to make the query succeed.
-# ## Example :
-# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become
-# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
-# ## because the databases variable was set to ['postgres', 'pgbench' ] and the
-# ## withdbname was true. Be careful that if the withdbname is set to false you
-# ## don't have to define the where clause (aka with the dbname) the tagvalue
-# ## field is used to define custom tags (separated by commas)
-# ## The optional "measurement" value can be used to override the default
-# ## output measurement name ("postgresql").
-# ##
-# ## The script option can be used to specify the .sql file path.
-# ## If script and sqlquery options specified at same time, sqlquery will be used
-# ##
-# ## Structure :
-# ## [[inputs.postgresql_extensible.query]]
-# ## sqlquery string
-# ## version string
-# ## withdbname boolean
-# ## tagvalue string (comma separated)
-# ## measurement string
+# # Structure :
+# # [[inputs.postgresql_extensible.query]]
+# # sqlquery string
+# # version string
+# # withdbname boolean
+# # tagvalue string (comma separated)
+# # timestamp string
# [[inputs.postgresql_extensible.query]]
-# sqlquery="SELECT * FROM pg_stat_database"
+# sqlquery="SELECT * FROM pg_stat_database where datname"
# version=901
# withdbname=false
# tagvalue=""
-# measurement=""
# [[inputs.postgresql_extensible.query]]
-# sqlquery="SELECT * FROM pg_stat_bgwriter"
+# script="your_sql-filepath.sql"
# version=901
# withdbname=false
-# tagvalue="postgresql.stats"
+# tagvalue=""
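
A sketch of a custom query using the options just described; the query and tag column are illustrative:

[[inputs.postgresql_extensible]]
  address = "host=localhost user=postgres sslmode=disable"
  prepared_statements = true

  [[inputs.postgresql_extensible.query]]
    sqlquery = "SELECT datname, numbackends, xact_commit FROM pg_stat_database"
    version = 901
    withdbname = false
    tagvalue = "datname"
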
# # Read metrics from one or many prometheus clients
@@ -6780,17 +9054,17 @@
# ## An array of urls to scrape metrics from.
# urls = ["http://localhost:9100/metrics"]
#
-# ## Metric version controls the mapping from Prometheus metrics into
-# ## Telegraf metrics. When using the prometheus_client output, use the same
-# ## value in both plugins to ensure metrics are round-tripped without
-# ## modification.
-# ##
-# ## example: metric_version = 1; deprecated in 1.13
-# ## metric_version = 2; recommended version
+# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics.
+# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details.
+# ## Valid options: 1, 2
# # metric_version = 1
#
# ## Url tag name (tag containing scraped url. optional, default is "url")
-# # url_tag = "scrapeUrl"
+# # url_tag = "url"
+#
+# ## Whether the timestamp of the scraped metrics will be ignored.
+# ## If set to true, the gather time will be used.
+# # ignore_timestamp = false
#
# ## An array of Kubernetes services to scrape metrics from.
# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
@@ -6805,6 +9079,20 @@
# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
# ## - prometheus.io/port: If port is not 9102 use this annotation
# # monitor_kubernetes_pods = true
+#
+# ## Get the list of pods to scrape with either the scope of
+# ## - cluster: the kubernetes watch api (default, no need to specify)
+# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
+# # pod_scrape_scope = "cluster"
+#
+# ## Only for node scrape scope: node IP of the node that telegraf is running on.
+# ## Either this config or the environment variable NODE_IP must be set.
+# # node_ip = "10.180.1.1"
+#
+# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
+# ## Default is 60 seconds.
+# # pod_scrape_interval = 60
+#
# ## Restricts Kubernetes monitoring to a single namespace
# ## ex: monitor_kubernetes_pods_namespace = "default"
# # monitor_kubernetes_pods_namespace = ""
@@ -6814,6 +9102,23 @@
# # eg. To scrape pods on a specific node
# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
#
+# # cache refresh interval to set the interval for re-sync of pods list.
+# # Default is 60 minutes.
+# # cache_refresh_interval = 60
+#
+# ## Scrape Services available in Consul Catalog
+# # [inputs.prometheus.consul]
+# # enabled = true
+# # agent = "http://localhost:8500"
+# # query_interval = "5m"
+#
+# # [[inputs.prometheus.consul.query]]
+# # name = "a service name"
+# # tag = "a service tag"
+# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
+# # [inputs.prometheus.consul.query.tags]
+# # host = "{{.Node}}"
+#
# ## Use bearer token for authorization. ('bearer_token' takes priority)
# # bearer_token = "/path/to/bearer/token"
# ## OR
@@ -6831,10 +9136,84 @@
# # tls_ca = /path/to/cafile
# # tls_cert = /path/to/certfile
# # tls_key = /path/to/keyfile
+#
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
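
A sketch combining the options above, scraping annotated pods with the newer metric mapping; the namespace is hypothetical:

[[inputs.prometheus]]
  urls = ["http://localhost:9100/metrics"]
  metric_version = 2
  monitor_kubernetes_pods = true
  monitor_kubernetes_pods_namespace = "monitoring"
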
+# # RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required).
+# [[inputs.ras]]
+# ## Optional path to RASDaemon sqlite3 database.
+# ## Default: /var/lib/rasdaemon/ras-mc_event.db
+# # db_path = ""
+
+
+# # Read metrics from one or many redis servers
+# [[inputs.redis]]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## tcp://localhost:6379
+# ## tcp://:password@192.168.99.100
+# ## unix:///var/run/redis.sock
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 6379 is used
+# servers = ["tcp://localhost:6379"]
+#
+# ## Optional. Specify redis commands to retrieve values
+# # [[inputs.redis.commands]]
+# # # The command to run where each argument is a separate element
+# # command = ["get", "sample-key"]
+# # # The field to store the result in
+# # field = "sample-key-value"
+# # # The type of the result
+# # # Can be "string", "integer", or "float"
+# # type = "string"
+#
+# ## specify server password
+# # password = "s#cr@t%"
+#
+# ## specify username for ACL auth (Redis 6.0+)
+# # username = "default"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Riemann protobuf listener
+# [[inputs.riemann_listener]]
+# ## URL to listen on
+# ## Default is "tcp://:5555"
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+#
+# ## Maximum number of concurrent connections.
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+# ## Read timeout.
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+# ## Optional TLS configuration.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# # read_buffer_size = "64KiB"
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+
+
# # SFlow V5 Protocol Listener
# [[inputs.sflow]]
# ## Address to listen for sFlow packets.
@@ -6858,9 +9237,16 @@
# ## 1024. See README.md for details
# ##
# # service_address = "udp://:162"
+# ##
+# ## Path to mib files
+# ## Used by the gosmi translator.
+# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable
+# # path = ["/usr/share/snmp/mibs"]
+# ##
+# ## Deprecated in 1.20.0; no longer running snmptranslate
# ## Timeout running snmptranslate command
# # timeout = "5s"
-# ## Snmp version, defaults to 2c
+# ## Snmp version
# # version = "2c"
# ## SNMPv3 authentication and encryption options.
# ##
@@ -6939,9 +9325,229 @@
# # content_encoding = "identity"
-# # Statsd UDP/TCP Server
+# # Read metrics from SQL queries
+# [[inputs.sql]]
+# ## Database Driver
+# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for
+# ## a list of supported drivers.
+# driver = "mysql"
+#
+# ## Data source name for connecting
+# ## The syntax and supported options depend on the selected driver.
+# dsn = "username:password@mysqlserver:3307/dbname?param=value"
+#
+# ## Timeout for any operation
+# ## Note that the timeout for queries is per query not per gather.
+# # timeout = "5s"
+#
+# ## Connection time limits
+# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections
+# ## will not be closed automatically. If you specify a positive time, the connections will be closed after
+# ## idling or existing for at least that amount of time, respectively.
+# # connection_max_idle_time = "0s"
+# # connection_max_life_time = "0s"
+#
+# ## Connection count limits
+# ## By default the number of open connections is not limited and the number of maximum idle connections
+# ## will be inferred from the number of queries specified. If you specify a positive number for any of the
+# ## two options, connections will be closed when reaching the specified limit. The number of idle connections
+# ## will be clipped to the maximum number of connections limit if any.
+# # connection_max_open = 0
+# # connection_max_idle = auto
+#
+# [[inputs.sql.query]]
+# ## Query to perform on the server
+# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
+# ## Alternatively to specifying the query directly you can select a file here containing the SQL query.
+# ## Only one of 'query' and 'query_script' can be specified!
+# # query_script = "/path/to/sql/script.sql"
+#
+# ## Name of the measurement
+# ## In case both measurement and 'measurement_column' are given, the latter takes precedence.
+# # measurement = "sql"
+#
+# ## Column name containing the name of the measurement
+# ## If given, this will take precedence over the 'measurement' setting. In case a query result
+# ## does not contain the specified column, we fall-back to the 'measurement' setting.
+# # measurement_column = ""
+#
+# ## Column name containing the time of the measurement
+# ## If omitted, the time of the query will be used.
+# # time_column = ""
+#
+# ## Format of the time contained in 'time_column'
+# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format.
+# ## See https://golang.org/pkg/time/#Time.Format for details.
+# # time_format = "unix"
+#
+# ## Column names containing tags
+# ## An empty include list will reject all columns and an empty exclude list will not exclude any column.
+# ## I.e. by default no columns will be returned as tags and the tags are empty.
+# # tag_columns_include = []
+# # tag_columns_exclude = []
+#
+# ## Column names containing fields (explicit types)
+# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over
+# ## the automatic (driver-based) conversion below.
+# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined.
+# # field_columns_float = []
+# # field_columns_int = []
+# # field_columns_uint = []
+# # field_columns_bool = []
+# # field_columns_string = []
+#
+# ## Column names containing fields (automatic types)
+# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty
+# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields.
+# ## NOTE: We rely on the database driver to perform automatic datatype conversion.
+# # field_columns_include = []
+# # field_columns_exclude = []
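
Tying the query options together, a sketch reusing the sample query above and tagging rows by user and state:

[[inputs.sql]]
  driver = "mysql"
  dsn = "username:password@mysqlserver:3307/dbname?param=value"

  [[inputs.sql.query]]
    query = "SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
    measurement = "scoreboard"
    tag_columns_include = ["user", "state"]
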
+
+
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+# ## Specify instances to monitor with a list of connection strings.
+# ## All connection parameters are optional.
+# ## By default, the host is localhost, listening on default port, TCP 1433.
+# ## For Windows, the user is the currently running AD user (SSO).
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
+# ## parameters, in particular, tls connections can be created like so:
+# ## "encrypt=true;certificate=;hostNameInCertificate="
+# servers = [
+# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+# ]
+#
+# ## Authentication method
+# ## valid methods: "connection_string", "AAD"
+# # auth_method = "connection_string"
+#
+# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
+# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool"
+#
+# database_type = "SQLServer"
+#
+# ## A list of queries to include. If not specified, all the below listed queries are used.
+# include_query = []
+#
+# ## A list of queries to explicitly ignore.
+# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
+#
+# ## Queries enabled by default for database_type = "SQLServer" are -
+# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
+# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates,
+# ## SQLServerRecentBackups
+#
+# ## Queries enabled by default for database_type = "AzureSQLDB" are -
+# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
+# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
+#
+# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
+# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
+# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
+#
+# ## Queries enabled by default for database_type = "AzureSQLPool" are -
+# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
+# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
+#
+# ## Following are old config settings
+# ## You may use them only if you are using the earlier flavor of queries, however it is recommended to use
+# ## the new mechanism of identifying the database_type, thereby using its corresponding queries
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+# # query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+#
+# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health".
+# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers".
+# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues.
+# ## This setting/metric is optional and is disabled by default.
+# # health_metric = false
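
A sketch of a typical on-premises setup using the newer database_type mechanism; the connection string values are placeholders:

[[inputs.sqlserver]]
  servers = ["Server=192.168.1.10;Port=1433;User Id=telegraf;Password=secret;app name=telegraf;"]
  database_type = "SQLServer"
  exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
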
+#
+# ## Possible queries across different versions of the collectors
+# ## Queries enabled by default for specific Database Type
+#
+# ## database_type = AzureSQLDB by default collects the following queries
+# ## - AzureSQLDBWaitStats
+# ## - AzureSQLDBResourceStats
+# ## - AzureSQLDBResourceGovernance
+# ## - AzureSQLDBDatabaseIO
+# ## - AzureSQLDBServerProperties
+# ## - AzureSQLDBOsWaitstats
+# ## - AzureSQLDBMemoryClerks
+# ## - AzureSQLDBPerformanceCounters
+# ## - AzureSQLDBRequests
+# ## - AzureSQLDBSchedulers
+#
+# ## database_type = AzureSQLManagedInstance by default collects the following queries
+# ## - AzureSQLMIResourceStats
+# ## - AzureSQLMIResourceGovernance
+# ## - AzureSQLMIDatabaseIO
+# ## - AzureSQLMIServerProperties
+# ## - AzureSQLMIOsWaitstats
+# ## - AzureSQLMIMemoryClerks
+# ## - AzureSQLMIPerformanceCounters
+# ## - AzureSQLMIRequests
+# ## - AzureSQLMISchedulers
+#
+# ## database_type = AzureSQLPool by default collects the following queries
+# ## - AzureSQLPoolResourceStats
+# ## - AzureSQLPoolResourceGovernance
+# ## - AzureSQLPoolDatabaseIO
+# ## - AzureSQLPoolOsWaitStats
+# ## - AzureSQLPoolMemoryClerks
+# ## - AzureSQLPoolPerformanceCounters
+# ## - AzureSQLPoolSchedulers
+#
+# ## database_type = SQLServer by default collects the following queries
+# ## - SQLServerPerformanceCounters
+# ## - SQLServerWaitStatsCategorized
+# ## - SQLServerDatabaseIO
+# ## - SQLServerProperties
+# ## - SQLServerMemoryClerks
+# ## - SQLServerSchedulers
+# ## - SQLServerRequests
+# ## - SQLServerVolumeSpace
+# ## - SQLServerCpu
+# ## - SQLServerRecentBackups
+# ## and following as optional (if mentioned in the include_query list)
+# ## - SQLServerAvailabilityReplicaStates
+# ## - SQLServerDatabaseReplicaStates
+#
+# ## Version 2 by default collects the following queries
+# ## Version 2 is being deprecated, please consider using database_type.
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - DatabaseIO
+# ## - ServerProperties
+# ## - MemoryClerk
+# ## - Schedulers
+# ## - SqlRequests
+# ## - VolumeSpace
+# ## - Cpu
+#
+# ## Version 1 by default collects the following queries
+# ## Version 1 is deprecated, please consider using database_type.
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - CPUHistory
+# ## - DatabaseIO
+# ## - DatabaseSize
+# ## - DatabaseStats
+# ## - DatabaseProperties
+# ## - MemoryClerk
+# ## - VolumeSpace
+# ## - PerformanceMetrics
+
+
+# # Statsd Server
# [[inputs.statsd]]
-# ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp)
+# ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp)
# protocol = "udp"
#
# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
@@ -6970,7 +9576,7 @@
# ## Reset timings & histograms every interval (default=true)
# delete_timings = true
#
-# ## Percentiles to calculate for timing & histogram stats
+# ## Percentiles to calculate for timing & histogram stats.
# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
#
# ## separator to use between elements of a statsd metric
@@ -6978,11 +9584,18 @@
#
# ## Parses tags in the datadog statsd format
# ## http://docs.datadoghq.com/guides/dogstatsd/
+# ## deprecated in 1.10; use datadog_extensions option instead
# parse_data_dog_tags = false
#
-# ## Parses datadog extensions to the statsd format
+# ## Parses extensions to statsd in the datadog statsd format
+# ## currently supports metrics and datadog tags.
+# ## http://docs.datadoghq.com/guides/dogstatsd/
# datadog_extensions = false
#
+# ## Parses distributions metric as specified in the datadog statsd format
+# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition
+# datadog_distributions = false
+#
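
A sketch of a statsd server with the datadog extensions and distributions enabled; the listen address uses the conventional statsd port, chosen here for illustration:

[[inputs.statsd]]
  protocol = "udp"
  service_address = ":8125"
  datadog_extensions = true
  datadog_distributions = true
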
# ## Statsd data translation templates, more info can be read here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
# # templates = [
@@ -6997,11 +9610,26 @@
# ## calculation of percentiles. Raising this limit increases the accuracy
# ## of percentiles but also increases the memory usage and cpu time.
# percentile_limit = 1000
+#
+# ## Maximum socket buffer size in bytes, once the buffer fills up, metrics
+# ## will start dropping. Defaults to the OS default.
+# # read_buffer_size = 65535
+#
+# ## Max duration (TTL) for each metric to stay cached/reported without being updated.
+# # max_ttl = "10h"
+#
+# ## Sanitize name method
+# ## By default, telegraf will pass names directly as they are received.
+# ## However, upstream statsd now does sanitization of names which can be
+# ## enabled by using the "upstream" method option. This option will replace
+# ## white space with '_', replace '/' with '-', and remove characters not
+# ## matching 'a-zA-Z_\-0-9\.;='.
+# # sanitize_name_method = ""
-# # Suricata stats plugin
+# # Suricata stats and alerts plugin
# [[inputs.suricata]]
-# ## Data sink for Suricata stats log
+# ## Data sink for Suricata stats log.
# # This is expected to be a filename of a
# # unix socket to be created for listening.
# source = "/var/run/suricata-stats.sock"
@@ -7009,14 +9637,18 @@
# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
# # becomes "detect_alert" when delimiter is "_".
# delimiter = "_"
+#
+# # Detect alert logs
+# alerts = false
-# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587
# [[inputs.syslog]]
-# ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514
# ## Protocol, address and port to host the syslog receiver.
# ## If no host is specified, then localhost is used.
# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
+# ## ex: server = "tcp://localhost:6514"
+# ## server = "udp://:6514"
+# ## server = "unix:///var/run/telegraf-syslog.sock"
# server = "tcp://:6514"
#
# ## TLS Config
@@ -7042,7 +9674,7 @@
# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
# ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
-# ## Must be one of "octet-counting", "non-transparent".
+# ## Must be one of "octet-counting", "non-transparent".
# # framing = "octet-counting"
#
# ## The trailer to be expected in case of non-transparent framing (default = "LF").
@@ -7053,6 +9685,11 @@
# ## By default best effort parsing is off.
# # best_effort = false
#
+# ## The RFC standard to use for message parsing
+# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support)
+# ## Must be one of "RFC5424", or "RFC3164".
+# # syslog_standard = "RFC5424"
+#
# ## Character to prepend to SD-PARAMs (default = "_").
# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
# ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
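
For the RFC3164 case mentioned above (UDP only), a sketch with lenient parsing enabled:

[[inputs.syslog]]
  server = "udp://:6514"
  syslog_standard = "RFC3164"
  best_effort = true
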
@@ -7069,7 +9706,8 @@
# ## "/var/log/**.log" -> recursively find all .log files in /var/log
# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
# ## "/var/log/apache.log" -> just tail the apache log file
-# ##
+# ## "/var/log/log[!1-2]*" -> tail files without 1-2
+# ## "/var/log/log[^1-2]*" -> identical behavior as above
# ## See https://github.com/gobwas/glob for more examples
# ##
# files = ["/var/mymetrics.out"]
@@ -7103,43 +9741,45 @@
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
# data_format = "influx"
#
+# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.
+# # path_tag = "path"
+#
+# ## Filters to apply to files before generating metrics
+# ## "ansi_color" removes ANSI colors
+# # filters = []
+#
# ## multiline parser/codec
# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
# #[inputs.tail.multiline]
-# ## The pattern should be a regexp which matches what you believe to be an
-# ## indicator that the field is part of an event consisting of multiple lines of log data.
+# ## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data.
# #pattern = "^\s"
#
-# ## This field must be either "previous" or "next".
-# ## If a line matches the pattern, "previous" indicates that it belongs to the previous line,
-# ## whereas "next" indicates that the line belongs to the next one.
+# ## The field's value must be previous or next and indicates the relation to the
+# ## multi-line event.
# #match_which_line = "previous"
#
-# ## The invert_match field can be true or false (defaults to false).
-# ## If true, a message not matching the pattern will constitute a match of the multiline
-# ## filter and the what will be applied. (vice-versa is also true)
+# ## The invert_match can be true or false (defaults to false).
+# ## If true, a message not matching the pattern will constitute a match of the multiline filter and the what will be applied. (vice-versa is also true)
# #invert_match = false
#
-# ## After the specified timeout, this plugin sends a multiline event even if no new pattern
-# ## is found to start a new event. The default timeout is 5s.
+# # After the specified timeout, this plugin sends the multiline event even if no new pattern is found to start a new event. The default is 5s.
# #timeout = 5s
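
A sketch that joins indented continuation lines (for example stack traces) onto the preceding line; the log path and grok pattern are hypothetical:

[[inputs.tail]]
  files = ["/var/log/app/service.log"]
  data_format = "grok"
  grok_patterns = ["%{TIMESTAMP_ISO8601:timestamp} %{GREEDYDATA:message}"]

  [inputs.tail.multiline]
    pattern = '^\s'
    match_which_line = "previous"
    timeout = "5s"
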
+# ## DEPRECATED: The 'tcp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead.
# # Generic TCP listener
# [[inputs.tcp_listener]]
-# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+# ## DEPRECATED: The 'udp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead.
# # Generic UDP listener
# [[inputs.udp_listener]]
-# # DEPRECATED: the TCP listener plugin has been deprecated in favor of the
-# # socket_listener plugin
# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
-# # Read metrics from VMware vCenter
+# # Read metrics from one or many vCenters
# [[inputs.vsphere]]
# ## List of vCenter URLs to be monitored. These three lines must be uncommented
# ## and edited for the plugin to work.
@@ -7256,6 +9896,13 @@
# # cluster_metric_exclude = [] ## Nothing excluded by default
# # cluster_instances = false ## false by default
#
+# ## Resource Pools
+# # resource_pool_include = [ "/*/host/**"] # Inventory path to resource pools to collect (by default all are collected)
+# # resource_pool_exclude = [] # Inventory paths to exclude
+# # resource_pool_metric_include = [] ## if omitted or empty, all metrics are collected
+# # resource_pool_metric_exclude = [] ## Nothing excluded by default
+# # resource_pool_instances = false ## false by default
+#
# ## Datastores
# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected)
# # datastore_exclude = [] # Inventory paths to exclude
@@ -7311,12 +9958,22 @@
# # custom_attribute_include = []
# # custom_attribute_exclude = ["*"]
#
+# ## The number of vSphere 5 minute metric collection cycles to look back for non-realtime metrics. In
+# ## some versions (6.7, 7.0 and possibly more), certain metrics, such as cluster metrics, may be reported
+# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing
+# ## it too much may cause performance issues.
+# # metric_lookback = 3
+#
# ## Optional SSL Config
# # ssl_ca = "/path/to/cafile"
# # ssl_cert = "/path/to/certfile"
# # ssl_key = "/path/to/keyfile"
# ## Use SSL but skip chain & host verification
# # insecure_skip_verify = false
+#
+# ## The Historical Interval value must match EXACTLY the interval in the daily
+# # "Interval Duration" found on the VCenter server under Configure > General > Statistics > Statistic intervals
+# # historical_interval = "5m"
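
Drawing on the resource pool options just added, a sketch of a vSphere input; the vCenter URL and credentials are hypothetical:

[[inputs.vsphere]]
  vcenters = ["https://vcenter.example.com/sdk"]
  username = "telegraf@vsphere.local"
  password = "secret"
  resource_pool_include = ["/*/host/**"]
  insecure_skip_verify = true
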
# # A Webhooks Event collector
@@ -7327,25 +9984,52 @@
# [inputs.webhooks.filestack]
# path = "/filestack"
#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
# [inputs.webhooks.github]
# path = "/github"
# # secret = ""
#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
# [inputs.webhooks.mandrill]
# path = "/mandrill"
#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
# [inputs.webhooks.rollbar]
# path = "/rollbar"
#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
# [inputs.webhooks.papertrail]
# path = "/papertrail"
#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
# [inputs.webhooks.particle]
# path = "/particle"
+#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
+# [inputs.webhooks.artifactory]
+# path = "/artifactory"
#
# # This plugin implements the Zipkin http server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
# [[inputs.zipkin]]
-# # path = "/api/v1/spans" # URL path for span data
-# # port = 9411 # Port on which Telegraf listens
+# # path = "/api/v1/spans" # URL path for span data
+# # port = 9411 # Port on which Telegraf listens
diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf
index 5b70928994158..fa0ef2ae214c6 100644
--- a/etc/telegraf_windows.conf
+++ b/etc/telegraf_windows.conf
@@ -21,80 +21,96 @@
 ## Environment variables can be used as tags, and throughout the config file
 # user = "$USER"
-
-# Configuration for telegraf agent
-[agent]
- ## Default data collection interval for all inputs
- interval = "10s"
- ## Rounds collection interval to 'interval'
- ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
- round_interval = true
-
- ## Telegraf will send metrics to outputs in batches of at most
- ## metric_batch_size metrics.
- ## This controls the size of writes that Telegraf sends to output plugins.
- metric_batch_size = 1000
-
- ## Maximum number of unwritten metrics per output. Increasing this value
- ## allows for longer periods of output downtime without dropping metrics at the
- ## cost of higher maximum memory usage.
- metric_buffer_limit = 10000
-
- ## Collection jitter is used to jitter the collection by a random amount.
- ## Each plugin will sleep for a random time within jitter before collecting.
- ## This can be used to avoid many plugins querying things like sysfs at the
- ## same time, which can have a measurable effect on the system.
- collection_jitter = "0s"
-
- ## Default flushing interval for all outputs. Maximum flush_interval will be
- ## flush_interval + flush_jitter
- flush_interval = "10s"
- ## Jitter the flush interval by a random amount. This is primarily to avoid
- ## large write spikes for users running a large number of telegraf instances.
- ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
- flush_jitter = "0s"
-
- ## By default or when set to "0s", precision will be set to the same
- ## timestamp order as the collection interval, with the maximum being 1s.
- ## ie, when interval = "10s", precision will be "1s"
- ## when interval = "250ms", precision will be "1ms"
- ## Precision will NOT be used for service inputs. It is up to each individual
- ## service input to set the timestamp at the appropriate precision.
- ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
- precision = ""
-
- ## Log at debug level.
- # debug = false
- ## Log only error level messages.
- # quiet = false
-
- ## Log target controls the destination for logs and can be one of "file",
- ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
- ## is determined by the "logfile" setting.
- # logtarget = "file"
-
- ## Name of the file to be logged to when using the "file" logtarget. If set to
- ## the empty string then logs are written to stderr.
- # logfile = ""
-
- ## The logfile will be rotated after the time interval specified. When set
- ## to 0 no time based rotation is performed. Logs are rotated only when
- ## written to, if there is no log activity rotation may be delayed.
- # logfile_rotation_interval = "0d"
-
- ## The logfile will be rotated when it becomes larger than the specified
- ## size. When set to 0 no size based rotation is performed.
- # logfile_rotation_max_size = "0MB"
-
- ## Maximum number of rotated archives to keep, any older logs are deleted.
- ## If set to -1, no archives are removed.
- # logfile_rotation_max_archives = 5
-
- ## Override default hostname, if empty use os.Hostname()
- hostname = ""
- ## If set to true, do no set the "host" tag in the telegraf agent.
- omit_hostname = false
-
+# Configuration for telegraf agent
+[agent]
+ ## Default data collection interval for all inputs
+ interval = "10s"
+ ## Rounds collection interval to 'interval'
+ ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
+ round_interval = true
+
+ ## Telegraf will send metrics to outputs in batches of at most
+ ## metric_batch_size metrics.
+ ## This controls the size of writes that Telegraf sends to output plugins.
+ metric_batch_size = 1000
+
+ ## Maximum number of unwritten metrics per output. Increasing this value
+ ## allows for longer periods of output downtime without dropping metrics at the
+ ## cost of higher maximum memory usage.
+ metric_buffer_limit = 10000
+
+ ## Collection jitter is used to jitter the collection by a random amount.
+ ## Each plugin will sleep for a random time within jitter before collecting.
+ ## This can be used to avoid many plugins querying things like sysfs at the
+ ## same time, which can have a measurable effect on the system.
+ collection_jitter = "0s"
+
+ ## Collection offset is used to shift the collection by the given amount.
+ ## This can be used to avoid many plugins querying constrained devices
+ ## at the same time by manually scheduling them in time.
+ # collection_offset = "0s"
+
+ ## Default flushing interval for all outputs. Maximum flush_interval will be
+ ## flush_interval + flush_jitter
+ flush_interval = "10s"
+ ## Jitter the flush interval by a random amount. This is primarily to avoid
+ ## large write spikes for users running a large number of telegraf instances.
+ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+ flush_jitter = "0s"
+
+ ## Collected metrics are rounded to the precision specified. Precision is
+ ## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
+ ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
+ ##
+ ## By default or when set to "0s", precision will be set to the same
+ ## timestamp order as the collection interval, with the maximum being 1s:
+ ## ie, when interval = "10s", precision will be "1s"
+ ## when interval = "250ms", precision will be "1ms"
+ ##
+ ## Precision will NOT be used for service inputs. It is up to each individual
+ ## service input to set the timestamp at the appropriate precision.
+ precision = "0s"
+
+ ## Log at debug level.
+ # debug = false
+ ## Log only error level messages.
+ # quiet = false
+
+ ## Log target controls the destination for logs and can be one of "file",
+ ## "stderr" or, on Windows, "eventlog". When set to "file", the output file
+ ## is determined by the "logfile" setting.
+ # logtarget = "file"
+
+ ## Name of the file to be logged to when using the "file" logtarget. If set to
+ ## the empty string then logs are written to stderr.
+ # logfile = ""
+
+ ## The logfile will be rotated after the time interval specified. When set
+ ## to 0 no time based rotation is performed. Logs are rotated only when
+ ## written to, if there is no log activity rotation may be delayed.
+ # logfile_rotation_interval = "0h"
+
+ ## The logfile will be rotated when it becomes larger than the specified
+ ## size. When set to 0 no size based rotation is performed.
+ # logfile_rotation_max_size = "0MB"
+
+ ## Maximum number of rotated archives to keep, any older logs are deleted.
+ ## If set to -1, no archives are removed.
+ # logfile_rotation_max_archives = 5
+
+ ## Pick a timezone to use when logging or type 'local' for local time.
+ ## Example: America/Chicago
+ # log_with_timezone = ""
+
+ ## Override default hostname, if empty use os.Hostname()
+ hostname = ""
+ ## If set to true, do not set the "host" tag in the telegraf agent.
+ omit_hostname = false
+
+ ## Method of translating SNMP objects. Can be "netsnmp" which
+ ## translates by calling external programs snmptranslate and snmptable,
+ ## or "gosmi" which translates using the built-in gosmi library.
+ # snmp_translator = "netsnmp"
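
To illustrate collection_offset alongside the jitter settings, a sketch that shifts all collections two seconds past each aligned tick:

[agent]
  interval = "10s"
  round_interval = true
  collection_jitter = "0s"
  collection_offset = "2s"
  flush_interval = "10s"
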
- # content_encoding = "identity" + # content_encoding = "gzip" ## When true, Telegraf will output unsigned integers as unsigned values, ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned @@ -179,19 +195,874 @@ ## existing data has been written. # influx_uint_support = false -# # Configuration for sending metrics to InfluxDB + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Publishes metrics to an AMQP broker +# [[outputs.amqp]] +# ## Broker to publish to. +# ## deprecated in 1.7; use the brokers option +# # url = "amqp://localhost:5672/influxdb" +# +# ## Brokers to publish to. If multiple brokers are specified a random broker +# ## will be selected anytime a connection is established. This can be +# ## helpful for load balancing when not using a dedicated load balancer. +# brokers = ["amqp://localhost:5672/influxdb"] +# +# ## Maximum messages to send over a connection. Once this is reached, the +# ## connection is closed and a new connection is made. This can be helpful for +# ## load balancing when not using a dedicated load balancer. +# # max_messages = 0 +# +# ## Exchange to declare and publish to. +# exchange = "telegraf" +# +# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". +# # exchange_type = "topic" +# +# ## If true, exchange will be passively declared. +# # exchange_passive = false +# +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" +# +# ## Additional exchange arguments. +# # exchange_arguments = { } +# # exchange_arguments = {"hash_property" = "timestamp"} +# +# ## Authentication credentials for the PLAIN auth_method. +# # username = "" +# # password = "" +# +# ## Auth method. PLAIN and EXTERNAL are supported +# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as +# ## described here: https://www.rabbitmq.com/plugins.html +# # auth_method = "PLAIN" +# +# ## Metric tag to use as a routing key. +# ## ie, if this tag exists, its value will be used as the routing key +# # routing_tag = "host" +# +# ## Static routing key. Used when no routing_tag is set or as a fallback +# ## when the tag specified in routing tag is not found. +# # routing_key = "" +# # routing_key = "telegraf" +# +# ## Delivery Mode controls if a published message is persistent. +# ## One of "transient" or "persistent". +# # delivery_mode = "transient" +# +# ## InfluxDB database added as a message header. +# ## deprecated in 1.7; use the headers option +# # database = "telegraf" +# +# ## InfluxDB retention policy added as a message header +# ## deprecated in 1.7; use the headers option +# # retention_policy = "default" +# +# ## Static headers added to each published message. +# # headers = { } +# # headers = {"database" = "telegraf", "retention_policy" = "default"} +# +# ## Connection timeout. If not provided, will default to 5s. 0s means no +# ## timeout (not recommended). +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## If true use batch serialization format instead of line based delimiting. 
+# ## Only applies to data formats which are not line based such as JSON.
+# ## Recommended to set to true.
+# # use_batch_format = false
+#
+# ## Content encoding for message payloads, can be set to "gzip" or
+# ## "identity" to apply no encoding.
+# ##
+# ## Please note that when use_batch_format = false each amqp message contains only
+# ## a single metric; it is recommended to use compression with the batch format
+# ## for best results.
+# # content_encoding = "identity"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Send metrics to Azure Application Insights
+# [[outputs.application_insights]]
+# ## Instrumentation key of the Application Insights resource.
+# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
+#
+# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
+# # endpoint_url = "https://dc.services.visualstudio.com/v2/track"
+#
+# ## Timeout for closing (default: 5s).
+# # timeout = "5s"
+#
+# ## Enable additional diagnostic logging.
+# # enable_diagnostic_logging = false
+#
+# ## Context Tag Sources add Application Insights context tags to a tag value.
+# ##
+# ## For list of allowed context tag keys see:
+# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
+# # [outputs.application_insights.context_tag_sources]
+# # "ai.cloud.role" = "kubernetes_container_name"
+# # "ai.cloud.roleInstance" = "kubernetes_pod_name"
+
+
+# # Sends metrics to Azure Data Explorer
+# [[outputs.azure_data_explorer]]
+# ## The URI property of the Azure Data Explorer resource on Azure
+# ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net
+# endpoint_url = ""
+#
+# ## The Azure Data Explorer database that the metrics will be ingested into.
+# ## The plugin will NOT generate this database automatically, it's expected that this database already exists before ingestion.
+# ## ex: "exampledatabase"
+# database = ""
+#
+# ## Timeout for Azure Data Explorer operations
+# # timeout = "20s"
+#
+# ## Type of metrics grouping used when pushing to Azure Data Explorer.
+# ## Default is "TablePerMetric" for one table per different metric.
+# ## For more information, please check the plugin README.
+# # metrics_grouping_type = "TablePerMetric"
+#
+# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
+# # table_name = ""
+#
+# ## Creates tables and relevant mapping if set to true (default).
+# ## Skips table and mapping creation if set to false; this is useful for running Telegraf with the lowest possible permissions, i.e. the table ingestor role.
+# # create_tables = true
+
+
+# # Send aggregate metrics to Azure Monitor
+# [[outputs.azure_monitor]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Set the namespace prefix, defaults to "Telegraf/<input-name>".
+# # namespace_prefix = "Telegraf/"
+#
+# ## Azure Monitor doesn't have a string value type, so convert string
+# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
+# ## a maximum of 10 dimensions so Telegraf will only send the first 10
+# ## alphanumeric dimensions.
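+# ## Illustrative sketch (metric and field names below are hypothetical): with
+# ## strings_as_dimensions = true, a metric such as
+# ## cpu,host=web01 status="ok",usage_idle=87.5
+# ## would be sent with dimensions host=web01 and status=ok attached to the
+# ## numeric value usage_idle; when disabled, the string field is not sent.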
+# # strings_as_dimensions = false
+#
+# ## Both region and resource_id must be set or be available via the
+# ## Instance Metadata service on Azure Virtual Machines.
+# #
+# ## Azure Region to publish metrics against.
+# ## ex: region = "southcentralus"
+# # region = ""
+# #
+# ## The Azure Resource ID against which metrics will be logged, e.g.
+# ## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
+# # resource_id = ""
+#
+# ## Optionally, if in Azure US Government, China, or other sovereign
+# ## cloud environment, set the appropriate REST endpoint for receiving
+# ## metrics. (Note: region may be unused in this context)
+# # endpoint_url = "https://monitoring.core.usgovcloudapi.net"
+
+
+# # Configuration for Google Cloud BigQuery to send entries
+# [[outputs.bigquery]]
+# ## Credentials File
+# credentials_file = "/path/to/service/account/key.json"
+#
+# ## Google Cloud Platform Project
+# project = "my-gcp-project"
+#
+# ## The namespace for the metric descriptor
+# dataset = "telegraf"
+#
+# ## Timeout for BigQuery operations.
+# # timeout = "5s"
+#
+# ## Character to replace hyphens in metric names
+# # replace_hyphen_to = "_"
+
+
+# # Publish Telegraf metrics to a Google Cloud PubSub topic
+# [[outputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub topic.
+# project = "my-project"
+#
+# ## Required. Name of PubSub topic to publish metrics to.
+# topic = "my-topic"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. If true, will send all metrics per write in one PubSub message.
+# # send_batched = true
+#
+# ## The following publish_* parameters specifically configure batching
+# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read
+# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
+#
+# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+# ## when it has this many PubSub messages. If send_batched is true,
+# ## this is ignored and treated as if it were 1.
+# # publish_count_threshold = 1000
+#
+# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
+# ## when its batched size reaches this many bytes. If send_batched is true,
+# ## this is ignored and treated as if it were 1.
+# # publish_byte_threshold = 1000000
+#
+# ## Optional. Number of goroutines used when publishing to the PubSub API.
+# # publish_num_go_routines = 2
+#
+# ## Optional. Specifies a timeout for requests to the PubSub API.
+# # publish_timeout = "30s"
+#
+# ## Optional. If true, published PubSub message data will be base64-encoded.
+# # base64_data = false
+#
+# ## Optional. PubSub attributes to add to metrics.
+# # [outputs.cloud_pubsub.attributes]
+# # my_attr = "tag_value"
+
+
+# # Configuration for AWS CloudWatch output.
+# [[outputs.cloudwatch]]
+# ## Amazon REGION
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Namespace for the CloudWatch MetricDatums
+# namespace = "InfluxData/Telegraf"
+#
+# ## If you have a large number of metrics, you should consider sending
+# ## statistic values instead of raw metrics, which can not only improve
+# ## performance but also save on AWS API cost. If you enable this flag, this
+# ## plugin will parse the required CloudWatch statistic fields (count, min,
+# ## max, and sum) and send them to CloudWatch. You can use the basicstats
+# ## aggregator to calculate those fields (see the sketch after this section).
+# ## If not all statistic fields are available, all fields will still be sent
+# ## as raw metrics.
+# # write_statistics = false
+#
+# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution metrics have a precision of 60 seconds)
+# # high_resolution_metrics = false
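+# ## Illustrative sketch for the write_statistics option above (the period
+# ## shown is hypothetical; the plugin and field names follow the upstream docs):
+# ## [[aggregators.basicstats]]
+# ## period = "60s"
+# ## stats = ["count", "min", "max", "sum"]
+# ## pre-computes the count/min/max/sum fields that this output can fold into
+# ## a single CloudWatch statistic set instead of many raw datums.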
+
+
+# # Configuration for AWS CloudWatchLogs output.
+# [[outputs.cloudwatch_logs]]
+# ## The region is the Amazon region that you wish to connect to.
+# ## Examples include but are not limited to:
+# ## - us-west-1
+# ## - us-west-2
+# ## - us-east-1
+# ## - ap-southeast-1
+# ## - ap-southeast-2
+# ## ...
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront!
+# ## For example, you can specify the name of the k8s cluster here to group logs from the whole cluster in one place
+# log_group = "my-group-name"
+#
+# ## Log stream in log group
+# ## Either a log stream name or a reference to a metric attribute from which it
+# ## can be parsed: tag:<TAG_NAME> or field:<FIELD_NAME>. If the log stream does
+# ## not exist, it will be created.
+# ## Since AWS does not automatically delete log streams with expired log entries
+# ## (i.e. empty log streams), you need to put appropriate house-keeping in place
+# ## (https://forums.aws.amazon.com/thread.jspa?threadID=178855)
+# log_stream = "tag:location"
+#
+# ## Source of log data - metric name
+# ## specify the name of the metric, from which the log data should be retrieved.
+# ## E.g., if you are using the docker_log plugin to stream logs from a container, then
+# ## specify log_data_metric_name = "docker_log"
+# log_data_metric_name = "docker_log"
+#
+# ## Specify from which metric attribute the log data should be retrieved:
+# ## tag:<TAG_NAME> or field:<FIELD_NAME>.
+# ## E.g., if you are using the docker_log plugin to stream logs from a container, then
+# ## specify log_data_source = "field:message"
+# log_data_source = "field:message"
+
+
+# # Configuration for CrateDB to send metrics to.
+# [[outputs.cratedb]]
+# # A github.com/jackc/pgx/v4 connection string.
+# # See https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig
+# url = "postgres://user:password@localhost/schema?sslmode=disable"
+# # Timeout for all CrateDB queries.
+# timeout = "5s"
+# # Name of the table to store metrics in.
+# table = "metrics"
+# # If true, and the metrics table does not exist, create it automatically.
+# table_create = true
+# # The character(s) to replace any '.' in an object key with
+# key_separator = "_"
+
+
+# # Configuration for DataDog API to send metrics to.
+# [[outputs.datadog]]
+# ## Datadog API key
+# apikey = "my-secret-key"
+#
+# ## Connection timeout.
+# # timeout = "5s"
+#
+# ## Write URL override; useful for debugging.
+# # url = "https://app.datadoghq.com/api/v1/series"
+#
+# ## Set http_proxy
+# # use_system_proxy = false
+# # http_proxy_url = "http://localhost:8888"
+#
+# ## Override the default (none) compression used to send data.
+# ## Supports: "zlib", "none"
+# # compression = "none"
+
+
+# # Send metrics to nowhere at all
+# [[outputs.discard]]
+# # no configuration
+
+
+# # Send telegraf metrics to a Dynatrace environment
+# [[outputs.dynatrace]]
+# ## For usage with the Dynatrace OneAgent you can omit any configuration;
+# ## the only requirement is that the OneAgent is running on the same host.
+# ## Only set up the environment URL and token if you want to monitor a host without the OneAgent present.
+# ##
+# ## Your Dynatrace environment URL.
+# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
+# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
+# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
+# url = ""
+#
+# ## Your Dynatrace API token.
+# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
+# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
+# api_token = ""
+#
+# ## Optional prefix for metric names (e.g.: "telegraf")
+# prefix = "telegraf"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Optional flag for ignoring tls certificate check
+# # insecure_skip_verify = false
+#
+# ## Connection timeout, defaults to "5s" if not set.
+# timeout = "5s" +# +# ## If you want metrics to be treated and reported as delta counters, add the metric names here +# additional_counters = [ ] +# +# ## Optional dimensions to be added to every metric +# # [outputs.dynatrace.default_dimensions] +# # default_key = "default value" + + +# # Configuration for Elasticsearch to send metrics to. +# [[outputs.elasticsearch]] +# ## The full HTTP endpoint URL for your Elasticsearch instance +# ## Multiple urls can be specified as part of the same cluster, +# ## this means that only ONE of the urls will be written to each interval +# urls = [ "http://node1.es.example.com:9200" ] # required. +# ## Elasticsearch client timeout, defaults to "5s" if not set. +# timeout = "5s" +# ## Set to true to ask Elasticsearch a list of all cluster nodes, +# ## thus it is not necessary to list all nodes in the urls config option +# enable_sniffer = false +# ## Set to true to enable gzip compression +# enable_gzip = false +# ## Set the interval to check if the Elasticsearch nodes are available +# ## Setting to "0s" will disable the health check (not recommended in production) +# health_check_interval = "10s" +# ## Set the timeout for periodic health checks. +# # health_check_timeout = "1s" +# ## HTTP basic authentication details. +# ## HTTP basic authentication details +# # username = "telegraf" +# # password = "mypassword" +# ## HTTP bearer token authentication details +# # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" +# +# ## Index Config +# ## The target index for metrics (Elasticsearch will create if it not exists). +# ## You can use the date specifiers below to create indexes per time frame. +# ## The metric timestamp will be used to decide the destination index name +# # %Y - year (2016) +# # %y - last two digits of year (00..99) +# # %m - month (01..12) +# # %d - day of month (e.g., 01) +# # %H - hour (00..23) +# # %V - week of the year (ISO week) (01..53) +# ## Additionally, you can specify a tag name using the notation {{tag_name}} +# ## which will be used as part of the index name. If the tag does not exist, +# ## the default tag value will be used. +# # index_name = "telegraf-{{host}}-%Y.%m.%d" +# # default_tag_value = "none" +# index_name = "telegraf-%Y.%m.%d" # required. +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Template Config +# ## Set to true if you want telegraf to manage its index template. +# ## If enabled it will create a recommended index template for telegraf indexes +# manage_template = true +# ## The template name used for telegraf indexes +# template_name = "telegraf" +# ## Set to true if you want telegraf to overwrite an existing template +# overwrite_template = false +# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string +# ## it will enable data resend and update metric points avoiding duplicated metrics with diferent id's +# force_document_id = false +# +# ## Specifies the handling of NaN and Inf values. 
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Template Config
+# ## Set to true if you want telegraf to manage its index template.
+# ## If enabled it will create a recommended index template for telegraf indexes
+# manage_template = true
+# ## The template name used for telegraf indexes
+# template_name = "telegraf"
+# ## Set to true if you want telegraf to overwrite an existing template
+# overwrite_template = false
+# ## If set to true a unique ID hash will be sent as sha256(concat(timestamp,measurement,series-hash)) string
+# ## it will enable data resend and update metric points, avoiding duplicated metrics with different IDs
+# force_document_id = false
+#
+# ## Specifies the handling of NaN and Inf values.
+# ## This option can have the following values:
+# ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered
+# ## drop -- drop fields containing NaNs or infs
+# ## replace -- replace with the value in "float_replacement_value" (default: 0.0)
+# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number
+# # float_handling = "none"
+# # float_replacement_value = 0.0
+#
+# ## Pipeline Config
+# ## To use an ingest pipeline, set this to the name of the pipeline you want to use.
+# # use_pipeline = "my_pipeline"
+# ## Additionally, you can specify a tag name using the notation {{tag_name}}
+# ## which will be used as part of the pipeline name. If the tag does not exist,
+# ## the default pipeline will be used as the pipeline. If no default pipeline is set,
+# ## no pipeline is used for the metric.
+# # use_pipeline = "{{es_pipeline}}"
+# # default_pipeline = "my_pipeline"
+
+
+# # Configuration for Event Hubs output plugin
+# [[outputs.event_hubs]]
+# ## The full connection string to the Event Hub (required)
+# ## The shared access key must have "Send" permissions on the target Event Hub.
+# connection_string = "Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName"
+#
+# ## Client timeout (defaults to 30s)
+# # timeout = "30s"
+#
+# ## Partition key
+# ## Metric tag or field name to use for the event partition key. The value of
+# ## this tag or field is set as the key for events if it exists. If both tag
+# ## and field exist, the tag is preferred.
+# # partition_key = ""
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "json"
+
+
+# # Send metrics to command as input over stdin
+# [[outputs.exec]]
+# ## Command to ingest metrics via stdin.
+# command = ["tee", "-a", "/dev/null"]
+#
+# ## Environment variables
+# ## Array of "key=value" pairs to pass as environment variables
+# ## e.g. "KEY=value", "USERNAME=John Doe",
+# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
+# # environment = []
+#
+# ## Timeout for command to complete.
+# # timeout = "5s"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Run executable as long-running output plugin
+# [[outputs.execd]]
+# ## One program to run as daemon.
+# ## NOTE: the process and each argument should each be their own string
+# command = ["my-telegraf-output", "--some-flag", "value"]
+#
+# ## Environment variables
+# ## Array of "key=value" pairs to pass as environment variables
+# ## e.g. "KEY=value", "USERNAME=John Doe",
+# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
+# # environment = []
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to export.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
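+# ## Minimal illustration for the execd sketch above (hypothetical command):
+# ## any long-running program that keeps reading the configured data format
+# ## from stdin will work, e.g. command = ["cat"] simply echoes each
+# ## influx-formatted metric line it receives.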
+# files = ["stdout", "/tmp/metrics.out"] +# +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more efficiently encode and write metrics. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0h" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. +# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, the output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. +# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## Graphite output template +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# template = "host.tags.measurement.field" +# +# ## Enable Graphite tags support +# # graphite_tag_support = false +# +# ## Define how metric names and tags are sanitized; options are "strict", or "compatible" +# ## strict - Default method, and backwards compatible with previous versionf of Telegraf +# ## compatible - More relaxed sanitizing when using tags, and compatible with the graphite spec +# # graphite_tag_sanitize_mode = "strict" +# +# ## Character for separating metric name and field for Graphite tags +# # graphite_separator = "." +# +# ## Graphite templates patterns +# ## 1. Template for cpu +# ## 2. Template for disk* +# ## 3. Default template +# # templates = [ +# # "cpu tags.measurement.host.field", +# # "disk* measurement.field", +# # "host.measurement.tags.field" +# #] +# +# ## timeout in seconds for the write connection to graphite +# timeout = 2 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Send telegraf metrics to graylog +# [[outputs.graylog]] +# ## Endpoints for your graylog instances. +# servers = ["udp://127.0.0.1:12201"] +# +# ## Connection timeout. +# # timeout = "5s" +# +# ## The field to use as the GELF short_message, if unset the static string +# ## "telegraf" will be used. +# ## example: short_message_field = "message" +# # short_message_field = "" +# +# ## According to GELF payload specification, additional fields names must be prefixed +# ## with an underscore. Previous versions did not prefix custom field 'name' with underscore. +# ## Set to true for backward compatibility. 
+# # name_field_no_prefix = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Send telegraf metrics to GroundWork Monitor
+# [[outputs.groundwork]]
+# ## URL of your groundwork instance.
+# url = "https://groundwork.example.com"
+#
+# ## Agent UUID for GroundWork API Server.
+# agent_id = ""
+#
+# ## Username and password to access GroundWork API.
+# username = ""
+# password = ""
+#
+# ## Default application type to use in GroundWork client
+# # default_app_type = "TELEGRAF"
+#
+# ## Default display name for the host with services (metrics).
+# # default_host = "telegraf"
+#
+# ## Default service state.
+# # default_service_state = "SERVICE_OK"
+#
+# ## The name of the tag that contains the hostname.
+# # resource_tag = "host"
+#
+# ## The name of the tag that contains the host group name.
+# # group_tag = "group"
+
+
+# # Configurable HTTP health check resource based on metrics
+# [[outputs.health]]
+# ## Address and port to listen on.
+# ## ex: service_address = "http://localhost:8080"
+# ## service_address = "unix:///var/run/telegraf-health.sock"
+# # service_address = "http://:8080"
+#
+# ## The maximum duration for reading the entire request.
+# # read_timeout = "5s"
+# ## The maximum duration for writing the entire response.
+# # write_timeout = "5s"
+#
+# ## Username and password to accept for HTTP basic authentication.
+# # basic_username = "user1"
+# # basic_password = "secret"
+#
+# ## Allowed CA certificates for client certificates.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## TLS server certificate and private key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## One or more check sub-tables should be defined; it is also recommended to
+# ## use metric filtering to limit the metrics that flow into this output.
+# ##
+# ## When using the default buffer sizes, this example will fail when the
+# ## metric buffer is half full.
+# ##
+# ## namepass = ["internal_write"]
+# ## tagpass = { output = ["influxdb"] }
+# ##
+# ## [[outputs.health.compares]]
+# ## field = "buffer_size"
+# ## lt = 5000.0
+# ##
+# ## [[outputs.health.contains]]
+# ## field = "buffer_size"
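+# ## Illustrative check (assuming the default service_address above): while
+# ## telegraf is running, `curl -i http://localhost:8080` should return
+# ## HTTP 200 as long as all compares/contains checks pass, and 503 once
+# ## any of them fails.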
+
+
+# # A plugin that can transmit metrics over HTTP
+# [[outputs.http]]
+# ## URL is the address to send metrics to
+# url = "http://127.0.0.1:8080/telegraf"
+#
+# ## Timeout for HTTP message
+# # timeout = "5s"
+#
+# ## HTTP method, one of: "POST" or "PUT"
+# # method = "POST"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## OAuth2 Client Credentials Grant
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## Google API Auth
+# # google_application_credentials = "/etc/telegraf/example_secret.json"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}'
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie
+# # cookie_auth_renewal = "5m"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## Use batch serialization format (default) instead of line based format.
+# ## Batch format is more efficient and should be used unless line based
+# ## format is really needed.
+# # use_batch_format = true
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Additional HTTP headers
+# # [outputs.http.headers]
+# # # Should be set manually to "application/json" for json data_format
+# # Content-Type = "text/plain; charset=utf-8"
+#
+# ## MaxIdleConns controls the maximum number of idle (keep-alive)
+# ## connections across all hosts. Zero means no limit.
+# # max_idle_conn = 0
+#
+# ## MaxIdleConnsPerHost, if non-zero, controls the maximum idle
+# ## (keep-alive) connections to keep per-host. If zero,
+# ## DefaultMaxIdleConnsPerHost is used (2).
+# # max_idle_conn_per_host = 2
+#
+# ## Idle (keep-alive) connection timeout.
+# ## Maximum amount of time before idle connection is closed.
+# ## Zero means no limit.
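+# ## e.g. idle_conn_timeout = "15s" would close keep-alive connections idle
+# ## for 15 seconds (an illustrative value assuming the usual Telegraf
+# ## duration syntax, not a recommendation).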
+# # idle_conn_timeout = 0 +# +# ## Amazon Region +# #region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #web_identity_token_file = "" +# #role_session_name = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Optional list of statuscodes (<200 or >300) upon which requests should not be retried +# # non_retryable_statuscodes = [409, 413] + + +# # Configuration for sending metrics to InfluxDB 2.0 # [[outputs.influxdb_v2]] # ## The URLs of the InfluxDB cluster nodes. # ## # ## Multiple URLs can be specified for a single cluster, only ONE of the # ## urls will be written to each interval. # ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] -# urls = ["http://127.0.0.1:9999"] +# urls = ["http://127.0.0.1:8086"] # # ## Token for authentication. # token = "" # -# ## Organization is the name of the organization you wish to write to; must exist. +# ## Organization is the name of the organization you wish to write to. # organization = "" # # ## Destination bucket to write into. @@ -232,188 +1103,8804 @@ # # insecure_skip_verify = false -############################################################################### -# INPUT PLUGINS # -############################################################################### - - -# Windows Performance Counters plugin. -# These are the recommended method of monitoring system metrics on windows, -# as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, -# which utilize more system resources. -# -# See more configuration examples at: -# https://github.com/influxdata/telegraf/tree/master/plugins/inputs/win_perf_counters - -[[inputs.win_perf_counters]] - [[inputs.win_perf_counters.object]] - # Processor usage, alternative to native, reports on a per core. - ObjectName = "Processor" - Instances = ["*"] - Counters = [ - "% Idle Time", - "% Interrupt Time", - "% Privileged Time", - "% User Time", - "% Processor Time", - "% DPC Time", - ] - Measurement = "win_cpu" - # Set to true to include _Total instance when querying for all (*). - IncludeTotal=true - - [[inputs.win_perf_counters.object]] - # Disk times and queues - ObjectName = "LogicalDisk" - Instances = ["*"] - Counters = [ - "% Idle Time", - "% Disk Time", - "% Disk Read Time", - "% Disk Write Time", - "% Free Space", - "Current Disk Queue Length", - "Free Megabytes", - ] - Measurement = "win_disk" - # Set to true to include _Total instance when querying for all (*). 
- #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - ObjectName = "PhysicalDisk" - Instances = ["*"] - Counters = [ - "Disk Read Bytes/sec", - "Disk Write Bytes/sec", - "Current Disk Queue Length", - "Disk Reads/sec", - "Disk Writes/sec", - "% Disk Time", - "% Disk Read Time", - "% Disk Write Time", - ] - Measurement = "win_diskio" - - [[inputs.win_perf_counters.object]] - ObjectName = "Network Interface" - Instances = ["*"] - Counters = [ - "Bytes Received/sec", - "Bytes Sent/sec", - "Packets Received/sec", - "Packets Sent/sec", - "Packets Received Discarded", - "Packets Outbound Discarded", - "Packets Received Errors", - "Packets Outbound Errors", - ] - Measurement = "win_net" - - [[inputs.win_perf_counters.object]] - ObjectName = "System" - Counters = [ - "Context Switches/sec", - "System Calls/sec", - "Processor Queue Length", - "System Up Time", - ] - Instances = ["------"] - Measurement = "win_system" - # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, - # such as from the Memory object. - ObjectName = "Memory" - Counters = [ - "Available Bytes", - "Cache Faults/sec", - "Demand Zero Faults/sec", - "Page Faults/sec", - "Pages/sec", - "Transition Faults/sec", - "Pool Nonpaged Bytes", - "Pool Paged Bytes", - "Standby Cache Reserve Bytes", - "Standby Cache Normal Priority Bytes", - "Standby Cache Core Bytes", - ] - # Use 6 x - to remove the Instance bit from the query. - Instances = ["------"] - Measurement = "win_mem" - # Set to true to include _Total instance when querying for all (*). - #IncludeTotal=false - - [[inputs.win_perf_counters.object]] - # Example query where the Instance portion must be removed to get data back, - # such as from the Paging File object. - ObjectName = "Paging File" - Counters = [ - "% Usage", - ] - Instances = ["_Total"] - Measurement = "win_swap" - - -# Windows system plugins using WMI (disabled by default, using -# win_perf_counters over WMI is recommended) - - -# # Read metrics about cpu usage -# [[inputs.cpu]] -# ## Whether to report per-cpu stats or not -# percpu = true -# ## Whether to report total system cpu stats or not -# totalcpu = true -# ## If true, collect raw CPU time metrics. -# collect_cpu_time = false -# ## If true, compute and report the sum of all non-idle CPU states. -# report_active = false - - -# # Read metrics about disk usage by mount point -# [[inputs.disk]] -# ## By default stats will be gathered for all mount points. -# ## Set mount_points will restrict the stats to only the specified mount points. -# # mount_points = ["/"] -# -# ## Ignore mount points by filesystem type. -# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] - - -# # Read metrics about disk IO by device -# [[inputs.diskio]] -# ## By default, telegraf will gather stats for all devices including -# ## disk partitions. -# ## Setting devices will restrict the stats to the specified devices. -# # devices = ["sda", "sdb", "vd*"] -# ## Uncomment the following line if you need disk serial numbers. -# # skip_serial_number = false -# # -# ## On systems which support it, device metadata can be added in the form of -# ## tags. -# ## Currently only Linux is supported via udev properties. 
You can view
-# ## available properties for a device by running:
-# ## 'udevadm info -q property -n /dev/sda'
-# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
-# #
-# ## Using the same metadata source as device_tags, you can also customize the
-# ## name of the device via templates.
-# ## The 'name_templates' parameter is a list of templates to try and apply to
-# ## the device. The template may contain variables in the form of '$PROPERTY' or
-# ## '${PROPERTY}'. The first template which does not contain any variables not
-# ## present for the device is used as the device name tag.
-# ## The typical use case is for LVM volumes, to get the VG/LV name instead of
-# ## the near-meaningless DM-0 name.
-# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
-
-
-# # Read metrics about memory usage
-# [[inputs.mem]]
-# # no configuration
+# # Configuration for sending metrics to an Instrumental project
+# [[outputs.instrumental]]
+# ## Project API Token (required)
+# api_token = "API Token" # required
+# ## Prefix the metrics with a given name
+# prefix = ""
+# ## Stats output template (Graphite formatting)
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
+# template = "host.tags.measurement.field"
+# ## Timeout in seconds to connect
+# timeout = "2s"
+# ## Debug true - Print communication to Instrumental
+# debug = false
-
-# # Read metrics about swap memory usage
-# [[inputs.swap]]
-# # no configuration
+# # Configuration for the Kafka server to send metrics to
+# [[outputs.kafka]]
+# ## URLs of kafka brokers
+# brokers = ["localhost:9092"]
+# ## Kafka topic for producer messages
+# topic = "telegraf"
+#
+# ## The value of this tag will be used as the topic. If not set the 'topic'
+# ## option is used.
+# # topic_tag = ""
+#
+# ## If true, the 'topic_tag' will be removed from the metric.
+# # exclude_topic_tag = false
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Of particular interest, lz4 compression
+# ## requires at least version 0.10.0.0.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## Optional topic suffix configuration.
+# ## If the section is omitted, no suffix is used.
+# ## Following topic suffix methods are supported:
+# ## measurement - suffix equals to separator + measurement's name
+# ## tags - suffix equals to separator + specified tags' values
+# ## interleaved with separator
+#
+# ## Suffix equals to "_" + measurement name
+# # [outputs.kafka.topic_suffix]
+# # method = "measurement"
+# # separator = "_"
+#
+# ## Suffix equals to "__" + measurement's "foo" tag value.
+# ## If there is no such tag, the suffix equals an empty string
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo"]
+# # separator = "__"
+#
+# ## Suffix equals to "_" + measurement's "foo" and "bar"
+# ## tag values, separated by "_". If such tags do not exist,
+# ## their values are treated as empty strings.
+# # [outputs.kafka.topic_suffix]
+# # method = "tags"
+# # keys = ["foo", "bar"]
+# # separator = "_"
+#
+# ## The routing tag specifies a tagkey on the metric whose value is used as
+# ## the message key. The message key is used to determine which partition to
+# ## send the message to. This tag is preferred over the routing_key option.
+# routing_tag = "host"
+#
+# ## The routing key is set as the message key and used to determine which
+# ## partition to send the message to.
This value is only used when no +# ## routing_tag is set or as a fallback when the tag specified in routing tag +# ## is not found. +# ## +# ## If set to "random", a random value will be generated for each message. +# ## +# ## When unset, no message key is added and each message is routed to a random +# ## partition. +# ## +# ## ex: routing_key = "random" +# ## routing_key = "telegraf" +# # routing_key = "" +# +# ## Compression codec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : None +# ## 1 : Gzip +# ## 2 : Snappy +# ## 3 : LZ4 +# ## 4 : ZSTD +# # compression_codec = 0 +# +# ## Idempotent Writes +# ## If enabled, exactly one copy of each message is written. +# # idempotent_writes = false +# +# ## RequiredAcks is used in Produce Requests to tell the broker how many +# ## replica acknowledgements it must see before responding +# ## 0 : the producer never waits for an acknowledgement from the broker. +# ## This option provides the lowest latency but the weakest durability +# ## guarantees (some data will be lost when a server fails). +# ## 1 : the producer gets an acknowledgement after the leader replica has +# ## received the data. This option provides better durability as the +# ## client waits until the server acknowledges the request as successful +# ## (only messages that were written to the now-dead leader but not yet +# ## replicated will be lost). +# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# # required_acks = -1 +# +# ## The maximum number of times to retry sending a metric before failing +# ## until the next flush. +# # max_retry = 3 +# +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional SOCKS5 proxy to use when connecting to brokers +# # socks5_enabled = true +# # socks5_address = "127.0.0.1:1080" +# # socks5_username = "alice" +# # socks5_password = "pass123" +# +# ## Optional SASL Config +# # sasl_username = "kafka" +# # sasl_password = "secret" +# +# ## Optional SASL: +# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI +# ## (defaults to PLAIN) +# # sasl_mechanism = "" +# +# ## used if sasl_mechanism is GSSAPI (experimental) +# # sasl_gssapi_service_name = "" +# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH +# # sasl_gssapi_auth_type = "KRB5_USER_AUTH" +# # sasl_gssapi_kerberos_config_path = "/" +# # sasl_gssapi_realm = "realm" +# # sasl_gssapi_key_tab_path = "" +# # sasl_gssapi_disable_pafxfast = false +# +# ## used if sasl_mechanism is OAUTHBEARER (experimental) +# # sasl_access_token = "" +# +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# # Disable Kafka metadata full fetch +# # metadata_full = false +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + +# # Configuration for the AWS Kinesis output. 
+# [[outputs.kinesis]]
+# ## Amazon REGION of kinesis endpoint.
+# region = "ap-southeast-2"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Kinesis StreamName must exist prior to starting telegraf.
+# streamname = "StreamName"
+#
+# ## The partition key can be calculated using one of several methods:
+# ##
+# ## Use a static value for all writes:
+# # [outputs.kinesis.partition]
+# # method = "static"
+# # key = "howdy"
+# #
+# ## Use a random partition key on each write:
+# # [outputs.kinesis.partition]
+# # method = "random"
+# #
+# ## Use the measurement name as the partition key:
+# # [outputs.kinesis.partition]
+# # method = "measurement"
+# #
+# ## Use the value of a tag for all writes; if the tag is not set, the
+# ## 'default' value is used. If no default is given, it falls back to "telegraf".
+# # [outputs.kinesis.partition]
+# # method = "tag"
+# # key = "host"
+# # default = "mykey"
+#
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+#
+# ## debug will show upstream aws messages.
+# debug = false
+
+
+# # Configuration for Librato API to send metrics to.
+# [[outputs.librato]]
+# ## Librato API Docs
+# ## http://dev.librato.com/v1/metrics-authentication
+# ## Librato API user
+# api_user = "telegraf@influxdb.com" # required.
+# ## Librato API token
+# api_token = "my-secret-token" # required.
+# ## Debug
+# # debug = false
+# ## Connection timeout.
+# # timeout = "5s"
+# ## Output source Template (same as graphite buckets)
+# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
+# ## This template is used in librato's source (not metric's name)
+# template = "host"
+
+
+# # A plugin that can send metrics over HTTPS to Logz.io
+# [[outputs.logzio]]
+# ## Connection timeout, defaults to "5s" if not set.
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Logz.io account token
+# token = "your logz.io token" # required
+#
+# ## Use your listener URL for your Logz.io account region.
+# # url = "https://listener.logz.io:8071"
+
+
+# # A plugin that can transmit logs to Loki
+# [[outputs.loki]]
+# ## The domain of Loki
+# domain = "https://loki.domain.tld"
+#
+# ## Endpoint for the write API
+# # endpoint = "/loki/api/v1/push"
+#
+# ## Connection timeout, defaults to "5s" if not set.
+# # timeout = "5s" +# +# ## Basic auth credential +# # username = "loki" +# # password = "pass" +# +# ## Additional HTTP headers +# # http_headers = {"X-Scope-OrgID" = "1"} +# +# ## If the request must be gzip encoded +# # gzip_request = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" + + +# # A plugin that can transmit logs to mongodb +# [[outputs.mongodb]] +# # connection string examples for mongodb +# dsn = "mongodb://localhost:27017" +# # dsn = "mongodb://mongod1:27017,mongod2:27017,mongod3:27017/admin&replicaSet=myReplSet&w=1" +# +# # overrides serverSelectionTimeoutMS in dsn if set +# # timeout = "30s" +# +# # default authentication, optional +# # authentication = "NONE" +# +# # for SCRAM-SHA-256 authentication +# # authentication = "SCRAM" +# # username = "root" +# # password = "***" +# +# # for x509 certificate authentication +# # authentication = "X509" +# # tls_ca = "ca.pem" +# # tls_key = "client.pem" +# # # tls_key_pwd = "changeme" # required for encrypted tls_key +# # insecure_skip_verify = false +# +# # database to store measurements and time series collections +# # database = "telegraf" +# +# # granularity can be seconds, minutes, or hours. +# # configuring this value will be based on your input collection frequency. +# # see https://docs.mongodb.com/manual/core/timeseries-collections/#create-a-time-series-collection +# # granularity = "seconds" +# +# # optionally set a TTL to automatically expire documents from the measurement collections. +# # ttl = "360h" + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# ## MQTT Brokers +# ## The list of brokers should only include the hostname or IP address and the +# ## port to the broker. This should follow the format '{host}:{port}'. For +# ## example, "localhost:1883" or "127.0.0.1:8883". +# servers = ["localhost:1883"] +# +# ## MQTT Topic for Producer Messages +# ## MQTT outputs send metrics to this topic format: +# ## /// (e.g. prefix/web01.example.com/mem) +# topic_prefix = "telegraf" +# +# ## QoS policy for messages +# ## The mqtt QoS policy for sending messages. +# ## See https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.0.0/com.ibm.mq.dev.doc/q029090_.htm +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# # qos = 2 +# +# ## Keep Alive +# ## Defines the maximum length of time that the broker and client may not +# ## communicate. Defaults to 0 which turns the feature off. +# ## +# ## For version v2.0.12 and later mosquitto there is a bug +# ## (see https://github.com/eclipse/mosquitto/issues/2117), which requires +# ## this to be non-zero. As a reference eclipse/paho.mqtt.golang defaults to 30. +# # keep_alive = 0 +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID +# ## The unique client id to connect MQTT server. If this parameter is not set +# ## then a random ID is generated. +# # client_id = "" +# +# ## Timeout for write operations. default: 5s +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## When true, metrics will be sent in one MQTT message per flush. Otherwise, +# ## metrics are written one metric per MQTT message. 
+# # batch = false
+#
+# ## When true, metrics will have the RETAIN flag set, making the broker cache
+# ## the entry until someone actually reads it
+# # retain = false
+#
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send telegraf measurements to NATS
+# [[outputs.nats]]
+# ## URLs of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## Optional client name
+# # name = ""
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## NATS subject for producer messages
+# subject = "telegraf"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send metrics to New Relic metrics endpoint
+# [[outputs.newrelic]]
+# ## The 'insights_key' parameter requires a NR license key.
+# ## New Relic recommends you create one
+# ## with a convenient name such as TELEGRAF_INSERT_KEY.
+# ## reference: https://docs.newrelic.com/docs/apis/intro-apis/new-relic-api-keys/#ingest-license-key
+# # insights_key = "New Relic License Key Here"
+#
+# ## Prefix to add to the metric name for easy identification.
+# ## This is very useful if your metric names are ambiguous.
+# # metric_prefix = ""
+#
+# ## Timeout for writes to the New Relic API.
+# # timeout = "15s"
+#
+# ## HTTP Proxy override. If unset use values from the standard
+# ## proxy environment variables to determine proxy, if any.
+# # http_proxy = "http://corporate.proxy:3128"
+#
+# ## Metric URL override to enable geographic location endpoints.
+# # If not set, the default endpoint below is used.
+# # metric_url = "https://metric-api.newrelic.com/metric/v1"
+
+
+# # Send telegraf measurements to NSQD
+# [[outputs.nsq]]
+# ## Location of nsqd instance listening on TCP
+# server = "localhost:4150"
+# ## NSQ topic for producer messages
+# topic = "telegraf"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# data_format = "influx"
+
+
+# # Send OpenTelemetry metrics over gRPC
+# [[outputs.opentelemetry]]
+# ## Override the default (localhost:4317) OpenTelemetry gRPC service
+# ## address:port
+# # service_address = "localhost:4317"
+#
+# ## Override the default (5s) request timeout
+# # timeout = "5s"
+#
+# ## Optional TLS Config.
+# ##
+# ## Root certificates for verifying server certificates encoded in PEM format.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# ## The public and private keypairs for the client encoded in PEM format.
+# ## May contain intermediate certificates.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS, but skip TLS chain and host verification.
+# # insecure_skip_verify = false
+# ## Send the specified TLS server name via SNI.
+# # tls_server_name = "foo.example.com" +# +# ## Override the default (gzip) compression used to send data. +# ## Supports: "gzip", "none" +# # compression = "gzip" +# +# ## Additional OpenTelemetry resource attributes +# # [outputs.opentelemetry.attributes] +# # "service.name" = "demo" +# +# ## Additional gRPC request metadata +# # [outputs.opentelemetry.headers] +# # key1 = "value1" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." +# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server +# port = 4242 +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# http_batch_size = 50 +# +# ## URI Path for Http requests to OpenTSDB. +# ## Used in cases where OpenTSDB is located behind a reverse proxy. +# http_path = "/api/put" +# +# ## Debug true - Prints OpenTSDB communication +# debug = false +# +# ## Separator separates measurement name from field +# separator = "_" + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on. +# listen = ":9273" +# +# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. +# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. +# ## Valid options: 1, 2 +# # metric_version = 1 +# +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" +# +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration +# # expiration_interval = "60s" +# +# ## Collectors to enable, valid entries are "gocollector" and "process". +# ## If unset, both are enabled. +# # collectors_exclude = ["gocollector", "process"] +# +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false + + +# # Configuration for Riemann to send metrics to +# [[outputs.riemann]] +# ## The full TCP or UDP URL of the Riemann server +# url = "tcp://localhost:5555" +# +# ## Riemann event TTL, floating-point time in seconds. +# ## Defines how long that an event is considered valid for in Riemann +# # ttl = 30.0 +# +# ## Separator to use between measurement and field name in Riemann service name +# ## This does not have any effect if 'measurement_as_attribute' is set to 'true' +# separator = "/" +# +# ## Set measurement name as Riemann attribute 'measurement', instead of prepending it to the Riemann service name +# # measurement_as_attribute = false +# +# ## Send string metrics as Riemann event states. 
+# ## Unless enabled, all string metrics will be ignored
+# # string_as_state = false
+#
+# ## A list of tag keys whose values get sent as Riemann tags.
+# ## If empty, all Telegraf tag values will be sent as tags
+# # tag_keys = ["telegraf","custom_tag"]
+#
+# ## Additional Riemann tags to send.
+# # tags = ["telegraf-output"]
+#
+# ## Description for Riemann event
+# # description_text = "metrics collected from telegraf"
+#
+# ## Riemann client write timeout, defaults to "5s" if not set.
+# # timeout = "5s"
+
+
+# ## DEPRECATED: The 'riemann_legacy' plugin is deprecated in version 1.3.0, use 'outputs.riemann' instead (see https://github.com/influxdata/telegraf/issues/1878).
+# # Configuration for the Riemann server to send metrics to
+# [[outputs.riemann_legacy]]
+# ## URL of server
+# url = "localhost:5555"
+# ## Transport protocol to use, either tcp or udp
+# transport = "tcp"
+# ## Separator to use between input name and field name in Riemann service name
+# separator = " "
+
+
+# # Send aggregate metrics to Sensu Monitor
+# [[outputs.sensu]]
+# ## BACKEND API URL is the Sensu Backend API root URL to send metrics to
+# ## (protocol, host, and port only). The output plugin will automatically
+# ## append the corresponding backend API path
+# ## (/api/core/v2/namespaces/:entity_namespace/events/:entity_name/:check_name).
+# ##
+# ## Backend Events API reference:
+# ## https://docs.sensu.io/sensu-go/latest/api/events/
+# ##
+# ## AGENT API URL is the Sensu Agent API root URL to send metrics to
+# ## (protocol, host, and port only). The output plugin will automatically
+# ## append the corresponding agent API path (/events).
+# ##
+# ## Agent API Events API reference:
+# ## https://docs.sensu.io/sensu-go/latest/api/events/
+# ##
+# ## NOTE: if backend_api_url, agent_api_url, and api_key are all set, the
+# ## output plugin will use backend_api_url. If backend_api_url and
+# ## agent_api_url are not provided, the output plugin will default to using
+# ## an agent_api_url of http://127.0.0.1:3031
+# ##
+# # backend_api_url = "http://127.0.0.1:8080"
+# # agent_api_url = "http://127.0.0.1:3031"
+#
+# ## API KEY is the Sensu Backend API token
+# ## Generate a new API token via:
+# ##
+# ## $ sensuctl cluster-role create telegraf --verb create --resource events,entities
+# ## $ sensuctl cluster-role-binding create telegraf --cluster-role telegraf --group telegraf
+# ## $ sensuctl user create telegraf --group telegraf --password REDACTED
+# ## $ sensuctl api-key grant telegraf
+# ##
+# ## For more information on Sensu RBAC profiles & API tokens, please visit:
+# ## - https://docs.sensu.io/sensu-go/latest/reference/rbac/
+# ## - https://docs.sensu.io/sensu-go/latest/reference/apikeys/
+# ##
+# # api_key = "${SENSU_API_KEY}"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Timeout for HTTP message
+# # timeout = "5s"
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Sensu Event details
+# ##
+# ## Below are the event details to be sent to Sensu. The main portions of the
+# ## event are the check, entity, and metrics specifications. For more information
+# ## on Sensu events and their components, please visit:
+# ## - Events - https://docs.sensu.io/sensu-go/latest/reference/events
+# ## - Checks - https://docs.sensu.io/sensu-go/latest/reference/checks
+# ## - Entities - https://docs.sensu.io/sensu-go/latest/reference/entities
+# ## - Metrics - https://docs.sensu.io/sensu-go/latest/reference/events#metrics
+# ##
+# ## Check specification
+# ## The check name is the name to give the Sensu check associated with the event
+# ## created. This maps to check.metadata.name in the event.
+# [outputs.sensu.check]
+# name = "telegraf"
+#
+# ## Entity specification
+# ## Configure the entity name and namespace, if necessary. This will be part of
+# ## the entity.metadata in the event.
+# ##
+# ## NOTE: if the output plugin is configured to send events to a
+# ## backend_api_url and entity_name is not set, the value returned by
+# ## os.Hostname() will be used; if the output plugin is configured to send
+# ## events to an agent_api_url, entity_name and entity_namespace are not used.
+# # [outputs.sensu.entity]
+# # name = "server-01"
+# # namespace = "default"
+#
+# ## Metrics specification
+# ## Configure the tags for the metrics that are sent as part of the Sensu event
+# # [outputs.sensu.tags]
+# # source = "telegraf"
+#
+# ## Configure the handler(s) for processing the provided metrics
+# # [outputs.sensu.metrics]
+# # handlers = ["influxdb","elasticsearch"]
+
+
+# # Send metrics and events to SignalFx
+# [[outputs.signalfx]]
+# ## SignalFx Org Access Token
+# access_token = "my-secret-token"
+#
+# ## The SignalFx realm that your organization resides in
+# signalfx_realm = "us9" # Required if ingest_url is not set
+#
+# ## You can optionally provide a custom ingest url instead of the
+# ## signalfx_realm option above if you are using a gateway or proxy
+# ## instance. This option takes precedence over signalfx_realm.
+# ingest_url = "https://my-custom-ingest/"
+#
+# ## Event typed metrics are omitted by default.
+# ## If you require an event typed metric you must specify the
+# ## metric name in the following list.
+# included_event_names = ["plugin.metric_name"]
+
+
+# # Generic socket writer capable of handling multiple socket types.
+# [[outputs.socket_writer]]
+# ## URL to connect to
+# # address = "tcp://127.0.0.1:8094"
+# # address = "tcp://example.com:http"
+# # address = "tcp4://127.0.0.1:8094"
+# # address = "tcp6://127.0.0.1:8094"
+# # address = "tcp6://[2001:db8::1]:8094"
+# # address = "udp://127.0.0.1:8094"
+# # address = "udp4://127.0.0.1:8094"
+# # address = "udp6://127.0.0.1:8094"
+# # address = "unix:///tmp/telegraf.sock"
+# # address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Content encoding for message payloads, can be set to "gzip" or to
+# ## "identity" to apply no encoding.
+# ##
+# # content_encoding = "identity"
+#
+# ## Data format to generate.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+
+
+# # Save metrics to an SQL Database
+# [[outputs.sql]]
+# ## Database driver
+# ## Valid options: mssql (Microsoft SQL Server), mysql (MySQL), pgx (Postgres),
+# ## sqlite (SQLite3), snowflake (snowflake.com), clickhouse (ClickHouse)
+# # driver = ""
+#
+# ## Data source name
+# ## The format of the data source name is different for each database driver.
+# ## See the plugin readme for details.
+# # data_source_name = ""
+#
+# ## Timestamp column name
+# # timestamp_column = "timestamp"
+#
+# ## Table creation template
+# ## Available template variables:
+# ## {TABLE} - table name as a quoted identifier
+# ## {TABLELITERAL} - table name as a quoted string literal
+# ## {COLUMNS} - column definitions (list of quoted identifiers and types)
+# # table_template = "CREATE TABLE {TABLE}({COLUMNS})"
+#
+# ## Table existence check template
+# ## Available template variables:
+# ## {TABLE} - table name as a quoted identifier
+# # table_exists_template = "SELECT 1 FROM {TABLE} LIMIT 1"
+#
+# ## Initialization SQL
+# # init_sql = ""
+#
+# ## Metric type to SQL type conversion
+# ## The values on the left are the data types Telegraf has and the values on
+# ## the right are the data types Telegraf will use when sending to a database.
+# ##
+# ## The database values used must be data types the destination database
+# ## understands. It is up to the user to ensure that the selected data type is
+# ## available in the database they are using. Refer to your database
+# ## documentation for what data types are available and supported.
+# #[outputs.sql.convert]
+# # integer = "INT"
+# # real = "DOUBLE"
+# # text = "TEXT"
+# # timestamp = "TIMESTAMP"
+# # defaultvalue = "TEXT"
+# # unsigned = "UNSIGNED"
+# # bool = "BOOL"
+#
+# ## This setting controls how unsigned integers are converted. With the
+# ## default "unsigned_suffix", the value of the 'unsigned' option above is
+# ## appended to the integer type. The other option is "literal", which uses
+# ## the value of the 'unsigned' option as the type itself. This is useful for
+# ## a database like ClickHouse where the unsigned type should be a value like
+# ## "uint64".
+# # conversion_style = "unsigned_suffix"
+
+
+# # Configuration for Google Cloud Stackdriver to send metrics to
+# [[outputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## The namespace for the metric descriptor
+# namespace = "telegraf"
+#
+# ## Custom resource type
+# # resource_type = "generic_node"
+#
+# ## Additional resource labels
+# # [outputs.stackdriver.resource_labels]
+# # node_id = "$HOSTNAME"
+# # namespace = "myapp"
+# # location = "eu-north0"
+
+
+# # A plugin that can send metrics to Sumo Logic HTTP metric collector.
+# [[outputs.sumologic]]
+# ## Unique URL generated for your HTTP Metrics Source.
+# ## This is the address to send metrics to.
+# # url = "https://events.sumologic.net/receiver/v1/http/"
+#
+# ## Data format to be used for sending metrics.
+# ## This will set the "Content-Type" header accordingly.
+# ## Currently supported formats:
+# ## * graphite - for Content-Type of application/vnd.sumologic.graphite
+# ## * carbon2 - for Content-Type of application/vnd.sumologic.carbon2
+# ## * prometheus - for Content-Type of application/vnd.sumologic.prometheus
+# ##
+# ## More information can be found at:
+# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#content-type-headers-for-metrics
+# ##
+# ## NOTE:
+# ## When unset, telegraf will by default use the influx serializer, which is
+# ## currently unsupported in HTTP Source.
+# data_format = "carbon2"
+#
+# ## Timeout used for HTTP request
+# # timeout = "5s"
+#
+# ## Max HTTP request body size in bytes before compression (if applied).
+# ## The default of 1MB is recommended.
+# ## NOTE:
+# ## Bear in mind that with some serializers a metric, even though serialized
+# ## to multiple lines, cannot be split any further, so setting this very low
+# ## might not work as expected.
+# # max_request_body_size = 1000000
+#
+# ## Additional, Sumo specific options.
+# ## Full list can be found here:
+# ## https://help.sumologic.com/03Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source/Upload-Metrics-to-an-HTTP-Source#supported-http-headers
+#
+# ## Desired source name.
+# ## Useful if you want to override the source name configured for the source.
+# # source_name = ""
+#
+# ## Desired host name.
+# ## Useful if you want to override the source host configured for the source.
+# # source_host = ""
+#
+# ## Desired source category.
+# ## Useful if you want to override the source category configured for the source.
+# # source_category = ""
+#
+# ## Comma-separated key=value list of dimensions to apply to every metric.
+# ## Custom dimensions will allow you to query your metrics at a more granular level.
+# # dimensions = ""
+
+
+# # Configuration for Syslog server to send metrics to
+# [[outputs.syslog]]
+# ## URL to connect to
+# ## ex: address = "tcp://127.0.0.1:8094"
+# ## ex: address = "tcp4://127.0.0.1:8094"
+# ## ex: address = "tcp6://127.0.0.1:8094"
+# ## ex: address = "tcp6://[2001:db8::1]:8094"
+# ## ex: address = "udp://127.0.0.1:8094"
+# ## ex: address = "udp4://127.0.0.1:8094"
+# ## ex: address = "udp6://127.0.0.1:8094"
+# address = "tcp://127.0.0.1:8094"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## The framing technique with which messages are expected to be transported
+# ## (default = "octet-counting"): either the octet-counting technique
+# ## (RFC5425#section-4.3.1, RFC6587#section-3.4.1) or the non-transparent
+# ## framing technique (RFC6587#section-3.4.2). Must be one of
+# ## "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## SD-PARAMs settings
+# ## Syslog messages can contain key/value pairs within zero or more
+# ## structured data sections. For each unrecognized metric tag/field, an
+# ## SD-PARAM is created.
+# ##
+# ## Example:
+# ## [[outputs.syslog]]
+# ## sdparam_separator = "_"
+# ## default_sdid = "default@32473"
+# ## sdids = ["foo@123", "bar@456"]
+# ##
+# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1
+# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y]
+#
+# ## SD-PARAMs separator between the sdid and tag/field key (default = "_")
+# # sdparam_separator = "_"
+#
+# ## Default sdid used for tags/fields that don't contain a prefix defined in
+# ## the explicit sdids setting below. If no default is specified, no SD-PARAMs
+# ## will be used for unrecognized fields.
+# # default_sdid = "default@32473"
+#
+# ## List of explicit prefixes to extract from tag/field keys and use as the
+# ## SDID, if they match (see above example for more details):
+# # sdids = ["foo@123", "bar@456"]
+#
+# ## Default severity value. Severity and Facility are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field
+# ## with key "severity_code" is defined. If unset, 5 (notice) is the default
+# # default_severity_code = 5
+#
+# ## Default facility value. Facility and Severity are used to calculate the
+# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with
+# ## key "facility_code" is defined. If unset, 1 (user-level) is the default
+# # default_facility_code = 1
+#
+# ## Default APP-NAME value (RFC5424#section-6.2.5)
+# ## Used when no metric tag with key "appname" is defined.
+# ## If unset, "Telegraf" is the default
+# # default_appname = "Telegraf"
+
+
+# # Configuration for sending metrics to Amazon Timestream.
+# [[outputs.timestream]]
+# ## Amazon Region
+# region = "us-east-1"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# #access_key = ""
+# #secret_key = ""
+# #token = ""
+# #role_arn = ""
+# #web_identity_token_file = ""
+# #role_session_name = ""
+# #profile = ""
+# #shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Timestream database where the metrics will be inserted.
+# ## The database must exist prior to starting Telegraf.
+# database_name = "yourDatabaseNameHere"
+#
+# ## Specifies whether the plugin should describe the Timestream database upon
+# ## starting, to validate that it has the necessary permissions, connectivity,
+# ## etc., as a safety check. If the describe operation fails, the plugin will
+# ## not start, and therefore the Telegraf agent will not start.
+# describe_database_on_start = false
+#
+# ## The mapping mode specifies how Telegraf records are represented in Timestream.
+# ## Valid values are: single-table, multi-table.
+# ## For example, consider the following data in line protocol format:
+# ## weather,location=us-midwest,season=summer temperature=82,humidity=71 1465839830100400200
+# ## airquality,location=us-west no2=5,pm25=16 1465839830100400200
+# ## where weather and airquality are the measurement names, location and season are tags,
+# ## and temperature, humidity, no2, pm25 are fields.
+# ## In multi-table mode:
+# ## - the first line will be ingested to a table named weather
+# ## - the second line will be ingested to a table named airquality
+# ## - the tags will be represented as dimensions
+# ## - the first table (weather) will have two records:
+# ## one with measure_name equal to temperature,
+# ## another with measure_name equal to humidity
+# ## - the second table (airquality) will have two records:
+# ## one with measure_name equal to no2,
+# ## another with measure_name equal to pm25
+# ## - the Timestream tables from the example will look like this:
+# ## TABLE "weather":
+# ## time | location | season | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | humidity | 71
+# ## TABLE "airquality":
+# ## time | location | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-west | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | pm25 | 16
+# ## In single-table mode:
+# ## - the data will be ingested to a single table, whose name will be valueOf(single_table_name)
+# ## - the measurement name will be stored in a dimension named valueOf(single_table_dimension_name_for_telegraf_measurement_name)
+# ## - location and season will be represented as dimensions
+# ## - temperature, humidity, no2, pm25 will be represented as measure names
+# ## - the Timestream table from the example will look like this:
+# ## Assuming:
+# ## - single_table_name = "my_readings"
+# ## - single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+# ## TABLE "my_readings":
+# ## time | location | season | namespace | measure_name | measure_value::bigint
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | temperature | 82
+# ## 2016-06-13 17:43:50 | us-midwest | summer | weather | humidity | 71
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | no2 | 5
+# ## 2016-06-13 17:43:50 | us-west | NULL | airquality | pm25 | 16
+# ## In most cases, using the multi-table mapping mode is recommended.
+# ## However, you can consider using single-table in situations when you have thousands of measurement names.
+# mapping_mode = "multi-table"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Specifies the Timestream table where the metrics will be uploaded.
+# # single_table_name = "yourTableNameHere"
+#
+# ## Only valid and required for mapping_mode = "single-table"
+# ## Describes what will be the Timestream dimension name for the Telegraf
+# ## measurement name.
+# # single_table_dimension_name_for_telegraf_measurement_name = "namespace"
+#
+# ## Specifies whether the plugin should create the table if it does not exist.
+# ## The plugin writes the data without first checking if the table exists.
+# ## When the table does not exist and this parameter is set to true, the error
+# ## returned from Timestream will cause the plugin to create the table.
+# create_table_if_not_exists = true
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table magnetic store retention period in days.
+# ## Check Timestream documentation for more details.
+# create_table_magnetic_store_retention_period_in_days = 365
+#
+# ## Only valid and required if create_table_if_not_exists = true
+# ## Specifies the Timestream table memory store retention period in hours.
+# ## Check Timestream documentation for more details.
+# create_table_memory_store_retention_period_in_hours = 24
+#
+# ## Only valid and optional if create_table_if_not_exists = true
+# ## Specifies the Timestream table tags.
+# ## Check Timestream documentation for more details.
+# # create_table_tags = { "foo" = "bar", "environment" = "dev"}
+#
+# ## Specifies the maximum number of parallel goroutines used to ingest/write data.
+# ## If not specified, defaults to 1 goroutine.
+# max_write_go_routines = 25
+
+
+# # Write metrics to Warp 10
+# [[outputs.warp10]]
+# # Prefix to add to the measurement.
+# prefix = "telegraf."
+#
+# # URL of the Warp 10 server
+# warp_url = "http://localhost:8080"
+#
+# # Write token to access your app on Warp 10
+# token = "Token"
+#
+# # Warp 10 query timeout
+# # timeout = "15s"
+#
+# ## Print Warp 10 error body
+# # print_error_body = false
+#
+# ## Max string error size
+# # max_string_error_size = 511
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Configuration for Wavefront server to send metrics to
+# [[outputs.wavefront]]
+# ## URL for Wavefront Direct Ingestion. For Wavefront Proxy Ingestion, see
+# ## the 'host' and 'port' options below.
+# url = "https://metrics.wavefront.com"
+#
+# ## Authentication Token for Wavefront. Only required if using Direct Ingestion
+# #token = "DUMMY_TOKEN"
+#
+# ## Maximum number of metrics to send per batch for Direct Ingestion. Ignored
+# ## unless 'url' is set. This value should be higher than the
+# ## `metric_batch_size`. Default is 10,000. Values higher than 40,000 are not
+# ## recommended.
+# # http_maximum_batch_size = 10000
+#
+# ## DNS name of the Wavefront proxy server. Do not use if url is specified
+# #host = "wavefront.example.com"
+#
+# ## Port that the Wavefront proxy server listens on. Do not use if url is specified
+# #port = 2878
+#
+# ## Prefix for metrics keys
+# #prefix = "my.specific.prefix."
+#
+# ## Whether to use "value" for the name of simple fields. Default is false
+# #simple_fields = false
+#
+# ## Character to use between metric and field name. Default is . (dot)
+# #metric_separator = "."
+#
+# ## Convert metric name paths to use the metricSeparator character
+# ## When true, will convert all _ (underscore) characters in the final metric name. Default is true
+# #convert_paths = true
+#
+# ## Use strict rules to sanitize metric and tag names from invalid characters
+# ## When enabled, forward slash (/) and comma (,) will be accepted
+# #use_strict = false
+#
+# ## Use regex to sanitize metric and tag names from invalid characters
+# ## Regex is more thorough, but significantly slower. Default is false
+# #use_regex = false
+#
+# ## Point tags to use as the source name for Wavefront (if none found, host will be used)
+# #source_override = ["hostname", "address", "agent_host", "node_host"]
+#
+# ## Whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. Default is true
+# #convert_bool = true
+#
+# ## Truncate metric tags to a total of 254 characters for the tag name value.
+# ## Wavefront will reject any data point exceeding this limit if not truncated.
+# ## Defaults to 'false' to provide backwards compatibility.
+# #truncate_tags = false
+#
+# ## Flush the internal buffers after each batch. This effectively bypasses the background sending of metrics
+# ## normally done by the Wavefront SDK. This can be used if you are experiencing buffer overruns. The sending
+# ## of metrics will block for a longer time, but this will be handled gracefully by the internal buffering in
+# ## Telegraf.
+# #immediate_flush = true
+
+
+# # A plugin that can transmit metrics over WebSocket.
+# [[outputs.websocket]]
+# ## URL is the address to send metrics to. Make sure ws or wss scheme is used.
+# url = "ws://127.0.0.1:3000/telegraf"
+#
+# ## Timeouts (make sure read_timeout is larger than server ping interval or set to zero).
+# # connect_timeout = "30s"
+# # write_timeout = "30s"
+# # read_timeout = "30s"
+#
+# ## Optionally turn on using text data frames (binary by default).
+# # use_text_frames = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional SOCKS5 proxy to use
+# # socks5_enabled = true
+# # socks5_address = "127.0.0.1:1080"
+# # socks5_username = "alice"
+# # socks5_password = "pass123"
+#
+# ## Optional HTTP proxy to use
+# # use_system_proxy = false
+# # http_proxy_url = "http://localhost:8888"
+#
+# ## Data format to output.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+# # data_format = "influx"
+#
+# ## Additional HTTP Upgrade headers
+# # [outputs.websocket.headers]
+# # Authorization = "Bearer "
+
+
+# # Send aggregated metrics to Yandex.Cloud Monitoring
+# [[outputs.yandex_cloud_monitoring]]
+# ## Timeout for HTTP writes.
+# # timeout = "20s"
+#
+# ## Yandex.Cloud monitoring API endpoint. Normally should not be changed
+# # endpoint_url = "https://monitoring.api.cloud.yandex.net/monitoring/v2/data/write"
+#
+# ## All user metrics should be sent with the "custom" service specified. Normally should not be changed
+# # service = "custom"
+
+
+###############################################################################
+#                            PROCESSOR PLUGINS                                #
+###############################################################################
+
+
+# # Attach AWS EC2 metadata to metrics
+# [[processors.aws_ec2]]
+# ## Instance identity document tags to attach to metrics.
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
+# ##
+# ## Available tags:
+# ## * accountId
+# ## * architecture
+# ## * availabilityZone
+# ## * billingProducts
+# ## * imageId
+# ## * instanceId
+# ## * instanceType
+# ## * kernelId
+# ## * pendingTime
+# ## * privateIp
+# ## * ramdiskId
+# ## * region
+# ## * version
+# imds_tags = []
+#
+# ## EC2 instance tags retrieved with the DescribeTags action.
+# ## If a tag is empty upon retrieval, it is omitted when tagging metrics.
+# ## Note that in order for this to work, the role attached to the EC2
+# ## instance, or the AWS credentials available from the environment, must
+# ## have a policy attached that allows ec2:DescribeTags.
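+# ## A minimal IAM policy granting this, as an illustrative sketch only (not
+# ## part of the upstream sample config):
+# ##   {
+# ##     "Version": "2012-10-17",
+# ##     "Statement": [
+# ##       { "Effect": "Allow", "Action": "ec2:DescribeTags", "Resource": "*" }
+# ##     ]
+# ##   }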
+# ##
+# ## For more information see:
+# ## https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTags.html
+# ec2_tags = []
+#
+# ## Timeout for HTTP requests made against the AWS EC2 metadata endpoint.
+# timeout = "10s"
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order,
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# ## max_parallel_calls is the maximum number of AWS API calls to be in flight
+# ## at the same time.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_calls = 10
+
+
+# # Apply metric modifications using override semantics.
+# [[processors.clone]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.clone.tags]
+# # additional_tag = "tag_value"
+
+
+# # Convert values to another metric value type
+# [[processors.converter]]
+# ## Tags to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## <target-type> = [<tag-key>...]
+# [processors.converter.tags]
+# measurement = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+#
+# ## Fields to convert
+# ##
+# ## The table key determines the target type, and the array of key-values
+# ## select the keys to convert. The array may contain globs.
+# ## <target-type> = [<field-key>...]
+# [processors.converter.fields]
+# measurement = []
+# tag = []
+# string = []
+# integer = []
+# unsigned = []
+# boolean = []
+# float = []
+
+
+# # Dates measurements, tags, and fields that pass through this filter.
+# [[processors.date]]
+# ## New tag to create
+# tag_key = "month"
+#
+# ## New field to create (cannot set both field_key and tag_key)
+# # field_key = "month"
+#
+# ## Date format string, must be a representation of the Go "reference time"
+# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006".
+# date_format = "Jan"
+#
+# ## If destination is a field, date format can also be one of
+# ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field.
+# # date_format = "unix"
+#
+# ## Offset duration added to the date string when writing the new tag.
+# # date_offset = "0s"
+#
+# ## Timezone to use when creating the tag or field using a reference time
+# ## string. This can be set to one of "UTC", "Local", or to a location name
+# ## in the IANA Time Zone database.
+# ## example: timezone = "America/Los_Angeles"
+# # timezone = "UTC"
+
+
+# # Filter metrics with repeating field values
+# [[processors.dedup]]
+# ## Maximum time to suppress output
+# dedup_interval = "600s"
+
+
+# ## Set default fields on your metric(s) when they are nil or empty
+# [[processors.defaults]]
+# ## Ensures a set of fields always exists on your metric(s) with their
+# ## respective default value.
+# ## For any given field pair (key = default), if it's not set, a field
+# ## is set on the metric with the specified default.
+# ##
+# ## A field is considered not set if it is nil on the incoming metric;
+# ## or it is not nil but its value is an empty string or is a string
+# ## of one or more spaces.
+# ## <target-field> = <default-value>
+# [processors.defaults.fields]
+# field_1 = "bar"
+# time_idle = 0
+# is_error = true
+
+
+# # Map enum values according to given table.
+# [[processors.enum]]
+# [[processors.enum.mapping]]
+# ## Name of the field to map. Globs accepted.
+# field = "status"
+#
+# ## Name of the tag to map. Globs accepted.
+# # tag = "status"
+#
+# ## Destination tag or field to be used for the mapped value. By default the
+# ## source tag or field is used, overwriting the original value.
+# dest = "status_code"
+#
+# ## Default value to be used for all values not contained in the mapping
+# ## table. When unset and no match is found, the original field will remain
+# ## unmodified and the destination tag or field will not be created.
+# # default = 0
+#
+# ## Table of mappings
+# [processors.enum.mapping.value_mappings]
+# green = 1
+# amber = 2
+# red = 3
+
+
+# # Run executable as long-running processor plugin
+# [[processors.execd]]
+# ## One program to run as daemon.
+# ## NOTE: the process and each argument should each be their own string
+# ## eg: command = ["/path/to/your_program", "arg1", "arg2"]
+# command = ["cat"]
+#
+# ## Environment variables
+# ## Array of "key=value" pairs to pass as environment variables
+# ## e.g. "KEY=value", "USERNAME=John Doe",
+# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
+# # environment = []
+#
+# ## Delay before the process is restarted after an unexpected termination
+# # restart_delay = "10s"
+
+
+# # Performs file path manipulations on tags and fields
+# [[processors.filepath]]
+# ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+# # [[processors.filepath.basename]]
+# # tag = "path"
+# # dest = "basepath"
+#
+# ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+# # [[processors.filepath.dirname]]
+# # field = "path"
+#
+# ## Treat the tag value as a path, converting it to its last element without its suffix
+# # [[processors.filepath.stem]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to the shortest path name equivalent
+# ## to path by purely lexical processing
+# # [[processors.filepath.clean]]
+# # tag = "path"
+#
+# ## Treat the tag value as a path, converting it to a relative path that is lexically
+# ## equivalent to the source path when joined to 'base_path'
+# # [[processors.filepath.rel]]
+# # tag = "path"
+# # base_path = "/var/log"
+#
+# ## Treat the tag value as a path, replacing each separator character in path with a '/'
+# ## character. Only has an effect on Windows
+# # [[processors.filepath.toslash]]
+# # tag = "path"
+
+
+# # Add a tag of the network interface name looked up over SNMP by interface number
+# [[processors.ifname]]
+# ## Name of tag holding the interface number
+# # tag = "ifIndex"
+#
+# ## Name of output tag where the interface name will be added
+# # dest = "ifName"
+#
+# ## Name of tag of the SNMP agent to request the interface name from
+# # agent = "agent"
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
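+# ## (A clarifying note, not from the upstream docs: this is the SNMP GETBULK
+# ## max-repetitions value from RFC 3416, i.e. how many table rows are
+# ## requested per PDU; larger values mean fewer round trips when walking
+# ## large interface tables.)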
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## max_parallel_lookups is the maximum number of SNMP requests to
+# ## make at the same time.
+# # max_parallel_lookups = 100
+#
+# ## ordered controls whether or not the metrics need to stay in the
+# ## same order this plugin received them in. If false, this plugin
+# ## may change the order when data is cached. If you need metrics to
+# ## stay in order, set this to true. Keeping the metrics ordered may
+# ## be slightly slower
+# # ordered = false
+#
+# ## cache_ttl is the amount of time interface names are cached for a
+# ## given agent. After this period elapses, if names are needed they
+# ## will be retrieved again.
+# # cache_ttl = "8h"
+
+
+# # Adds noise to numerical fields
+# [[processors.noise]]
+# ## Specifies the type of the random distribution.
+# ## Can be "laplacian", "gaussian" or "uniform".
+# # type = "laplacian"
+#
+# ## Center of the distribution.
+# ## Only used for Laplacian and Gaussian distributions.
+# # mu = 0.0
+#
+# ## Scale parameter for the Laplacian or Gaussian distribution
+# # scale = 1.0
+#
+# ## Upper and lower bound of the Uniform distribution
+# # min = -1.0
+# # max = 1.0
+#
+# ## Apply the noise only to numeric fields matching the filter criteria below.
+# ## Excludes takes precedence over includes.
+# # include_fields = []
+# # exclude_fields = []
+
+
+# # Apply metric modifications using override semantics.
+# [[processors.override]]
+# ## All modifications on inputs and aggregators can be overridden:
+# # name_override = "new_name"
+# # name_prefix = "new_name_prefix"
+# # name_suffix = "new_name_suffix"
+#
+# ## Tags to be added (all values must be strings)
+# # [processors.override.tags]
+# # additional_tag = "tag_value"
+
+
+# # Parse a value in a specified field/tag(s) and add the result in a new metric
+# [[processors.parser]]
+# ## The name of the fields whose value will be parsed.
+# parse_fields = ["message"]
+#
+# ## If true, incoming metrics are not emitted.
+# drop_original = false
+#
+# ## If set to override, emitted metrics will be merged by overriding the
+# ## original metric using the newly parsed metrics.
+# merge = "override"
+#
+# ## The data format to be read from files
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Rotate a single valued metric into a multi field metric
+# [[processors.pivot]]
+# ## Tag to use for naming the new field.
+# tag_key = "name"
+# ## Field to use as the value of the new field.
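+# ## For illustration (not part of the upstream sample): with tag_key = "name"
+# ## and value_key = "value", a metric like
+# ##   cpu,cpu=cpu0,name=time_idle value=42i
+# ## is rotated into
+# ##   cpu,cpu=cpu0 time_idle=42i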
+# value_key = "value"
+
+
+# # Given a tag/field of a TCP or UDP port number, add a tag/field of the service name looked up in the system services file
+# [[processors.port_name]]
+# ## Name of tag holding the port number
+# # tag = "port"
+# ## Or name of the field holding the port number
+# # field = "port"
+#
+# ## Name of output tag or field (depending on the source) where the service name will be added
+# # dest = "service"
+#
+# ## Default protocol (tcp or udp)
+# # default_protocol = "tcp"
+#
+# ## Tag containing the protocol (tcp or udp, case-insensitive)
+# # protocol_tag = "proto"
+#
+# ## Field containing the protocol (tcp or udp, case-insensitive)
+# # protocol_field = "proto"
+
+
+# # Print all metrics that pass through this filter.
+# [[processors.printer]]
+
+
+# # Transforms tag and field values as well as measurement, tag and field names with regex pattern
+# [[processors.regex]]
+# namepass = ["nginx_requests"]
+#
+# # Tag and field conversions defined in separate sub-tables
+# [[processors.regex.tags]]
+# ## Tag to change, "*" will change every tag
+# key = "resp_code"
+# ## Regular expression to match on a tag value
+# pattern = "^(\\d)\\d\\d$"
+# ## Matches of the pattern will be replaced with this string. Use ${1}
+# ## notation to use the text of the first submatch.
+# replacement = "${1}xx"
+#
+# [[processors.regex.fields]]
+# ## Field to change
+# key = "request"
+# ## All the power of the Go regular expressions available here
+# ## For example, named subgroups
+# pattern = "^/api(?P<method>/[\\w/]+)\\S*"
+# replacement = "${method}"
+# ## If result_key is present, a new field will be created
+# ## instead of changing the existing field
+# result_key = "method"
+#
+# # Multiple conversions may be applied to one field sequentially
+# # Let's extract one more value
+# [[processors.regex.fields]]
+# key = "request"
+# pattern = ".*category=(\\w+).*"
+# replacement = "${1}"
+# result_key = "search_category"
+#
+# # Rename metric fields
+# [[processors.regex.field_rename]]
+# ## Regular expression to match on a field name
+# pattern = "^search_(\\w+)d$"
+# ## Matches of the pattern will be replaced with this string. Use ${1}
+# ## notation to use the text of the first submatch.
+# replacement = "${1}"
+# ## If the new field name already exists, you can either "overwrite" the
+# ## existing one with the value of the renamed field OR you can "keep"
+# ## both the existing and source field.
+# # result_key = "keep"
+#
+# # Rename metric tags
+# # [[processors.regex.tag_rename]]
+# # ## Regular expression to match on a tag name
+# # pattern = "^search_(\\w+)d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}"
+# # ## If the new tag name already exists, you can either "overwrite" the
+# # ## existing one with the value of the renamed tag OR you can "keep"
+# # ## both the existing and source tag.
+# # # result_key = "keep"
+#
+# # Rename metrics
+# # [[processors.regex.metric_rename]]
+# # ## Regular expression to match on a metric name
+# # pattern = "^search_(\\w+)d$"
+# # ## Matches of the pattern will be replaced with this string. Use ${1}
+# # ## notation to use the text of the first submatch.
+# # replacement = "${1}"
+
+
+# # Rename measurements, tags, and fields that pass through this filter.
+# [[processors.rename]]
+# ## Specify one sub-table per rename operation.
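+# ## For illustration (not part of the upstream sample), the rules below turn
+# ##   network_interface_throughput,hostname=backend.example.com lower=10i,upper=1000i
+# ## into
+# ##   throughput,host=backend.example.com min=10i,max=1000i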
+# [[processors.rename.replace]]
+# measurement = "network_interface_throughput"
+# dest = "throughput"
+#
+# [[processors.rename.replace]]
+# tag = "hostname"
+# dest = "host"
+#
+# [[processors.rename.replace]]
+# field = "lower"
+# dest = "min"
+#
+# [[processors.rename.replace]]
+# field = "upper"
+# dest = "max"
+
+
+# # ReverseDNS does a reverse lookup on IP addresses to retrieve the DNS name
+# [[processors.reverse_dns]]
+# ## For optimal performance, you may want to limit which metrics are passed to this
+# ## processor. eg:
+# ## namepass = ["my_metric_*"]
+#
+# ## cache_ttl is how long the dns entries should stay cached for.
+# ## generally longer is better, but if you expect a large number of diverse lookups
+# ## you'll want to consider memory use.
+# cache_ttl = "24h"
+#
+# ## lookup_timeout is how long to wait for a single dns request to respond.
+# ## this is also the maximum acceptable latency for a metric travelling through
+# ## the reverse_dns processor. After lookup_timeout is exceeded, a metric will
+# ## be passed on unaltered.
+# ## multiple simultaneous resolution requests for the same IP will only make a
+# ## single rDNS request, and they will all wait for the answer for this long.
+# lookup_timeout = "3s"
+#
+# ## max_parallel_lookups is the maximum number of dns requests to be in flight
+# ## at the same time. Requests hitting cached values do not count against this
+# ## total, and neither do multiple requests for the same IP.
+# ## It's probably best to keep this number fairly low.
+# max_parallel_lookups = 10
+#
+# ## ordered controls whether or not the metrics need to stay in the same order
+# ## this plugin received them in. If false, this plugin will change the order,
+# ## with requests hitting cached results moving through immediately and not
+# ## waiting on slower lookups. This may cause issues for you if you are
+# ## depending on the order of metrics staying the same. If so, set this to true.
+# ## Keeping the metrics ordered may be slightly slower.
+# ordered = false
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the field "source_ip", and put the result in the field "source_name"
+# field = "source_ip"
+# dest = "source_name"
+#
+# [[processors.reverse_dns.lookup]]
+# ## get the ip from the tag "destination_ip", and put the result in the tag
+# ## "destination_name".
+# tag = "destination_ip"
+# dest = "destination_name"
+#
+# ## If you would prefer destination_name to be a field instead, you can use a
+# ## processors.converter after this one, specifying the order attribute.
+
+
+# # Add the S2 Cell ID as a tag based on latitude and longitude fields
+# [[processors.s2geo]]
+# ## The name of the lat and lon fields containing WGS-84 latitude and
+# ## longitude in decimal degrees.
+# # lat_field = "lat"
+# # lon_field = "lon"
+#
+# ## New tag to create
+# # tag_key = "s2_cell_id"
+#
+# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html)
+# # cell_level = 9
+
+
+# # Process metrics using a Starlark script
+# [[processors.starlark]]
+# ## The Starlark source can be set as a string in this configuration file, or
+# ## by referencing a file containing the script. Only one source or script
+# ## should be set at once.
+#
+# ## Source of the Starlark script.
+# source = '''
+# def apply(metric):
+#   return metric
+# '''
+#
+# ## File containing a Starlark script.
+# # script = "/usr/local/bin/myscript.star"
+#
+# ## The constants of the Starlark script.
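+# ## Each constant is made available to the script as a global variable of the
+# ## same name, so a script can reference e.g. 'threshold' directly (see the
+# ## plugin README; the values below are illustrative).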
+# # [processors.starlark.constants]
+# # max_size = 10
+# # threshold = 0.75
+# # default_name = "Julia"
+# # debug_mode = true
+
+
+# # Perform string processing on tags, fields, and measurements
+# [[processors.strings]]
+# ## Convert a field value to lowercase and store in a new field
+# # [[processors.strings.lowercase]]
+# # field = "uri_stem"
+# # dest = "uri_stem_normalised"
+#
+# ## Convert a tag value to uppercase
+# # [[processors.strings.uppercase]]
+# # tag = "method"
+#
+# ## Convert a field value to titlecase
+# # [[processors.strings.titlecase]]
+# # field = "status"
+#
+# ## Trim leading and trailing whitespace using the default cutset
+# # [[processors.strings.trim]]
+# # field = "message"
+#
+# ## Trim leading characters in cutset
+# # [[processors.strings.trim_left]]
+# # field = "message"
+# # cutset = "\t"
+#
+# ## Trim trailing characters in cutset
+# # [[processors.strings.trim_right]]
+# # field = "message"
+# # cutset = "\r\n"
+#
+# ## Trim the given prefix from the field
+# # [[processors.strings.trim_prefix]]
+# # field = "my_value"
+# # prefix = "my_"
+#
+# ## Trim the given suffix from the field
+# # [[processors.strings.trim_suffix]]
+# # field = "read_count"
+# # suffix = "_count"
+#
+# ## Replace all non-overlapping instances of old with new
+# # [[processors.strings.replace]]
+# # measurement = "*"
+# # old = ":"
+# # new = "_"
+#
+# ## Trims strings based on width
+# # [[processors.strings.left]]
+# # field = "message"
+# # width = 10
+#
+# ## Decode a base64 encoded UTF-8 string
+# # [[processors.strings.base64decode]]
+# # field = "message"
+#
+# ## Sanitize a string to ensure it is valid UTF-8
+# ## Each run of invalid UTF-8 byte sequences is replaced by the replacement string, which may be empty
+# # [[processors.strings.valid_utf8]]
+# # field = "message"
+# # replacement = ""
+
+
+# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit.
+# [[processors.tag_limit]]
+# ## Maximum number of tags to preserve
+# limit = 3
+#
+# ## List of tags to preferentially preserve
+# keep = ["environment", "region"]
+
+
+# # Uses a Go template to create a new tag
+# [[processors.template]]
+# ## Tag to set with the output of the template.
+# tag = "topic"
+#
+# ## Go template used to create the tag value. In order to ease TOML
+# ## escaping requirements, you may wish to use single quotes around the
+# ## template string.
+# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
+
+
+# # Filter metrics, keeping only the top k series over a period of time
+# [[processors.topk]]
+# ## How many seconds between aggregations
+# # period = 10
+#
+# ## How many top buckets to return per field
+# ## Every field specified to aggregate over will return k number of results.
+# ## For example, 1 field with k of 10 will return 10 buckets. While 2 fields
+# ## with k of 3 will return 6 buckets.
+# # k = 10
+#
+# ## Over which tags should the aggregation be done. Globs can be specified, in
+# ## which case any tag matching the glob will be aggregated over. If set to an
+# ## empty list, no aggregation over tags is done.
+# # group_by = ['*']
+#
+# ## The field(s) to aggregate
+# ## Each field defined is used to create an independent aggregation. Each
+# ## aggregation will return k buckets. If a metric does not have a defined
+# ## field, the metric will be dropped from the aggregation. Consider using
+# ## the defaults processor plugin to ensure fields are set if required.
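+# ## For illustration (hypothetical values, not from the upstream sample):
+# ## with fields = ["cpu_usage"], k = 3, group_by = ["host"] and
+# ## aggregation = "mean", only metrics from the 3 hosts with the highest
+# ## mean cpu_usage pass through each period.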
+# # fields = ["value"]
+#
+# ## What aggregation function to use. Options: sum, mean, min, max
+# # aggregation = "mean"
+#
+# ## Instead of the top k largest metrics, return the bottom k lowest metrics
+# # bottomk = false
+#
+# ## The plugin assigns each metric a GroupBy tag generated from its name and
+# ## tags. If this setting is different than "" the plugin will add a
+# ## tag (whose name will be the value of this setting) to each metric with
+# ## the value of the calculated GroupBy tag. Useful for debugging
+# # add_groupby_tag = ""
+#
+# ## These settings provide a way to know the position of each metric in
+# ## the top k. The 'add_rank_fields' setting allows you to specify for which
+# ## fields the position is required. If the list is non-empty, then a field
+# ## will be added to each and every metric for each string present in this
+# ## setting. This field will contain the ranking of the group that
+# ## the metric belonged to when aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_rank'
+# # add_rank_fields = []
+#
+# ## These settings provide a way to know what values the plugin is generating
+# ## when aggregating metrics. The 'add_aggregate_fields' setting allows you to
+# ## specify for which fields the final aggregation value is required. If the
+# ## list is non-empty, then a field will be added to each and every metric for
+# ## each field present in this setting. This field will contain
+# ## the computed aggregation for the group that the metric belonged to when
+# ## aggregated over that field.
+# ## The name of the field will be set to the name of the aggregation field,
+# ## suffixed with the string '_topk_aggregate'
+# # add_aggregate_fields = []
+
+
+# # Rotate multi field metric into several single field metrics
+# [[processors.unpivot]]
+# ## Tag to use for the name.
+# tag_key = "name"
+# ## Field to use for the name of the value.
+# value_key = "value"
+
+
+###############################################################################
+#                           AGGREGATOR PLUGINS                                #
+###############################################################################
+
+
+# # Keep the aggregate basicstats of each metric passing through.
+# [[aggregators.basicstats]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Configures which basic stats to push as fields
+# # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","stdev","s2","sum","interval"]
+
+
+# # Calculates a derivative for every field.
+# [[aggregators.derivative]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+# ##
+# ## Suffix to append for the resulting derivative field.
+# # suffix = "_rate"
+# ##
+# ## Field to use for the quotient when computing the derivative.
+# ## When using a field as the derivation parameter, the name of that field will
+# ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
+# ## By default the timestamps of the metrics are used and the suffix is omitted.
+# # variable = ""
+# ##
+# ## Maximum number of roll-overs in case only one measurement is found during a period.
+# # max_roll_over = 10
+
+
+# # Report the final metric of a series
+# [[aggregators.final]]
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## The amount of time a series must go without updates before it is
+# ## considered final.
+# series_timeout = "5m"
+
+
+# # Configuration for aggregate histogram metrics
+# [[aggregators.histogram]]
+# ## The period in which to flush the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## If true, the histogram will be reset on flush instead
+# ## of accumulating the results.
+# reset = false
+#
+# ## Whether bucket values should be accumulated. If set to false, the "gt" tag will be added.
+# ## Defaults to true.
+# cumulative = true
+#
+# ## Expiration interval for each histogram. The histogram will be expired if
+# ## there are no changes in any buckets for this time interval. 0 == no expiration.
+# # expiration_interval = "0m"
+#
+# ## If true, aggregated histograms are pushed to output only if they were
+# ## updated since the previous push. Defaults to false.
+# # push_only_on_update = false
+#
+# ## Example config that aggregates all fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## Right borders of buckets (with +Inf implicitly added).
+# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0]
+# # ## The name of the metric.
+# # measurement_name = "cpu"
+#
+# ## Example config that aggregates only specific fields of the metric.
+# # [[aggregators.histogram.config]]
+# # ## Right borders of buckets (with +Inf implicitly added).
+# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
+# # ## The name of the metric.
+# # measurement_name = "diskio"
+# # ## The concrete fields of the metric
+# # fields = ["io_time", "read_time", "write_time"]
+
+
+# # Merge metrics into multifield metrics by series key
+# [[aggregators.merge]]
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = true
+
+
+# # Keep the aggregate min/max of each metric passing through.
+# [[aggregators.minmax]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+
+
+# # Keep the aggregate quantiles of each metric passing through.
+# [[aggregators.quantile]]
+# ## General Aggregator Arguments:
+# ## The period on which to flush & clear the aggregator.
+# period = "30s"
+#
+# ## If true, the original metric will be dropped by the
+# ## aggregator and will not get sent to the output plugins.
+# drop_original = false
+#
+# ## Quantiles to output in the range [0,1]
+# # quantiles = [0.25, 0.5, 0.75]
+#
+# ## Type of aggregation algorithm
+# ## Supported are:
+# ## "t-digest" -- approximation using centroids, can cope with a large number of samples
+# ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7)
+# ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8)
+# ## NOTE: Do not use the "exact" algorithms with a large number of samples, as
+# ## performance and memory consumption will suffer!
+# # algorithm = "t-digest"
+#
+# ## Compression for approximation (t-digest). The value needs to be
+# ## greater than or equal to 1.0. Smaller values will result in better
+# ## performance but less accuracy.
+# # compression = 100.0 + + +# # Aggregate metrics using a Starlark script +# [[aggregators.starlark]] +# ## The Starlark source can be set as a string in this configuration file, or +# ## by referencing a file containing the script. Only one source or script +# ## should be set at once. +# ## +# ## Source of the Starlark script. +# source = ''' +# state = {} +# +# def add(metric): +# state["last"] = metric +# +# def push(): +# return state.get("last") +# +# def reset(): +# state.clear() +# ''' +# +# ## File containing a Starlark script. +# # script = "/usr/local/bin/myscript.star" +# +# ## The constants of the Starlark script. +# # [aggregators.starlark.constants] +# # max_size = 10 +# # threshold = 0.75 +# # default_name = "Julia" +# # debug_mode = true + + +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = ["status"] + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states + report_active = false + ## If true and the info is available then add core_id and physical_id tags + core_tags = false + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + ## Ignore mount points by mount options. + ## The 'mount' command reports options of all mounts in parathesis. + ## Bind mounts can be ignored with the special 'bind' option. + # ignore_mount_opts = [] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb", "vd*"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. 
+
+
+#[[inputs.kernel]]
+ # no configuration
+
+
+# Read metrics about memory usage
+[[inputs.mem]]
+ # no configuration
+
+
+# Get the number of processes and group them by status
+[[inputs.processes]]
+ # no configuration
+
+
+# Read metrics about swap memory usage
+[[inputs.swap]]
+ # no configuration
+
+
+# Read metrics about system load & uptime
+[[inputs.system]]
+ # no configuration
+
+
+# # Gather ActiveMQ metrics
+# [[inputs.activemq]]
+# ## ActiveMQ WebConsole URL
+# url = "http://127.0.0.1:8161"
+#
+# ## Required ActiveMQ Endpoint
+# ## deprecated in 1.11; use the url option
+# # server = "192.168.50.10"
+# # port = 8161
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Required ActiveMQ webadmin root path
+# # webadmin = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read stats from aerospike server(s)
+# [[inputs.aerospike]]
+# ## Aerospike servers to connect to (with port)
+# ## This plugin will query all namespaces the aerospike
+# ## server has configured and get stats for them.
+# servers = ["localhost:3000"]
+#
+# # username = "telegraf"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # enable_tls = false
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# # tls_name = "tlsname"
+# ## If false, skip chain & host verification
+# # insecure_skip_verify = true
+#
+# # Feature Options
+# # Add namespace variable to limit the namespaces executed on
+# # Leave blank to do all
+# # disable_query_namespaces = true # default false
+# # namespaces = ["namespace1", "namespace2"]
+#
+# # Enable set level telemetry
+# # query_sets = true # default: false
+# # Add namespace set combinations to limit sets executed on
+# # Leave blank to do all sets
+# # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
+#
+# # Histograms
+# # enable_ttl_histogram = true # default: false
+# # enable_object_size_linear_histogram = true # default: false
+#
+# # By default, aerospike produces a 100-bucket histogram. This is not
+# # great for most graphing tools, so this option allows squashing it into
+# # a smaller number of buckets.
+# # To have a balanced histogram, the number of buckets chosen
+# # should divide evenly into 100.
+# # num_histogram_buckets = 100 # default: 10
+
+
+# # Query statistics from AMD Graphics cards using rocm-smi binary
+# [[inputs.amd_rocm_smi]]
+# ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
+# # bin_path = "/opt/rocm/bin/rocm-smi"
+#
+# ## Optional: timeout for GPU polling
+# # timeout = "5s"
+
+
+# # Read Apache status information (mod_status)
+# [[inputs.apache]]
+# ## An array of URLs to gather from, must be directed at the machine
+# ## readable version of the mod_status page including the auto query string.
+# ## Default is "http://localhost/server-status?auto".
+# urls = ["http://localhost/server-status?auto"] +# +# ## Credentials for basic HTTP authentication. +# # username = "myuser" +# # password = "mypassword" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + +# # Gather metrics from Apache Aurora schedulers +# [[inputs.aurora]] +# ## Schedulers are the base addresses of your Aurora Schedulers +# schedulers = ["http://127.0.0.1:8081"] +# +# ## Set of role types to collect metrics from. +# ## +# ## The scheduler roles are checked each interval by contacting the +# ## scheduler nodes; zookeeper is not contacted. +# # roles = ["leader", "follower"] +# +# ## Timeout is the max time for total network operations. +# # timeout = "5s" +# +# ## Username and password are sent using HTTP Basic Auth. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read metrics exposed by Beat +# [[inputs.beat]] +# ## An URL from which to read Beat-formatted JSON +# ## Default is "http://127.0.0.1:5066". +# url = "http://127.0.0.1:5066" +# +# ## Enable collection of the listed stats +# ## An empty list means collect all. Available options are currently +# ## "beat", "libbeat", "system" and "filebeat". +# # include = ["beat", "libbeat", "filebeat"] +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "logstash.example.com" +# +# ## Timeout for HTTP requests +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. +# ## Default is "http://localhost:8053/xml/v3". 
+# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false +# +# ## Timeout for http requests made by bind nameserver +# # timeout = "4s" + + +# # Collect bond interface status, slaves statuses and failures count +# [[inputs.bond]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" +# +# ## Sets 'sys' directory path +# ## If not specified, then default is /sys +# # host_sys = "/sys" +# +# ## By default, telegraf gather stats for all bond interfaces +# ## Setting interfaces will restrict the stats to the specified +# ## bond interfaces. +# # bond_interfaces = ["bond0"] +# +# ## Tries to collect additional bond details from /sys/class/net/{bond} +# ## currently only useful for LACP (mode 4) bonds +# # collect_sys_details = false + + +# # Collect Kafka topics and consumers status from Burrow HTTP API. +# [[inputs.burrow]] +# ## Burrow API endpoints in format "schema://host:port". +# ## Default is "http://localhost:8000". +# servers = ["http://localhost:8000"] +# +# ## Override Burrow API prefix. +# ## Useful when Burrow is behind reverse-proxy. +# # api_prefix = "/v3/kafka" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Limit per-server concurrent connections. +# ## Useful in case of large number of topics or consumer groups. +# # concurrent_connections = 20 +# +# ## Filter clusters, default is no filtering. +# ## Values can be specified as glob patterns. +# # clusters_include = [] +# # clusters_exclude = [] +# +# ## Filter consumer groups, default is no filtering. +# ## Values can be specified as glob patterns. +# # groups_include = [] +# # groups_exclude = [] +# +# ## Filter topics, default is no filtering. +# ## Values can be specified as glob patterns. +# # topics_include = [] +# # topics_exclude = [] +# +# ## Credentials for basic HTTP authentication. +# # username = "" +# # password = "" +# +# ## Optional SSL config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# # insecure_skip_verify = false + + +# # Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# mds_prefix = "ceph-mds" +# rgw_prefix = "ceph-client" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as, ceph will search for the corresponding keyring +# ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the +# ## client section of ceph.conf for example: +# ## +# ## [client.telegraf] +# ## keyring = /etc/ceph/client.telegraf.keyring +# ## +# ## Consult the ceph documentation for more detail on keyring generation. 
+# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config +# ## to be specified +# gather_cluster_stats = false + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/sys/fs/cgroup/memory", +# # "/sys/fs/cgroup/memory/child1", +# # "/sys/fs/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Get standard chrony metrics, requires chronyc executable. +# [[inputs.chrony]] +# ## If true, chronyc tries to perform a DNS lookup for the time server. +# # dns_lookup = false + + +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified +# ## 2) Assumed credentials via STS if role_arn is specified +# ## 3) explicit credentials from 'access_key' and 'secret_key' +# ## 4) shared profile from 'profile' +# ## 5) environment variables +# ## 6) shared credentials file +# ## 7) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # web_identity_token_file = "" +# # role_session_name = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Set http_proxy +# # use_system_proxy = false +# # http_proxy_url = "http://localhost:8888" +# +# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Cloudwatch API +# # and will not be collected by Telegraf. +# # +# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# delay = "5m" +# +# ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. +# ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. +# ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. 
+# ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. +# ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html +# #recently_active = "PT3H" +# +# ## Configure the TTL for the internal cache of metrics. +# # cache_ttl = "1h" +# +# ## Metric Statistic Namespaces (required) +# namespaces = ["AWS/ELB"] +# # A single metric statistic namespace that will be appended to namespaces on startup +# # namespace = "AWS/ELB" +# +# ## Maximum requests per second. Note that the global default AWS rate limit is +# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 50. +# ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html +# # ratelimit = 25 +# +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# +# ## Namespace-wide statistic filters. These allow fewer queries to be made to +# ## cloudwatch. +# # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # statistic_exclude = [] +# +# ## Metrics to Pull +# ## Defaults to all Metrics in Namespace if nothing is provided +# ## Refreshes Namespace available metrics every 1h +# #[[inputs.cloudwatch.metrics]] +# # names = ["Latency", "RequestCount"] +# # +# # ## Statistic filters for Metric. These allow for retrieving specific +# # ## statistics for an individual metric. +# # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] +# # # statistic_exclude = [] +# # +# # ## Dimension filters for Metric. All dimensions defined for the metric names +# # ## must be specified in order to retrieve the metric statistics. +# # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. +# # [[inputs.cloudwatch.metrics.dimensions]] +# # name = "LoadBalancerName" +# # value = "p-example" + + +# # Gather health check statuses from services registered in Consul +# [[inputs.consul]] +# ## Consul server address +# # address = "localhost:8500" +# +# ## URI scheme for the Consul server, one of "http", "https" +# # scheme = "http" +# +# ## Metric version controls the mapping from Consul metrics into +# ## Telegraf metrics. Version 2 moved all fields with string values +# ## to tags. +# ## +# ## example: metric_version = 1; deprecated in 1.16 +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## ACL token used in every request +# # token = "" +# +# ## HTTP Basic Authentication username and password. +# # username = "" +# # password = "" +# +# ## Data center to query the health checks from +# # datacenter = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true +# +# ## Consul checks' tag splitting +# # When tags are formatted like "key:value" with ":" as a delimiter then +# # they will be splitted and reported as proper key:value in Telegraf +# # tag_delimiter = ":" + + +# # Read metrics from the Consul Agent API +# [[inputs.consul_agent]] +# ## URL for the Consul agent +# # url = "http://127.0.0.1:8500" +# +# ## Use auth token for authorization. +# ## If both are set, an error is thrown. +# ## If both are empty, no token will be used. 
+# # token_file = "/path/to/auth/token" +# ## OR +# # token = "a1234567-40c7-9048-7bae-378687048181" +# +# ## Set timeout (default 5 seconds) +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # Read per-node and per-bucket metrics from Couchbase +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specified, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] +# +# ## Filter bucket fields to include only here. +# # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification (defaults to false) +# ## If set to false, tls_cert and tls_key are required +# # insecure_skip_verify = false +# +# ## Whether to collect cluster-wide bucket statistics +# ## It is recommended to disable this in favor of node_stats +# ## to get a better view of the cluster. +# cluster_bucket_stats = true +# +# ## Whether to collect bucket stats for each individual node +# node_bucket_stats = false + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple Hosts from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" + + +# # Fetch metrics from a CSGO SRCDS +# [[inputs.csgo]] +# ## Specify servers using the following format: +# ## servers = [ +# ## ["ip1:port1", "rcon_password1"], +# ## ["ip2:port2", "rcon_password2"], +# ## ] +# # +# ## If no servers are specified, no data will be collected +# servers = [] + + +# # Input plugin for DC/OS metrics +# [[inputs.dcos]] +# ## The DC/OS cluster URL. +# cluster_url = "https://dcos-master-1" +# +# ## The ID of the service account. +# service_account_id = "telegraf" +# ## The private key file for the service account. +# service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" +# +# ## Path containing login token. If set, will read on every gather. +# # token_file = "/home/dcos/.dcos/token" +# +# ## In all filter options if both include and exclude are empty all items +# ## will be collected. Arrays may contain glob patterns. +# ## +# ## Node IDs to collect metrics from. If a node is excluded, no metrics will +# ## be collected for its containers or apps. +# # node_include = [] +# # node_exclude = [] +# ## Container IDs to collect container metrics from. +# # container_include = [] +# # container_exclude = [] +# ## Container IDs to collect app metrics from. +# # app_include = [] +# # app_exclude = [] +# +# ## Maximum concurrent connections to the cluster. +# # max_connections = 10 +# ## Maximum time to receive a response from cluster. 
+# # response_timeout = "20s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true +# +# ## Recommended filtering to reduce series cardinality. +# # [inputs.dcos.tagdrop] +# # path = ["/var/lib/mesos/slave/slaves/*"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Provide a native collection for dmsetup based statistics for dm-cache +# [[inputs.dmcache]] +# ## Whether to report per-device stats or not +# per_device = true + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] +# +# ## Network is the network protocol name. +# # network = "udp" +# +# ## Domains or subdomains to query. +# # domains = ["."] +# +# ## Query record type. +# ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# # record_type = "A" +# +# ## Dns server port. +# # port = 53 +# +# ## Query timeout in seconds. +# # timeout = 2 + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# +# ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# ## Note: configure this in one of the manager nodes in a Swarm cluster. +# ## configuring in multiple Swarm managers results in duplication of metrics. +# gather_services = false +# +# ## Only collect metrics for these containers. Values will be appended to +# ## container_name_include. +# ## Deprecated (1.4.0), use container_name_include +# container_names = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Containers to include and exclude. Collect all if empty. Globs accepted. +# container_name_include = [] +# container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...), +# ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not. +# ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'. +# ## Default value is 'true' for backwards compatibility, please set it to 'false' so that 'perdevice_include' setting +# ## is honored. +# perdevice = true +# +# ## Specifies for which classes a per-device metric should be issued +# ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...) 
+
+
+# # Read metrics about dovecot servers
+# [[inputs.dovecot]]
+# ## specify dovecot servers via an address:port list
+# ## e.g.
+# ## localhost:24242
+# ## or as a UDS socket
+# ## e.g.
+# ## /var/run/dovecot/old-stats
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["localhost:24242"]
+#
+# ## Type is one of "user", "domain", "ip", or "global"
+# type = "global"
+#
+# ## Wildcard matches like "*.com". An empty string "" is same as "*"
+# ## If type = "ip" filters should be <IP/network>
+# filters = [""]
+
+
+# # Read metrics about ECS containers
+# [[inputs.ecs]]
+# ## ECS metadata url.
+# ## Metadata v2 API is used if set explicitly. Otherwise,
+# ## v3 metadata endpoint API is used if available.
+# # endpoint_url = ""
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "RUNNING" state will be captured.
+# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING",
+# ## "RESOURCES_PROVISIONED", "STOPPED".
+# # container_status_include = []
+# # container_status_exclude = []
+#
+# ## ecs labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# ecs_label_include = [ "com.amazonaws.ecs.*" ]
+# ecs_label_exclude = []
+#
+# ## Timeout for queries.
+# # timeout = "5s"
+
+
+# # Read stats from one or more Elasticsearch servers or clusters
+# [[inputs.elasticsearch]]
+# ## specify a list of one or more Elasticsearch servers
+# ## you can add username and password to your url to use basic authentication:
+# ## servers = ["http://user:pass@localhost:9200"]
+# servers = ["http://localhost:9200"]
+#
+# ## Timeout for HTTP requests to the elastic search server(s)
+# http_timeout = "5s"
+#
+# ## When local is true (the default), the node will read only its own stats.
+# ## Set local to false when you want to read the node stats from all nodes
+# ## of the cluster.
+# local = true
+#
+# ## Set cluster_health to true when you want to obtain cluster health stats
+# cluster_health = false
+#
+# ## Adjust cluster_health_level when you want to obtain detailed health stats
+# ## The options are
+# ## - indices (default)
+# ## - cluster
+# # cluster_health_level = "indices"
+#
+# ## Set cluster_stats to true when you want to obtain cluster stats.
+# cluster_stats = false
+#
+# ## Only gather cluster_stats from the master node. For this to work, local = true is required
+# cluster_stats_only_from_master = true
+#
+# ## Indices to collect; can be one or more index names or _all
+# ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
+# indices_include = ["_all"]
+#
+# ## One of "shards", "cluster", "indices"
+# ## Currently only "shards" is implemented
+# indices_level = "shards"
+#
+# ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+# ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+# ## "breaker". Per default, all stats are gathered.
+# # node_stats = ["jvm", "http"]
+#
+# ## HTTP Basic Authentication username and password.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
+# ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and
+# ## sort them by the date or number after the wildcard. Metrics then are gathered for only the 'num_most_recent_indices' amount of most
+# ## recent indices.
+# # num_most_recent_indices = 0
+
+
+# # Derive metrics from aggregating Elasticsearch query results
+# [[inputs.elasticsearch_query]]
+# ## The full HTTP endpoint URL for your Elasticsearch instance
+# ## Multiple urls can be specified as part of the same cluster;
+# ## only ONE of the urls will be written to each interval.
+# urls = [ "http://node1.es.example.com:9200" ] # required.
+#
+# ## Elasticsearch client timeout, defaults to "5s".
+# # timeout = "5s"
+#
+# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
+# ## thus it is not necessary to list all nodes in the urls config option
+# # enable_sniffer = false
+#
+# ## Set the interval to check if the Elasticsearch nodes are available
+# ## This option is only used if enable_sniffer is also set (0s to disable it)
+# # health_check_interval = "10s"
+#
+# ## HTTP basic authentication details (eg. when using x-pack)
+# # username = "telegraf"
+# # password = "mypassword"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# [[inputs.elasticsearch_query.aggregation]]
+# ## measurement name for the results of the aggregation query
+# measurement_name = "measurement"
+#
+# ## Elasticsearch indexes to query (accept wildcards).
+# index = "index-*"
+#
+# ## The date/time field in the Elasticsearch index (mandatory).
+# date_field = "@timestamp" +# +# ## If the field used for the date/time field in Elasticsearch is also using +# ## a custom date/time format it may be required to provide the format to +# ## correctly parse the field. +# ## +# ## If using one of the built in elasticsearch formats this is not required. +# # date_field_custom_format = "" +# +# ## Time window to query (eg. "1m" to query documents from last minute). +# ## Normally should be set to same as collection interval +# query_period = "1m" +# +# ## Lucene query to filter results +# # filter_query = "*" +# +# ## Fields to aggregate values (must be numeric fields) +# # metric_fields = ["metric"] +# +# ## Aggregation function to use on the metric fields +# ## Must be set if 'metric_fields' is set +# ## Valid values are: avg, sum, min, max, sum +# # metric_function = "avg" +# +# ## Fields to be used as tags +# ## Must be text, non-analyzed fields. Metric aggregations are performed per tag +# # tags = ["field.keyword", "field2.keyword"] +# +# ## Set to true to not ignore documents when the tag(s) above are missing +# # include_missing_tag = false +# +# ## String value of the tag when the tag does not exist +# ## Used when include_missing_tag is true +# # missing_tag_value = "null" + + +# # Returns ethtool statistics for given interfaces +# [[inputs.ethtool]] +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. +# # interface_exclude = ["eth1"] +# +# ## Some drivers declare statistics with extra whitespace, different spacing, +# ## and mix cases. This list, when enabled, can be used to clean the keys. +# ## Here are the current possible normalizations: +# ## * snakecase: converts fooBarBaz to foo_bar_baz +# ## * trim: removes leading and trailing whitespace +# ## * lower: changes all capitalized letters to lowercase +# ## * underscore: replaces spaces with underscores +# # normalize_keys = ["snakecase", "trim", "lower", "underscore"] + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# +# ## Environment variables +# ## Array of "key=value" pairs to pass as environment variables +# ## e.g. "KEY=value", "USERNAME=John Doe", +# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" +# # environment = [] +# +# ## Timeout for each command to complete. +# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read metrics from fail2ban. +# [[inputs.fail2ban]] +# ## Use sudo to run fail2ban-client +# use_sudo = false + + +# # Read devices value(s) from a Fibaro controller +# [[inputs.fibaro]] +# ## Required Fibaro controller address/hostname. +# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available +# url = "http://:80" +# +# ## Required credentials to access the API (http://) +# username = "" +# password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" + + +# # Parse a complete file each interval +# [[inputs.file]] +# ## Files to parse each interval. 
+
+
+# # Read metrics from fail2ban.
+# [[inputs.fail2ban]]
+# ## Use sudo to run fail2ban-client
+# use_sudo = false
+
+
+# # Read devices value(s) from a Fibaro controller
+# [[inputs.fibaro]]
+# ## Required Fibaro controller address/hostname.
+# ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available
+# url = "http://<controller>:80"
+#
+# ## Required credentials to access the API (http://<controller>/api/)
+# username = "<username>"
+# password = "<password>"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+
+
+# # Parse a complete file each interval
+# [[inputs.file]]
+# ## Files to parse each interval. Accepts standard unix glob matching rules,
+# ## as well as ** to match recursive files and directories.
+# files = ["/tmp/metrics.out"]
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+#
+# ## Name of a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable. Be cautious when file name variation is high, as this can increase the
+# ## cardinality significantly. Read more about cardinality here:
+# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
+# # file_tag = ""
+
+
+# # Count files in a directory
+# [[inputs.filecount]]
+# ## Directories to gather stats about.
+# ## This accepts standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/** -> recursively find all directories in /var/log and count files in each directory
+# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory
+# ## /var/log -> count all files in /var/log and all of its subdirectories
+# directories = ["/var/cache/apt", "/tmp"]
+#
+# ## Only count files that match the name pattern. Defaults to "*".
+# name = "*"
+#
+# ## Count files in subdirectories. Defaults to true.
+# recursive = true
+#
+# ## Only count regular files. Defaults to true.
+# regular_only = true
+#
+# ## Follow all symlinks while walking the directory tree. Defaults to false.
+# follow_symlinks = false
+#
+# ## Only count files that are at least this size. If size is
+# ## a negative number, only count files that are smaller than the
+# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ...
+# ## Without quotes and units, interpreted as size in bytes.
+# size = "0B"
+#
+# ## Only count files that have not been touched for at least this
+# ## duration. If mtime is negative, only count files that have been
+# ## touched in this duration. Defaults to "0s".
+# mtime = "0s"
+
+
+# # Read stats about given file(s)
+# [[inputs.filestat]]
+# ## Files to gather stats about.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". See https://github.com/gobwas/glob.
+# files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"]
+#
+# ## If true, read the entire file and calculate an md5 checksum.
+# md5 = false
+
+
+# # Read real time temps from fireboard.io servers
+# [[inputs.fireboard]]
+# ## Specify auth token for your account
+# auth_token = "invalidAuthToken"
+# ## You can override the fireboard server URL if necessary
+# # url = "https://fireboard.io/api/v1/devices.json"
+# ## You can set a different http_timeout if you need to
+# ## You should set a string using a number and time indicator
+# ## for example "12s" for 12 seconds.
+# # http_timeout = "4s"
+
+
+# # Read metrics exposed by fluentd in_monitor plugin
+# [[inputs.fluentd]]
+# ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
+# ##
+# ## Endpoint:
+# ## - only one URI is allowed
+# ## - https is not supported
+# endpoint = "http://localhost:24220/api/plugins.json"
+#
+# ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent)
+# exclude = [
+# "monitor_agent",
+# "dummy",
+# ]
+
+
+# # Gather repository information from GitHub hosted repositories.
+# [[inputs.github]]
+# ## List of repositories to monitor
+# repositories = [
+# "influxdata/telegraf",
+# "influxdata/influxdb"
+# ]
+#
+# ## Github API access token. Unauthenticated requests are limited to 60 per hour.
+# # access_token = ""
+#
+# ## Github API enterprise url. Github Enterprise accounts must specify their base url.
+# # enterprise_base_url = ""
+#
+# ## Timeout for HTTP requests.
+# # http_timeout = "5s"
+#
+# ## List of additional fields to query.
+# ## NOTE: Getting those fields might involve issuing additional API-calls, so please
+# ## make sure you do not exceed the rate-limit of GitHub.
+# ##
+# ## Available fields are:
+# ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository)
+# # additional_fields = []
+
+
+# # Read flattened metrics from one or more GrayLog HTTP endpoints
+# [[inputs.graylog]]
+# ## API endpoint, currently supported API:
+# ##
+# ## - multiple (e.g. http://<host>:9000/api/system/metrics/multiple)
+# ## - namespace (e.g. http://<host>:9000/api/system/metrics/namespace/{namespace})
+# ##
+# ## For namespace endpoint, the metrics array will be ignored for that call.
+# ## Endpoint can contain namespace and multiple type calls.
+# ##
+# ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list
+# ## of endpoints
+# servers = [
+# "http://[graylog-server-ip]:9000/api/system/metrics/multiple",
+# ]
+#
+# ## Set timeout (default 5 seconds)
+# # timeout = "5s"
+#
+# ## Metrics list
+# ## List of metrics can be found on Graylog webservice documentation.
+# ## Or by hitting the web service api at:
+# ## http://[graylog-host]:9000/api/system/metrics
+# metrics = [
+# "jvm.cl.loaded",
+# "jvm.memory.pools.Metaspace.committed"
+# ]
+#
+# ## Username and password
+# username = ""
+# password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics of HAProxy, via socket or HTTP stats page
+# [[inputs.haproxy]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port. ie localhost, 10.10.3.33:1936, etc.
+# ## Make sure you specify the complete path to the stats endpoint
+# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats
+# servers = ["http://myhaproxy.com:1936/haproxy?stats"]
+#
+# ## You can also use local socket with standard wildcard globbing.
+# ## Server address not starting with 'http' will be treated as a possible
+# ## socket, so both examples below are valid.
+# # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"]
+#
+# ## By default, some of the fields are renamed from what haproxy calls them.
+# ## Setting this option to true results in the plugin keeping the original
+# ## field names.
+# # keep_field_names = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Monitor disks' temperatures using hddtemp
+# [[inputs.hddtemp]]
+# ## By default, telegraf gathers temp data from all disks detected by
+# ## hddtemp.
+# ##
+# ## Only collect temps from the selected disks.
+# ##
+# ## A * as the device name will return the temperature values of all disks.
+# ##
+# # address = "127.0.0.1:7634"
+# # devices = ["sda", "*"]
+
+
+# # Read formatted metrics from one or more HTTP endpoints
+# [[inputs.http]]
+# ## One or more URLs from which to read formatted metrics
+# urls = [
+# "http://localhost/metrics"
+# ]
+#
+# ## HTTP method
+# # method = "GET"
+#
+# ## Optional HTTP headers
+# # headers = {"X-Special-Header" = "Special-Value"}
+#
+# ## HTTP entity-body to send with POST/PUT requests.
+# # body = ""
+#
+# ## HTTP Content-Encoding for write request body, can be set to "gzip" to
+# ## compress body or "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Optional file with Bearer token
+# ## file content is added as an Authorization header
+# # bearer_token = "/path/to/file"
+#
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2.
+# # client_id = "clientid"
+# # client_secret = "secret"
+# # token_url = "https://identityprovider/oauth2/v1/token"
+# # scopes = ["urn:opc:idm:__myscopes__"]
+#
+# ## HTTP Proxy support
+# # http_proxy_url = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Cookie authentication
+# # cookie_auth_url = "https://localhost/authMe"
+# # cookie_auth_method = "POST"
+# # cookie_auth_username = "username"
+# # cookie_auth_password = "pa$$word"
+# # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}'
+# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
+# ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie
+# # cookie_auth_renewal = "5m"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## List of success status codes
+# # success_status_codes = [200]
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
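+#
+# ## A minimal sketch (hypothetical URL) scraping an endpoint that serves
+# ## InfluxDB line protocol:
+# # [[inputs.http]]
+# # urls = ["http://localhost:8080/telegraf"]
+# # data_format = "influx"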
+# # urls = ["http://localhost"] +# +# ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) +# # http_proxy = "http://localhost:8888" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## HTTP Request Method +# # method = "GET" +# +# ## Whether to follow redirects from the server (defaults to false) +# # follow_redirects = false +# +# ## Optional file with Bearer token +# ## file content is added as an Authorization header +# # bearer_token = "/path/to/file" +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional HTTP Request Body +# # body = ''' +# # {'fake':'data'} +# # ''' +# +# ## Optional name of the field that will contain the body of the response. +# ## By default it is set to an empty String indicating that the body's content won't be added +# # response_body_field = '' +# +# ## Maximum allowed HTTP response body size in bytes. +# ## 0 means to use the default of 32MiB. +# ## If the response body size exceeds this limit a "body_read_error" will be raised +# # response_body_max_size = "32MiB" +# +# ## Optional substring or regex match in body of the response (case sensitive) +# # response_string_match = "\"service_status\": \"up\"" +# # response_string_match = "ok" +# # response_string_match = "\".*_status\".?:.?\"up\"" +# +# ## Expected response status code. +# ## The status code of the response is compared to this value. If they match, the field +# ## "response_status_code_match" will be 1, otherwise it will be 0. If the +# ## expected status code is 0, the check is disabled and the field won't be added. +# # response_status_code = 0 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# ## Use the given name as the SNI server name on each URL +# # tls_server_name = "" +# +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# +# ## Optional setting to map response http headers into tags +# ## If the http header is not present on the request, no corresponding tag will be added +# ## If multiple instances of the http header are present, only the first value will be used +# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} +# +# ## Interface to use when dialing an address +# # interface = "eth0" + + +# ## DEPRECATED: The 'httpjson' plugin is deprecated in version 1.6.0, use 'inputs.http' instead. +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## Name for the service being polled. Will be appended to the name of the +# ## measurement e.g. "httpjson_webserver_stats". +# ## +# ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. +# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## Tags to extract from top-level of JSON server response. 
+# # tag_keys = [
+# # "my_tag_1",
+# # "my_tag_2"
+# # ]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## HTTP Request Parameters (all values must be strings). For "GET" requests, data
+# ## will be included in the query. For "POST" requests, data will be included
+# ## in the request body as "x-www-form-urlencoded".
+# # [inputs.httpjson.parameters]
+# # event_type = "cpu_spike"
+# # threshold = "0.75"
+#
+# ## HTTP Request Headers (all values must be strings).
+# # [inputs.httpjson.headers]
+# # X-Auth-Token = "my-xauth-token"
+# # apiVersion = "v1"
+
+
+# # Gather Icinga2 status
+# [[inputs.icinga2]]
+# ## Required Icinga2 server address
+# # server = "https://localhost:5665"
+#
+# ## Required Icinga2 object type ("services" or "hosts")
+# # object_type = "services"
+#
+# ## Credentials for basic HTTP authentication
+# # username = "admin"
+# # password = "admin"
+#
+# ## Maximum time to receive response.
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Gets counters from all InfiniBand cards and ports installed
+# [[inputs.infiniband]]
+# # no configuration
+
+
+# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.influxdb]]
+# ## Works with InfluxDB debug endpoints out of the box,
+# ## but other services can use this format too.
+# ## See the influxdb plugin's README for more details.
+#
+# ## Multiple URLs from which to read InfluxDB-formatted JSON
+# ## Default is "http://localhost:8086/debug/vars".
+# urls = [
+# "http://localhost:8086/debug/vars"
+# ]
+#
+# ## Username and password to send using HTTP Basic Authentication.
+# # username = ""
+# # password = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## http request & header timeout
+# timeout = "5s"
+
+
+# # Collect statistics about itself
+# [[inputs.internal]]
+# ## If true, collect telegraf memory stats.
+# # collect_memstats = true
+
+
+# # Monitors internet speed using speedtest.net service
+# [[inputs.internet_speed]]
+# ## This plugin downloads many MB of data each time it is run. As such
+# ## consider setting a higher interval for this plugin to reduce the
+# ## demand on your internet connection.
+# # interval = "60m"
+#
+# ## Sets whether to run the file download test
+# # enable_file_download = false
+#
+# ## Caches the closest server location
+# # cache = false
+
+
+# # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs.
+# [[inputs.interrupts]]
+# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is
+# ## stored as a field.
+# ##
+# ## The default is false for backwards compatibility, and will be changed to
+# ## true in a future version. It is recommended to set to true on new
+# ## deployments.
+# # cpu_as_tag = false
+#
+# ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e.
+# # [inputs.interrupts.tagdrop]
+# # irq = [ "NET_RX", "TASKLET" ]
+
+
+# # Read metrics from the bare metal servers via IPMI
+# [[inputs.ipmi_sensor]]
+# ## optionally specify the path to the ipmitool executable
+# # path = "/usr/bin/ipmitool"
+# ##
+# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool.
+# ## Sudo must be configured to allow the telegraf user to run ipmitool
+# ## without a password.
+# # use_sudo = false
+# ##
+# ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR
+# # privilege = "ADMINISTRATOR"
+# ##
+# ## optionally specify one or more servers via a url matching
+# ## [username[:password]@][protocol[(address)]]
+# ## e.g.
+# ## root:passwd@lan(127.0.0.1)
+# ##
+# ## if no servers are specified, local machine sensor stats will be queried
+# ##
+# # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]
+#
+# ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid
+# ## gaps or overlap in pulled data
+# interval = "30s"
+#
+# ## Timeout for the ipmitool command to complete. Default is 20 seconds.
+# timeout = "20s"
+#
+# ## Schema Version: (Optional, defaults to version 1)
+# metric_version = 2
+#
+# ## Optionally provide the hex key for the IPMI connection.
+# # hex_key = ""
+#
+# ## If ipmitool should use a cache
+# ## ipmitool can run roughly 2 to 10 times faster with the cache enabled
+# ## (e.g. on HP G10 servers running ubuntu20.04), but the cache file may not
+# ## work well if some sensors come up late
+# # use_cache = false
+#
+# ## Path to the ipmitool cache file (defaults to OS temp dir)
+# ## The provided path must exist and must be writable
+# # cache_path = ""
+
+
+# # Gather packets and bytes counters from Linux ipsets
+# [[inputs.ipset]]
+# ## By default, we only show sets which have already matched at least 1 packet.
+# ## Set include_unmatched_sets = true to gather them all.
+# include_unmatched_sets = false
+# ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+# ## You can avoid using sudo or root, by setting appropriate privileges for
+# ## the telegraf.service systemd service.
+# use_sudo = false
+# ## The default timeout of 1s for ipset execution can be overridden here:
+# # timeout = "1s"
+#
+
+
+# # Read jobs and cluster metrics from Jenkins instances
+# [[inputs.jenkins]]
+# ## The Jenkins URL in the format "schema://host:port"
+# url = "http://my-jenkins-instance:8080"
+# # username = "admin"
+# # password = "admin"
+#
+# ## Set response_timeout
+# response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Optional Max Job Build Age filter
+# ## Default 1 hour, ignore builds older than max_build_age
+# # max_build_age = "1h"
+#
+# ## Optional Sub Job Depth filter
+# ## Jenkins can have unlimited layers of sub jobs
+# ## This config will limit the layers of pulling; the default value 0 means
+# ## unlimited pulling until there are no more sub jobs
+# # max_subjob_depth = 0
+#
+# ## Optional Sub Job Per Layer
+# ## In workflow-multibranch-plugin, each branch will be created as a sub job.
+# ## This config will limit the calls to only the latest branches in each layer;
+# ## empty will use the default value 10
+# # max_subjob_per_layer = 10
+#
+# ## Jobs to include or exclude from gathering
+# ## When using both lists, job_exclude has priority.
+# ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
+# # job_include = [ "*" ]
+# # job_exclude = [ ]
+#
+# ## Nodes to include or exclude from gathering
+# ## When using both lists, node_exclude has priority.
+# # node_include = [ "*" ]
+# # node_exclude = [ ]
+#
+# ## Worker pool for jenkins plugin only
+# ## Leaving this field empty will use the default value 5
+# # max_connections = 5
+
+
+# ## DEPRECATED: The 'jolokia' plugin is deprecated in version 1.5.0, use 'inputs.jolokia2' instead.
+# # Read JMX metrics through Jolokia
+# [[inputs.jolokia]]
+# ## This is the context root used to compose the jolokia url
+# ## NOTE that Jolokia requires a trailing slash at the end of the context root
+# context = "/jolokia/"
+#
+# ## This specifies the mode used
+# # mode = "proxy"
+# #
+# ## When in proxy mode this section is used to specify further
+# ## proxy address configurations.
+# ## Remember to change host address to fit your environment.
+# # [inputs.jolokia.proxy]
+# # host = "127.0.0.1"
+# # port = "8080"
+#
+# ## Optional http timeouts
+# ##
+# ## response_header_timeout, if non-zero, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request.
+# # response_header_timeout = "3s"
+# ##
+# ## client_timeout specifies a time limit for requests made by this client.
+# ## Includes connection time, any redirects, and reading the response body.
+# # client_timeout = "4s"
+#
+# ## List of servers exposing jolokia read service
+# [[inputs.jolokia.servers]]
+# name = "as-server-01"
+# host = "127.0.0.1"
+# port = "8080"
+# # username = "myuser"
+# # password = "mypassword"
+#
+# ## List of metrics collected on above servers
+# ## Each metric consists of a name, a jmx path and either
+# ## a pass or drop slice attribute.
+# ## This collects all heap memory usage metrics.
+# [[inputs.jolokia.metrics]]
+# name = "heap_memory_usage"
+# mbean = "java.lang:type=Memory"
+# attribute = "HeapMemoryUsage"
+#
+# ## This collects thread count metrics.
+# [[inputs.jolokia.metrics]]
+# name = "thread_count"
+# mbean = "java.lang:type=Threading"
+# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
+#
+# ## This collects the number of classes loaded/unloaded.
+# [[inputs.jolokia.metrics]]
+# name = "class_count"
+# mbean = "java.lang:type=ClassLoading"
+# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"
+
+
+# # Read JMX metrics from a Jolokia REST agent endpoint
+# [[inputs.jolokia2_agent]]
+# # default_tag_prefix = ""
+# # default_field_prefix = ""
+# # default_field_separator = "."
+#
+# # Add agents URLs to query
+# urls = ["http://localhost:8080/jolokia"]
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_agent.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
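+#
+# ## e.g. an additional (hypothetical) metric block reading heap usage:
+# # [[inputs.jolokia2_agent.metric]]
+# # name = "java_memory"
+# # mbean = "java.lang:type=Memory"
+# # paths = ["HeapMemoryUsage"]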
+#
+# ## Proxy agent
+# url = "http://localhost:8080/jolokia"
+# # username = ""
+# # password = ""
+# # response_timeout = "5s"
+#
+# ## Optional TLS config
+# # tls_ca = "/var/private/ca.pem"
+# # tls_cert = "/var/private/client.pem"
+# # tls_key = "/var/private/client-key.pem"
+# # insecure_skip_verify = false
+#
+# ## Add proxy targets to query
+# # default_target_username = ""
+# # default_target_password = ""
+# [[inputs.jolokia2_proxy.target]]
+# url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi"
+# # username = ""
+# # password = ""
+#
+# ## Add metrics to read
+# [[inputs.jolokia2_proxy.metric]]
+# name = "java_runtime"
+# mbean = "java.lang:type=Runtime"
+# paths = ["Uptime"]
+
+
+# # Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints
+# [[inputs.kapacitor]]
+# ## Multiple URLs from which to read Kapacitor-formatted JSON
+# ## Default is "http://localhost:9092/kapacitor/v1/debug/vars".
+# urls = [
+# "http://localhost:9092/kapacitor/v1/debug/vars"
+# ]
+#
+# ## Time limit for http requests
+# timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read status information from one or more Kibana servers
+# [[inputs.kibana]]
+# ## Specify a list of one or more Kibana servers
+# servers = ["http://localhost:5601"]
+#
+# ## Timeout for HTTP requests
+# timeout = "5s"
+#
+# ## HTTP Basic Auth credentials
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from the Kubernetes API
+# [[inputs.kube_inventory]]
+# ## URL for the Kubernetes API
+# url = "https://127.0.0.1"
+#
+# ## Namespace to use. Set to "" to use all namespaces.
+# # namespace = "default"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional Resources to exclude from gathering
+# ## Leave blank to gather everything available.
+# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes",
+# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets"
+# # resource_exclude = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## Optional Resources to include when gathering
+# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ]
+#
+# ## selectors to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all selectors as tags
+# ## selector_exclude overrides selector_include if both set.
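+# ## For example (illustrative): selector_include = ["app"] with
+# ## selector_exclude = [] would tag metrics with only the "app" selector.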
+# # selector_include = []
+# # selector_exclude = ["*"]
+#
+# ## Optional TLS Config
+# ## Trusted root certificates for server
+# # tls_ca = "/path/to/cafile"
+# ## Used for TLS client certificate authentication
+# # tls_cert = "/path/to/certfile"
+# ## Used for TLS client certificate authentication
+# # tls_key = "/path/to/keyfile"
+# ## Send the specified TLS server name via SNI
+# # tls_server_name = "kubernetes.example.com"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Uncomment to remove deprecated metrics.
+# # fielddrop = ["terminated_reason"]
+
+
+# # Read metrics from the Kubernetes kubelet API
+# [[inputs.kubernetes]]
+# ## URL for the kubelet
+# url = "http://127.0.0.1:10255"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# ## If both of these are empty, we'll use the default serviceaccount:
+# ## at: /run/secrets/kubernetes.io/serviceaccount/token
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## Pod labels to be added as tags. An empty array for both include and
+# ## exclude will include all labels.
+# # label_include = []
+# # label_exclude = ["*"]
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from a LeoFS Server via SNMP
+# [[inputs.leofs]]
+# ## An array of URLs of the form:
+# ## host [ ":" port]
+# servers = ["127.0.0.1:4010"]
+
+
+# # Provides Linux sysctl fs metrics
+# [[inputs.linux_sysctl_fs]]
+# # no configuration
+
+
+# # Read metrics exposed by Logstash
+# [[inputs.logstash]]
+# ## The URL of the exposed Logstash API endpoint.
+# url = "http://127.0.0.1:9600"
+#
+# ## Use Logstash 5 single pipeline API, set to true when monitoring
+# ## Logstash 5.
+# # single_pipeline = false
+#
+# ## Enable optional collection components. Can contain
+# ## "pipelines", "process", and "jvm".
+# # collect = ["pipelines", "process", "jvm"]
+#
+# ## Timeout for HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional HTTP Basic Auth credentials.
+# # username = "username"
+# # password = "pa$$word"
+#
+# ## Optional TLS Config.
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Use TLS but skip chain & host verification.
+# # insecure_skip_verify = false
+#
+# ## Optional HTTP headers.
+# # [inputs.logstash.headers]
+# # "X-Special-Header" = "Special-Value"
+
+
+# # Read metrics about LVM physical volumes, volume groups, logical volumes.
+# [[inputs.lvm]]
+# ## Use sudo to run LVM commands
+# use_sudo = false
+
+
+# # Gathers metrics from the /3.0/reports MailChimp API
+# [[inputs.mailchimp]]
+# ## MailChimp API key
+# ## get from https://admin.mailchimp.com/account/api/
+# api_key = "" # required
+#
+# ## Reports for campaigns sent more than days_old ago will not be collected.
+# ## 0 means collect all and is the default value.
+# days_old = 0
+#
+# ## Campaign ID to get; if empty, gets all campaigns. This option overrides days_old.
+# # campaign_id = ""
+
+
+# # Retrieves information on a specific host in a MarkLogic Cluster
+# [[inputs.marklogic]]
+# ## Base URL of the MarkLogic HTTP Server.
+# url = "http://localhost:8002"
+#
+# ## List of specific hostnames to retrieve information from. At least one (1) is required.
+# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or many mcrouter servers. +# [[inputs.mcrouter]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. +# servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". +# # timeout = "5s" + + +# # Read metrics from one or many memcached servers. +# [[inputs.memcached]] +# # An array of address to gather stats about. Specify an ip on hostname +# # with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # An array of unix memcached sockets to gather stats about. +# # unix_sockets = ["/var/run/memcached.sock"] +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# +# ## A list of Mesos masters. +# masters = ["http://localhost:5050"] +# +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "framework_offers", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# "allocator", +# ] +# +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol +# [[inputs.minecraft]] +# ## Address of the Minecraft server. +# # server = "localhost" +# +# ## Server RCON Port. +# # port = "25575" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Generate metrics for test and demonstration purposes +# [[inputs.mock]] +# ## Set the metric name to use for reporting +# metric_name = "mock" +# +# ## Optional string key-value pairs of tags to add to all metrics +# # [inputs.mock.tags] +# # "key" = "value" +# +# ## One or more mock data fields *must* be defined. 
+# ## +# ## [[inputs.mock.constant]] +# ## name = "constant" +# ## value = value_of_any_type +# ## [[inputs.mock.random]] +# ## name = "rand" +# ## min = 1.0 +# ## max = 6.0 +# ## [[inputs.mock.sine_wave]] +# ## name = "wave" +# ## amplitude = 1.0 +# ## period = 0.5 +# ## [[inputs.mock.step]] +# ## name = "plus_one" +# ## start = 0.0 +# ## step = 1.0 +# ## [[inputs.mock.stock]] +# ## name = "abc" +# ## price = 50.00 +# ## volatility = 0.2 + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# ## Maximum number of retries and the time to wait between retries +# ## when a slave-device is busy. +# # busy_retries = 0 +# # busy_retries_wait = "100ms" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# ## Serial (RS485; RS232) +# # controller = "file:///dev/ttyUSB0" +# # baud_rate = 9600 +# # data_bits = 8 +# # parity = "N" +# # stop_bits = 1 +# +# ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" +# ## default behaviour is "TCP" if the controller is TCP +# ## For Serial you can choose between "RTU" and "ASCII" +# # transmission_mode = "RTU" +# +# ## Trace the connection to the modbus device as debug messages +# ## Note: You have to enable telegraf's debug mode to see those messages! +# # debug_connection = false +# +# ## Define the configuration schema +# ## |---register -- define fields per register type in the original style (only supports one slave ID) +# ## |---request -- define fields on a requests base +# configuration_type = "register" +# +# ## --- "register" configuration style --- +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## measurement - the (optional) measurement name, defaults to "modbus" +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, +# ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) +# ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, +# { name = "current", 
+# byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
+# { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
+# { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
+# ]
+# input_registers = [
+# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+# ]
+#
+#
+# ## --- "request" configuration style ---
+#
+# ## Per request definition
+# ##
+#
+# ## Define a request sent to the device
+# ## Multiple such requests can be defined. Data will be collated into metrics at the end of data collection.
+# [[inputs.modbus.request]]
+# ## ID of the modbus slave device to query.
+# ## If you need to query multiple slave-devices, create several "request" definitions.
+# slave_id = 1
+#
+# ## Byte order of the data.
+# ## |---ABCD -- Big Endian (Motorola)
+# ## |---DCBA -- Little Endian (Intel)
+# ## |---BADC -- Big Endian with byte swap
+# ## |---CDAB -- Little Endian with byte swap
+# byte_order = "ABCD"
+#
+# ## Type of the register for the request
+# ## Can be "coil", "discrete", "holding" or "input"
+# register = "coil"
+#
+# ## Name of the measurement.
+# ## Can be overridden by the individual field definitions. Defaults to "modbus"
+# # measurement = "modbus"
+#
+# ## Field definitions
+# ## Analog Variables, Input Registers and Holding Registers
+# ## address - address of the register to query. For coil and discrete inputs this is the bit address.
+# ## name *1 - field name
+# ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and
+# ## FLOAT32, FLOAT64 (IEEE 754 binary representation)
+# ## scale *1,2 - (optional) factor to scale the variable with
+# ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if
+# ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc).
+# ## measurement *1 - (optional) measurement name, defaults to the setting of the request
+# ## omit - (optional) omit this field. Useful to leave out single values when querying many registers
+# ## with a single request. Defaults to "false".
+# ##
+# ## *1: Those fields are ignored if the field is omitted ("omit"=true)
+# ##
+# ## *2: These fields are ignored for both "coil" and "discrete"-input type registers. For those register types
+# ## the fields are output as zero or one in UINT64 format by default.
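+# ##
+# ## Worked example of the output rules above (illustrative): a field with
+# ## type="INT16" and scale=0.1 is output as FLOAT64, while the same field
+# ## without "scale" is output as INT64.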
+#
+# ## Coil / discrete input example
+# fields = [
+# { address=0, name="motor1_run"},
+# { address=1, name="jog", measurement="motor"},
+# { address=2, name="motor1_stop", omit=true},
+# { address=3, name="motor1_overheating"},
+# ]
+#
+# [inputs.modbus.request.tags]
+# machine = "impresser"
+# location = "main building"
+#
+# [[inputs.modbus.request]]
+# ## Holding example
+# ## All of those examples will result in FLOAT64 field outputs
+# slave_id = 1
+# byte_order = "DCBA"
+# register = "holding"
+# fields = [
+# { address=0, name="voltage", type="INT16", scale=0.1 },
+# { address=1, name="current", type="INT32", scale=0.001 },
+# { address=3, name="power", type="UINT32", omit=true },
+# { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" },
+# { address=7, name="frequency", type="UINT32", scale=0.1 },
+# { address=8, name="power_factor", type="INT64", scale=0.01 },
+# ]
+#
+# [inputs.modbus.request.tags]
+# machine = "impresser"
+# location = "main building"
+#
+# [[inputs.modbus.request]]
+# ## Input example with type conversions
+# slave_id = 1
+# byte_order = "ABCD"
+# register = "input"
+# fields = [
+# { address=0, name="rpm", type="INT16" }, # will result in an INT64 field
+# { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in a FLOAT64 field
+# { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in a FLOAT64 field
+# { address=4, name="hours", type="UINT32" }, # will result in a UINT64 field
+# ]
+#
+# [inputs.modbus.request.tags]
+# machine = "impresser"
+# location = "main building"
+#
+#
+#
+# ## Enable workarounds required by some devices to work correctly
+# # [inputs.modbus.workarounds]
+# ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices.
+# # pause_between_requests = "0ms"
+# ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain
+# ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices)
+# ## from multiple instances you might want to only stay connected during gather and disconnect afterwards.
+# # close_connection_after_gather = false
+
+
+# # Read metrics from one or many MongoDB servers
+# [[inputs.mongodb]]
+# ## An array of URLs of the form:
+# ## "mongodb://" [user ":" pass "@"] host [ ":" port]
+# ## For example:
+# ## mongodb://user:auth_key@10.10.3.30:27017,
+# ## mongodb://10.10.3.33:18832,
+# ##
+# ## If connecting to a cluster, users must include the "?connect=direct" in
+# ## the URL to ensure that the connection goes directly to the specified node
+# ## and does not have all connections passed to the master node.
+# servers = ["mongodb://127.0.0.1:27017/?connect=direct"]
+#
+# ## When true, collect cluster status.
+# ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which
+# ## may have an impact on performance.
+# # gather_cluster_status = true
+#
+# ## When true, collect per database stats
+# # gather_perdb_stats = false
+#
+# ## When true, collect per collection stats
+# # gather_col_stats = false
+#
+# ## When true, collect usage statistics for each collection
+# ## (insert, update, queries, remove, getmore, commands etc...).
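+# ## (These are typically gathered via MongoDB's "top" admin command, which
+# ## requires appropriate privileges for the connecting user.)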
+# # gather_top_stat = false
+#
+# ## List of databases where collection stats are collected
+# ## If empty, all databases are included
+# # col_stats_dbs = ["local"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics and status information about processes managed by Monit
+# [[inputs.monit]]
+# ## Monit HTTPD address
+# address = "http://127.0.0.1:2812"
+#
+# ## Username and Password for Monit
+# # username = ""
+# # password = ""
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Aggregates the contents of multiple files into a single point
+# [[inputs.multifile]]
+# ## Base directory where telegraf will look for files.
+# ## Omit this option to use absolute paths.
+# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0"
+#
+# ## If true, discard all data when a single file can't be read.
+# ## Otherwise, Telegraf omits the field generated from this file.
+# # fail_early = true
+#
+# ## Files to parse each interval.
+# [[inputs.multifile.file]]
+# file = "in_pressure_input"
+# dest = "pressure"
+# conversion = "float"
+# [[inputs.multifile.file]]
+# file = "in_temp_input"
+# dest = "temperature"
+# conversion = "float(3)"
+# [[inputs.multifile.file]]
+# file = "in_humidityrelative_input"
+# dest = "humidityrelative"
+# conversion = "float(3)"
+
+
+# # Read metrics from one or many mysql servers
+# [[inputs.mysql]]
+# ## specify servers via a url matching:
+# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]]
+# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name
+# ## e.g.
+# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"]
+# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"]
+# #
+# ## If no servers are specified, then localhost is used as the host.
+# servers = ["tcp(127.0.0.1:3306)/"]
+#
+# ## Selects the metric output format.
+# ##
+# ## This option exists to maintain backwards compatibility; if you have
+# ## existing metrics do not set or change this value until you are ready to
+# ## migrate to the new format.
+# ##
+# ## If you do not have existing metrics from this plugin, set to the latest
+# ## version.
+# ##
+# ## Telegraf >=1.6: metric_version = 2
+# ## <1.6: metric_version = 1 (or unset)
+# metric_version = 2
+#
+# ## if the list is empty, then metrics are gathered from all database tables
+# # table_schema_databases = []
+#
+# ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases provided in the list above
+# # gather_table_schema = false
+#
+# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
+# # gather_process_list = false
+#
+# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS
+# # gather_user_statistics = false
+#
+# ## gather auto_increment columns and max values from information schema
+# # gather_info_schema_auto_inc = false
+#
+# ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS
+# # gather_innodb_metrics = false
+#
+# ## gather metrics from all channels from SHOW SLAVE STATUS command output
+# # gather_all_slave_channels = false
+#
+# ## gather metrics from SHOW SLAVE STATUS command output
+# # gather_slave_status = false
+#
+# ## use SHOW ALL SLAVES STATUS command output for MariaDB
+# # mariadb_dialect = false
+#
+# ## gather metrics from SHOW BINARY LOGS command output
+# # gather_binary_logs = false
+#
+# ## gather metrics from SHOW GLOBAL VARIABLES command output
+# # gather_global_variables = true
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
+# # gather_table_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
+# # gather_table_lock_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
+# # gather_index_io_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
+# # gather_event_waits = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
+# # gather_file_events_stats = false
+#
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
+# # gather_perf_events_statements = false
+# #
+# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
+# # gather_perf_sum_per_acc_per_event = false
+# #
+# ## list of events to be gathered for gather_perf_sum_per_acc_per_event
+# ## in case of an empty list all events will be gathered
+# # perf_summary_events = []
+# #
+# ## the limits for metrics from perf_events_statements
+# # perf_events_statements_digest_text_limit = 120
+# # perf_events_statements_limit = 250
+# # perf_events_statements_time_limit = 86400
+#
+# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
+# ## example: interval_slow = "30m"
+# # interval_slow = ""
+#
+# ## Optional TLS Config (will be used if tls=custom parameter specified in server uri)
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Provides metrics about the state of a NATS server
+# [[inputs.nats]]
+# ## The address of the monitoring endpoint of the NATS server
+# server = "http://localhost:8222"
+#
+# ## Maximum time to receive response
+# # response_timeout = "5s"
+
+
+# # Neptune Apex data collector
+# [[inputs.neptune_apex]]
+# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex.
+# ## Measurements will be logged under "apex".
+#
+# ## The base URL of the local Apex(es). If you specify more than one server, they will
+# ## be differentiated by the "source" tag.
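+# ## e.g. (illustrative) servers = ["http://apex1.local", "http://apex2.local"]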
+# servers = [
+# "http://apex.local",
+# ]
+#
+# ## The response_timeout specifies how long to wait for a reply from the Apex.
+# # response_timeout = "5s"
+#
+
+
+# # Gather metrics about network interfaces
+# [[inputs.net]]
+# ## By default, telegraf gathers stats from any up interface (excluding loopback)
+# ## Setting interfaces will tell it to gather these explicit interfaces,
+# ## regardless of status. When specifying an interface, glob-style
+# ## patterns are also supported.
+# ##
+# # interfaces = ["eth*", "enp0s[0-1]", "lo"]
+# ##
+# ## On linux systems telegraf also collects protocol stats.
+# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
+# ##
+# # ignore_protocol_stats = false
+# ##
+
+
+# # Collect response time of a TCP or UDP connection
+# [[inputs.net_response]]
+# ## Protocol, must be "tcp" or "udp"
+# ## NOTE: because the "udp" protocol does not respond to requests, it requires
+# ## a send/expect string pair (see below).
+# protocol = "tcp"
+# ## Server address (default localhost)
+# address = "localhost:80"
+#
+# ## Set timeout
+# # timeout = "1s"
+#
+# ## Set read timeout (only used if expecting a response)
+# # read_timeout = "1s"
+#
+# ## The following options are required for UDP checks. For TCP, they are
+# ## optional. The plugin will send the given string to the server and then
+# ## expect to receive the given 'expect' string back.
+# ## string sent to the server
+# # send = "ssh"
+# ## expected string in answer
+# # expect = "ssh"
+#
+# ## Uncomment to remove deprecated fields; recommended for new deploys
+# # fielddrop = ["result_type", "string_found"]
+
+
+# # Read TCP metrics such as established, time wait and sockets counts.
+# [[inputs.netstat]]
+# # no configuration
+
+
+# # Read per-mount NFS client metrics from /proc/self/mountstats
+# [[inputs.nfsclient]]
+# ## Read more low-level metrics (optional, defaults to false)
+# # fullstat = false
+#
+# ## List of mounts to explicitly include or exclude (optional)
+# ## The pattern (Go regexp) is matched against the mount point (not the
+# ## device being mounted). If include_mounts is set, all mounts are ignored
+# ## unless present in the list. If a mount is listed in both include_mounts
+# ## and exclude_mounts, it is excluded. Go regexp patterns can be used.
+# # include_mounts = []
+# # exclude_mounts = []
+#
+# ## List of operations to include or exclude from collecting. This applies
+# ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts:
+# ## the default is to collect everything; when include_operations is set, only
+# ## those OPs are collected; when exclude_operations is set, all are collected
+# ## except those listed. If include and exclude are set, the OP is excluded.
+# ## See /proc/self/mountstats for a list of valid operations; note that
+# ## NFSv3 and NFSv4 have different lists. While it is not possible to
+# ## have different include/exclude lists for NFSv3/4, unused elements
+# ## in the list should be okay. It is possible to have different lists
+# ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas,
+# ## with their own lists. See "include_mounts" above, and be careful of
+# ## duplicate metrics.
+# # include_operations = []
+# # exclude_operations = []
+
+
+# # Read Nginx's basic status information (ngx_http_stub_status_module)
+# [[inputs.nginx]]
+# ## An array of Nginx stub_status URIs to gather stats.
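+# ## stub_status must be enabled in nginx for this URL to exist; a minimal
+# ## (illustrative) nginx snippet:
+# ##   location /server_status { stub_status; }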
+# urls = ["http://localhost/server_status"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Read Nginx Plus' advanced status information +# [[inputs.nginx_plus]] +# ## An array of Nginx status URIs to gather stats. +# urls = ["http://localhost/status"] +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx Plus API advanced status information +# [[inputs.nginx_plus_api]] +# ## An array of Nginx API URIs to gather stats. +# urls = ["http://localhost/api"] +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-sts) +# [[inputs.nginx_sts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. 
+# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Nomad API +# [[inputs.nomad]] +# ## URL for the Nomad agent +# # url = "http://127.0.0.1:4646" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile + + +# # A plugin to collect stats from the NSD DNS resolver +# [[inputs.nsd]] +# ## Address of server to connect to, optionally ':port'. Defaults to the +# ## address in the nsd config file. +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the nsd-control binary can be overridden with: +# # binary = "/usr/sbin/nsd-control" +# +# ## The default location of the nsd config file can be overridden with: +# # config_file = "/etc/nsd/nsd.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. +# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# dns_lookup = true + + +# # Pulls statistics from nvidia GPUs attached to the host +# [[inputs.nvidia_smi]] +# ## Optional: path to nvidia-smi binary, defaults "/usr/bin/nvidia-smi" +# ## We will first try to locate the nvidia-smi binary with the explicitly specified value (or default value), +# ## if it is not found, we will try to locate it on PATH(exec.LookPath), if it is still not found, an error will be returned +# # bin_path = "/usr/bin/nvidia-smi" +# +# ## Optional: timeout for GPU polling +# # timeout = "5s" + + +# # Retrieve data from OPCUA devices +# [[inputs.opcua]] +# ## Metric name +# # name = "opcua" +# # +# ## OPC UA Endpoint URL +# # endpoint = "opc.tcp://localhost:4840" +# # +# ## Maximum time allowed to establish a connect to the endpoint. +# # connect_timeout = "10s" +# # +# ## Maximum time allowed for a request over the estabilished connection. +# # request_timeout = "5s" +# # +# ## Security policy, one of "None", "Basic128Rsa15", "Basic256", +# ## "Basic256Sha256", or "auto" +# # security_policy = "auto" +# # +# ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" +# # security_mode = "auto" +# # +# ## Path to cert.pem. Required when security mode or policy isn't "None". 
+# ## If the cert path is not supplied, a self-signed cert and key will be generated.
+# # certificate = "/etc/telegraf/cert.pem"
+# #
+# ## Path to private key.pem. Required when security mode or policy isn't "None".
+# ## If the key path is not supplied, a self-signed cert and key will be generated.
+# # private_key = "/etc/telegraf/key.pem"
+# #
+# ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To
+# ## authenticate using a specific ID, select 'Certificate' or 'UserName'
+# # auth_method = "Anonymous"
+# #
+# ## Username. Required for auth_method = "UserName"
+# # username = ""
+# #
+# ## Password. Required for auth_method = "UserName"
+# # password = ""
+# #
+# ## Option to select the metric timestamp to use. Valid options are:
+# ## "gather" -- uses the time of receiving the data in telegraf
+# ## "server" -- uses the timestamp provided by the server
+# ## "source" -- uses the timestamp provided by the source
+# # timestamp = "gather"
+# #
+# ## Node ID configuration
+# ## name - field name to use in the output
+# ## namespace - OPC UA namespace of the node (integer value 0 through 3)
+# ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque)
+# ## identifier - OPC UA ID (tag as shown in opcua browser)
+# ## tags - extra tags to be added to the output metric (optional)
+# ## Example:
+# ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2"]]}
+# # nodes = [
+# # {name="", namespace="", identifier_type="", identifier=""},
+# # {name="", namespace="", identifier_type="", identifier=""},
+# #]
+# #
+# ## Node Group
+# ## Sets defaults for OPC UA namespace and ID type so they aren't required in
+# ## every node. A group can also have a metric name that overrides the main
+# ## plugin metric name.
+# ##
+# ## Multiple node groups are allowed
+# #[[inputs.opcua.group]]
+# ## Group Metric name. Overrides the top level name. If unset, the
+# ## top level name is used.
+# # name =
+# #
+# ## Group default namespace. If a node in the group doesn't set its
+# ## namespace, this is used.
+# # namespace =
+# #
+# ## Group default identifier type. If a node in the group doesn't set its
+# ## identifier type, this is used.
+# # identifier_type =
+# #
+# ## Node ID Configuration. Array of nodes with the same settings as above.
+# # nodes = [
+# # {name="", namespace="", identifier_type="", identifier=""},
+# # {name="", namespace="", identifier_type="", identifier=""},
+# #]
+#
+# ## Enable workarounds required by some devices to work correctly
+# # [inputs.opcua.workarounds]
+# ## Set additional valid status codes, StatusOK (0x0) is always considered valid
+# # additional_valid_status_codes = ["0xC0"]
+
+
+# # OpenLDAP cn=Monitor plugin
+# [[inputs.openldap]]
+# host = "localhost"
+# port = 389
+#
+# # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption.
+# # note that port will likely need to be changed to 636 for ldaps
+# # valid options: "" | "starttls" | "ldaps"
+# tls = ""
+#
+# # skip peer certificate verification. Default is false.
+# insecure_skip_verify = false
+#
+# # Path to PEM-encoded Root certificate to use to verify server certificate
+# tls_ca = "/etc/ssl/certs.pem"
+#
+# # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed.
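+# # e.g. (illustrative) bind_dn = "cn=monitor,dc=example,dc=org"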
+# bind_dn = "" +# bind_password = "" +# +# # reverse metric names so they sort more naturally +# # Defaults to false if unset, but is set to true when generating a new config +# reverse_metric_names = true + + +# # Get standard NTP query metrics from OpenNTPD. +# [[inputs.openntpd]] +# ## Run ntpctl binary with sudo. +# # use_sudo = false +# +# ## Location of the ntpctl binary. +# # binary = "/usr/sbin/ntpctl" +# +# ## Maximum time the ntpctl binary is allowed to run. +# # timeout = "5ms" + + +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# [[inputs.opensmtpd]] +# ## If running as a restricted user you can prepend sudo for additional access: +# #use_sudo = false +# +# ## The default location of the smtpctl binary can be overridden with: +# binary = "/usr/sbin/smtpctl" +# +# # The default timeout of 1s can be overridden with: +# #timeout = "1s" + + +# # Collects performance metrics from OpenStack services +# [[inputs.openstack]] +# ## The recommended interval to poll is '30m' +# +# ## The identity endpoint to authenticate against and get the service catalog from. +# authentication_endpoint = "https://my.openstack.cloud:5000" +# +# ## The domain to authenticate against when using a V3 identity endpoint. +# # domain = "default" +# +# ## The project to authenticate as. +# # project = "admin" +# +# ## User authentication credentials. Must have admin rights. +# username = "admin" +# password = "password" +# +# ## Available services are: +# ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", +# ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" +# # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] +# +# ## Collect Server Diagnostics +# # server_diagnotics = false +# +# ## output secrets (such as adminPass(for server) and UserID(for volume)). +# # output_secrets = false +# +# ## Amount of time allowed to complete the HTTP(s) request. +# # timeout = "5s" +# +# ## HTTP Proxy support +# # http_proxy_url = "" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Options for tags received from Openstack +# # tag_prefix = "openstack_tag_" +# # tag_value = "true" +# +# ## Timestamp format for timestamp data received from Openstack. +# ## If false format is unix nanoseconds. +# # human_readable_timestamps = false +# +# ## Measure Openstack call duration +# # measure_openstack_requests = false + + +# # Read current weather and forecasts data from openweathermap.org +# [[inputs.openweathermap]] +# ## OpenWeatherMap API key. +# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +# +# ## City ID's to collect weather data from. +# city_id = ["5391959"] +# +# ## Language of the description field. Can be one of "ar", "bg", +# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", +# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", +# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" +# # lang = "en" +# +# ## APIs to fetch; can contain "weather" or "forecast". +# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. 
+# ## Can be one of "metric", "imperial", or "standard".
+# # units = "metric"
+#
+# ## Query interval; OpenWeatherMap weather data is updated every 10
+# ## minutes.
+# interval = "10m"
+
+
+# # Read metrics of passenger using passenger-status
+# [[inputs.passenger]]
+# ## Path of passenger-status.
+# ##
+# ## The plugin gathers metrics by parsing the XML output of passenger-status.
+# ## More information about the tool:
+# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
+# ##
+# ## If no path is specified, the plugin simply executes passenger-status,
+# ## which hopefully can be found in your PATH.
+# command = "passenger-status -v --show=xml"
+
+
+# # Gather counters from PF
+# [[inputs.pf]]
+# ## PF requires root access on most systems.
+# ## Setting 'use_sudo' to true will make use of sudo to run pfctl.
+# ## Users must configure sudo to allow the telegraf user to run pfctl with no password.
+# ## sudo can be restricted to only the list command "pfctl -s info".
+# use_sudo = false
+
+
+# # Read metrics of phpfpm, via HTTP status page or socket
+# [[inputs.phpfpm]]
+# ## An array of addresses to gather stats about. Specify an ip or hostname
+# ## with optional port and path
+# ##
+# ## Plugin can be configured in three modes (any can be used):
+# ## - http: the URL must start with http:// or https://, ie:
+# ## "http://localhost/status"
+# ## "http://192.168.130.1/status?full"
+# ##
+# ## - unixsocket: path to fpm socket, ie:
+# ## "/var/run/php5-fpm.sock"
+# ## or using a custom fpm status path:
+# ## "/var/run/php5-fpm.sock:fpm-custom-status-path"
+# ## glob patterns are also supported:
+# ## "/var/run/php*.sock"
+# ##
+# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
+# ## "fcgi://10.0.0.12:9000/status"
+# ## "cgi://10.0.10.12:9001/status"
+# ##
+# ## Example of multiple gathering from local socket and remote host
+# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]
+# urls = ["http://localhost/status"]
+#
+# ## Duration allowed to complete HTTP requests.
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Ping given url(s) and return statistics
+# [[inputs.ping]]
+# ## Hosts to send ping packets to.
+# urls = ["example.org"]
+#
+# ## Method used for sending pings, can be either "exec" or "native". When set
+# ## to "exec" the system's ping command will be executed. When set to "native"
+# ## the plugin will send pings directly.
+# ##
+# ## While the default is "exec" for backwards compatibility, new deployments
+# ## are encouraged to use the "native" method for improved compatibility and
+# ## performance.
+# # method = "exec"
+#
+# ## Number of ping packets to send per interval. Corresponds to the "-c"
+# ## option of the ping command.
+# # count = 1
+#
+# ## Time to wait between sending ping packets in seconds. Operates like the
+# ## "-i" option of the ping command.
+# # ping_interval = 1.0
+#
+# ## If set, the time to wait for a ping response in seconds. Operates like
+# ## the "-W" option of the ping command.
+# # timeout = 1.0
+#
+# ## If set, the total ping deadline, in seconds. Operates like the -w option
+# ## of the ping command.
+# # deadline = 10
+#
+# ## Interface or source address to send ping from. Operates like the -I or -S
+# ## option of the ping command.
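+# ## e.g. (illustrative) an interface name such as "eth0", or a source
+# ## address such as "10.0.0.1"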
+# # interface = "" +# +# ## Percentiles to calculate. This only works with the native method. +# # percentiles = [50, 95, 99] +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. +# # arguments = ["-c", "3"] +# +# ## Use only IPv6 addresses when resolving a hostname. +# # ipv6 = false +# +# ## Number of data bytes to be sent. Corresponds to the "-s" +# ## option of the ping command. This only works with the native method. +# # size = 56 + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# # An array of sockets to gather stats about. +# # Specify a path to unix socket. +# # +# # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path. +# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Read metrics from one or many PowerDNS Recursor servers +# [[inputs.powerdns_recursor]] +# ## Path to the Recursor control socket. +# unix_sockets = ["/var/run/pdns_recursor.controlsocket"] +# +# ## Directory to create receive socket. This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. +# # socket_mode = "0666" + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# ## Systemd unit name, supports globs when include_systemd_children is set to true +# # systemd_unit = "nginx.service" +# # include_systemd_children = false +# ## CGroup name or path, supports globs +# # cgroup = "systemd/system.slice/nginx.service" +# +# ## Windows service name +# # win_service = "" +# +# ## override for process_name +# ## This is optional; default is sourced from /proc//status +# # process_name = "bar" +# +# ## Field name prefix +# # prefix = "" +# +# ## When true add the full cmdline as a tag. +# # cmdline_tag = false +# +# ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. +# # mode = "irix" +# +# ## Add the PID as a tag instead of as a field. When collecting multiple +# ## processes with otherwise matching tags this setting should be enabled to +# ## ensure each process has a unique identity. +# ## +# ## Enabling this option may result in a large number of series, especially +# ## when processes have a short lifetime. +# # pid_tag = false +# +# ## Method to use when finding process IDs. Can be one of 'pgrep', or +# ## 'native'. The pgrep finder calls the pgrep executable in the PATH while +# ## the native finder performs the search directly in a manor dependent on the +# ## platform. Default is 'pgrep' +# # pid_finder = "pgrep" + + +# # Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). +# [[inputs.proxmox]] +# ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. 
+# base_url = "https://localhost:8006/api2/json" +# api_token = "USER@REALM!TOKENID=UUID" +# ## Node name, defaults to OS hostname +# # node_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = false +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" + + +# # Reads last_run_summary.yaml file and converts to measurements +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Reads metrics from RabbitMQ servers via the Management Plugin +# [[inputs.rabbitmq]] +# ## Management Plugin url. (default: http://localhost:15672) +# # url = "http://localhost:15672" +# ## Tag added to rabbitmq_overview series; deprecated: use tags +# # name = "rmq-server-1" +# ## Credentials +# # username = "guest" +# # password = "guest" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to gather as the rabbitmq_node measurement. If not +# ## specified, metrics for all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] +# +# ## A list of queues to gather as the rabbitmq_queue measurement. If not +# ## specified, metrics for all queues are gathered. +# ## Deprecated in 1.6: Use queue_name_include instead. +# # queues = ["telegraf"] +# +# ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not +# ## specified, metrics for all exchanges are gathered. +# # exchanges = ["telegraf"] +# +# ## Metrics to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all metrics +# ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" +# # metric_include = [] +# # metric_exclude = [] +# +# ## Queues to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all queues +# # queue_name_include = [] +# # queue_name_exclude = [] +# +# ## Federation upstreams to include and exclude specified as an array of glob +# ## pattern strings. Federation links can also be limited by the queue and +# ## exchange filters. +# # federation_upstream_include = [] +# # federation_upstream_exclude = [] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. +# urls = ["http://localhost:8080/_raindrops"] + + +# # Reads metrics from RavenDB servers via the Monitoring Endpoints +# [[inputs.ravendb]] +# ## Node URL and port that RavenDB is listening on. 
+# ## By default, it attempts to connect securely over HTTPS; however, if you
+# ## are running a local insecure development cluster you can use
+# ## HTTP via a URL like "http://localhost:8080"
+# url = "https://localhost:4433"
+#
+# ## RavenDB X509 client certificate setup
+# # tls_cert = "/etc/telegraf/raven.crt"
+# # tls_key = "/etc/telegraf/raven.key"
+#
+# ## Optional request timeout
+# ##
+# ## Timeout, specifies the amount of time to wait
+# ## for a server's response headers after fully writing the request and
+# ## the time limit for requests made by this client
+# # timeout = "5s"
+#
+# ## List of statistics which are collected
+# # At least one is required
+# # Allowed values: server, databases, indexes, collections
+# #
+# # stats_include = ["server", "databases", "indexes", "collections"]
+#
+# ## List of databases where database stats are collected
+# ## If empty, all databases are included
+# # db_stats_dbs = []
+#
+# ## List of databases where index stats are collected
+# ## If empty, indexes from all databases are included
+# # index_stats_dbs = []
+#
+# ## List of databases where collection stats are collected
+# ## If empty, collections from all databases are included
+# # collection_stats_dbs = []
+
+
+# # Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs
+# [[inputs.redfish]]
+# ## Redfish API Base URL.
+# address = "https://127.0.0.1:5000"
+#
+# ## Credentials for the Redfish API.
+# username = "root"
+# password = "password123456"
+#
+# ## System Id to collect data for in Redfish APIs.
+# computer_system_id="System.Embedded.1"
+#
+# ## Amount of time allowed to complete the HTTP request
+# # timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many redis-sentinel servers
+# [[inputs.redis_sentinel]]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## tcp://localhost:26379
+# ## tcp://:password@192.168.99.100
+# ## unix:///var/run/redis-sentinel.sock
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 26379 is used
+# # servers = ["tcp://localhost:26379"]
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Read metrics from one or many RethinkDB servers
+# [[inputs.rethinkdb]]
+# ## An array of URIs to gather stats about. Specify an ip or hostname
+# ## with optional port and password, e.g.
+# ## rethinkdb://user:auth_key@10.10.3.30:28105,
+# ## rethinkdb://10.10.3.33:18832,
+# ## 10.0.0.1:10000, etc.
+# servers = ["127.0.0.1:28015"]
+#
+# ## If you use a version of rethinkdb > 2.3.0 with username/password authorization,
+# ## the protocol has to be named "rethinkdb2" - it will use the 1_0 handshake.
+# # servers = ["rethinkdb2://username:password@127.0.0.1:28015"]
+#
+# ## If you use older versions of rethinkdb (<2.2) with auth_key, the protocol
+# ## has to be named "rethinkdb".
+# # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Read API usage and limits for a Salesforce organisation +# [[inputs.salesforce]] +# ## specify your credentials +# ## +# username = "your_username" +# password = "your_password" +# ## +# ## (optional) security token +# # security_token = "your_security_token" +# ## +# ## (optional) environment type (sandbox or production) +# ## default is: production +# ## +# # environment = "production" +# ## +# ## (optional) API version (default: "39.0") +# ## +# # version = "39.0" + + +# # Read metrics from storage devices supporting S.M.A.R.T. +# [[inputs.smart]] +# ## Optionally specify the path to the smartctl executable +# # path_smartctl = "/usr/bin/smartctl" +# +# ## Optionally specify the path to the nvme-cli executable +# # path_nvme = "/usr/bin/nvme" +# +# ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case +# ## ["auto-on"] - automatically find and enable additional vendor specific disk info +# ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info +# # enable_extensions = ["auto-on"] +# +# ## On most platforms used cli utilities requires root access. +# ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. +# ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli +# ## without a password. +# # use_sudo = false +# +# ## Skip checking disks in this power mode. Defaults to +# ## "standby" to not wake up disks that have stopped rotating. +# ## See --nocheck in the man pages for smartctl. +# ## smartctl version 5.41 and 5.42 have faulty detection of +# ## power mode and might require changing this value to +# ## "never" depending on your disks. +# # nocheck = "standby" +# +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. +# # attributes = false +# +# ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. +# # excludes = [ "/dev/pass6" ] +# +# ## Optionally specify devices and device type, if unset +# ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done +# ## and all found will be included except for the excluded in excludes. +# # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] +# +# ## Timeout for the cli command to complete. +# # timeout = "30s" +# +# ## Optionally call smartctl and nvme-cli with a specific concurrency policy. +# ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. +# ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of +# ## SMART data - one individual array drive at the time. In such case please set this configuration option +# ## to "sequential" to get readings for all drives. +# ## valid options: concurrent, sequential +# # read_method = "concurrent" + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# ## Agent addresses to retrieve values from. +# ## format: agents = [":"] +# ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. 
+# ## default is udp
+# ## port: optional
+# ## example: agents = ["udp://127.0.0.1:161"]
+# ## agents = ["tcp://127.0.0.1:161"]
+# ## agents = ["udp4://v4only-snmp-agent"]
+# agents = ["udp://127.0.0.1:161"]
+#
+# ## Timeout for each request.
+# # timeout = "5s"
+#
+# ## SNMP version; can be 1, 2, or 3.
+# # version = 2
+#
+# ## Path to MIB files
+# ## Used by the gosmi translator.
+# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable
+# # path = ["/usr/share/snmp/mibs"]
+#
+# ## SNMP community string.
+# # community = "public"
+#
+# ## Agent host tag
+# # agent_host_tag = "agent_host"
+#
+# ## Number of retries to attempt.
+# # retries = 3
+#
+# ## The GETBULK max-repetitions parameter.
+# # max_repetitions = 10
+#
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Context Name.
+# # context_name = ""
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "".
+# ### Protocols "AES192", "AES192C", "AES256", and "AES256C" require the underlying net-snmp tools
+# ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html)
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
+#
+# ## Add fields and tables defining the variables you wish to collect. This
+# ## example collects the system uptime and interface variables. Reference the
+# ## full plugin documentation for configuration details.
+# [[inputs.snmp.field]]
+# oid = "RFC1213-MIB::sysUpTime.0"
+# name = "uptime"
+#
+# [[inputs.snmp.field]]
+# oid = "RFC1213-MIB::sysName.0"
+# name = "source"
+# is_tag = true
+#
+# [[inputs.snmp.table]]
+# oid = "IF-MIB::ifTable"
+# name = "interface"
+# inherit_tags = ["source"]
+#
+# [[inputs.snmp.table.field]]
+# oid = "IF-MIB::ifDescr"
+# name = "ifDescr"
+# is_tag = true
+
+
+# ## DEPRECATED: The 'snmp_legacy' plugin is deprecated in version 1.0.0, use 'inputs.snmp' instead.
+# [[inputs.snmp_legacy]]
+# ## Use 'oids.txt' file to translate oids to names
+# ## To generate 'oids.txt' you need to run:
+# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
+# ## Or if you have another MIB folder with custom MIBs
+# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
+# snmptranslate_file = "/tmp/oids.txt"
+# [[inputs.snmp.host]]
+# address = "192.168.2.2:161"
+# # SNMP community
+# community = "public" # default public
+# # SNMP version (1, 2 or 3)
+# # Version 3 not supported yet
+# version = 2 # default 2
+# # SNMP response timeout
+# timeout = 2.0 # default 2.0
+# # SNMP request retries
+# retries = 2 # default 2
+# # Which get/bulk do you want to collect for this host
+# collect = ["mybulk", "sysservices", "sysdescr"]
+# # Simple list of OIDs to get, in addition to "collect"
+# get_oids = []
+# [[inputs.snmp.host]]
+# address = "192.168.2.3:161"
+# community = "public"
+# version = 2
+# timeout = 2.0
+# retries = 2
+# collect = ["mybulk"]
+# get_oids = [
+# "ifNumber",
+# ".1.3.6.1.2.1.1.3.0",
+# ]
+# [[inputs.snmp.get]]
+# name = "ifnumber"
+# oid = "ifNumber"
+# [[inputs.snmp.get]]
+# name = "interface_speed"
+# oid = "ifSpeed"
+# instance = "0"
+# [[inputs.snmp.get]]
+# name = "sysuptime"
+# oid = ".1.3.6.1.2.1.1.3.0"
+# unit = "second"
+# [[inputs.snmp.bulk]]
+# name = "mybulk"
+# max_repetition = 127
+# oid = ".1.3.6.1.2.1.1"
+# [[inputs.snmp.bulk]]
+# name = "ifoutoctets"
+# max_repetition = 127
+# oid = "ifOutOctets"
+# [[inputs.snmp.host]]
+# address = "192.168.2.13:161"
+# #address = "127.0.0.1:161"
+# community = "public"
+# version = 2
+# timeout = 2.0
+# retries = 2
+# #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
+# collect = ["sysuptime" ]
+# [[inputs.snmp.host.table]]
+# name = "iftable3"
+# include_instances = ["enp5s0", "eth1"]
+# # SNMP TABLEs
+# # table with neither mapping nor subtables
+# [[inputs.snmp.table]]
+# name = "iftable1"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # table without mapping but with subtables
+# [[inputs.snmp.table]]
+# name = "iftable2"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
+# # table with mapping but without subtables
+# [[inputs.snmp.table]]
+# name = "iftable3"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty, get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty, get all subtables
+# # table with both mapping and subtables
+# [[inputs.snmp.table]]
+# name = "iftable4"
+# oid = ".1.3.6.1.2.1.31.1.1.1"
+# # if empty, get all instances
+# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
+# # if empty, get all subtables
+# # sub_tables do not have to be "real subtables"
+# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]
+
+
+# # Read stats from one or more Solr servers or cores
+# [[inputs.solr]]
+# ## specify a list of one or more Solr servers
+# servers = ["http://localhost:8983"]
+# ##
+# ## specify a list of one or more Solr cores (default - all)
+# # cores = ["main"]
+# ##
+# ## Optional HTTP Basic Auth Credentials
+# # username = "username"
+# # password = "pa$$word"
+
+
+# # Gather timeseries from Google Cloud Platform v3 monitoring API
+# [[inputs.stackdriver]]
+# ## GCP Project
+# project = "erudite-bloom-151019"
+#
+# ## Include timeseries that start with the given metric type.
+# metric_type_prefix_include = [
+# "compute.googleapis.com/",
+# ]
+#
+# ## Exclude timeseries that start with the given metric type.
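+# ## example (hypothetical prefix, shown for illustration only):
+# ## metric_type_prefix_exclude = ["agent.googleapis.com/"]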
+# # metric_type_prefix_exclude = []
+#
+# ## Most metrics are updated no more than once per minute; it is recommended
+# ## to override the agent level interval with a value of 1m or greater.
+# interval = "1m"
+#
+# ## Maximum number of API calls to make per second. The quota for accounts
+# ## varies; it can be viewed on the API dashboard:
+# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+# # rate_limit = 14
+#
+# ## The delay and window options control the number of points selected on
+# ## each gather. When set, metrics are gathered between:
+# ## start: now() - delay - window
+# ## end: now() - delay
+# #
+# ## Collection delay; if set too low metrics may not yet be available.
+# # delay = "5m"
+# #
+# ## If unset, the window will start at 1m and be updated dynamically to span
+# ## the time between calls (approximately the length of the plugin interval).
+# # window = "1m"
+#
+# ## TTL for cached list of metric types. This is the maximum amount of time
+# ## it may take to discover new metrics.
+# # cache_ttl = "1h"
+#
+# ## If true, raw bucket counts are collected for distribution value types.
+# ## For a more lightweight collection, you may wish to disable and use
+# ## distribution_aggregation_aligners instead.
+# # gather_raw_distribution_buckets = true
+#
+# ## Aggregate functions to be used for metrics whose value type is
+# ## distribution. These aggregate values are recorded in addition to raw
+# ## bucket counts, if they are enabled.
+# ##
+# ## For a list of aligner strings see:
+# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+# # distribution_aggregation_aligners = [
+# # "ALIGN_PERCENTILE_99",
+# # "ALIGN_PERCENTILE_95",
+# # "ALIGN_PERCENTILE_50",
+# # ]
+#
+# ## Filters can be added to reduce the number of time series matched. All
+# ## functions are supported: starts_with, ends_with, has_substring, and
+# ## one_of. Only the '=' operator is supported.
+# ##
+# ## The logical operators when combining filters are defined statically using
+# ## the following values:
+# ## filter ::= <resource_labels> {AND <metric_labels>}
+# ## resource_labels ::= <resource_label> {OR <resource_label>}
+# ## metric_labels ::= <metric_label> {OR <metric_label>}
+# ##
+# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+# #
+# ## Resource labels refine the time series selection with the following expression:
+# ## resource.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.resource_labels]]
+# # key = "instance_name"
+# # value = 'starts_with("localhost")'
+# #
+# ## Metric labels refine the time series selection with the following expression:
+# ## metric.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.metric_labels]]
+# # key = "device_name"
+# # value = 'one_of("sda", "sdb")'
+
+
+# # Get synproxy counter statistics from procfs
+# [[inputs.synproxy]]
+# # no configuration
+
+
+# # Gather systemd units state
+# [[inputs.systemd_units]]
+# ## Set timeout for systemctl execution
+# # timeout = "1s"
+# #
+# ## Filter for a specific unit type, default is "service", other possible
+# ## values are "socket", "target", "device", "mount", "automount", "swap",
+# ## "timer", "path", "slice" and "scope":
+# # unittype = "service"
+# #
+# ## Filter for a specific pattern, default is "" (i.e. all), other possible
+# ## values are valid patterns for systemctl, e.g.
"a*" for all units with +# ## names starting with "a" +# # pattern = "" +# ## pattern = "telegraf* influxdb*" +# ## pattern = "a*" + + +# # Reads metrics from a Teamspeak 3 Server via ServerQuery +# [[inputs.teamspeak]] +# ## Server address for Teamspeak 3 ServerQuery +# # server = "127.0.0.1:10011" +# ## Username for ServerQuery +# username = "serverqueryuser" +# ## Password for ServerQuery +# password = "secret" +# ## Nickname of the ServerQuery client +# nickname = "telegraf" +# ## Array of virtual servers +# # virtual_servers = [1] + + +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# ## An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# ## HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Gather metrics from the Tomcat server status page. +# [[inputs.tomcat]] +# ## URL of the Tomcat server status +# # url = "http://127.0.0.1:8080/manager/status/all?XML=true" +# +# ## HTTP Basic Auth Credentials +# # username = "tomcat" +# # password = "s3cret" +# +# ## Request timeout +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from the Unbound DNS resolver +# [[inputs.unbound]] +# ## Address of server to connect to, read from unbound conf default, optionally ':port' +# ## Will lookup IP if given a hostname +# server = "127.0.0.1:8953" +# +# ## If running as a restricted user you can prepend sudo for additional access: +# # use_sudo = false +# +# ## The default location of the unbound-control binary can be overridden with: +# # binary = "/usr/sbin/unbound-control" +# +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# +# ## The default timeout of 1s can be overridden with: +# # timeout = "1s" +# +# ## When set to true, thread metrics are tagged with the thread id. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# thread_as_tag = false + + +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# ## List with urls of uWSGI Stats servers. Url must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timeout +# # timeout = "5s" + + +# # Read metrics from the Vault API +# [[inputs.vault]] +# ## URL for the Vault agent +# # url = "http://127.0.0.1:8200" +# +# ## Use Vault token for authorization. +# ## Vault token configuration is mandatory. 
+# ## If both are empty or both are set, an error is thrown.
+# # token_file = "/path/to/auth/token"
+# ## OR
+# token = "s.CDDrgg5zPv5ssI0Z2P4qxJj2"
+#
+# ## Set response_timeout (default 5 seconds)
+# # response_timeout = "5s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+
+
+# # Input plugin to collect Windows Event Log messages
+# [[inputs.win_eventlog]]
+# ## Telegraf should have Administrator permissions to subscribe for some Windows Events channels
+# ## (System log, for example)
+#
+# ## LCID (Locale ID) for event rendering
+# ## 1033 to force English language
+# ## 0 to use default Windows locale
+# # locale = 0
+#
+# ## Name of eventlog, used only if xpath_query is empty
+# ## Example: "Application"
+# # eventlog_name = ""
+#
+# ## xpath_query can be in defined short form like "Event/System[EventID=999]"
+# ## or you can form a XML Query. Refer to the Consuming Events article:
+# ## https://docs.microsoft.com/en-us/windows/win32/wes/consuming-events
+# ## XML query is the recommended form, because it is the most flexible
+# ## You can create or debug XML Query by creating Custom View in Windows Event Viewer
+# ## and then copying resulting XML here
+# xpath_query = '''
+# <QueryList>
+#   <Query Id="0" Path="Security">
+#     <Select Path="Security">*</Select>
+#     <Suppress>*[System[( (EventID &gt;= 5152 and EventID &lt;= 5158) or EventID=5379 or EventID=4672)]]</Suppress>
+#   </Query>
+#   <Query Id="1" Path="Application">
+#     <Select Path="Application">*[System[(Level &lt; 4)]]</Select>
+#   </Query>
+#   <Query Id="2" Path="Windows PowerShell">
+#     <Select Path="Windows PowerShell">*[System[(Level &lt; 4)]]</Select>
+#   </Query>
+#   <Query Id="3" Path="System">
+#     <Select Path="System">*</Select>
+#   </Query>
+#   <Query Id="4" Path="Setup">
+#     <Select Path="Setup">*</Select>
+#   </Query>
+# </QueryList>
+# '''
+#
+# ## System field names:
+# ## "Source", "EventID", "Version", "Level", "Task", "Opcode", "Keywords", "TimeCreated",
+# ## "EventRecordID", "ActivityID", "RelatedActivityID", "ProcessID", "ThreadID", "ProcessName",
+# ## "Channel", "Computer", "UserID", "UserName", "Message", "LevelText", "TaskText", "OpcodeText"
+#
+# ## In addition to System, Data fields can be unrolled from additional XML nodes in the event.
+# ## Human-readable representation of those nodes is formatted into the event Message field,
+# ## but XML is more machine-parsable
+#
+# # Process UserData XML to fields, if this node exists in Event XML
+# process_userdata = true
+#
+# # Process EventData XML to fields, if this node exists in Event XML
+# process_eventdata = true
+#
+# ## Separator character to use for unrolled XML Data field names
+# separator = "_"
+#
+# ## Get only the first line of the Message field. For most events the first line is usually more than enough
+# only_first_line_of_message = true
+#
+# ## Parse timestamp from TimeCreated.SystemTime event field.
+# ## Will default to the current time of telegraf processing on parsing error or if set to false
+# timestamp_from_event = true
+#
+# ## Fields to include as tags. Globbing supported ("Level*" for both "Level" and "LevelText")
+# event_tags = ["Source", "EventID", "Level", "LevelText", "Task", "TaskText", "Opcode", "OpcodeText", "Keywords", "Channel", "Computer"]
+#
+# ## Default list of fields to send. All fields are sent by default. Globbing supported
+# event_fields = ["*"]
+#
+# ## Fields to exclude. Also applied to data fields. Globbing supported
+# exclude_fields = ["TimeCreated", "Binary", "Data_Address*"]
+#
+# ## Skip those tags or fields if their value is empty or equals zero. Globbing supported
+# exclude_empty = ["*ActivityID", "UserID"]
+
+
+# # # Input plugin to collect counterPath Performance Counters on Windows operating systems
+# # [[inputs.win_perf_counters]]
+# # ## By default this plugin returns basic CPU and Disk statistics.
+# # ## See the README file for more examples.
+# # ## Uncomment examples below or write your own as you see fit.
If the system
+# # ## being polled for data does not have the Object at startup of the Telegraf
+# # ## agent, it will not be gathered.
+# # ## Settings:
+# # # PrintValid = false # Print All matching performance counters
+# # # Whether to request a timestamp along with the PerfCounter data or just use the current time
+# # # UsePerfCounterTime=true
+# # # If the UseWildcardsExpansion param is set to true, wildcards (partial wildcards in instance names and wildcards in counters names) in configured counter paths will be expanded
+# # # and in case of localized Windows, counter paths will also be localized. It also returns instance indexes in instance names.
+# # # If false, wildcards (not partial) in instance names will still be expanded, but instance indexes will not be returned in instance names.
+# # #UseWildcardsExpansion = false
+# # # When running on a localized version of Windows and with UseWildcardsExpansion = true, Windows will
+# # # localize object and counter names. When LocalizeWildcardsExpansion = false, use the names in object.Counters instead
+# # # of the localized names. Only Instances can have wildcards in this case. ObjectName and Counters must not have wildcards when this
+# # # setting is false.
+# # #LocalizeWildcardsExpansion = true
+# # # Period after which counters will be reread from configuration and wildcards in counter paths expanded
+# # CountersRefreshInterval="1m"
+# # ## Accepts a list of PDH error codes which are defined in pdh.go; if such an error is encountered it will be ignored
+# # ## For example, you can provide "PDH_NO_DATA" to ignore performance counters with no instances
+# # ## By default no errors are ignored
+# # ## You can find the list here: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/win_perf_counters/pdh.go
+# # ## e.g.: IgnoredErrors = ["PDH_NO_DATA"]
+# # # IgnoredErrors = []
+# #
+# # [[inputs.win_perf_counters.object]]
+# # # Processor usage, alternative to native; reports on a per-core basis.
+# # ObjectName = "Processor"
+# # Instances = ["*"]
+# # Counters = [
+# # "% Idle Time",
+# # "% Interrupt Time",
+# # "% Privileged Time",
+# # "% User Time",
+# # "% Processor Time",
+# # "% DPC Time",
+# # ]
+# # Measurement = "win_cpu"
+# # # Set to true to include _Total instance when querying for all (*).
+# # # IncludeTotal=false
+# # # Print out when the performance counter is missing from object, counter or instance.
+# # # WarnOnMissing = false
+# # # Gather raw values instead of formatted. Raw values are stored in the field name with the "_Raw" suffix, e.g. "Disk_Read_Bytes_sec_Raw".
+# # # UseRawValues = true +# # +# # [[inputs.win_perf_counters.object]] +# # # Disk times and queues +# # ObjectName = "LogicalDisk" +# # Instances = ["*"] +# # Counters = [ +# # "% Idle Time", +# # "% Disk Time", +# # "% Disk Read Time", +# # "% Disk Write Time", +# # "% User Time", +# # "% Free Space", +# # "Current Disk Queue Length", +# # "Free Megabytes", +# # ] +# # Measurement = "win_disk" +# # +# # [[inputs.win_perf_counters.object]] +# # ObjectName = "PhysicalDisk" +# # Instances = ["*"] +# # Counters = [ +# # "Disk Read Bytes/sec", +# # "Disk Write Bytes/sec", +# # "Current Disk Queue Length", +# # "Disk Reads/sec", +# # "Disk Writes/sec", +# # "% Disk Time", +# # "% Disk Read Time", +# # "% Disk Write Time", +# # ] +# # Measurement = "win_diskio" +# # +# # [[inputs.win_perf_counters.object]] +# # ObjectName = "Network Interface" +# # Instances = ["*"] +# # Counters = [ +# # "Bytes Received/sec", +# # "Bytes Sent/sec", +# # "Packets Received/sec", +# # "Packets Sent/sec", +# # "Packets Received Discarded", +# # "Packets Outbound Discarded", +# # "Packets Received Errors", +# # "Packets Outbound Errors", +# # ] +# # Measurement = "win_net" +# # +# # +# # [[inputs.win_perf_counters.object]] +# # ObjectName = "System" +# # Counters = [ +# # "Context Switches/sec", +# # "System Calls/sec", +# # "Processor Queue Length", +# # "System Up Time", +# # ] +# # Instances = ["------"] +# # Measurement = "win_system" +# # +# # [[inputs.win_perf_counters.object]] +# # # Example counterPath where the Instance portion must be removed to get data back, +# # # such as from the Memory object. +# # ObjectName = "Memory" +# # Counters = [ +# # "Available Bytes", +# # "Cache Faults/sec", +# # "Demand Zero Faults/sec", +# # "Page Faults/sec", +# # "Pages/sec", +# # "Transition Faults/sec", +# # "Pool Nonpaged Bytes", +# # "Pool Paged Bytes", +# # "Standby Cache Reserve Bytes", +# # "Standby Cache Normal Priority Bytes", +# # "Standby Cache Core Bytes", +# # ] +# # Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. +# # Measurement = "win_mem" +# # +# # [[inputs.win_perf_counters.object]] +# # # Example query where the Instance portion must be removed to get data back, +# # # such as from the Paging File object. +# # ObjectName = "Paging File" +# # Counters = [ +# # "% Usage", +# # ] +# # Instances = ["_Total"] +# # Measurement = "win_swap" + + +# # Input plugin to report Windows services info. +# [[inputs.win_services]] +# ## Names of the services to monitor. Leave empty to monitor all the available services on the host. Globs accepted. Case sensitive. +# service_names = [ +# "LanmanServer", +# "TermService", +# "Win*", +# ] +# excluded_service_names = ['WinRM'] # optional, list of service names to exclude + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. 
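+# ## example (illustrative interface names only):
+# ## devices = ["wg0", "wg1"]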
+# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources, support wildcard expands for files +# ## Prefix your entry with 'file://' if you intend to use relative paths +# sources = ["tcp://example.org:443", "https://influxdata.com:443", +# "smtp://mail.localhost:25", "udp://127.0.0.1:4433", +# "/etc/ssl/certs/ssl-cert-snakeoil.pem", +# "/etc/mycerts/*.mydomain.org.pem", "file:///path/to/*.pem"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication). +# ## This is synonymous with tls_server_name, and only one of the two +# ## options may be specified at one time. +# ## example: server_name = "myhost.example.org" +# # server_name = "myhost.example.org" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# # tls_server_name = "myhost.example.org" +# +# ## Set the proxy URL +# # use_proxy = true +# # proxy_url = "http://localhost:8888" + + +# # Gathers Metrics From a Dell EMC XtremIO Storage Array's V3 API +# [[inputs.xtremio]] +# ## XtremIO User Interface Endpoint +# url = "https://xtremio.example.com/" # required +# +# ## Credentials +# username = "user1" +# password = "pass123" +# +# ## Metrics to collect from the XtremIO +# # collectors = ["bbus","clusters","ssds","volumes","xms"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, pools and datasets +# [[inputs.zfs]] +# ## ZFS kstat path. Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## Override the stats list using the kstatMetrics array: +# ## For FreeBSD, the default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# ## For Linux, the default is: +# # kstatMetrics = ["abdstats", "arcstats", "dnodestats", "dbufcachestats", +# # "dmu_tx", "fm", "vdev_mirror_stats", "zfetchstats", "zil"] +# +# ## By default, don't gather zpool stats +# # poolMetrics = false +# +# ## By default, don't gather dataset stats +# ## On FreeBSD, if the user has enabled listsnapshots in the pool property, +# ## telegraf may not be able to correctly parse the output. +# # datasetMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 2181 is used +# servers = [":2181"] +# +# ## Timeout for metric collections from all servers. Minimum timeout is "1s". 
+# # timeout = "5s" +# +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## If false, skip chain & host verification +# # insecure_skip_verify = true + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + + +# # Pull Metric Statistics from Aliyun CMS +# [[inputs.aliyuncms]] +# ## Aliyun Credentials +# ## Credentials are loaded in the following order +# ## 1) Ram RoleArn credential +# ## 2) AccessKey STS token credential +# ## 3) AccessKey credential +# ## 4) Ecs Ram Role credential +# ## 5) RSA keypair credential +# ## 6) Environment variables credential +# ## 7) Instance metadata credential +# +# # access_key_id = "" +# # access_key_secret = "" +# # access_key_sts_token = "" +# # role_arn = "" +# # role_session_name = "" +# # private_key = "" +# # public_key_id = "" +# # role_name = "" +# +# ## Specify the ali cloud region list to be queried for metrics and objects discovery +# ## If not set, all supported regions (see below) would be covered, it can provide a significant load on API, so the recommendation here +# ## is to limit the list as much as possible. Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm +# ## Default supported regions are: +# ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen, +# ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5, +# ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1 +# ## +# ## From discovery perspective it set the scope for object discovery, the discovered info can be used to enrich +# ## the metrics with objects attributes/tags. Discovery is supported not for all projects (if not supported, then +# ## it will be reported on the start - for example for 'acs_cdn' project: +# ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' ) +# ## Currently, discovery supported for the following projects: +# ## - acs_ecs_dashboard +# ## - acs_rds_dashboard +# ## - acs_slb_dashboard +# ## - acs_vpc_eip +# regions = ["cn-hongkong"] +# +# # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute, 5 minute, or larger intervals. +# # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Aliyun OpenAPI +# # and will not be collected by Telegraf. 
+# #
+# ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
+# period = "5m"
+#
+# ## Collection Delay (required - must account for metrics availability via AliyunCMS API)
+# delay = "1m"
+#
+# ## Recommended: use a metric 'interval' that is a multiple of 'period' to avoid
+# ## gaps or overlap in pulled data
+# interval = "5m"
+#
+# ## Metric Statistic Project (required)
+# project = "acs_slb_dashboard"
+#
+# ## Maximum requests per second, default value is 200
+# ratelimit = 200
+#
+# ## How often the discovery API call is executed (default 1m)
+# #discovery_interval = "1m"
+#
+# ## Metrics to Pull (Required)
+# [[inputs.aliyuncms.metrics]]
+# ## Metrics names to be requested,
+# ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# names = ["InstanceActiveConnection", "InstanceNewConnection"]
+#
+# ## Dimension filters for the Metric (these are optional).
+# ## This allows getting an additional metric dimension. If a dimension is not specified it can be returned,
+# ## or the data can be aggregated - it depends on the particular metric; you can find details here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+# ##
+# ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled).
+# ## Values specified here will be added to the list of discovered objects.
+# ## You can specify either a single dimension:
+# #dimensions = '{"instanceId": "p-example"}'
+#
+# ## Or you can specify several dimensions at once:
+# #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+#
+# ## Enrichment tags, can be added from discovery (if supported)
+# ## Notation is <measurement_tag_name>:<JMES query path (https://jmespath.org/)>
+# ## To figure out which fields are available, consult the Describe API per project.
+# ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+# #tag_query_path = [
+# # "address:Address",
+# # "name:LoadBalancerName",
+# # "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+# # ]
+# ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId.
+#
+# ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metrics without discovery
+# ## data will be emitted, otherwise dropped. This can help when debugging dimension filters, or with partial coverage
+# ## of discovery scope vs monitoring scope
+# #allow_dps_without_discovery = false
+
+
+# # AMQP consumer plugin
+# [[inputs.amqp_consumer]]
+# ## Brokers to consume from. If multiple brokers are specified a random broker
+# ## will be selected anytime a connection is established. This can be
+# ## helpful for load balancing when not using a dedicated load balancer.
+# brokers = ["amqp://localhost:5672/influxdb"]
+#
+# ## Authentication credentials for the PLAIN auth_method.
+# # username = ""
+# # password = ""
+#
+# ## Name of the exchange to declare. If unset, no exchange will be declared.
+# exchange = "telegraf"
+#
+# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
+# # exchange_type = "topic"
+#
+# ## If true, exchange will be passively declared.
+# # exchange_passive = false
+#
+# ## Exchange durability can be either "transient" or "durable".
+# # exchange_durability = "durable"
+#
+# ## Additional exchange arguments.
+# # exchange_arguments = { }
+# # exchange_arguments = {"hash_property" = "timestamp"}
+#
+# ## AMQP queue name.
+# queue = "telegraf"
+#
+# ## AMQP queue durability can be "transient" or "durable".
+# queue_durability = "durable"
+#
+# ## If true, queue will be passively declared.
+# # queue_passive = false
+#
+# ## A binding between the exchange and queue using this binding key is
+# ## created. If unset, no binding is created.
+# binding_key = "#"
+#
+# ## Maximum number of messages the server should give to the worker.
+# # prefetch_count = 50
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Auth method. PLAIN and EXTERNAL are supported
+# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+# ## described here: https://www.rabbitmq.com/plugins.html
+# # auth_method = "PLAIN"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Content encoding for message payloads, can be set to "gzip", or
+# ## "identity" to apply no encoding.
+# # content_encoding = "identity"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# ## DEPRECATED: The 'cassandra' plugin is deprecated in version 1.7.0, use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead.
+# # Read Cassandra metrics through Jolokia
+# [[inputs.cassandra]]
+# context = "/jolokia/read"
+# ## List of cassandra servers exposing jolokia read service
+# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
+# ## List of metrics collected on above servers
+# ## Each metric consists of a jmx path.
+# ## This will collect all heap memory usage metrics from the jvm and
+# ## ReadLatency metrics for all keyspaces and tables.
+# ## "type=Table" in the query works with Cassandra 3.0. Older versions might
+# ## need to use "type=ColumnFamily"
+# metrics = [
+# "/java.lang:type=Memory/HeapMemoryUsage",
+# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
+# ]
+
+
+# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms
+# [[inputs.cisco_telemetry_mdt]]
+# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when
+# ## using the grpc transport.
+# transport = "grpc"
+#
+# ## Address and port to host telemetry listener
+# service_address = ":57000"
+#
+# ## Grpc Maximum Message Size, default is 4MB; increase the size if needed.
+# max_msg_size = 4000000
+#
+# ## Enable TLS; grpc transport only.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Enable TLS client authentication and define allowed CA certificates; grpc
+# ## transport only.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags
+# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"]
+#
+# ## Define aliases to map telemetry encoding paths to simple measurement names
+# [inputs.cisco_telemetry_mdt.aliases]
+# ifstats = "ietf-interfaces:interfaces-state/interface/statistics"
+# ## Define property transformations; please refer to the README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details.
+# [inputs.cisco_telemetry_mdt.dmes]
+# # Global property transformation.
+# # prop1 = "uint64 to int"
+# # prop2 = "uint64 to string"
+# # prop3 = "string to uint64"
+# # prop4 = "string to int64"
+# # prop5 = "string to float64"
+# # auto-prop-xfrom = "auto-float-xfrom" # Transform any string property containing a float number to type float64
+# # Per-path property transformation; Name is the telemetry configuration under sensor-group, path configuration "WORD Distinguished Name"
+# # Per-path configuration is better as it avoids property type collision issues.
+# # dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}'
+# # dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}'
+# # dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}'
+
+
+# # Read metrics from one or many ClickHouse servers
+# [[inputs.clickhouse]]
+# ## Username for authorization on ClickHouse server
+# ## example: username = "default"
+# username = "default"
+#
+# ## Password for authorization on ClickHouse server
+# ## example: password = "super_secret"
+#
+# ## HTTP(s) timeout while getting metrics values
+# ## The timeout includes connection time, any redirects, and reading the response body.
+# ## example: timeout = "1s"
+# # timeout = "5s"
+#
+# ## List of servers for metrics scraping
+# ## metrics are scraped via the HTTP(s) clickhouse interface
+# ## https://clickhouse.tech/docs/en/interfaces/http/
+# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+# servers = ["http://127.0.0.1:8123"]
+#
+# ## If "auto_discovery" is "true", the plugin tries to connect to all servers available in the cluster
+# ## using the same credentials described in the "username" and "password" parameters,
+# ## and gets the server hostname list from the "system.clusters" table
+# ## see
+# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+# ## example: auto_discovery = false
+# # auto_discovery = true
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present, a "WHERE cluster IN (...)" filter will be applied
+# ## please use only full cluster names here; regexp and glob filters are not allowed
+# ## for "/etc/clickhouse-server/config.d/remote.xml"
+# ## <remote_servers>
+# ##   <my-own-cluster>
+# ##     <shard>
+# ##       <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+# ##       <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+# ##     </shard>
+# ##     <shard>
+# ##       <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+# ##       <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+# ##     </shard>
+# ##   </my-own-cluster>
+# ## </remote_servers>
+# ##
+# ## example: cluster_include = ["my-own-cluster"]
+# # cluster_include = []
+#
+# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+# ## when this filter is present, a "WHERE cluster NOT IN (...)" filter will be applied
+# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+# # cluster_exclude = []
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from Google PubSub
+# [[inputs.cloud_pubsub]]
+# ## Required. Name of Google Cloud Platform (GCP) Project that owns
+# ## the given PubSub subscription.
+# project = "my-project"
+#
+# ## Required. Name of PubSub subscription to ingest metrics from.
+# subscription = "my-subscription"
+#
+# ## Required. Data format to consume.
+# ## Each data format has its own unique set of configuration options.
+# ## Read more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
+# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
+# ## Application Default Credentials, which is preferred.
+# # credentials_file = "path/to/my/creds.json"
+#
+# ## Optional. Number of seconds to wait before attempting to restart the
+# ## PubSub subscription receiver after an unexpected error.
+# ## If the streaming pull for a PubSub Subscription fails (receiver),
+# ## the agent attempts to restart receiving messages after this many seconds.
+# # retry_delay_seconds = 5
+#
+# ## Optional. Maximum byte length of a message to consume.
+# ## Larger messages are dropped with an error. If less than 0 or unspecified,
+# ## treated as no limit.
+# # max_message_len = 1000000
+#
+# ## Optional.
Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## The following are optional Subscription ReceiveSettings in PubSub.
+# ## Read more about these values:
+# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
+#
+# ## Optional. Maximum number of seconds for which a PubSub subscription
+# ## should auto-extend the PubSub ACK deadline for each message. If less than
+# ## 0, auto-extension is disabled.
+# # max_extension = 0
+#
+# ## Optional. Maximum number of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_messages = 0
+#
+# ## Optional. Maximum size in bytes of unprocessed messages in PubSub
+# ## (unacknowledged but not yet expired in PubSub).
+# ## A value of 0 is treated as the default PubSub value.
+# ## Negative values will be treated as unlimited.
+# # max_outstanding_bytes = 0
+#
+# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
+# ## to pull messages from PubSub concurrently. This limit applies to each
+# ## subscription separately and is treated as the PubSub default if less than
+# ## 1. Note this setting does not limit the number of messages that can be
+# ## processed concurrently (use "max_outstanding_messages" instead).
+# # max_receiver_go_routines = 0
+#
+# ## Optional. If true, Telegraf will attempt to base64 decode the
+# ## PubSub message data before parsing. Many GCP services that
+# ## output JSON to Google PubSub base64-encode the JSON payload.
+# # base64_data = false
+
+
+# # Google Cloud Pub/Sub Push HTTP listener
+# [[inputs.cloud_pubsub_push]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8080"
+#
+# ## Application secret to verify messages originate from Cloud Pub/Sub
+# # token = ""
+#
+# ## Path to listen to.
+# # path = "/"
+#
+# ## Maximum duration before timing out read of the request
+# # read_timeout = "10s"
+# ## Maximum duration before timing out write of the response. This should be set to a value
+# ## large enough that you can send at least 'metric_batch_size' number of messages within the
+# ## duration.
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+# # add_meta = false
+#
+# ## Optional. Maximum messages to read from PubSub that have not been written
+# ## to an output. Defaults to 1000.
+# ## For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message contains 10 metrics and the output
+# ## metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
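+# ## Worked illustration of the example above: 100 messages x 10 metrics per
+# ## message = 1000 metrics in flight, i.e. exactly one full output batch.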
+# # max_undelivered_messages = 1000
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # AWS Metric Streams listener
+# [[inputs.cloudwatch_metric_streams]]
+# ## Address and port to host HTTP listener on
+# service_address = ":443"
+#
+# ## Paths to listen to.
+# # paths = ["/telegraf"]
+#
+# ## maximum duration before timing out read of the request
+# # read_timeout = "10s"
+#
+# ## maximum duration before timing out write of the response
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Optional access key for Firehose security.
+# # access_key = "test-key"
+#
+# ## An optional flag to keep Metric Streams metrics compatible with CloudWatch's API naming
+# # api_compatability = false
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+
+
+# # Ingests files in a directory and then moves them to a target directory.
+# [[inputs.directory_monitor]]
+# ## The directory to monitor and read files from.
+# directory = ""
+# #
+# ## The directory to move finished files to.
+# finished_directory = ""
+# #
+# ## The directory to move files to upon file error.
+# ## If not provided, erroring files will stay in the monitored directory.
+# # error_directory = ""
+# #
+# ## The amount of time a file is allowed to sit in the directory before it is picked up.
+# ## This time can generally be low, but if writing a very large file to the directory is potentially slow,
+# ## set this higher so that the plugin waits until the file is fully copied to the directory.
+# # directory_duration_threshold = "50ms"
+# #
+# ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+# # files_to_monitor = ["^.*\.csv"]
+# #
+# ## A list of files to ignore, if necessary. Supports regex.
+# # files_to_ignore = [".DS_Store"]
+# #
+# ## Maximum lines of the file to process that have not yet been written by the
+# ## output. For best throughput set to the size of the output's metric_buffer_limit.
+# ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
+# # max_buffered_metrics = 10000
+# #
+# ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+# ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary.
+# # file_queue_size = 100000
+# #
+# ## Name a tag containing the name of the file the data was parsed from. Leave empty
+# ## to disable. Be cautious when file name variation is high; this can increase the cardinality
+# ## significantly.
Read more about cardinality here:
+# ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
+# # file_tag = ""
+# #
+# ## Specify if the file can be read completely at once or if it needs to be read line by line (default).
+# ## Possible values: "line-by-line", "at-once"
+# # parse_method = "line-by-line"
+# #
+# ## The dataformat to be read from the files.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read logging output from the Docker engine
+# [[inputs.docker_log]]
+# ## Docker Endpoint
+# ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+# ## To use environment variables (i.e., docker-machine), set endpoint = "ENV"
+# # endpoint = "unix:///var/run/docker.sock"
+#
+# ## When true, container logs are read from the beginning; otherwise
+# ## reading begins at the end of the log.
+# # from_beginning = false
+#
+# ## Timeout for Docker API calls.
+# # timeout = "5s"
+#
+# ## Containers to include and exclude. Globs accepted.
+# ## Note that an empty array for both will include all containers
+# # container_name_include = []
+# # container_name_exclude = []
+#
+# ## Container states to include and exclude. Globs accepted.
+# ## When empty only containers in the "running" state will be captured.
+# # container_state_include = []
+# # container_state_exclude = []
+#
+# ## docker labels to include and exclude as tags. Globs accepted.
+# ## Note that an empty array for both will include all labels as tags
+# # docker_label_include = []
+# # docker_label_exclude = []
+#
+# ## Set the source tag for the metrics to the container ID hostname, e.g. the first 12 chars
+# source_tag = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Azure Event Hubs service input plugin
+# [[inputs.eventhub_consumer]]
+# ## The default behavior is to create a new Event Hub client from environment variables.
+# ## This requires one of the following sets of environment variables to be set:
+# ##
+# ## 1) Expected Environment Variables:
+# ## - "EVENTHUB_CONNECTION_STRING"
+# ##
+# ## 2) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "EVENTHUB_KEY_NAME"
+# ## - "EVENTHUB_KEY_VALUE"
+#
+# ## 3) Expected Environment Variables:
+# ## - "EVENTHUB_NAMESPACE"
+# ## - "EVENTHUB_NAME"
+# ## - "AZURE_TENANT_ID"
+# ## - "AZURE_CLIENT_ID"
+# ## - "AZURE_CLIENT_SECRET"
+#
+# ## Uncommenting the option below will create an Event Hub client based solely on the connection string.
+# ## This can either be the associated environment variable or hard coded directly.
+# ## If this option is uncommented, environment variables will be ignored.
+# ## Connection string should contain EventHubName (EntityPath)
+# # connection_string = ""
+#
+# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+# # persistence_dir = ""
+#
+# ## Change the default consumer group
+# # consumer_group = ""
+#
+# ## By default the event hub receives all messages present on the broker, alternative modes can be set below.
+# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
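+# ## example (illustrative timestamp only): from_timestamp = 2021-01-01T00:00:00Z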
+# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run).
+# # from_timestamp =
+# # latest = true
+#
+# ## Set a custom prefetch count for the receiver(s)
+# # prefetch_count = 1000
+#
+# ## Add an epoch to the receiver(s)
+# # epoch = 0
+#
+# ## Change to set a custom user agent; "telegraf" is used by default
+# # user_agent = "telegraf"
+#
+# ## To consume from a specific partition, set the partition_ids option.
+# ## An empty array will result in receiving from all partitions.
+# # partition_ids = ["0","1"]
+#
+# ## Max undelivered messages
+# # max_undelivered_messages = 1000
+#
+# ## Set either option below to true to use a system property as timestamp.
+# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
+# ## It is recommended to use this setting when the data itself has no timestamp.
+# # enqueued_time_as_ts = true
+# # iot_hub_enqueued_time_as_ts = true
+#
+# ## Tags or fields to create from keys present in the application property bag.
+# ## These could for example be set by message enrichments in Azure IoT Hub.
+# # application_property_tags = []
+# # application_property_fields = []
+#
+# ## Tag or field name to use for metadata
+# ## By default all metadata is disabled
+# # sequence_number_field = "SequenceNumber"
+# # enqueued_time_field = "EnqueuedTime"
+# # offset_field = "Offset"
+# # partition_id_tag = "PartitionID"
+# # partition_key_tag = "PartitionKey"
+# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
+# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
+# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
+# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
+# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Run executable as long-running input plugin
+# [[inputs.execd]]
+# ## One program to run as daemon.
+# ## NOTE: process and each argument should be their own string
+# command = ["telegraf-smartctl", "-d", "/dev/sda"]
+#
+# ## Environment variables
+# ## Array of "key=value" pairs to pass as environment variables
+# ## e.g. "KEY=value", "USERNAME=John Doe",
+# ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs"
+# # environment = []
+#
+# ## Define how the process is signaled on each collection interval.
+# ## Valid values are:
+# ## "none" : Do not signal anything. (Recommended for service inputs)
+# ## The process must output metrics by itself.
+# ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs)
+# ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended)
+# ## "SIGUSR1" : Send a USR1 signal. Not available on Windows.
+# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows.
+# signal = "none"
+#
+# ## Delay before the process is restarted after an unexpected termination
+# restart_delay = "10s"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # gNMI telemetry input plugin
+# [[inputs.gnmi]]
+# ## Address and port of the gNMI GRPC server
+# addresses = ["10.49.234.114:57777"]
+#
+# ## define credentials
+# username = "cisco"
+# password = "cisco"
+#
+# ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes")
+# # encoding = "proto"
+#
+# ## redial in case of failures after
+# redial = "10s"
+#
+# ## enable client-side TLS and define CA to authenticate the device
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # insecure_skip_verify = true
+#
+# ## define client-side TLS certificate & key to authenticate to the device
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## gNMI subscription prefix (optional, can usually be left empty)
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# # origin = ""
+# # prefix = ""
+# # target = ""
+#
+# ## Define additional aliases to map telemetry encoding paths to simple measurement names
+# # [inputs.gnmi.aliases]
+# # ifcounters = "openconfig:/interfaces/interface/state/counters"
+#
+# [[inputs.gnmi.subscription]]
+# ## Name of the measurement that will be emitted
+# name = "ifcounters"
+#
+# ## Origin and path of the subscription
+# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
+# ##
+# ## origin usually refers to a (YANG) data model implemented by the device
+# ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath)
+# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr
+# origin = "openconfig-interfaces"
+# path = "/interfaces/interface/state/counters"
+#
+# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval
+# subscription_mode = "sample"
+# sample_interval = "10s"
+#
+# ## Suppress redundant transmissions when measured values are unchanged
+# # suppress_redundant = false
+#
+# ## If suppression is enabled, send updates at least every X seconds anyway
+# # heartbeat_interval = "60s"
+#
+# #[[inputs.gnmi.subscription]]
+# # name = "descr"
+# # origin = "openconfig-interfaces"
+# # path = "/interfaces/interface/state/description"
+# # subscription_mode = "on_change"
+#
+# ## If tag_only is set, the subscription in question will be utilized to maintain a map of
+# ## tags to apply to other measurements emitted by the plugin, by matching path keys
+# ## All fields from the tag-only subscription will be applied as tags to other readings,
+# ## in the format <name>_<fieldname>.
+# # tag_only = true
+
+
+# ## DEPRECATED: The 'http_listener' plugin is deprecated in version 1.9.0, has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead.
+# # Accept metrics over InfluxDB 1.x HTTP API
+# [[inputs.influxdb_listener]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8186"
+#
+# ## maximum duration before timing out read of the request
+# read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# write_timeout = "10s"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = 0
+#
+# ## Maximum line size allowed to be sent in bytes.
+# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
+# # max_line_size = 0
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional tag name used to store the database name.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## By default this is empty, and the database name will not be recorded.
+# ## If you have a tag that is the same as the one specified below, and supply a database,
+# ## the tag will be overwritten with the database supplied.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+#
+# ## Influx line protocol parser
+# ## 'internal' is the default. 'upstream' is a newer parser that is faster
+# ## and more memory efficient.
+# # parser_type = "internal"
+
+
+# # Generic HTTP write listener
+# [[inputs.http_listener_v2]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8080"
+#
+# ## Paths to listen to.
+# # paths = ["/telegraf"]
+#
+# ## Save path as http_listener_v2_path tag if set to true
+# # path_tag = false
+#
+# ## HTTP methods to accept.
+# # methods = ["POST", "PUT"]
+#
+# ## maximum duration before timing out read of the request
+# # read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# # write_timeout = "10s"
+#
+# ## Maximum allowed http request body size in bytes.
+# ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+# # max_body_size = "500MB"
+#
+# ## Part of the request to consume. Available options are "body" and
+# ## "query".
+# # data_source = "body"
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+#
+# ## Optional setting to map http headers into tags
+# ## If the http header is not present on the request, no corresponding tag will be added
+# ## If multiple instances of the http header are present, only the first value will be used
+# # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
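+#
+# ## e.g. (illustrative) a client can push line protocol to the default path:
+# ##   curl -X POST http://localhost:8080/telegraf --data-binary 'cpu,host=a usage=0.5'
+#
+# ## Data format to consume.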
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Accept metrics over InfluxDB 1.x HTTP API
+# [[inputs.influxdb_listener]]
+# ## Address and port to host HTTP listener on
+# service_address = ":8186"
+#
+# ## maximum duration before timing out read of the request
+# read_timeout = "10s"
+# ## maximum duration before timing out write of the response
+# write_timeout = "10s"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# max_body_size = 0
+#
+# ## Maximum line size allowed to be sent in bytes.
+# ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored
+# # max_line_size = 0
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# tls_cert = "/etc/telegraf/cert.pem"
+# tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional tag name used to store the database name.
+# ## If the write has a database in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## By default this is empty, and the database name will not be recorded.
+# ## If you have a tag that is the same as the one specified below, and supply a database,
+# ## the tag will be overwritten with the database supplied.
+# # database_tag = ""
+#
+# ## If set the retention policy specified in the write query will be added as
+# ## the value of this tag name.
+# # retention_policy_tag = ""
+#
+# ## Optional username and password to accept for HTTP basic authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # basic_username = "foobar"
+# # basic_password = "barfoo"
+#
+# ## Influx line protocol parser
+# ## 'internal' is the default. 'upstream' is a newer parser that is faster
+# ## and more memory efficient.
+# # parser_type = "internal"
+
+
+# # Accept metrics over InfluxDB 2.x HTTP API
+# [[inputs.influxdb_v2_listener]]
+# ## Address and port to host InfluxDB listener on
+# ## (Double check the port. Could be 9999 if using OSS Beta)
+# service_address = ":8086"
+#
+# ## Maximum allowed HTTP request body size in bytes.
+# ## 0 means to use the default of 32MiB.
+# # max_body_size = "32MiB"
+#
+# ## Optional tag to determine the bucket.
+# ## If the write has a bucket in the query string then it will be kept in this tag name.
+# ## This tag can be used in downstream outputs.
+# ## By default this is empty, and the bucket will not be recorded.
+# # bucket_tag = ""
+#
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Add service certificate and key
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+#
+# ## Optional token to accept for HTTP authentication.
+# ## You probably want to make sure you have TLS configured above for this.
+# # token = "some-long-shared-secret-token"
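+#
+# ## Clients write to the 2.x-style endpoint, e.g. (illustrative):
+# ##   POST http://localhost:8086/api/v2/write?bucket=mybucket
+# ##   with header "Authorization: Token some-long-shared-secret-token"
+#
+# ## Influx line protocol parser
+# ## 'internal' is the default. 'upstream' is a newer parser that is faster
+# ## and more memory efficient.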
+# # parser_type = "internal"
+
+
+# # Subscribe and receive OpenConfig Telemetry data using JTI
+# [[inputs.jti_openconfig_telemetry]]
+# ## List of device addresses to collect telemetry from
+# servers = ["localhost:1883"]
+#
+# ## Authentication details. Username and password are required if the device
+# ## expects authentication. Client ID must be unique when connecting from multiple instances
+# ## of telegraf to the same device
+# username = "user"
+# password = "pass"
+# client_id = "telegraf"
+#
+# ## Frequency to get data
+# sample_frequency = "1000ms"
+#
+# ## Sensors to subscribe for
+# ## An identifier for each sensor can be provided in the path by separating it with a space;
+# ## otherwise the sensor path will be used as the identifier.
+# ## When an identifier is used, we can provide a list of space separated sensors.
+# ## A single subscription will be created with all these sensors and data will
+# ## be saved to a measurement with this identifier name
+# sensors = [
+# "/interfaces/",
+# "collection /components/ /lldp",
+# ]
+#
+# ## We allow specifying sensor group level reporting rate. To do this, specify the
+# ## reporting rate in Duration at the beginning of sensor paths / collection
+# ## name. For entries without reporting rate, we use configured sample frequency
+# sensors = [
+# "1000ms customReporting /interfaces /lldp",
+# "2000ms collection /components",
+# "/interfaces",
+# ]
+#
+# ## Optional TLS Config
+# # enable_tls = true
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
+# ## Failed streams/calls will not be retried if 0 is provided
+# retry_delay = "1000ms"
+#
+# ## To treat all string values as tags, set this to true
+# str_as_tags = false
+
+
+# # Read metrics from Kafka topics
+# [[inputs.kafka_consumer]]
+# ## Kafka brokers.
+# brokers = ["localhost:9092"]
+#
+# ## Topics to consume.
+# topics = ["telegraf"]
+#
+# ## When set this tag will be added to all metrics with the topic as the value.
+# # topic_tag = ""
+#
+# ## Optional Client id
+# # client_id = "Telegraf"
+#
+# ## Set the minimal supported Kafka version. Setting this enables the use of new
+# ## Kafka features and APIs. Must be 0.10.2.0 or greater.
+# ## ex: version = "1.1.0"
+# # version = ""
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## SASL authentication credentials. These settings should typically be used
+# ## with TLS encryption enabled
+# # sasl_username = "kafka"
+# # sasl_password = "secret"
+#
+# ## Optional SASL:
+# ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI
+# ## (defaults to PLAIN)
+# # sasl_mechanism = ""
+#
+# ## used if sasl_mechanism is GSSAPI (experimental)
+# # sasl_gssapi_service_name = ""
+# # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH
+# # sasl_gssapi_auth_type = "KRB5_USER_AUTH"
+# # sasl_gssapi_kerberos_config_path = "/"
+# # sasl_gssapi_realm = "realm"
+# # sasl_gssapi_key_tab_path = ""
+# # sasl_gssapi_disable_pafxfast = false
+#
+# ## used if sasl_mechanism is OAUTHBEARER (experimental)
+# # sasl_access_token = ""
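+#
+# ## e.g. consuming from an Azure Event Hubs Kafka endpoint typically looks
+# ## like this (illustrative placeholders, not shipped defaults):
+# ##   brokers = ["mynamespace.servicebus.windows.net:9093"]
+# ##   sasl_mechanism = "PLAIN"
+# ##   sasl_username = "$ConnectionString"
+# ##   sasl_password = "Endpoint=sb://mynamespace.servicebus.windows.net/;..."
+#
+# ## SASL protocol version. When connecting to Azure EventHub set to 0.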
+# # sasl_version = 1
+#
+# ## Disable Kafka metadata full fetch
+# # metadata_full = false
+#
+# ## Name of the consumer group.
+# # consumer_group = "telegraf_metrics_consumers"
+#
+# ## Compression codec represents the various compression codecs recognized by
+# ## Kafka in messages.
+# ## 0 : None
+# ## 1 : Gzip
+# ## 2 : Snappy
+# ## 3 : LZ4
+# ## 4 : ZSTD
+# # compression_codec = 0
+# ## Initial offset position; one of "oldest" or "newest".
+# # offset = "oldest"
+#
+# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky".
+# # balance_strategy = "range"
+#
+# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+# ## larger messages are dropped
+# max_message_len = 1000000
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Maximum amount of time the consumer should take to process messages. If
+# ## the debug log prints messages from sarama about 'abandoning subscription
+# ## to [topic] because consuming was taking too long', increase this value to
+# ## longer than the time taken by the output plugin(s).
+# ##
+# ## Note that the effective timeout could be between 'max_processing_time' and
+# ## '2 * max_processing_time'.
+# # max_processing_time = "100ms"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# ## DEPRECATED: The 'kafka_consumer_legacy' plugin is deprecated in version 1.4.0, use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+.
+# # Read metrics from Kafka topic(s)
+# [[inputs.kafka_consumer_legacy]]
+# ## topic(s) to consume
+# topics = ["telegraf"]
+#
+# ## an array of Zookeeper connection strings
+# zookeeper_peers = ["localhost:2181"]
+#
+# ## Zookeeper Chroot
+# zookeeper_chroot = ""
+#
+# ## the name of the consumer group
+# consumer_group = "telegraf_metrics_consumers"
+#
+# ## Offset (must be either "oldest" or "newest")
+# offset = "oldest"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Maximum length of a message to consume, in bytes (default 0/unlimited);
+# ## larger messages are dropped
+# max_message_len = 65536
+
+
+# # Configuration for the AWS Kinesis input.
+# [[inputs.kinesis_consumer]]
+# ## Amazon REGION of kinesis endpoint.
+# region = "ap-southeast-2"
+#
+# ## Amazon Credentials
+# ## Credentials are loaded in the following order
+# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+# ## 2) Assumed credentials via STS if role_arn is specified
+# ## 3) explicit credentials from 'access_key' and 'secret_key'
+# ## 4) shared profile from 'profile'
+# ## 5) environment variables
+# ## 6) shared credentials file
+# ## 7) EC2 Instance Profile
+# # access_key = ""
+# # secret_key = ""
+# # token = ""
+# # role_arn = ""
+# # web_identity_token_file = ""
+# # role_session_name = ""
+# # profile = ""
+# # shared_credential_file = ""
+#
+# ## Endpoint to make request against, the correct endpoint is automatically
+# ## determined and this option should only be set if you wish to override the
+# ## default.
+# ## ex: endpoint_url = "http://localhost:8000"
+# # endpoint_url = ""
+#
+# ## Kinesis StreamName must exist prior to starting telegraf.
+# streamname = "StreamName"
+#
+# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported)
+# # shard_iterator_type = "TRIM_HORIZON"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ##
+# ## The content encoding of the data from kinesis.
+# ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip",
+# ## as AWS compresses cloudwatch log data before it is sent to kinesis. (AWS also
+# ## base64 encodes the gzipped data before pushing it to the stream; the base64
+# ## decoding is done automatically by the golang sdk as data is read from kinesis.)
+# ##
+# # content_encoding = "identity"
+#
+# ## Optional
+# ## Configuration for a dynamodb checkpoint
+# [inputs.kinesis_consumer.checkpoint_dynamodb]
+# ## unique name for this consumer
+# app_name = "default"
+# table_name = "default"
+
+
+# # Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
+# [[inputs.knx_listener]]
+# ## Type of KNX-IP interface.
+# ## Can be either "tunnel" or "router".
+# # service_type = "tunnel"
+#
+# ## Address of the KNX-IP interface.
+# service_address = "localhost:3671"
+#
+# ## Measurement definition(s)
+# # [[inputs.knx_listener.measurement]]
+# # ## Name of the measurement
+# # name = "temperature"
+# # ## Datapoint-Type (DPT) of the KNX messages
+# # dpt = "9.001"
+# # ## List of Group-Addresses (GAs) assigned to the measurement
+# # addresses = ["5/5/1"]
+#
+# # [[inputs.knx_listener.measurement]]
+# # name = "illumination"
+# # dpt = "9.004"
+# # addresses = ["5/5/3"]
+
+
+# # Read metrics off Arista LANZ, via socket
+# [[inputs.lanz]]
+# ## URL to Arista LANZ endpoint
+# servers = [
+# "tcp://switch1.int.example.com:50001",
+# "tcp://switch2.int.example.com:50001",
+# ]
+
+
+# ## DEPRECATED: The 'logparser' plugin is deprecated in version 1.15.0, use 'inputs.tail' with 'grok' data format instead.
+# # Stream and parse log file(s).
+# [[inputs.logparser]]
+# ## Log files to parse.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## /var/log/**.log -> recursively find all .log files in /var/log
+# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log
+# ## /var/log/apache.log -> only tail the apache log file
+# files = ["/var/log/apache/access.log"]
+#
+# ## Read files that currently exist from the beginning. Files that are created
+# ## while telegraf is running (and that match the "files" globs) will always
+# ## be read from the beginning.
+# from_beginning = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Parse logstash-style "grok" patterns:
+# [inputs.logparser.grok]
+# ## This is a list of patterns to check the given log file(s) for.
+# ## Note that adding patterns here increases processing time. The most
+# ## efficient configuration is to have one pattern per logparser.
+# ## Other common built-in patterns are:
+# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs)
+# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent)
+# patterns = ["%{COMBINED_LOG_FORMAT}"]
+#
+# ## Name of the measurement to emit.
+# measurement = "apache_access_log"
+#
+# ## Full path(s) to custom pattern files.
+# custom_pattern_files = []
+#
+# ## Custom patterns can also be defined here. Put one pattern per line.
+# custom_patterns = '''
+# '''
+#
+# ## Timezone allows you to provide an override for timestamps that
+# ## don't already include an offset
+# ## e.g. 04/06/2016 12:41:45 data one two 5.43µs
+# ##
+# ## Default: "" which renders UTC
+# ## Options are as follows:
+# ## 1. Local -- interpret based on machine localtime
+# ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+# ## 3. UTC -- or blank/unspecified, will return timestamp in UTC
+# # timezone = "Canada/Eastern"
+#
+# ## When set to "disable", the timestamp will not be incremented if there is a
+# ## duplicate.
+# # unique_timestamp = "auto"
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+# ## Broker URLs for the MQTT server or cluster. To connect to multiple
+# ## clusters or standalone servers, use a separate plugin instance.
+# ## example: servers = ["tcp://localhost:1883"]
+# ## servers = ["ssl://localhost:1883"]
+# ## servers = ["ws://localhost:1883"]
+# servers = ["tcp://127.0.0.1:1883"]
+#
+# ## Topics that will be subscribed to.
+# topics = [
+# "telegraf/host01/cpu",
+# "telegraf/+/mem",
+# "sensors/#",
+# ]
+#
+# ## The message topic will be stored in a tag specified by this value. If set
+# ## to the empty string no topic tag will be created.
+# # topic_tag = "topic"
+#
+# ## QoS policy for messages
+# ## 0 = at most once
+# ## 1 = at least once
+# ## 2 = exactly once
+# ##
+# ## When using a QoS of 1 or 2, you should enable persistent_session to allow
+# ## resuming unacknowledged messages.
+# # qos = 0
+#
+# ## Connection timeout for initial connection in seconds
+# # connection_timeout = "30s"
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Persistent session disables clearing of the client session on connection.
+# ## In order for this option to work you must also set client_id to identify
+# ## the client. To receive messages that arrived while the client is offline,
+# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when
+# ## publishing.
+# # persistent_session = false
+#
+# ## If unset, a random client ID will be generated.
+# # client_id = ""
+#
+# ## Username and password to connect to the MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
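+#
+# ## e.g. to avoid losing messages that arrive while telegraf is offline, one
+# ## might set (illustrative): persistent_session = true, client_id = "telegraf01",
+# ## and qos = 1, matching the QoS used by the publishers.
+#
+# ## Data format to consume.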
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+#
+# ## Enable extracting tag values from MQTT topics
+# ## _ denotes an ignored entry in the topic path
+# # [[inputs.mqtt_consumer.topic_parsing]]
+# # topic = ""
+# # measurement = ""
+# # tags = ""
+# # fields = ""
+# ## Supported values are int, float, uint
+# # [[inputs.mqtt_consumer.topic.types]]
+# # key = type
+
+
+# # Read metrics from NATS subject(s)
+# [[inputs.nats_consumer]]
+# ## urls of NATS servers
+# servers = ["nats://localhost:4222"]
+#
+# ## subject(s) to consume
+# subjects = ["telegraf"]
+#
+# ## name a queue group
+# queue_group = "telegraf_consumers"
+#
+# ## Optional credentials
+# # username = ""
+# # password = ""
+#
+# ## Optional NATS 2.0 and NATS NGS compatible user credentials
+# # credentials = "/etc/telegraf/nats.creds"
+#
+# ## Use Transport Layer Security
+# # secure = false
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Sets the limits for pending msgs and bytes for each subscription
+# ## These shouldn't need to be adjusted except in very high throughput scenarios
+# # pending_message_limit = 65536
+# # pending_bytes_limit = 67108864
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from NSQD topic(s)
+# [[inputs.nsq_consumer]]
+# ## Server option still works but is deprecated, we just prepend it to the nsqd array.
+# # server = "localhost:4150"
+#
+# ## An array representing the NSQD TCP endpoints
+# nsqd = ["localhost:4150"]
+#
+# ## An array representing the NSQLookupd HTTP Endpoints
+# nsqlookupd = ["localhost:4161"]
+# topic = "telegraf"
+# channel = "consumer"
+# max_in_flight = 100
+#
+# ## Maximum messages to read from the broker that have not been written by an
+# ## output. For best throughput set based on the number of metrics within
+# ## each message and the size of the output's metric_batch_size.
+# ##
+# ## For example, if each message from the queue contains 10 metrics and the
+# ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+# ## full batch is collected and the write is triggered immediately without
+# ## waiting until the next flush_interval.
+# # max_undelivered_messages = 1000
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Receive OpenTelemetry traces, metrics, and logs over gRPC
+# [[inputs.opentelemetry]]
+# ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service
+# ## address:port
+# # service_address = "0.0.0.0:4317"
+#
+# ## Override the default (5s) new connection timeout
+# # timeout = "5s"
+#
+# ## Override the default (prometheus-v1) metrics schema.
+# ## Supports: "prometheus-v1", "prometheus-v2"
+# ## For more information about the alternatives, read the Prometheus input
+# ## plugin notes.
+# # metrics_schema = "prometheus-v1"
+#
+# ## Optional TLS Config.
+# ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md
+# ##
+# ## Set one or more allowed client CA certificate file names to
+# ## enable mutually authenticated TLS connections.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Add service certificate and key.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+
+
+# # Read metrics from one or many pgbouncer servers
+# [[inputs.pgbouncer]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@host:port[/dbname]\
+# ## ?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# address = "host=localhost user=pgbouncer sslmode=disable"
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql]]
+# ## specify address via a url matching:
+# ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]
+# ## or a simple string:
+# ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+# ##
+# ## All connection parameters are optional.
+# ##
+# ## Without the dbname parameter, the driver will default to a database
+# ## with the same name as the user. This dbname is just for instantiating a
+# ## connection with the server and doesn't restrict the databases we are trying
+# ## to grab metrics for.
+# ##
+# address = "host=localhost user=postgres sslmode=disable"
+# ## A custom name for the database that will be used as the "server" tag in the
+# ## measurement output. If not specified, a default one generated from
+# ## the connection address is used.
+# # outputaddress = "db01"
+#
+# ## connection configuration.
+# ## max_lifetime - specify the maximum lifetime of a connection.
+# ## default is forever (0s)
+# # max_lifetime = "0s"
+#
+# ## A list of databases to explicitly ignore. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'databases' option.
+# # ignored_databases = ["postgres", "template0", "template1"]
+#
+# ## A list of databases to pull metrics about. If not specified, metrics for all
+# ## databases are gathered. Do NOT use with the 'ignored_databases' option.
+# # databases = ["app_production", "testing"]
+#
+# ## Whether to use prepared statements when connecting to the database.
+# ## This should be set to false when connecting through a PgBouncer instance
+# ## with pool_mode set to transaction.
+# prepared_statements = true
+
+
+# # Read metrics from one or many postgresql servers
+# [[inputs.postgresql_extensible]]
+# # specify address via a url matching:
+# # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=...
+# # or a simple string:
+# # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production
+# #
+# # All connection parameters are optional.
+# # Without the dbname parameter, the driver will default to a database
+# # with the same name as the user. This dbname is just for instantiating a
+# # connection with the server and doesn't restrict the databases we are trying
+# # to grab metrics for.
+# #
+# address = "host=localhost user=postgres sslmode=disable"
+#
+# ## A list of databases to pull metrics about.
+# ## deprecated in 1.22.3; use the sqlquery option to specify database to use
+# # databases = ["app_production", "testing"]
+#
+# ## Whether to use prepared statements when connecting to the database.
+# ## This should be set to false when connecting through a PgBouncer instance
+# ## with pool_mode set to transaction.
+# prepared_statements = true
+#
+# # Define the toml config where the sql queries are stored.
+# # The script option can be used to specify the .sql file path.
+# # If the script and sqlquery options are specified at the same time, sqlquery will be used.
+# #
+# # The tagvalue field is used to define custom tags (separated by commas).
+# # The query is expected to return columns which match the names of the
+# # defined tags. The values in these columns must be of a string-type,
+# # a number-type or a blob-type.
+# #
+# # The timestamp field is used to override the data points timestamp value. By
+# # default, all rows are inserted with the current time. By setting a timestamp column,
+# # the row will be inserted with that column's value.
+# #
+# # Structure :
+# # [[inputs.postgresql_extensible.query]]
+# # sqlquery string
+# # version string
+# # withdbname boolean
+# # tagvalue string (comma separated)
+# # timestamp string
+# [[inputs.postgresql_extensible.query]]
+# sqlquery="SELECT * FROM pg_stat_database where datname"
+# version=901
+# withdbname=false
+# tagvalue=""
+# [[inputs.postgresql_extensible.query]]
+# script="your_sql-filepath.sql"
+# version=901
+# withdbname=false
+# tagvalue=""
+
+
+# # Read metrics from one or many prometheus clients
+# [[inputs.prometheus]]
+# ## An array of urls to scrape metrics from.
+# urls = ["http://localhost:9100/metrics"]
+#
+# ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics.
+# ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details.
+# ## Valid options: 1, 2
+# # metric_version = 1
+#
+# ## Url tag name (tag containing scraped url. optional, default is "url")
+# # url_tag = "url"
+#
+# ## Whether the timestamp of the scraped metrics will be ignored.
+# ## If set to true, the gather time will be used.
+# # ignore_timestamp = false
+#
+# ## An array of Kubernetes services to scrape metrics from.
+# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"]
+#
+# ## Kubernetes config file to create client from.
+# # kube_config = "/path/to/kubernetes.config"
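+#
+# ## e.g. a plain scrape of two exporters using the newer metric layout
+# ## (illustrative):
+# ##   urls = ["http://localhost:9100/metrics", "http://localhost:9273/metrics"]
+# ##   metric_version = 2
+#
+# ## Scrape Kubernetes pods for the following prometheus annotations:
+# ## - prometheus.io/scrape: Enable scraping for this pod
+# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to
+# ## set this to 'https' & most likely set the tls config.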
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
+# ## - prometheus.io/port: If port is not 9102 use this annotation
+# # monitor_kubernetes_pods = true
+#
+# ## Get the list of pods to scrape with either the scope of
+# ## - cluster: the kubernetes watch api (default, no need to specify)
+# ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP.
+# # pod_scrape_scope = "cluster"
+#
+# ## Only for node scrape scope: node IP of the node that telegraf is running on.
+# ## Either this config or the environment variable NODE_IP must be set.
+# # node_ip = "10.180.1.1"
+#
+# ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping.
+# ## Default is 60 seconds.
+# # pod_scrape_interval = 60
+#
+# ## Restricts Kubernetes monitoring to a single namespace
+# ## ex: monitor_kubernetes_pods_namespace = "default"
+# # monitor_kubernetes_pods_namespace = ""
+# ## Label selector to target pods which have the label
+# # kubernetes_label_selector = "env=dev,app=nginx"
+# ## Field selector to target pods
+# ## eg. To scrape pods on a specific node
+# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME"
+#
+# ## Cache refresh interval to set the interval for re-sync of pods list.
+# ## Default is 60 minutes.
+# # cache_refresh_interval = 60
+#
+# ## Scrape Services available in Consul Catalog
+# # [inputs.prometheus.consul]
+# # enabled = true
+# # agent = "http://localhost:8500"
+# # query_interval = "5m"
+#
+# # [[inputs.prometheus.consul.query]]
+# # name = "a service name"
+# # tag = "a service tag"
+# # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}'
+# # [inputs.prometheus.consul.query.tags]
+# # host = "{{.Node}}"
+#
+# ## Use bearer token for authorization. ('bearer_token' takes priority)
+# # bearer_token = "/path/to/bearer/token"
+# ## OR
+# # bearer_token_string = "abc_123"
+#
+# ## HTTP Basic Authentication username and password. ('bearer_token' and
+# ## 'bearer_token_string' take priority)
+# # username = ""
+# # password = ""
+#
+# ## Specify timeout duration for slower prometheus clients (default is 3s)
+# # response_timeout = "3s"
+#
+# ## Optional TLS Config
+# # tls_ca = "/path/to/cafile"
+# # tls_cert = "/path/to/certfile"
+# # tls_key = "/path/to/keyfile"
+#
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = false
+
+
+# # Read metrics from one or many redis servers
+# [[inputs.redis]]
+# ## specify servers via a url matching:
+# ## [protocol://][:password]@address[:port]
+# ## e.g.
+# ## tcp://localhost:6379
+# ## tcp://:password@192.168.99.100
+# ## unix:///var/run/redis.sock
+# ##
+# ## If no servers are specified, then localhost is used as the host.
+# ## If no port is specified, 6379 is used
+# servers = ["tcp://localhost:6379"]
+#
+# ## Optional. Specify redis commands to retrieve values
+# # [[inputs.redis.commands]]
+# # # The command to run where each argument is a separate element
+# # command = ["get", "sample-key"]
+# # # The field to store the result in
+# # field = "sample-key-value"
+# # # The type of the result
+# # # Can be "string", "integer", or "float"
+# # type = "string"
+#
+# ## specify server password
+# # password = "s#cr@t%"
+#
+# ## specify username for ACL auth (Redis 6.0+)
+# # username = "default"
+#
+# ## Optional TLS Config
+# # tls_ca = "/etc/telegraf/ca.pem"
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Use TLS but skip chain & host verification
+# # insecure_skip_verify = true
+
+
+# # Riemann protobuf listener
+# [[inputs.riemann_listener]]
+# ## URL to listen on
+# ## Default is "tcp://:5555"
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+#
+# ## Maximum number of concurrent connections.
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+# ## Read timeout.
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+# ## Optional TLS configuration.
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# # read_buffer_size = "64KiB"
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+
+
+# # SFlow V5 Protocol Listener
+# [[inputs.sflow]]
+# ## Address to listen for sFlow packets.
+# ## example: service_address = "udp://:6343"
+# ## service_address = "udp4://:6343"
+# ## service_address = "udp6://:6343"
+# service_address = "udp://:6343"
+#
+# ## Set the size of the operating system's receive buffer.
+# ## example: read_buffer_size = "64KiB"
+# # read_buffer_size = ""
+
+
+# # Receive SNMP traps
+# [[inputs.snmp_trap]]
+# ## Transport, local address, and port to listen on. Transport must
+# ## be "udp://". Omit local address to listen on all interfaces.
+# ## example: "udp://127.0.0.1:1234"
+# ##
+# ## Special permissions may be required to listen on a port less than
+# ## 1024. See README.md for details
+# ##
+# # service_address = "udp://:162"
+# ##
+# ## Path to mib files
+# ## Used by the gosmi translator.
+# ## To add paths when translating with netsnmp, use the MIBDIRS environment variable
+# # path = ["/usr/share/snmp/mibs"]
+# ##
+# ## Deprecated in 1.20.0; no longer running snmptranslate
+# ## Timeout running snmptranslate command
+# # timeout = "5s"
+# ## Snmp version
+# # version = "2c"
+# ## SNMPv3 authentication and encryption options.
+# ##
+# ## Security Name.
+# # sec_name = "myuser"
+# ## Authentication protocol; one of "MD5", "SHA" or "".
+# # auth_protocol = "MD5"
+# ## Authentication password.
+# # auth_password = "pass"
+# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv".
+# # sec_level = "authNoPriv"
+# ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "".
+# # priv_protocol = ""
+# ## Privacy password used for encrypted messages.
+# # priv_password = ""
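+#
+# ## The listener can be exercised with net-snmp's snmptrap tool, e.g.
+# ## (illustrative): snmptrap -v 2c -c public 127.0.0.1 '' 1.3.6.1.6.3.1.1.5.1
+
+
+# # Generic socket listener capable of handling multiple socket types.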
+# [[inputs.socket_listener]]
+# ## URL to listen on
+# # service_address = "tcp://:8094"
+# # service_address = "tcp://127.0.0.1:http"
+# # service_address = "tcp4://:8094"
+# # service_address = "tcp6://:8094"
+# # service_address = "tcp6://[2001:db8::1]:8094"
+# # service_address = "udp://:8094"
+# # service_address = "udp4://:8094"
+# # service_address = "udp6://:8094"
+# # service_address = "unix:///tmp/telegraf.sock"
+# # service_address = "unixgram:///tmp/telegraf.sock"
+#
+# ## Change the file mode bits on unix sockets. These permissions may not be
+# ## respected by some platforms; to safely restrict write permissions it is best
+# ## to place the socket into a directory that has previously been created
+# ## with the desired permissions.
+# ## ex: socket_mode = "777"
+# # socket_mode = ""
+#
+# ## Maximum number of concurrent connections.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # max_connections = 1024
+#
+# ## Read timeout.
+# ## Only applies to stream sockets (e.g. TCP).
+# ## 0 (default) is unlimited.
+# # read_timeout = "30s"
+#
+# ## Optional TLS configuration.
+# ## Only applies to stream sockets (e.g. TCP).
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
+# ## Enables client authentication if set.
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+#
+# ## Maximum socket buffer size (in bytes when no unit specified).
+# ## For stream sockets, once the buffer fills up, the sender will start backing up.
+# ## For datagram sockets, once the buffer fills up, metrics will start dropping.
+# ## Defaults to the OS default.
+# # read_buffer_size = "64KiB"
+#
+# ## Period between keep alive probes.
+# ## Only applies to TCP sockets.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# # keep_alive_period = "5m"
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# # data_format = "influx"
+#
+# ## Content encoding for message payloads; can be set to "gzip" to decode
+# ## gzip-encoded payloads, or "identity" to apply no encoding.
+# # content_encoding = "identity"
+
+
+# # Read metrics from SQL queries
+# [[inputs.sql]]
+# ## Database Driver
+# ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for
+# ## a list of supported drivers.
+# driver = "mysql"
+#
+# ## Data source name for connecting
+# ## The syntax and supported options depends on selected driver.
+# dsn = "username:password@mysqlserver:3307/dbname?param=value"
+#
+# ## Timeout for any operation
+# ## Note that the timeout for queries is per query not per gather.
+# # timeout = "5s"
+#
+# ## Connection time limits
+# ## By default the maximum idle time and maximum lifetime of a connection is unlimited, i.e. the connections
+# ## will not be closed automatically. If you specify a positive time, the connections will be closed after
+# ## idling or existing for at least that amount of time, respectively.
+# # connection_max_idle_time = "0s"
+# # connection_max_life_time = "0s"
+#
+# ## Connection count limits
+# ## By default the number of open connections is not limited and the number of maximum idle connections
+# ## will be inferred from the number of queries specified. If you specify a positive number for any of the
+# ## two options, connections will be closed when reaching the specified limit.
+# ## The number of idle connections will be clipped to the maximum number of
+# ## connections limit, if any.
+# # connection_max_open = 0
+# # connection_max_idle = auto
+#
+# [[inputs.sql.query]]
+# ## Query to perform on the server
+# query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0"
+# ## Alternatively, instead of specifying the query directly, you can select a file here containing the SQL query.
+# ## Only one of 'query' and 'query_script' can be specified!
+# # query_script = "/path/to/sql/script.sql"
+#
+# ## Name of the measurement
+# ## In case both 'measurement' and 'measurement_column' are given, the latter takes precedence.
+# # measurement = "sql"
+#
+# ## Column name containing the name of the measurement
+# ## If given, this will take precedence over the 'measurement' setting. In case a query result
+# ## does not contain the specified column, we fall back to the 'measurement' setting.
+# # measurement_column = ""
+#
+# ## Column name containing the time of the measurement
+# ## If omitted, the time of the query will be used.
+# # time_column = ""
+#
+# ## Format of the time contained in 'time_column'
+# ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format.
+# ## See https://golang.org/pkg/time/#Time.Format for details.
+# # time_format = "unix"
+#
+# ## Column names containing tags
+# ## An empty include list will reject all columns and an empty exclude list will not exclude any column.
+# ## I.e. by default no columns will be returned as tag and the tags are empty.
+# # tag_columns_include = []
+# # tag_columns_exclude = []
+#
+# ## Column names containing fields (explicit types)
+# ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over
+# ## the automatic (driver-based) conversion below.
+# ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined.
+# # field_columns_float = []
+# # field_columns_int = []
+# # field_columns_uint = []
+# # field_columns_bool = []
+# # field_columns_string = []
+#
+# ## Column names containing fields (automatic types)
+# ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty
+# ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields.
+# ## NOTE: We rely on the database driver to perform automatic datatype conversion.
+# # field_columns_include = []
+# # field_columns_exclude = []
+
+
+# # Read metrics from Microsoft SQL Server
+# [[inputs.sqlserver]]
+# ## Specify instances to monitor with a list of connection strings.
+# ## All connection parameters are optional.
+# ## By default, the host is localhost, listening on default port, TCP 1433.
+# ## for Windows, the user is the currently running AD user (SSO).
+# ## See https://github.com/denisenkom/go-mssqldb for detailed connection
+# ## parameters, in particular, tls connections can be created like so:
+# ## "encrypt=true;certificate=;hostNameInCertificate="
+# servers = [
+# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
+# ]
+#
+# ## Authentication method
+# ## valid methods: "connection_string", "AAD"
+# # auth_method = "connection_string"
+#
+# ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2
+# ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
+# ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool"
+#
+# database_type = "SQLServer"
+#
+# ## A list of queries to include. If not specified, all the below listed queries are used.
+# include_query = []
+#
+# ## A list of queries to explicitly ignore.
+# exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
+#
+# ## Queries enabled by default for database_type = "SQLServer" are -
+# ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
+# ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates,
+# ## SQLServerRecentBackups
+#
+# ## Queries enabled by default for database_type = "AzureSQLDB" are -
+# ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
+# ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
+#
+# ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
+# ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
+# ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
+#
+# ## Queries enabled by default for database_type = "AzureSQLPool" are -
+# ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
+# ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
+#
+# ## The following are old config settings.
+# ## You may use them only if you are using the earlier flavor of queries; however, it is recommended to use
+# ## the new mechanism of identifying the database_type, thereby using its corresponding queries.
+#
+# ## Optional parameter, setting this to 2 will use a new version
+# ## of the collection queries that break compatibility with the original
+# ## dashboards.
+# ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
+# # query_version = 2
+#
+# ## If you are using AzureDB, setting this to true will gather resource utilization metrics
+# # azuredb = false
+#
+# ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health".
+# ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers".
+# ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues.
+# ## This setting/metric is optional and is disabled by default.
+# # health_metric = false
+#
+# ## Possible queries across different versions of the collectors
+# ## Queries enabled by default for specific Database Type
+#
+# ## database_type = AzureSQLDB by default collects the following queries
+# ## - AzureSQLDBWaitStats
+# ## - AzureSQLDBResourceStats
+# ## - AzureSQLDBResourceGovernance
+# ## - AzureSQLDBDatabaseIO
+# ## - AzureSQLDBServerProperties
+# ## - AzureSQLDBOsWaitstats
+# ## - AzureSQLDBMemoryClerks
+# ## - AzureSQLDBPerformanceCounters
+# ## - AzureSQLDBRequests
+# ## - AzureSQLDBSchedulers
+#
+# ## database_type = AzureSQLManagedInstance by default collects the following queries
+# ## - AzureSQLMIResourceStats
+# ## - AzureSQLMIResourceGovernance
+# ## - AzureSQLMIDatabaseIO
+# ## - AzureSQLMIServerProperties
+# ## - AzureSQLMIOsWaitstats
+# ## - AzureSQLMIMemoryClerks
+# ## - AzureSQLMIPerformanceCounters
+# ## - AzureSQLMIRequests
+# ## - AzureSQLMISchedulers
+#
+# ## database_type = AzureSQLPool by default collects the following queries
+# ## - AzureSQLPoolResourceStats
+# ## - AzureSQLPoolResourceGovernance
+# ## - AzureSQLPoolDatabaseIO
+# ## - AzureSQLPoolOsWaitStats
+# ## - AzureSQLPoolMemoryClerks
+# ## - AzureSQLPoolPerformanceCounters
+# ## - AzureSQLPoolSchedulers
+#
+# ## database_type = SQLServer by default collects the following queries
+# ## - SQLServerPerformanceCounters
+# ## - SQLServerWaitStatsCategorized
+# ## - SQLServerDatabaseIO
+# ## - SQLServerProperties
+# ## - SQLServerMemoryClerks
+# ## - SQLServerSchedulers
+# ## - SQLServerRequests
+# ## - SQLServerVolumeSpace
+# ## - SQLServerCpu
+# ## - SQLServerRecentBackups
+# ## and following as optional (if mentioned in the include_query list)
+# ## - SQLServerAvailabilityReplicaStates
+# ## - SQLServerDatabaseReplicaStates
+#
+# ## Version 2 by default collects the following queries
+# ## Version 2 is being deprecated, please consider using database_type.
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - DatabaseIO
+# ## - ServerProperties
+# ## - MemoryClerk
+# ## - Schedulers
+# ## - SqlRequests
+# ## - VolumeSpace
+# ## - Cpu
+#
+# ## Version 1 by default collects the following queries
+# ## Version 1 is deprecated, please consider using database_type.
+# ## - PerformanceCounters
+# ## - WaitStatsCategorized
+# ## - CPUHistory
+# ## - DatabaseIO
+# ## - DatabaseSize
+# ## - DatabaseStats
+# ## - DatabaseProperties
+# ## - MemoryClerk
+# ## - VolumeSpace
+# ## - PerformanceMetrics
+
+
+# # Statsd Server
+# [[inputs.statsd]]
+# ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp)
+# protocol = "udp"
+#
+# ## MaxTCPConnection - applicable when protocol is set to tcp (default=250)
+# max_tcp_connections = 250
+#
+# ## Enable TCP keep alive probes (default=false)
+# tcp_keep_alive = false
+#
+# ## Specifies the keep-alive period for an active network connection.
+# ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false.
+# ## Defaults to the OS configuration.
+# # tcp_keep_alive_period = "2h"
+#
+# ## Address and port to host UDP listener on
+# service_address = ":8125"
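+#
+# ## Metrics arrive in the statsd wire format, e.g. (illustrative):
+# ##   users.online:42|g
+# ##   page.views:1|c
+# ##   request.latency:320|ms
+#
+# ## The following configuration options control when telegraf clears its cache
+# ## of previous values. If set to false, then telegraf will only clear its
+# ## cache when the daemon is restarted.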
+# ## Reset gauges every interval (default=true)
+# delete_gauges = true
+# ## Reset counters every interval (default=true)
+# delete_counters = true
+# ## Reset sets every interval (default=true)
+# delete_sets = true
+# ## Reset timings & histograms every interval (default=true)
+# delete_timings = true
+#
+# ## Percentiles to calculate for timing & histogram stats.
+# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0]
+#
+# ## separator to use between elements of a statsd metric
+# metric_separator = "_"
+#
+# ## Parses tags in the datadog statsd format
+# ## http://docs.datadoghq.com/guides/dogstatsd/
+# ## deprecated in 1.10; use datadog_extensions option instead
+# parse_data_dog_tags = false
+#
+# ## Parses extensions to statsd in the datadog statsd format
+# ## currently supports metrics and datadog tags.
+# ## http://docs.datadoghq.com/guides/dogstatsd/
+# datadog_extensions = false
+#
+# ## Parses distributions metric as specified in the datadog statsd format
+# ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition
+# datadog_distributions = false
+#
+# ## Statsd data translation templates, more info can be read here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md
+# # templates = [
+# # "cpu.* measurement*"
+# # ]
+#
+# ## Number of UDP messages allowed to queue up, once filled,
+# ## the statsd server will start dropping packets
+# allowed_pending_messages = 10000
+#
+# ## Number of timing/histogram values to track per-measurement in the
+# ## calculation of percentiles. Raising this limit increases the accuracy
+# ## of percentiles but also increases the memory usage and cpu time.
+# percentile_limit = 1000
+#
+# ## Maximum socket buffer size in bytes, once the buffer fills up, metrics
+# ## will start dropping. Defaults to the OS default.
+# # read_buffer_size = 65535
+#
+# ## Max duration (TTL) for each metric to stay cached/reported without being updated.
+# # max_ttl = "10h"
+#
+# ## Sanitize name method
+# ## By default, telegraf will pass names directly as they are received.
+# ## However, upstream statsd now does sanitization of names which can be
+# ## enabled by using the "upstream" method option. This option will replace
+# ## whitespace with '_', replace '/' with '-', and remove characters not
+# ## matching 'a-zA-Z_\-0-9\.;='.
+# # sanitize_name_method = ""
+
+
+# # Suricata stats and alerts plugin
+# [[inputs.suricata]]
+# ## Data sink for Suricata stats log.
+# # This is expected to be a filename of a
+# # unix socket to be created for listening.
+# source = "/var/run/suricata-stats.sock"
+#
+# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect"
+# # becomes "detect_alert" when delimiter is "_".
+# delimiter = "_"
+#
+# # Detect alert logs
+# alerts = false
+
+
+# [[inputs.syslog]]
+# ## Protocol, address and port to host the syslog receiver.
+# ## If no host is specified, then localhost is used.
+# ## If no port is specified, 6514 is used (RFC5425#section-4.1).
+# ## ex: server = "tcp://localhost:6514"
+# ## server = "udp://:6514"
+# ## server = "unix:///var/run/telegraf-syslog.sock"
+# server = "tcp://:6514"
+#
+# ## TLS Config
+# # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"]
+# # tls_cert = "/etc/telegraf/cert.pem"
+# # tls_key = "/etc/telegraf/key.pem"
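+#
+# ## Messages are expected in RFC5424 syslog format; with the default
+# ## octet-counting framing each message is prefixed by its byte length,
+# ## e.g. (illustrative):
+# ##   47 <13>1 2022-01-01T00:00:00Z host app - - - hello
+#
+# ## Period between keep alive probes.
+# ## 0 disables keep alive probes.
+# ## Defaults to the OS configuration.
+# ## Only applies to stream sockets (e.g. TCP).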
+# # keep_alive_period = "5m"
+#
+# ## Maximum number of concurrent connections (default = 0).
+# ## 0 means unlimited.
+# ## Only applies to stream sockets (e.g. TCP).
+# # max_connections = 1024
+#
+# ## Read timeout is the maximum time allowed for reading a single message (default = 5s).
+# ## 0 means unlimited.
+# # read_timeout = "5s"
+#
+# ## The framing technique with which it is expected that messages are transported (default = "octet-counting").
+# ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1),
+# ## or the non-transparent framing technique (RFC6587#section-3.4.2).
+# ## Must be one of "octet-counting", "non-transparent".
+# # framing = "octet-counting"
+#
+# ## The trailer to be expected in case of non-transparent framing (default = "LF").
+# ## Must be one of "LF", or "NUL".
+# # trailer = "LF"
+#
+# ## Whether to parse in best effort mode or not (default = false).
+# ## By default best effort parsing is off.
+# # best_effort = false
+#
+# ## The RFC standard to use for message parsing
+# ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support)
+# ## Must be one of "RFC5424", or "RFC3164".
+# # syslog_standard = "RFC5424"
+#
+# ## Character to prepend to SD-PARAMs (default = "_").
+# ## A syslog message can contain multiple parameters and multiple identifiers within structured data section.
+# ## e.g., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"]
+# ## For each combination a field is created.
+# ## Its name is created concatenating identifier, sdparam_separator, and parameter name.
+# # sdparam_separator = "_"
+
+
+# # Parse the new lines appended to a file
+# [[inputs.tail]]
+# ## File names or a pattern to tail.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk". ie:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ## "/var/log/log[!1-2]*" -> tail files without 1-2
+# ## "/var/log/log[^1-2]*" -> identical behavior as above
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/mymetrics.out"]
+#
+# ## Read file from beginning.
+# # from_beginning = false
+#
+# ## Whether file is a named pipe
+# # pipe = false
+#
+# ## Method used to watch for file updates. Can be either "inotify" or "poll".
+# # watch_method = "inotify"
+#
+# ## Maximum lines of the file to process that have not yet been written by the
+# ## output. For best throughput set based on the number of metrics on each
+# ## line and the size of the output's metric_batch_size.
+# # max_undelivered_lines = 1000
+#
+# ## Character encoding to use when interpreting the file contents. Invalid
+# ## characters are replaced using the unicode replacement character. When set
+# ## to the empty string the data is not decoded to text.
+# ## ex: character_encoding = "utf-8"
+# ## character_encoding = "utf-16le"
+# ## character_encoding = "utf-16be"
+# ## character_encoding = ""
+# # character_encoding = ""
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options, read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
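+#
+# ## e.g. the deprecated logparser setup further above maps onto tail with the
+# ## grok data format (illustrative):
+# ##   files = ["/var/log/apache/access.log"]
+# ##   data_format = "grok"
+# ##   grok_patterns = ["%{COMBINED_LOG_FORMAT}"]
+#
+# ## Set the tag that will contain the path of the tailed file. If you don't want this tag, set it to an empty string.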
+# # path_tag = "path"
+#
+# ## Filters to apply to files before generating metrics
+# ## "ansi_color" removes ANSI colors
+# # filters = []
+#
+# ## multiline parser/codec
+# ## https://www.elastic.co/guide/en/logstash/2.4/plugins-filters-multiline.html
+# #[inputs.tail.multiline]
+# ## The pattern should be a regexp which matches what you believe to be an indicator that the field is part of an event consisting of multiple lines of log data.
+# #pattern = '^\s'
+#
+# ## The field's value must be previous or next and indicates the relation to the
+# ## multi-line event.
+# #match_which_line = "previous"
+#
+# ## The invert_match can be true or false (defaults to false).
+# ## If true, a message not matching the pattern will constitute a match of the multiline filter and match_which_line will be applied. (vice-versa is also true)
+# #invert_match = false
+#
+# ## After the specified timeout, this plugin sends the multiline event even if no new pattern is found to start a new event. The default is 5s.
+# #timeout = "5s"
+
+
+# ## DEPRECATED: The 'tcp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead.
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+# # socket_listener plugin
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# ## DEPRECATED: The 'udp_listener' plugin is deprecated in version 1.3.0, use 'inputs.socket_listener' instead.
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+# # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener
+
+
+# # Read metrics from one or many vCenters
+# [[inputs.vsphere]]
+# ## List of vCenter URLs to be monitored. These three lines must be uncommented
+# ## and edited for the plugin to work.
+# vcenters = [ "https://vcenter.local/sdk" ]
+# username = "user@corp.local"
+# password = "secret"
+#
+# ## VMs
+# ## Typical VM metrics (if omitted or empty, all metrics are collected)
+# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected)
+# # vm_exclude = [] # Inventory paths to exclude
+# vm_metric_include = [
+# "cpu.demand.average",
+# "cpu.idle.summation",
+# "cpu.latency.average",
+# "cpu.readiness.average",
+# "cpu.ready.summation",
+# "cpu.run.summation",
+# "cpu.usagemhz.average",
+# "cpu.used.summation",
+# "cpu.wait.summation",
+# "mem.active.average",
+# "mem.granted.average",
+# "mem.latency.average",
+# "mem.swapin.average",
+# "mem.swapinRate.average",
+# "mem.swapout.average",
+# "mem.swapoutRate.average",
+# "mem.usage.average",
+# "mem.vmmemctl.average",
+# "net.bytesRx.average",
+# "net.bytesTx.average",
+# "net.droppedRx.summation",
+# "net.droppedTx.summation",
+# "net.usage.average",
+# "power.power.average",
+# "virtualDisk.numberReadAveraged.average",
+# "virtualDisk.numberWriteAveraged.average",
+# "virtualDisk.read.average",
+# "virtualDisk.readOIO.latest",
+# "virtualDisk.throughput.usage.average",
+# "virtualDisk.totalReadLatency.average",
+# "virtualDisk.totalWriteLatency.average",
+# "virtualDisk.write.average",
+# "virtualDisk.writeOIO.latest",
+# "sys.uptime.latest",
+# ]
+# # vm_metric_exclude = [] ## Nothing is excluded by default
+# # vm_instances = true ## true by default
+#
+# ## Hosts
+# ## Typical host metrics (if omitted or empty, all metrics are collected)
+# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected)
+# # host_exclude = [] # Inventory paths to exclude
+# host_metric_include = [
+# "cpu.coreUtilization.average",
"cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Resource Pools +# # datastore_include = [ "/*/host/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. 
+# # datacenter_instances = false ## false by default
+#
+# ## Plugin Settings
+# ## separator character to use for measurement and field names (default: "_")
+# # separator = "_"
+#
+# ## number of objects to retrieve per query for realtime resources (vms and hosts)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_objects = 256
+#
+# ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores)
+# ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+# # max_query_metrics = 256
+#
+# ## number of goroutines to use for collection and discovery of objects and metrics
+# # collect_concurrency = 1
+# # discover_concurrency = 1
+#
+# ## the interval before (re)discovering objects subject to metrics collection (default: 300s)
+# # object_discovery_interval = "300s"
+#
+# ## timeout applies to any of the API requests made to vCenter
+# # timeout = "60s"
+#
+# ## When set to true, all samples are sent as integers. This makes the output
+# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all
+# ## samples from vCenter, with the exception of percentages, are integer
+# ## values, but under some conditions, some averaging takes place internally in
+# ## the plugin. Setting this flag to "false" will send values as floats to
+# ## preserve the full precision when averaging takes place.
+# # use_int_samples = true
+#
+# ## Custom attributes from vCenter can be very useful for queries in order to slice the
+# ## metrics along different dimensions and for forming ad-hoc relationships. They are disabled
+# ## by default, since they can add a considerable amount of tags to the resulting metrics. To
+# ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include
+# ## to select the attributes you want to include.
+# # custom_attribute_include = []
+# # custom_attribute_exclude = ["*"]
+#
+# ## The number of vSphere 5-minute metric collection cycles to look back for non-realtime metrics. In
+# ## some versions (6.7, 7.0 and possibly more), certain metrics, such as cluster metrics, may be reported
+# ## with a significant delay (>30min). If this happens, try increasing this number. Please note that increasing
+# ## it too much may cause performance issues.
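+# ## For example, with vSphere's 5-minute collection cycles, the default
+# ## metric_lookback = 3 scans roughly the last 15 minutes of samples.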
+# # metric_lookback = 3
+#
+# ## Optional SSL Config
+# # ssl_ca = "/path/to/cafile"
+# # ssl_cert = "/path/to/certfile"
+# # ssl_key = "/path/to/keyfile"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## The historical_interval value must EXACTLY match the daily "Interval Duration"
+# ## found on the vCenter server under Configure > General > Statistics > Statistic intervals
+# # historical_interval = "5m"
+
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+# ## Address and port to host Webhook listener on
+# service_address = ":1619"
+#
+# [inputs.webhooks.filestack]
+# path = "/filestack"
+#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
+# [inputs.webhooks.github]
+# path = "/github"
+# # secret = ""
+#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
+# [inputs.webhooks.mandrill]
+# path = "/mandrill"
+#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
+# [inputs.webhooks.rollbar]
+# path = "/rollbar"
+#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
+# [inputs.webhooks.papertrail]
+# path = "/papertrail"
+#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
+# [inputs.webhooks.particle]
+# path = "/particle"
+#
+# ## HTTP basic auth
+# #username = ""
+# #password = ""
+#
+# [inputs.webhooks.artifactory]
+# path = "/artifactory"
+
+
+# # This plugin implements the Zipkin HTTP server to gather trace and timing data needed to troubleshoot latency problems in microservice architectures.
+# [[inputs.zipkin]]
+# # path = "/api/v1/spans" # URL path for span data
+# # port = 9411 # Port on which Telegraf listens
+
diff --git a/filter/filter.go b/filter/filter.go
index df171257bc789..984fa3ed08f70 100644
--- a/filter/filter.go
+++ b/filter/filter.go
@@ -47,7 +47,7 @@ func Compile(filters []string) (Filter, error) {
 // hasMeta reports whether path contains any magic glob characters.
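// NOTE: strings.ContainsAny(s, "*?[") in the hunk below is a drop-in
// replacement for the previous strings.IndexAny(s, "*?[") >= 0; the behavior
// is unchanged. The IncludeExcludeFilter hunk that follows adds configurable
// defaults for the "no include list" / "no exclude list" cases. A minimal
// usage sketch of those semantics, using only names introduced in this diff
// (error handling elided):
//
//	f, _ := NewIncludeExcludeFilterDefaults([]string{"cpu*"}, nil, true, false)
//	f.Match("cpu0") // true: matches the include list
//	f.Match("mem")  // false: an include list exists but nothing matches
//
//	g, _ := NewIncludeExcludeFilterDefaults(nil, nil, false, false)
//	g.Match("any")  // false: no include list, so includeDefault decides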
func hasMeta(s string) bool { - return strings.IndexAny(s, "*?[") >= 0 + return strings.ContainsAny(s, "*?[") } type filter struct { @@ -79,13 +79,24 @@ func compileFilterNoGlob(filters []string) Filter { } type IncludeExcludeFilter struct { - include Filter - exclude Filter + include Filter + exclude Filter + includeDefault bool + excludeDefault bool } func NewIncludeExcludeFilter( include []string, exclude []string, +) (Filter, error) { + return NewIncludeExcludeFilterDefaults(include, exclude, true, false) +} + +func NewIncludeExcludeFilterDefaults( + include []string, + exclude []string, + includeDefault bool, + excludeDefault bool, ) (Filter, error) { in, err := Compile(include) if err != nil { @@ -97,7 +108,7 @@ func NewIncludeExcludeFilter( return nil, err } - return &IncludeExcludeFilter{in, ex}, nil + return &IncludeExcludeFilter{in, ex, includeDefault, excludeDefault}, nil } func (f *IncludeExcludeFilter) Match(s string) bool { @@ -105,12 +116,17 @@ func (f *IncludeExcludeFilter) Match(s string) bool { if !f.include.Match(s) { return false } + } else if !f.includeDefault { + return false } if f.exclude != nil { if f.exclude.Match(s) { return false } + } else if f.excludeDefault { + return false } + return true } diff --git a/go.mod b/go.mod index 4769fbb62ceeb..2ad366c7d8337 100644 --- a/go.mod +++ b/go.mod @@ -1,161 +1,410 @@ module github.com/influxdata/telegraf -go 1.15 +go 1.18 require ( - cloud.google.com/go v0.53.0 - cloud.google.com/go/datastore v1.1.0 // indirect - cloud.google.com/go/pubsub v1.2.0 - code.cloudfoundry.org/clock v1.0.0 // indirect - collectd.org v0.3.0 - github.com/Azure/azure-event-hubs-go/v3 v3.2.0 - github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect - github.com/Azure/go-autorest/autorest v0.9.3 - github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 - github.com/BurntSushi/toml v0.3.1 + cloud.google.com/go/bigquery v1.33.0 + cloud.google.com/go/monitoring v1.5.0 + cloud.google.com/go/pubsub v1.23.0 + collectd.org v0.5.0 + github.com/Azure/azure-event-hubs-go/v3 v3.3.17 + github.com/Azure/azure-kusto-go v0.7.0 + github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd + github.com/Azure/go-autorest/autorest v0.11.24 + github.com/Azure/go-autorest/autorest/adal v0.9.18 + github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 + github.com/BurntSushi/toml v0.4.1 + github.com/ClickHouse/clickhouse-go v1.5.4 + github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee - github.com/Microsoft/ApplicationInsights-Go v0.4.2 - github.com/Microsoft/go-winio v0.4.9 // indirect - github.com/Shopify/sarama v1.27.1 - github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect - github.com/aerospike/aerospike-client-go v1.27.0 - github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 + github.com/Shopify/sarama v1.34.1 + github.com/aerospike/aerospike-client-go/v5 v5.7.0 + github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 + github.com/aliyun/alibaba-cloud-sdk-go v1.61.1529 github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 - github.com/apache/thrift v0.12.0 - github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect + github.com/antchfx/jsonquery v1.1.5 + github.com/antchfx/xmlquery v1.3.9 + github.com/antchfx/xpath v1.2.1 + github.com/apache/thrift v0.15.0 github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 - 
github.com/armon/go-metrics v0.3.0 // indirect - github.com/aws/aws-sdk-go v1.34.34 - github.com/benbjohnson/clock v1.0.3 - github.com/bitly/go-hostpool v0.1.0 // indirect - github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 - github.com/caio/go-tdigest v2.3.0+incompatible // indirect - github.com/cenkalti/backoff v2.0.0+incompatible // indirect - github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 - github.com/cockroachdb/apd v1.1.0 // indirect - github.com/containerd/containerd v1.4.1 // indirect - github.com/cornelk/hashmap v1.0.1 // indirect - github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 - github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect - github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect - github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 - github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 - github.com/dimchansky/utfbom v1.1.0 - github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect - github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible - github.com/docker/go-connections v0.3.0 // indirect - github.com/docker/go-units v0.3.3 // indirect - github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 - github.com/eclipse/paho.mqtt.golang v1.2.0 - github.com/ericchiang/k8s v1.2.0 + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 + github.com/aws/aws-sdk-go-v2 v1.16.5 + github.com/aws/aws-sdk-go-v2/config v1.15.7 + github.com/aws/aws-sdk-go-v2/credentials v1.12.5 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.15.8 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.15.7 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.46.0 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.15.7 + github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.13.6 + github.com/aws/smithy-go v1.11.3 + github.com/benbjohnson/clock v1.3.0 + github.com/bmatcuk/doublestar/v3 v3.0.0 + github.com/caio/go-tdigest v3.1.0+incompatible + github.com/cisco-ie/nx-telemetry-proto v0.0.0-20220628142927-f4160bcb943c + github.com/coreos/go-semver v0.3.0 + github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f + github.com/cornelk/hashmap v1.0.1 + github.com/couchbase/go-couchbase v0.1.1 + github.com/denisenkom/go-mssqldb v0.12.0 + github.com/dimchansky/utfbom v1.1.1 + github.com/djherbis/times v1.5.0 + github.com/docker/docker v20.10.14+incompatible + github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 + github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 + github.com/eclipse/paho.mqtt.golang v1.3.5 + github.com/fatih/color v1.13.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 - github.com/go-logfmt/logfmt v0.4.0 - github.com/go-ole/go-ole v1.2.1 // indirect + github.com/go-ldap/ldap/v3 v3.4.1 + github.com/go-logfmt/logfmt v0.5.1 + github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c github.com/go-redis/redis v6.15.9+incompatible - github.com/go-sql-driver/mysql v1.5.0 - github.com/goburrow/modbus v0.1.0 + github.com/go-redis/redis/v8 v8.11.5 + github.com/go-sql-driver/mysql v1.6.0 + github.com/goburrow/modbus v0.1.0 // indirect github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 - github.com/gofrs/uuid 
v2.1.0+incompatible - github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/gofrs/uuid v4.2.0+incompatible + github.com/golang-jwt/jwt/v4 v4.4.2 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec - github.com/golang/protobuf v1.3.5 - github.com/google/go-cmp v0.5.2 + github.com/golang/snappy v0.0.4 + github.com/google/gnxi v0.0.0-20220411075422-cd6b043b7fd0 + github.com/google/go-cmp v0.5.8 github.com/google/go-github/v32 v32.1.0 - github.com/gopcua/opcua v0.1.12 - github.com/gorilla/mux v1.6.2 - github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect - github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 - github.com/hashicorp/consul v1.2.1 - github.com/hashicorp/go-msgpack v0.5.5 // indirect - github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect - github.com/hashicorp/memberlist v0.1.5 // indirect - github.com/hashicorp/serf v0.8.1 // indirect - github.com/influxdata/go-syslog/v2 v2.0.1 - github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 + github.com/google/uuid v1.3.0 + github.com/gopcua/opcua v0.3.3 + github.com/gophercloud/gophercloud v0.25.0 + github.com/gorilla/mux v1.8.0 + github.com/gorilla/websocket v1.5.0 + github.com/gosnmp/gosnmp v1.34.0 + github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9 + github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c + github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec + github.com/hashicorp/consul/api v1.12.0 + github.com/hashicorp/go-uuid v1.0.2 + github.com/influxdata/go-syslog/v3 v3.0.0 + github.com/influxdata/influxdb-observability/common v0.2.22 + github.com/influxdata/influxdb-observability/influx2otel v0.2.21 + github.com/influxdata/influxdb-observability/otel2influx v0.2.22 + github.com/influxdata/line-protocol/v2 v2.2.1 + github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 - github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect - github.com/jackc/pgx v3.6.0+incompatible - github.com/kardianos/service v1.0.0 - github.com/karrick/godirwalk v1.12.0 + github.com/intel/iaevents v1.0.0 + github.com/jackc/pgx/v4 v4.16.1 + github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a + github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca + github.com/jmespath/go-jmespath v0.4.0 + github.com/kardianos/service v1.2.1 + github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee - github.com/kylelemons/godebug v1.1.0 // indirect - github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect - github.com/lib/pq v1.3.0 // indirect - github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe - github.com/miekg/dns v1.0.14 - github.com/mitchellh/go-testing-interface v1.0.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/multiplay/go-ts3 v1.0.0 - github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/nats-io/nats-server/v2 v2.1.4 - github.com/nats-io/nats.go v1.9.1 - github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 - github.com/nsqio/go-nsq v1.0.7 - github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 - github.com/opencontainers/go-digest 
v1.0.0-rc1 // indirect - github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect - github.com/opentracing/opentracing-go v1.0.2 // indirect - github.com/openzipkin/zipkin-go-opentracing v0.3.4 - github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231 + github.com/lxc/lxd v0.0.0-20220624154119-6d73e2a3d0c5 + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 + github.com/mdlayher/apcupsd v0.0.0-20220319200143-473c7b5f3c6a + github.com/microsoft/ApplicationInsights-Go v0.4.4 + github.com/miekg/dns v1.1.50 + github.com/moby/ipvs v1.0.2 + github.com/multiplay/go-ts3 v1.0.1 + github.com/nats-io/nats-server/v2 v2.8.4 + github.com/nats-io/nats.go v1.16.0 + github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1 + github.com/nsqio/go-nsq v1.1.0 + github.com/olivere/elastic v6.2.37+incompatible + github.com/openconfig/gnmi v0.0.0-20200617225440-d2b4e6a45802 + github.com/opentracing/opentracing-go v1.2.0 + github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 + github.com/openzipkin/zipkin-go v0.2.5 + github.com/pborman/ansi v1.0.0 + github.com/pion/dtls/v2 v2.0.13 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_golang v1.12.1 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.9.1 - github.com/prometheus/procfs v0.0.8 + github.com/prometheus/common v0.32.1 + github.com/prometheus/procfs v0.7.3 + github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 + github.com/rabbitmq/amqp091-go v1.3.4 + github.com/riemann/riemann-go-client v0.5.1-0.20211206220514-f58f10cdce16 github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 - github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect - github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect - github.com/shirou/gopsutil v2.20.9+incompatible - github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect - github.com/sirupsen/logrus v1.4.2 - github.com/soniah/gosnmp v1.25.0 - github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 - github.com/stretchr/testify v1.6.1 + github.com/sensu/sensu-go/api/core/v2 v2.14.0 + github.com/shirou/gopsutil/v3 v3.22.4 + github.com/showwin/speedtest-go v1.1.5 + github.com/signalfx/golib/v3 v3.3.43 + github.com/sirupsen/logrus v1.8.1 + github.com/sleepinggenius2/gosmi v0.4.4 + github.com/snowflakedb/gosnowflake v1.6.2 + github.com/stretchr/testify v1.7.4 github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 - github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect - github.com/tidwall/gjson v1.6.0 - github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect - github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect - github.com/vjeantet/grok v1.0.0 - github.com/vmware/govmomi v0.19.0 - github.com/wavefronthq/wavefront-sdk-go v0.9.2 + github.com/testcontainers/testcontainers-go v0.12.0 + github.com/tidwall/gjson v1.14.1 + github.com/tinylib/msgp v1.1.6 + github.com/urfave/cli/v2 v2.3.0 + github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36 + github.com/vjeantet/grok v1.0.1 + github.com/vmware/govmomi v0.28.0 + github.com/wavefronthq/wavefront-sdk-go v0.9.11 github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf - github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect - github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c 
- github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect - go.starlark.net v0.0.0-20200901195727-6e684ef5eeee - golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - golang.org/x/net v0.0.0-20200904194848-62affa334b73 - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a - golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 - golang.org/x/text v0.3.3 - golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect - golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 - gonum.org/v1/gonum v0.6.2 // indirect - google.golang.org/api v0.20.0 - google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 - google.golang.org/grpc v1.28.0 - gopkg.in/fatih/pool.v2 v2.0.0 // indirect + github.com/xdg/scram v1.0.5 + github.com/yuin/goldmark v1.4.1 + go.mongodb.org/mongo-driver v1.9.1 + go.opentelemetry.io/collector/pdata v0.54.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.28.0 + go.opentelemetry.io/otel/metric v0.30.0 + go.opentelemetry.io/otel/sdk/metric v0.28.0 + go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd + golang.org/x/net v0.0.0-20220622184535-263ec571b305 + golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb + golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f + golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664 + golang.org/x/text v0.3.7 + golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 + gonum.org/v1/gonum v0.11.0 + google.golang.org/api v0.84.0 + google.golang.org/genproto v0.0.0-20220623142657-077d458a5694 + google.golang.org/grpc v1.47.0 + google.golang.org/protobuf v1.28.0 gopkg.in/gorethink/gorethink.v3 v3.0.5 - gopkg.in/ldap.v3 v3.1.0 - gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce - gopkg.in/olivere/elastic.v5 v5.0.70 - gopkg.in/yaml.v2 v2.2.8 - gotest.tools v2.2.0+incompatible // indirect - honnef.co/go/tools v0.0.1-2020.1.3 // indirect - k8s.io/apimachinery v0.17.1 // indirect - modernc.org/sqlite v1.7.4 + gopkg.in/olivere/elastic.v5 v5.0.86 + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.24.1 + k8s.io/apimachinery v0.24.1 + k8s.io/client-go v0.24.1 + modernc.org/sqlite v1.17.3 ) -// replaced due to https://github.com/satori/go.uuid/issues/73 -replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible +require ( + cloud.google.com/go v0.102.1 // indirect + cloud.google.com/go/compute v1.6.1 // indirect + cloud.google.com/go/iam v0.3.0 // indirect + code.cloudfoundry.org/clock v1.0.0 // indirect + github.com/Azure/azure-amqp-common-go/v3 v3.2.3 // indirect + github.com/Azure/azure-pipeline-go v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go v61.2.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 // indirect + github.com/Azure/azure-storage-blob-go v0.14.0 // indirect + github.com/Azure/go-amqp v0.17.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.1 
// indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect + github.com/Microsoft/go-winio v0.4.17 // indirect + github.com/Microsoft/hcsshim v0.8.24 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/alecthomas/participle v0.4.1 // indirect + github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 // indirect + github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect + github.com/armon/go-metrics v0.3.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.8 // indirect + github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bitly/go-hostpool v0.1.0 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect + github.com/containerd/cgroups v1.0.3 // indirect + github.com/containerd/containerd v1.5.13 // indirect + github.com/couchbase/gomemcached v0.1.3 // indirect + github.com/couchbase/goutils v0.1.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dchest/siphash v1.1.0 // indirect + github.com/devigned/tab v0.1.1 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/go-connections v0.4.0 + github.com/docker/go-units v0.4.0 // indirect + github.com/eapache/go-resiliency v1.2.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/echlebek/timeproxy v1.0.0 // indirect + github.com/emicklei/go-restful v2.9.5+incompatible // indirect + github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 // indirect + github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect + github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-macaroon-bakery/macaroonpb v1.0.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + 
github.com/go-openapi/swag v0.19.15 // indirect + github.com/go-stack/stack v1.8.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/flatbuffers v2.0.0+incompatible // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-querystring v1.0.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa // indirect + github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/go-hclog v0.16.2 // indirect + github.com/hashicorp/go-immutable-radix v1.2.0 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/serf v0.9.6 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.12.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect + github.com/jackc/pgtype v1.11.0 // indirect + github.com/jaegertracing/jaeger v1.26.0 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/josharian/native v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/juju/webbrowser v1.0.0 // indirect + github.com/julienschmidt/httprouter v1.3.0 // indirect + github.com/klauspost/compress v1.15.6 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-ieproxy v0.0.1 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mdlayher/genetlink v1.2.0 // indirect + github.com/mdlayher/netlink v1.6.0 // indirect + github.com/mdlayher/socket v0.2.3 // indirect + github.com/minio/highwayhash v1.0.2 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/sys/mount v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.5.0 // indirect + github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect + github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/naoina/go-stringutil v0.1.0 // indirect + github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a // indirect + github.com/nats-io/nkeys v0.3.0 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runc v1.1.2 // indirect + github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect + github.com/pborman/uuid v1.2.1 // indirect + github.com/philhofer/fwd v1.1.1 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pierrec/lz4/v4 v4.1.14 // indirect + github.com/pion/logging v0.2.2 // indirect + github.com/pion/transport v0.13.0 // indirect + github.com/pion/udp v0.1.1 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/sftp v1.13.5 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect + github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/rogpeppe/fastuuid v1.2.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e // indirect + github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 // indirect + github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 // indirect + github.com/signalfx/sapm-proto v0.7.2 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/tklauser/numcpus v0.4.0 // indirect + github.com/vishvananda/netlink v1.2.1-beta.2 // indirect + github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect + github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.1 // indirect + github.com/xdg-go/stringprep v1.0.3 // indirect + github.com/xdg/stringprep v1.0.3 // indirect + github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect + github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + go.etcd.io/etcd/api/v3 v3.5.0 // indirect + go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.6.0 // indirect + go.opentelemetry.io/otel/trace v1.7.0 // indirect + go.opentelemetry.io/proto/otlp v0.12.0 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect + golang.org/x/exp v0.0.0-20200513190911-00229845015e // indirect + golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect + golang.org/x/term 
v0.0.0-20220526004731-065cf7ba2467 // indirect + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect + golang.org/x/tools v0.1.11 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 // indirect + google.golang.org/appengine v1.6.7 // indirect + gopkg.in/errgo.v1 v1.0.1 // indirect + gopkg.in/fatih/pool.v2 v2.0.0 // indirect + gopkg.in/fsnotify.v1 v1.4.7 // indirect + gopkg.in/httprequest.v1 v1.2.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.66.6 // indirect + gopkg.in/macaroon-bakery.v2 v2.3.0 // indirect + gopkg.in/macaroon.v2 v2.1.0 // indirect + gopkg.in/sourcemap.v1 v1.0.5 // indirect + gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.2.2 // indirect + k8s.io/klog/v2 v2.60.1 // indirect + k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect + lukechampine.com/uint128 v1.1.1 // indirect + modernc.org/cc/v3 v3.36.0 // indirect + modernc.org/ccgo/v3 v3.16.6 // indirect + modernc.org/libc v1.16.7 // indirect + modernc.org/mathutil v1.4.1 // indirect + modernc.org/memory v1.1.1 // indirect + modernc.org/opt v0.1.1 // indirect + modernc.org/strutil v1.1.1 // indirect + modernc.org/token v1.0.0 // indirect + sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + sigs.k8s.io/yaml v1.2.0 // indirect +) diff --git a/go.sum b/go.sum index 710d8160518d3..22a7209b5d70c 100644 --- a/go.sum +++ b/go.sum @@ -1,206 +1,768 @@ +4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod 
h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.33.0 h1:SrJif2vLSVsCedozwEfR4EpcyagYUrdk6QBZR+TKqEQ= +cloud.google.com/go/bigquery v1.33.0/go.mod h1:TJTAVrBeuDft/vFrDJfmM/Oq+eJ0rIeDkkJ2+nar1oU= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/datacatalog v1.3.0 h1:3llKXv7cC1acsWjvWmG0NQQkYVSVgunMSfVk7h6zz8Q= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= 
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/kms v1.4.0 h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/monitoring v1.5.0 h1:ZltYv8e69fJVga7RTthUBGdx4+Pwz6GRF1V3zylERl4= +cloud.google.com/go/monitoring v1.5.0/go.mod h1:/o9y8NYX5j91JjD/JvGLYbi86kL11OjyJXq2XziLJu4= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.23.0 h1:DAaD+gG3tUkdq/VdOuI9s4g84w8ia7z/CcLkhprcMew= +cloud.google.com/go/pubsub v1.23.0/go.mod h1:XUpUURgUDXYVGARZBmwHbfcVdMo4EVtRhSLlzBbmmf0= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.22.1 h1:F6IlQJZrZM++apn9V5/VfS3gbTUYg98PS3EMQAzqtfg= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= -collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= +collectd.org v0.5.0 h1:y4uFSAuOmeVhG3GCRa3/oH+ysePfO/+eGJNfd0Qa3d8= +collectd.org v0.5.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= -github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= -github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Azure/azure-amqp-common-go/v3 v3.2.3 
h1:uDF62mbd9bypXWi19V1bN5NZEO84JqgmI5G73ibAmrk=
+github.com/Azure/azure-amqp-common-go/v3 v3.2.3/go.mod h1:7rPmbSfszeovxGfc5fSAXE4ehlXQZHpMja2OtxC2Tas=
+github.com/Azure/azure-event-hubs-go/v3 v3.3.17 h1:9k2yRMBJWgcIlSNBuKVja2af/oR3oMowqFPpHDV5Kl4=
+github.com/Azure/azure-event-hubs-go/v3 v3.3.17/go.mod h1:R5H325+EzgxcBDkUerEwtor7ZQg77G7HiOTwpcuIVXY=
+github.com/Azure/azure-kusto-go v0.7.0 h1:vc6avA4df8b1c1uwYgVv02wvhhSiHtaqtC/9sOw+lDY=
+github.com/Azure/azure-kusto-go v0.7.0/go.mod h1:PrnIeDgVjBc1Jv1dwOuL1om6/zIWQ7SbIBlx+/9UdWc=
github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
-github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg=
github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
-github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68=
-github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-storage-blob-go v0.6.0 h1:SEATKb3LIHcaSIX+E6/K4kJpwfuozFEsmt5rS56N6CE=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v52.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v61.2.0+incompatible h1:sSormXkfW0ov1vh6ihTBRQxdfg73fPqkccl50GbR9iM=
+github.com/Azure/azure-sdk-for-go v61.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 h1:E+m3SkZCN0Bf5q7YdTs5lSm2CYY3CK4spn5OmUIiQtk=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
-github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ=
-github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8=
-github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY=
-github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
+github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM=
+github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
+github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd h1:b3wyxBl3vvr15tUAziPBPK354y+LSdfPCpex5oBttHo=
+github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8=
+github.com/Azure/go-amqp v0.17.0 h1:HHXa3149nKrI0IZwyM7DRcRy5810t9ZICDutn4BYzj4=
+github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
-github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
-github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= -github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/Azure/go-ntlmssp
v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/sketches-go v0.0.0-20190923095040-43f19ad77ff7/go.mod h1:Q5DbzQ+3AkgGwymQO7aZFNP7ns2lZKGtvRBzRXfdi60= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.4/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/Flaque/filet v0.0.0-20201012163910-45f684403088 h1:PnnQln5IGbhLeJOi6hVs+lCeF+B1dRfFKPGXUAez0Ww= +github.com/Flaque/filet v0.0.0-20201012163910-45f684403088/go.mod h1:TK+jB3mBs+8ZMWhU5BqZKnZWJ1MrLo8etNVg51ueTBo= +github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= +github.com/HdrHistogram/hdrhistogram-go v1.0.1/go.mod h1:BWJ+nMSHY3L41Zj7CA3uXnloDp7xxV0YvstAE7nKTaM= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= -github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg= -github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= -github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ= -github.com/Microsoft/go-winio v0.4.9/go.mod
h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.24 h1:jP+GMeRXIR1sH1kG4lJr9ShmSjVrua5jmFZDtfYGkn4= +github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod
h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJde7bIEo5N4J+ZbLhp0J1Fs+ulyRws4gE= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.27.1 h1:iUlzHymqWsITyttu6KxazcAz8WEj5FqcwFK/oEi7rE8= -github.com/Shopify/sarama v1.27.1/go.mod h1:g5s5osgELxgM+Md9Qni9rzo7Rbt+vvFQI4bt/Mc93II= +github.com/Shopify/sarama v1.22.2-0.20190604114437-cd910a683f9f/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= +github.com/Shopify/sarama v1.34.1 h1:pVCQO7BMAK3s1jWhgi5v1W6lwZ6Veiekfc2vsgRS06Y= +github.com/Shopify/sarama v1.34.1/go.mod h1:NZSNswsnStpq8TUdFaqnpXm2Do6KRzTIjdBdVlL1YRM= github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= -github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE= -github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ= +github.com/Shopify/toxiproxy/v2 v2.4.0 h1:O1e4Jfvr/hefNTNu+8VtdEG5lSeamJRo4aKhMOKNM64= +github.com/Shopify/toxiproxy/v2 v2.4.0/go.mod h1:3ilnjng821bkozDRxNoo64oI/DKqM+rOyJzb564+bvg= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aerospike/aerospike-client-go/v5 v5.7.0 h1:Olgq011scnhKlGxo4AcGSXI8JRLF0aSEdl1PhjmKTUo= +github.com/aerospike/aerospike-client-go/v5 v5.7.0/go.mod h1:rJ/KpmClE7kiBPfvAPrGw9WuNOiz8v2uKbQaUyYPXtI= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/go-thrift v0.0.0-20170109061633-7914173639b2/go.mod h1:CxCgO+NdpMdi9SsTlGbc0W+/UNxO3I0AabOEJZ3w61w= +github.com/alecthomas/kong v0.2.1/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI= +github.com/alecthomas/participle v0.4.1 h1:P2PJWzwrSpuCWXKnzqvw0b0phSfH1kJo4p2HvLynVsI= +github.com/alecthomas/participle v0.4.1/go.mod h1:T8u4bQOSMwrkTWOSyt8/jSFPEnRtd0FKFMjVfYBlqPs= +github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= +github.com/alecthomas/repr v0.0.0-20210301060118-828286944d6a/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= +github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1529 h1:qAt5MZ3Ukwx/JMAiaagQhNQMBZLcmJbnweBoK3EeHxI= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.1529/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= -github.com/apache/thrift v0.12.0 
h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/antchfx/jsonquery v1.1.5 h1:1YWrNFYCcIuJPIjFeOP5b6TXbLSUYY8qqxWbuZOB1qE= +github.com/antchfx/jsonquery v1.1.5/go.mod h1:RtMzTHohKaAerkfslTNjr3Y9MdxjKlSgIgaVjVKNiug= +github.com/antchfx/xmlquery v1.3.9 h1:Y+zyMdiUZ4fasTQTkDb3DflOXP7+obcYEh80SISBmnQ= +github.com/antchfx/xmlquery v1.3.9/go.mod h1:wojC/BxjEkjJt6dPiAqUzoXO5nIMWtxHS8PD8TmN4ks= +github.com/antchfx/xpath v1.1.7/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= +github.com/antchfx/xpath v1.2.0/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xpath v1.2.1 h1:qhp4EW6aCOVr5XIkT+l6LJ9ck/JsUH/yyauNgTQkBF8= +github.com/antchfx/xpath v1.2.1/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antonmedv/expr v1.8.9/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= +github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4 h1:nPUln5QTzhftSpmld3xcXw/GOJ3z1E8fR8tUrrc0YWk= +github.com/apache/arrow/go/arrow v0.0.0-20211006091945-a69884db78f4/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.1/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.2/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.15.0 h1:aGvdaR0v1t9XLgjtBYwxcBvBOTMqClzwE26CHOgjW1Y= +github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/apex/log v1.6.0/go.mod h1:x7s+P9VtvFBXge9Vbn+8TrqKmuzmD35TTkeBHul8UtY= +github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod 
h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI= -github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= +github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= +github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/ashanbrown/forbidigo v1.1.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v0.0.0-20201205152432-7b7cdbb3025a/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= +github.com/aws/aws-sdk-go-v2 v1.16.5 h1:Ah9h1TZD9E2S1LzHpViBO3Jz9FPL5+rmflmb8hXirtI= +github.com/aws/aws-sdk-go-v2 v1.16.5/go.mod h1:Wh7MEsmEApyL5hrWzpDkba4gwAPc5/piwLVLFnCxp48= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.2 h1:LFOGNUQxc/8BlhA4FD+JdYjJKQK6tsz9Xiuh+GUTKAQ= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.2/go.mod h1:u/38zebMi809w7YFnqY/07Tw/FSs6DGhPD95Xiig7XQ= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= +github.com/aws/aws-sdk-go-v2/config v1.8.2/go.mod h1:r0bkX9NyuCuf28qVcsEMtpAQibT7gA1Q0gzkjvgJdLU= +github.com/aws/aws-sdk-go-v2/config v1.15.7 h1:PrzhYjDpWnGSpjedmEapldQKPW4x8cCNzUI8XOho1CM= +github.com/aws/aws-sdk-go-v2/config v1.15.7/go.mod h1:exERlvqx1OoUHrxQpMgrmfSW0H6B1+r3xziZD3bBXRg= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= +github.com/aws/aws-sdk-go-v2/credentials v1.4.2/go.mod h1:9Sp6u121/f0NnvHyhG7dgoYeUTEFC2vsvJqJ6wXpkaI= +github.com/aws/aws-sdk-go-v2/credentials v1.12.2/go.mod h1:/XWqDVuzclEKvzileqtD7/t+wIhOogv//6JFlKEe0Wc= +github.com/aws/aws-sdk-go-v2/credentials v1.12.5 h1:WNNCUTWA0vyMy5t8LfS4iB7QshsW0DsHS/VdhyCGZWM= +github.com/aws/aws-sdk-go-v2/credentials v1.12.5/go.mod h1:DOcdLlkqUiNGyXnjWgspC3eIAdXhj8q0pO1LiSvrTI4= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.5.1/go.mod h1:W1ldHfsgeGlKpJ4xZMKZUI6Wmp6EAstU7PxnhbXWWrI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5/go.mod h1:WAPnuhG5IQ/i6DETFl5NmX3kKqCzw7aau9NHAGcm4QE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6 h1:+NZzDh/RpcQTpo9xMFUgkseIam6PC+YJbdhbQp1NOXI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6/go.mod h1:ClLMcuQA/wcHPmOIfNzNI4Y1Q0oDbmEkbYhMFOzHDh8= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3 h1:0O72494cCsazjpsGfo+LXezru6PMSp0HUB1m5UfpaRU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.3/go.mod h1:claNkz2j/N/AZceFcAbR0NyuWnrn+jCYpI+6Ozjsc0k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 h1:Zt7DDk5V7SyQULUUwIKzsROtVzp/kVvcz15uQx/Tkow= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12/go.mod
h1:Afj/U8svX6sJ77Q+FPWMzabJ9QjbwP32YlopgKALUpg= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 h1:eeXdGVtXEe+2Jc49+/vAzna3FAQnUD4AagAw8tzbmfc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6/go.mod h1:FwpAKI+FBPIELJIdmQzlLtRe8LQSOreMcM2wBsPMvvc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.3/go.mod h1:EES9ToeC3h063zCFDdqWGnARExNdULPaBvARm1FLwxA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12 h1:j0VqrjtgsY1Bx27tD0ysay36/K4kFMWRp9K3ieO9nLU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12/go.mod h1:00c7+ALdPh4YeEUPXJzyU0Yy01nPGOq2+9rUaz05z9g= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0 h1:vXZPcDQg7e5z2IKz0huei6zhfAxDoZdXej2o3jUbjCI= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.7.0/go.mod h1:BlrFkwOhSgESkbdS+zJBy4+1mQ3f3Fq9Gp8nT+gaSwk= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.15.8 h1:S61ei29N1W3Mj3QFTJDxKE0nF+jgD2hUQ4UVbUsoq4M= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.15.8/go.mod h1:s1VB5n8Ak2Kve6EeCsLu0vTR864sethcgRSBQJG0DBg= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.15.7 h1:Ls6kDGWNr3wxE8JypXgTTonHpQ1eRVCGNqaFHY2UASw= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.15.7/go.mod h1:+v2jeT4/39fCXUQ0ZfHQHMMiJljnmiuj16F03uAd9DY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.46.0 h1:pG2i0g+jToeZrjHXXMFWNEG/g3OLXTnwlM5PHLH4Vds= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.46.0/go.mod h1:M7k8Xgr0AsECwnDcfxXhGyDZ6ozYWLFZwb4ztT46+tI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.2 h1:T/ywkX1ed+TsZVQccu/8rRJGxKZF/t0Ivgrb4MHTSeo= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.2/go.mod h1:RnloUnyZ4KN9JStGY1LuQ7Wzqh7V0f8FinmRdHYtuaA= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.6 h1:JGrc3+kkyr848/wpG2+kWuzHK3H4Fyxj2jnXj8ijQ/Y= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.6/go.mod h1:zwvTysbXES8GDwFcwCPB8NkC+bCdio1abH+E+BRe/xg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.1/go.mod h1:Ve+eJOx9UWaT/lMVebnFhDhO49fSLVedHoA82+Rqme0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5/go.mod h1:ZbkttHXaVn3bBo/wpJbQGiiIWR90eTBUVBrEHUEQlho= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 h1:0ZxYAZ1cn7Swi/US55VKciCE6RhRHIwCKIWaMLdT6pg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6/go.mod h1:DxAPjquoEHf3rUHh1b9+47RAaXB8/7cB6jkzCt/GOEI= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1 h1:YEz2KMyqK2zyG3uOa0l2xBc/H6NUVJir8FhwHQHF3rc= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.1/go.mod h1:yg4EN/BKoc7+DLhNOxxdvoO3+iyW2FuynvaKqLcLDUM= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.15.7 h1:1dF+Y/DV5jqMuStT02Wr7eIm6K/QFhHZ/EMvtdbafBk= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.15.7/go.mod h1:+4Ux150G3wNqJnrxv5tm9z4qygMasqWdqqVfW8Pyg/c= +github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0 h1:dt1JQFj/135ozwGIWeCM3aQ8N/kB3Xu3Uu4r9zuOIyc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.16.0/go.mod h1:Tk23mCmfL3wb3tNIeMk/0diUZ0W4R6uZtjYKguMLW2s= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.1/go.mod h1:ycPdbJZlM0BLhuBnd80WX9PucWPG88qps/2jl9HugXs= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.5/go.mod h1:TFVe6Rr2joVLsYQ1ABACXgOC6lXip/qpX2x5jWg/A9w= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.8 h1:GNIdO14AHW5CgnzMml3Tg5Fy/+NqPQvnh1HsC1zpcPo= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.8/go.mod h1:UqRD9bBt15P0ofRyDZX6CfsIqPpzeHOhZKWzgSuAzpo= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.1/go.mod h1:r1i8QwKPzwByXqZb3POQfBs7jozrdnHz8PVbsvyx73w= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.6/go.mod h1:rP1rEOKAGZoXp4iGDxSXFvODAtXpm34Egf0lL0eshaQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 h1:HLzjwQM9975FQWSF3uENDGHT1gFQm/q3QXu2BYIcI08= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.7/go.mod h1:lVxTdiiSHY3jb1aeg+BBFtDzZGSUCv6qaNOyEGCJ1AY= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.13.6 h1:HwNzaXr3lHe3YPEyyx7Fh41CZplz6G1YqB3OR0FJ2iw= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.13.6/go.mod h1:akrYtxss20JAwAF/VzsUJRHf210HwuLZpUy1Njrgpe0= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= +github.com/aws/smithy-go v1.11.3 h1:DQixirEFM9IaKxX1olZ3ke3nvxRS2xMDteKIDWxozW8= +github.com/aws/smithy-go v1.11.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk= +github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= +github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/caio/go-tdigest
v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY= -github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= -github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= -github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bombsimon/wsl/v3 v3.2.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= +github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.0.2/go.mod
h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.6/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= -github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/cisco-ie/nx-telemetry-proto v0.0.0-20220628142927-f4160bcb943c 
h1:k3y2XtIffIk230a+e0d7vbs5ebTvH3OcCMKN/jS6IAY= +github.com/cisco-ie/nx-telemetry-proto v0.0.0-20220628142927-f4160bcb943c/go.mod h1:rJDd05J5hqWVU9MjJ+5jw1CuLn/jRhvU0xtFEzzqjwM= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= +github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod
h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod
h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cornelk/hashmap v1.0.1 h1:RXGcy29hEdLLV8T6aK4s+BAd4tq4+3Hq50N2GoG0uIg= github.com/cornelk/hashmap v1.0.1/go.mod
h1:8wbysTUDnwJGrPZ1Iwsou3m+An6sldFrJItjRhfegCw= -github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= -github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= -github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4= -github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= -github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= -github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= +github.com/couchbase/go-couchbase v0.1.1 h1:ClFXELcKj/ojyoTYbsY34QUrrYCBi/1G749sXSCkdhk= +github.com/couchbase/go-couchbase v0.1.1/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A= +github.com/couchbase/gomemcached v0.1.3 h1:HIc5qMYNbuhB7zNaiEtj61DCYkquAwrQlf64q7JzdEY= +github.com/couchbase/gomemcached v0.1.3/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= +github.com/couchbase/goutils v0.1.0 h1:0WLlKJilu7IBm98T8nS9+J36lBFVLRUSIUtyD/uWpAE= +github.com/couchbase/goutils v0.1.0/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/siphash v1.1.0 h1:1Rs9eTUlZLPBEvV+2sTaM8O0NWn0ppbgqS7p11aWawI= github.com/dchest/siphash v1.1.0/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4= -github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o= -github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= +github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= +github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E= +github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE= +github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= -github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200624174652-8d2f3be8b2d9/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/digitalocean/godo v1.58.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg= -github.com/docker/distribution 
v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible h1:SiUATuP//KecDjpOK2tvZJgeScYAklvyjfK8JZlU6fo= -github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU= +github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.14+incompatible h1:+T9/PRYWNDo5SZl5qS1r9Mo/0Q8AwxKKPtu9S1yxM0w= +github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ= -github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60 h1:27379cxrsKlr7hAnW/xrusefspUPjqHVRW1K/bZgfGw= +github.com/doclambda/protobufquery v0.0.0-20210317203640-88ffabe06a60/go.mod h1:8Ia4zp86glrUhC29AAdK9hwTYh8RB6v0WRCtpplYqEg= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU= +github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ= +github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0 h1:wHGPJSXvwKQVf/XfhjUPyrhpcPKWNy8F3ikH+eiwoBg= +github.com/dynatrace-oss/dynatrace-metric-utils-go v0.5.0/go.mod h1:PseHFo8Leko7J4A/TfZ6kkHdkzKBLUta6hRZR/OEbbc= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
@@ -208,507 +770,1780 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= +github.com/echlebek/crock v1.0.1 h1:KbzamClMIfVIkkjq/GTXf+N16KylYBpiaTitO3f1ujg= +github.com/echlebek/crock v1.0.1/go.mod h1:/kvwHRX3ZXHj/kHWJkjXDmzzRow54EJuHtQ/PapL/HI= +github.com/echlebek/timeproxy v1.0.0
h1:V41/v8tmmMDNMA2GrBPI45nlXb3F7+OY+nJz1BqKsCk= +github.com/echlebek/timeproxy v1.0.0/go.mod h1:0dg2Lnb8no/jFwoMQKMTU6iAivgoMptGqSTprhnrRtk= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= +github.com/eclipse/paho.mqtt.golang v1.3.5 h1:sWtmgNxYM9P2sP+xEItMozsR3w0cqZFlqnNN1bdl41Y= +github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI= -github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90 h1:WXb3TSNmHp2vHoCroCIB1foO/yQ36swABL8aOVeDpgg= +github.com/esimonov/ifshort v1.0.1/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk= +github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 h1:fmFk0Wt3bBxxwZnu48jqMdaOR/IZ4vdtJFuaFV8MpIE= +github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3/go.mod h1:bJWSKrZyQvfTnb2OudyUjurSG4/edverV7n82+K3JiM= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk= -github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.0.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k= +github.com/frankban/quicktest v1.1.0/go.mod h1:R98jIehRai+d1/3Hv2//jOVCTJhW1VBavT6B6CuGq2k= +github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= +github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/frankban/quicktest v1.7.3/go.mod h1:V1d2J5pfxYH6EjBAgSK7YNXcXlTWxUHdE1sVDXkjnig= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.11.0/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s= +github.com/frankban/quicktest v1.11.3/go.mod 
h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.13.0 h1:yNZif1OkDfNoDfb9zZa9aXIpejNR4F23Wely0c+Qdqk= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8= -github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= +github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-critic/go-critic v0.5.4/go.mod h1:cjB4YGw+n/+X8gREApej7150Uyy1Tg8If6F2XOAUXNE= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 
h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= +github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1 h1:wSt/4CYxs70xbATrGXhokKF1i0tZjENLOo1ioIO13zk= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501 h1:C1JKChikHGpXwT5UQDFaryIpDtyyGL/CR6C2kB7F1oc= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87 h1:zP3nY8Tk2E6RTkqGYrarZXuzh+ffyLDljLxCy1iJw80= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-macaroon-bakery/macaroonpb v1.0.0 h1:It9exBaRMZ9iix1iJ6gwzfwsDE6ExNuwtAJ9e09v6XE= +github.com/go-macaroon-bakery/macaroonpb v1.0.0/go.mod h1:UzrGOcbiwTXISFP2XDLDPjfhMINZa+fX/7A2lMd31zc= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= +github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= +github.com/go-openapi/analysis v0.20.1/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference 
v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= +github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= +github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/runtime v0.19.28/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= +github.com/go-openapi/spec v0.20.2/go.mod h1:RW6Xcbs6LOyWLU/mXGdzn2Qc+3aj+ASfI7rvSZh1Vls= +github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt 
v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= +github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= +github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c h1:fWdhUpCuoeNIPiQ+pkAmmERYEjhVx5/cbVGK7T99OkI= +github.com/go-ping/ping v0.0.0-20210201095549-52eed920f98c/go.mod h1:35JbSyV/BYqHwwRA6Zr1uVDm1637YlNOU61wI797NPI= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-redis/redis/v8 v8.0.0-beta.6/go.mod h1:g79Vpae8JMzg5qjk8BiwU9tK+HmU3iDVyS4UAJLFycI= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod 
h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod 
h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro= github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg= github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA= github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA= -github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gocql/gocql v0.0.0-20200228163523-cd4b606dd2fb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= 
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/protobuf 
v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod 
h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.38.0/go.mod h1:Knp/sd5ATrVp7EOzWzwIIFH+c8hUfpW+oOQb8NvdZDo= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/addlicense v0.0.0-20200906110928-a0294312aa76/go.mod h1:EMjYTRimagHs1FwlIqKyX3wAM0u3rA+McvlIIWmSamA= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnxi v0.0.0-20220411075422-cd6b043b7fd0 h1:Ef2sJA0zQvsviQ13sXiidv+SIkE68t3oy4wfXAV66j0= +github.com/google/gnxi v0.0.0-20220411075422-cd6b043b7fd0/go.mod h1:dPTuHPVOqxZ2yGKPjymiMt1vrZa8KHXWKX+Lx1z5d88= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II=
github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12 h1:TgXhFz35pKlZuUz1pNlOKk1UCSXPpuUIc144Wd7SxCA=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210323184331-8eee2492667d/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/protobuf v3.11.4+incompatible/go.mod h1:lUQ9D1ePzbH2PrIS7ob/bjm9HXyH5WHB0Akwh7URreM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa h1:7MYGT2XEMam7Mtzv1yDUYXANedWvwk3HKkR3MyGowy8=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/gopcua/opcua v0.1.12 h1:TenluCr1CPB1NHjb9tX6yprc0eUmthznXxSc5mnJPBo=
-github.com/gopcua/opcua v0.1.12/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZLQyE8=
-github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ=
+github.com/gopcua/opcua v0.3.3 h1:tu1t/mx9fJybry1KYljqDdzxmik+BZk6410LJ4QzM2E=
+github.com/gopcua/opcua v0.3.3/go.mod h1:n/qSWDVB/KSPIG4vYhBSbs5zdYAW3yOcDCRrWd1BZo0=
+github.com/gophercloud/gophercloud v0.16.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
+github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4=
+github.com/gophercloud/gophercloud v0.25.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
+github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gosnmp/gosnmp v1.34.0 h1:p96iiNTTdL4ZYspPC3leSKXiHfE1NiIYffMu9100p5E=
+github.com/gosnmp/gosnmp v1.34.0/go.mod h1:QWTRprXN9haHFof3P96XTDYc46boCGAh5IXp0DniEx4=
+github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw=
+github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0=
+github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI=
+github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
+github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak=
+github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9 h1:Q7e9kXS3sRbTjsNDKazbcbDSGAKjFdk096M3qYbwNpE=
+github.com/grid-x/modbus v0.0.0-20211113184042-7f2251c342c9/go.mod h1:qVX2WhsI5xyAoM6I/MV1bXSKBPdLAjp7pCvieO/S0AY=
+github.com/grid-x/serial v0.0.0-20191104121038-e24bc9bf6f08/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk=
+github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa h1:Rsn6ARgNkXrsXJIzhkE4vQr5Gbx2LvtEMv4BJOK4LyU=
+github.com/grid-x/serial v0.0.0-20211107191517-583c7356b3aa/go.mod h1:kdOd86/VGFWRrtkNwf1MPk0u1gIjc4Y7R2j7nhwc7Rk=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.14.5/go.mod h1:UJ0EZAp832vCd54Wev9N1BMKEyvcZ5+IM0AwDrnlkEc=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
+github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c h1:befb5xGUwNCoBuN/akLFCKekUzr0ixyws3aAX/7TaOk=
+github.com/gwos/tcg/sdk v0.0.0-20211223101342-35fbd1ae683c/go.mod h1:OjlJNRXwlEjznVfU3YtLWH8FyM7KWHUevXDI47UeZeM=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
-github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ=
-github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ=
-github.com/hashicorp/consul v1.2.1 h1:66MuuTfV4aOXTQM7cjAIKUWFOITSk4XZlMhE09ymVbg=
-github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
+github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec h1:ya+kv1eNnd5QhcHuaj5g5eMq5Ra3VCNaPY2ZI7Aq91o=
+github.com/harlow/kinesis-consumer v0.3.6-0.20210911031324-5a873d6e9fec/go.mod h1:FIT1uhdVv2iXO0l6aACPZSVHxdth7RdmoT34jk9MEm0=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk=
+github.com/hashicorp/consul/api v1.12.0 h1:k3y1FYv6nuKyNTqj6w9gXOx5r5CfLj/k/euUeBXj1OY=
+github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.15.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
+github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-plugin v1.4.0/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
+github.com/hashicorp/go-plugin v1.4.2/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E=
-github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg=
-github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM=
-github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4=
-github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA=
+github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc=
+github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hetznercloud/hcloud-go v1.24.0/go.mod h1:3YmyK8yaZZ48syie6xpm3dt26rtB6s65AisBHylXYFA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s=
-github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo=
-github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 h1:K3A5vHPs/p8OjI4SL3l1+hs/98mhxTVDcV1Ap0c265E=
-github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
+github.com/influxdata/go-syslog/v3 v3.0.0 h1:jichmjSZlYK0VMmlz+k4WeOQd7z745YLsvGMqwtYt4I=
+github.com/influxdata/go-syslog/v3 v3.0.0/go.mod h1:tulsOp+CecTAYC27u9miMgq21GqXRW6VdKbOG+QSP4Q=
+github.com/influxdata/influxdb v1.8.4/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
+github.com/influxdata/influxdb-observability/common v0.2.22 h1:QXVvuY/VgaTrC5gALrQntPC/eQ5txg1GJZ4OI4pScnc=
+github.com/influxdata/influxdb-observability/common v0.2.22/go.mod h1:bW4ZANSxOnZu49ZhxiqKRyiFj+pePFII8+JGzvbtTTU=
+github.com/influxdata/influxdb-observability/influx2otel v0.2.21 h1:tmmLtJBEOU04+CLc1WsPSWVVW+BTmdmAlpgROlY/zog=
+github.com/influxdata/influxdb-observability/influx2otel v0.2.21/go.mod h1:Bg6Pi3swyEv3kvl/+3odBLG+LzPXS38IW5EkW3Qs3wg=
+github.com/influxdata/influxdb-observability/otel2influx v0.2.22 h1:GbugqGRMSttCEzfJaQjh/QSY1g2q642aa/TCUo++GT8=
+github.com/influxdata/influxdb-observability/otel2influx v0.2.22/go.mod h1:dlIflDYQNuDmEy0iuBE31VJnLJxj5SkEXZ0/2HDOeE0=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/line-protocol-corpus v0.0.0-20210519164801-ca6fa5da0184/go.mod h1:03nmhxzZ7Xk2pdG+lmMd7mHDfeVOYFyhOgwO61qWU98=
+github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937 h1:MHJNQ+p99hFATQm6ORoLmpUCF7ovjwEFshs/NHzAbig=
+github.com/influxdata/line-protocol-corpus v0.0.0-20210922080147-aa28ccfb8937/go.mod h1:BKR9c0uHSmRgM/se9JhFHtTT7JTO67X23MtKMHtZcpo=
+github.com/influxdata/line-protocol/v2 v2.0.0-20210312151457-c52fdecb625a/go.mod h1:6+9Xt5Sq1rWx+glMgxhcg2c0DUaehK+5TDcPZ76GypY=
+github.com/influxdata/line-protocol/v2 v2.1.0/go.mod h1:QKw43hdUBg3GTk2iC3iyCxksNj7PX9aUSeYOYE/ceHY=
+github.com/influxdata/line-protocol/v2 v2.2.1 h1:EAPkqJ9Km4uAxtMRgUubJyqAr6zgWM0dznKMLRauQRE=
+github.com/influxdata/line-protocol/v2 v2.2.1/go.mod h1:DmB3Cnh+3oxmG6LOBIxce4oaL4CPj3OmMPgvauXh+tM=
+github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
+github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
+github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7 h1:0rQOs1VHLVFpAAOIR0mJEvVOIaMYFgYdreeVbgI9sII=
+github.com/influxdata/tail v1.0.1-0.20210707231403-b283181d1fa7/go.mod h1:VeiWgI3qaGdJWust2fP27a6J+koITo/1c/UhxeOxgaM=
+github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY=
github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8=
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q=
github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI=
-github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
-github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
-github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=
-github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/intel/iaevents v1.0.0 h1:J8lETV13FMImV0VbOrKhkA790z7+cAHQ/28gbiefu7E=
+github.com/intel/iaevents v1.0.0/go.mod h1:nFsAQmrbF6MoZUomrSl4jgmHhe0SrLxTGtyqvqU2X9Y=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.12.1 h1:rsDFzIpRk7xT4B8FufgpCCeyjdNpKyghZeSefViE5W8=
+github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono=
+github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.0 h1:brH0pCGBDkBW07HWlN/oSBXrmo3WB0UvZd1pIuDcL8Y=
+github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
+github.com/jackc/pgtype v1.11.0 h1:u4uiGPz/1hryuXzyaBhSk6dnIyyG2683olG2OV+UUgs=
+github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
+github.com/jackc/pgx/v4 v4.16.1 h1:JzTglcal01DrghUqt+PmzWsZx/Yh7SC/CTQmSBMTd0Y=
+github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jaegertracing/jaeger v1.22.0/go.mod h1:WnwW68MjJEViSLRQhe0nkIsBDaF3CzfFd8wJcpJv24k=
+github.com/jaegertracing/jaeger v1.23.0/go.mod h1:gB6Qc+Kjd/IX1G82oGTArbHI3ZRO//iUkaMW+gzL9uw=
+github.com/jaegertracing/jaeger v1.26.0 h1:4LbUdb9l/Mx83zYvjLbkrayheX+Aga26NEI+feo3xzA=
+github.com/jaegertracing/jaeger v1.26.0/go.mod h1:SwHsl1PLZVAdkQTPrziQ+4xV9FxzJXRvTDW1YrUIWEA=
+github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0=
+github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA=
+github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
+github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jgautheron/goconst v1.4.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
+github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca h1:a0GZUdb+qnutF8shJxr2qs2qT3fnF+ptxTxPB8+oIvk=
+github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg=
+github.com/jingyugao/rowserrcheck v0.0.0-20210130005344-c6a0c12dd98d/go.mod h1:/EZlaYCnEX24i7qdVhT9du5JrtFWYRQr67bVgR7JJC8=
+github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
+github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk=
+github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
-github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw=
github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ=
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok=
+github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw=
+github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs=
+github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA=
+github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U=
+github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo=
+github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
+github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
+github.com/juju/errors v0.0.0-20220622220526-54a94488269b h1:RDqhIF4b2LKv1CHVw5AKASru2kJ1gnTMJJGhpUNo0LQ=
+github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
+github.com/juju/mgotest v1.0.1/go.mod h1:vTaDufYul+Ps8D7bgseHjq87X8eu0ivlKLp9mVc/Bfc=
+github.com/juju/postgrestest v1.1.0/go.mod h1:/n17Y2T6iFozzXwSCO0JYJ5gSiz2caEtSwAjh/uLXDM=
+github.com/juju/qthttptest v0.0.1/go.mod h1://LCf/Ls22/rPw2u1yWukUJvYtfPY4nYpWUl2uZhryo=
+github.com/juju/qthttptest v0.1.1 h1:JPju5P5CDMCy8jmBJV2wGLjDItUsx2KKL514EfOYueM=
+github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4=
+github.com/juju/schema v1.0.0/go.mod h1:Y+ThzXpUJ0E7NYYocAbuvJ7vTivXfrof/IfRPq/0abI=
+github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
+github.com/juju/webbrowser v0.0.0-20160309143629-54b8c57083b4/go.mod h1:G6PCelgkM6cuvyD10iYJsjLBsSadVXtJ+nBxFAxE2BU=
+github.com/juju/webbrowser v1.0.0 h1:JLdmbFtCGY6Qf2jmS6bVaenJFGIFkdF1/BjUm76af78=
+github.com/juju/webbrowser v1.0.0/go.mod h1:RwVlbBcF91Q4vS+iwlkJ6bZTE3EwlrjbYlM3WMVD6Bc=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=
+github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/julz/importas v0.0.0-20210226073942-60b4fa260dd0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
-github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0=
-github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo=
-github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y=
-github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
+github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk=
+github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
+github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg=
+github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY=
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk=
-github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s=
+github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U=
+github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4=
-github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U=
+github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg=
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg=
-github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
+github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA=
-github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
+github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
+github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lxc/lxd v0.0.0-20220624154119-6d73e2a3d0c5 h1:Gl5wyLyditoHfdQTJTS2EHTdT62tf0gXv1vurzhPQSY=
+github.com/lxc/lxd v0.0.0-20220624154119-6d73e2a3d0c5/go.mod h1:0tT0HXFQWVeJ3+80CkkuOTZU1oUmFdA3KYZcd3dIy5o=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
+github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.13 h1:1tj15ngiFfcZzii7yd82foL+ks+ouQcj8j/TPq3fk1I=
+github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
+github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk=
-github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw=
-github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
+github.com/mdlayher/apcupsd v0.0.0-20220319200143-473c7b5f3c6a h1:JOlLsLUQnokTyWWwEvOVoKH3XUl6oDMP8jisO54l6J8=
+github.com/mdlayher/apcupsd v0.0.0-20220319200143-473c7b5f3c6a/go.mod h1:960H6oqSawdujauTeLX9BOx+ZdYX0TdG9xE9br5bino=
+github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo=
+github.com/mdlayher/ethtool v0.0.0-20211028163843-288d040e9d60/go.mod h1:aYbhishWc4Ai3I2U4Gaa2n3kHWSwzme6EsG/46HRQbE=
github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
+github.com/mdlayher/genetlink v1.1.0/go.mod h1:1cAHdejIIk9zbWfP3gW30vY1QUlwyuhaqfkyANVVf10=
+github.com/mdlayher/genetlink v1.2.0 h1:4yrIkRV5Wfk1WfpWTcoOlGmsWgQj3OtQN9ZsbrE+XtU=
+github.com/mdlayher/genetlink v1.2.0/go.mod h1:ra5LDov2KrUCZJiAtEvXXZBxGMInICMXIwshlJ+qRxQ=
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
-github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg=
github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY=
-github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
+github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o=
+github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8=
+github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU=
+github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU=
+github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys=
+github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8=
+github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q=
+github.com/mdlayher/netlink v1.4.2/go.mod h1:13VaingaArGUTUxFLf/iEovKxXji32JAtF858jZYEug=
+github.com/mdlayher/netlink v1.6.0 h1:rOHX5yl7qnlpiVkFWoqccueppMtXzeziFjWAjLg6sz0=
+github.com/mdlayher/netlink v1.6.0/go.mod h1:0o3PlBmGst1xve7wQ7j/hwpNaFaH4qCRyWCdcZk8/vA=
+github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc=
+github.com/mdlayher/socket v0.0.0-20211007213009-516dcbdf0267/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g=
+github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g=
+github.com/mdlayher/socket v0.1.1/go.mod h1:mYV5YIZAfHh4dzDVzI8x8tWLWCliuX8Mon5Awbj+qDs=
+github.com/mdlayher/socket v0.2.3 h1:XZA2X2TjdOwNoNPVPclRCURoX/hokBY8nkTmRZFEheM=
+github.com/mdlayher/socket v0.2.3/go.mod h1:bz12/FozYNH/VbvC3q7TRIK/Y6dH1kCKsXaUeXi/FmY=
+github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
+github.com/mgechev/revive v1.0.3/go.mod h1:POGGZagSo/0frdr7VeAifzS5Uka0d0GPiM35MsTO8nE=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc=
+github.com/microsoft/ApplicationInsights-Go v0.4.4 h1:G4+H9WNs6ygSCe6sUyxRc2U81TI5Es90b2t/MwX5KqY=
+github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
+github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws=
github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc=
+github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
+github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs= +github.com/moby/ipvs v1.0.2 h1:NSbzuRTvfneftLU3VwPU5QuA6NZ0IUmqq9+VHcQxqHw= +github.com/moby/ipvs v1.0.2/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= +github.com/moby/moby v1.13.1/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
+github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= -github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs= +github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20201209171846-0547674fceff/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/multiplay/go-ts3 v1.0.1 h1:Ja8ho7UzUDNvNCwcDzPEPimLRub7MUqbD+sgMWkcR0A= +github.com/multiplay/go-ts3 v1.0.1/go.mod h1:WIP3X0efye5ENZdXLu8LV4woCbPoc41wuMHx3EcU5CI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g= -github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= -github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a h1:lem6QCvxR0Y28gth9P+wV2K/zYUUAkJ+55U8cpS0p5I= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.8.4
h1:0jQzze1T9mECg8YZEl8+WYUXb9JKluJfCBriPUtluB4= +github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.16.0 h1:zvLE7fGBQYW6MWaFaRdsgm9qT39PJDQoju+DS8KsO1g= +github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY= -github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20201221231540-e56b841a3c88/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1 h1:6OX5VXMuj2salqNBc41eXKz6K+nV6OB/hhlGnAKCbwU= +github.com/newrelic/newrelic-telemetry-sdk-go v0.8.1/go.mod h1:2kY6OeOxrJ+RIQlVjWDc/pZlT3MIf30prs6drzMfJ6E= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= -github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= +github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= +github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= +github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE= +github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= +github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter
v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olivere/elastic v6.2.35+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic v6.2.37+incompatible h1:UfSGJem5czY+x/LqxgeCBgjDn6St+z8OnsCuxwD3L0U= +github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8= +github.com/olivere/elastic/v7 v7.0.12/go.mod h1:14rWX28Pnh3qCKYRVnSGXWLf9MbLonYS/4FDCY3LAPo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= -github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod
h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/openconfig/gnmi v0.0.0-20200414194230-1597cc0f2600/go.mod h1:M/EcuapNQgvzxo1DDXHK4tx3QpYM/uG4l591v33jG2A= +github.com/openconfig/gnmi v0.0.0-20200508230933-d19cebf5e7be/go.mod h1:M/EcuapNQgvzxo1DDXHK4tx3QpYM/uG4l591v33jG2A= +github.com/openconfig/gnmi v0.0.0-20200617225440-d2b4e6a45802 h1:WXFwJlWOJINlwlyAZuNo4GdYZS6qPX36+rRUncLmN8Q= +github.com/openconfig/gnmi v0.0.0-20200617225440-d2b4e6a45802/go.mod h1:M/EcuapNQgvzxo1DDXHK4tx3QpYM/uG4l591v33jG2A= +github.com/openconfig/goyang v0.0.0-20200115183954-d0a48929f0ea/go.mod h1:dhXaV0JgHJzdrHi2l+w0fZrwArtXL7jEFoiqLEdmkvU= +github.com/openconfig/goyang v0.2.2/go.mod h1:vX61x01Q46AzbZUzG617vWqh/cB+aisc+RrNkXRd3W8= +github.com/openconfig/goyang v0.2.3/go.mod h1:vX61x01Q46AzbZUzG617vWqh/cB+aisc+RrNkXRd3W8= +github.com/openconfig/ygot v0.6.0/go.mod h1:o30svNf7O0xK+R35tlx95odkDmZWS9JyWWQSmIhqwAs= +github.com/openconfig/ygot v0.9.0/go.mod h1:oCQNdXnv7dWc8scTDgoFkauv1wwplJn5HspHcjlxSAQ= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw= +github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.1.6 h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go
v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5 h1:ZCnq+JUrvXcDVhX/xRolRBZifmabN1HcS1wrPSvxhrU= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= -github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= -github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231 h1:fa50YL1pzKW+1SsBnJDOHppJN9stOEwS+CRWyUtyYGU= -github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.5 h1:UwtQQx2pyPIgWYHRg+epgdx1/HnBQTgN3/oIYEJTQzU= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/ory/go-acc v0.2.6/go.mod h1:4Kb/UnPcT8qRAk3IAxta+hvVapdxTLWtrr7bFLlEgpw= +github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pavius/impi v0.0.3/go.mod h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= +github.com/pborman/ansi v1.0.0 h1:OqjHMhvlSuCCV5JT07yqPuJPQzQl+WXsiZ14gZsqOrQ= +github.com/pborman/ansi v1.0.0/go.mod h1:SgWzwMAx1X/Ez7i90VqF8LRiQtx52pWDiQP+x3iGnzw= +github.com/pborman/getopt v0.0.0-20190409184431-ee0cd42419d3/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod
h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.1/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pion/dtls/v2 v2.0.13 h1:toLgXzq42/MEmfgkXDfzdnwLHMi4tfycaQPGkv9tzRE= +github.com/pion/dtls/v2 v2.0.13/go.mod h1:OaE7eTM+ppaUhJ99OTO4aHl9uY6vPrT1gPY27uNTxRY= +github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= +github.com/pion/transport v0.13.0 h1:KWTA5ZrQogizzYwPEciGtHPLwpAjE91FgXnyu+Hv2uY= +github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= +github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o= +github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= +github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v0.0.0-20201127212506-19bd8db6546f/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang 
v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= +github.com/prometheus/procfs 
v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2 h1:AHi2TGs09Mv4v688/bjcY2PfAcu9+p4aPvsgVQ4nYDk= +github.com/prometheus/prometheus v1.8.2-0.20210430082741-2a4b8e12bbf2/go.mod h1:5aBj+GpLB+V5MCnrKm5+JAqEJwzDiLugOmDhgt7sDec= +github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard v0.3.0/go.mod h1:p2miAhLp6fERzFNbcuQ4bevXs8rgK//uCHsUDkumITg= +github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210106184943-e47d54850b18/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.0.0-20210115110123-c73ee1cbff1f/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/rabbitmq/amqp091-go v1.3.4 h1:tXuIslN1nhDqs2t6Jrz3BAoqvt4qIZzxvdbdcxWtHYU= +github.com/rabbitmq/amqp091-go v1.3.4/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/riemann/riemann-go-client v0.5.1-0.20211206220514-f58f10cdce16 h1:bGXoxRwUpPTCaQ86DRE+3wqE9vh3aH8W0HH5L/ygOFM= +github.com/riemann/riemann-go-client v0.5.1-0.20211206220514-f58f10cdce16/go.mod h1:4rS0vfmzOMwfFPhi6Zve4k/59TsBepqd6WESNULE0ho= +github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff h1:+6NUiITWwE5q1KO6SAfUX918c+Tab0+tGAM/mtdlUyA= +github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspoSXJTIpemEonrMDFq6XzwHYYgToXWj5eRX1OtY= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ryancurrah/gomodguard v1.2.0/go.mod h1:rNqbC4TOIdUDcVMSIpNNAzTbzXAZa6W5lnUepvuMMgQ= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= -github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= +github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= +github.com/sanposhiho/wastedassign v0.1.3/go.mod h1:LGpq5Hsv74QaqM47WtIsRSF/ik9kqk07kchgv66tLVE= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil v2.20.9+incompatible h1:msXs2frUV+O/JLva9EDLpuJ84PrFsdCTCQex8PUdtkQ= -github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/securego/gosec v0.0.0-20200203094520-d13bb6d2420c/go.mod h1:gp0gaHj0WlmPh9BdsTmo1aq6C27yIPWdxCKGFGdVKBE= +github.com/securego/gosec/v2 v2.6.1/go.mod h1:I76p3NTHBXsGhybUW+cEQ692q2Vp+A0Z6ZLzDIZy+Ao= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sensu/sensu-go/api/core/v2 v2.14.0 h1:z4JVqy7z6iFgMDUH0uc1Ns0bqLFKTpc5bi4Iw7qweks= +github.com/sensu/sensu-go/api/core/v2 v2.14.0/go.mod h1:XCgUjY78ApTahizBz/pkc5KU17L/E5BexeZHkGDdTls= +github.com/sergi/go-diff v1.0.0/go.mod 
h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v3.21.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= +github.com/shirou/gopsutil/v3 v3.21.9/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ= +github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI= +github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/showwin/speedtest-go v1.1.5 h1:dud1cS2Qppbm50BrTKzrBj78wY78ORL5LIiRjKexmdY= +github.com/showwin/speedtest-go v1.1.5/go.mod h1:dJugxvC/AQDt4HQQKZ9lKNa2+b1c8nzj9IL0a/F8l1U= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2 h1:X886QgwZH5qr9HIQkk3mWcNEhUxx6D8rUZumzLV4Wiw= +github.com/signalfx/com_signalfx_metrics_protobuf v0.0.2/go.mod h1:tCQQqyJAVF1+mxNdqOi18sS/zaSrE6EMyWwRA2QTl70= +github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083 h1:WsShHmu12ZztYPfh9b+I+VjYD1o8iOHhB67WZCMEEE8= +github.com/signalfx/gohistogram v0.0.0-20160107210732-1ccfd2ff5083/go.mod h1:adPDS6s7WaajdFBV9mQ7i0dKfQ8xiDnF9ZNETVPpp7c= +github.com/signalfx/golib/v3 v3.3.43 h1:GvzjE2WaYU3oPhoek52/5zYZ5tPnt05EXUmszSZct+E= +github.com/signalfx/golib/v3 v3.3.43/go.mod h1:LR8eTSda7NzynOqe0ibvV63OuqorWcHDtRCY22zTpKg= +github.com/signalfx/gomemcache v0.0.0-20180823214636-4f7ef64c72a9/go.mod h1:Ytb8KfCSyuwy/VILnROdgCvbQLA5ch0nkbG7lKT0BXw= +github.com/signalfx/sapm-proto v0.7.2 h1:iM/y3gezQm1/j7JBS0gXhEJ8ROeneb6DY7n0OcnvLks= +github.com/signalfx/sapm-proto v0.7.2/go.mod h1:HLufOh6Gd2altGxbeve+s6hh0EWCWoOM7MmuYuvs5PI= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/soniah/gosnmp v1.25.0
h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= -github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sleepinggenius2/gosmi v0.4.4 h1:xgu+Mt7CptuB10IPt3SVXBAA9tARToT4B9xGzjjxQX8= +github.com/sleepinggenius2/gosmi v0.4.4/go.mod h1:l8OniPmd3bJzw0MXP2/qh7AhP/e+bTY2CNivIhsnDT0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/smartystreets/gunit v1.1.3/go.mod h1:EH5qMBab2UclzXUcpR8b93eHsIlp9u+pDQIRp5DZNzQ= +github.com/snowflakedb/gosnowflake v1.6.2 h1:drZkX7Ve3qr3lLD/f0vxwesgJZfNerivknAvPRAMy88= +github.com/snowflakedb/gosnowflake v1.6.2/go.mod h1:k1Wq+O8dRD/jmFBLyStEv2OrgHoMFQpqHCRSy70P0dI= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra
v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ= -github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx
v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.4 h1:wZRexSlwd7ZXfKINDLsO4r7WBt3gTKONc6K/VesHvHM= +github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= -github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= -github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= -github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= -github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/testcontainers/testcontainers-go v0.12.0 h1:SK0NryGHIx7aifF6YqReORL18aGAA4bsDPtikDVCEyg= +github.com/testcontainers/testcontainers-go v0.12.0/go.mod h1:SIndOQXZng0IW8iWU1Js0ynrfZ8xcxrTtDfF6rD2pxs= +github.com/tetafro/godot v1.4.4/go.mod 
h1:FVDd4JuKliW3UgjswZfJfHq4vAx0bD/Jd5brJjGeaz4= +github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo= +github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= +github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tj/go-buffer v1.0.1/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck v0.0.0-20201130113247-1683564d9756/go.mod h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0= +github.com/tommy-muehle/go-mnd/v2 v2.3.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go 
v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= +github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36 h1:JBj2CqnFwBhI3XsdMNn9MjKvehog+p5QZihotqq0Zuo= +github.com/vapourismo/knx-go v0.0.0-20211128234507-8198fa17db36/go.mod h1:AslkIOXnEbVmvzc8uqDjm8ZyIqNJcEPiFRqlokmqr2o= +github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vektra/mockery v0.0.0-20181123154057-e78b021dcbb5/go.mod h1:ppEjwdhyy7Y31EnHRDm1JkChoC7LXIJ7Ex0VYLWtZtQ= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ= -github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= -github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= -github.com/vmware/govmomi v0.19.0/go.mod 
h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk= -github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= +github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= +github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= +github.com/vmware/govmomi v0.28.0 h1:VgeQ/Rvz79U9G8QIKLdgpsN9AndHJL+5iMJLgYIrBGI= +github.com/vmware/govmomi v0.28.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= +github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= +github.com/wavefronthq/wavefront-sdk-go v0.9.11 h1:3qv/yyNNyLKPQftQWFrfHGUv50e/gMxKlUQnILlvHKw= +github.com/wavefronthq/wavefront-sdk-go v0.9.11/go.mod h1:AcW8zJJcYodB7B9KYzoxVH6K0fmYd6MgpmXE1LMo+OU= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q= github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw= +github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod 
h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/yuin/goldmark v1.1.25 h1:isv+Q6HQAmmL2Ofcmg8QauBmDPlUUnSoNhEcC940Rds= +github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= -github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= -go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1 h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da h1:NimzV1aGyq29m5ukMK0AMWEhFaL/lrEOaephfuoiARg= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zenazn/goji v0.9.0/go.mod 
h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.5.2/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1c= +go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.starlark.net v0.0.0-20200901195727-6e684ef5eeee h1:N4eRtIIYHZE5Mw/Km/orb+naLdwAe+lv2HCxRR5rEBw= -go.starlark.net v0.0.0-20200901195727-6e684ef5eeee/go.mod h1:f0znQkUKRrkk36XxWbGjMqQM8wGv/xHBVE2qc3B5oFU= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= +go.opentelemetry.io/collector/pdata v0.54.0 h1:oo3HyHwdf4lJmDUN0yrOGKj2tiHIoXDutDd0HKR++/0= +go.opentelemetry.io/collector/pdata v0.54.0/go.mod h1:1nSelv/YqGwdHHaIKNW9ZOHSMqicDX7W4/7TjNCm6N8= +go.opentelemetry.io/otel v0.7.0/go.mod 
h1:aZMyHG5TqDOXEgH2tyLiXSUKly1jT3yqE9PmrzIeCdo= +go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= +go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.0 h1:XFcfoo+vwXXwopiS7vzwbaFuPplf5GB+WTjaiQXmz3U= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.0/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.28.0 h1:Z/6EfhHQ1vNQLWM2JWv//1lwa3x6xs4Kg3ooX3+ygMg= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.28.0/go.mod h1:H9M+CbJBE0w06C1WfSbhwTW1t/irNU1NPoOwLqbsYdo= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.28.0 h1:xaOMF4Ka4QUM9iFnIIb35ihDajSWZmwv7X5Q8UnK/Pg= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.28.0/go.mod h1:++De7BFy/U/g2iIzRsVxXlbmll5kYLbYlPr+p5fMz28= +go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= +go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c= +go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= +go.opentelemetry.io/otel/sdk v1.6.0 h1:JoriAoiNENuxxIQApR1O0k2h1Md5QegZhbentcRJpWk= +go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= +go.opentelemetry.io/otel/sdk/metric v0.28.0 h1:+1ndwHSiknwZtC8VmXM3xtMsd6kbFxtqti4qevn2J+o= +go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= +go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= +go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= +go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd h1:Uo/x0Ir5vQJ+683GXB9Ug+4fcjsbp7z7Ul8UaZbhsRM= +go.starlark.net v0.0.0-20220328144851-d1966c6b9fcd/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 
h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 
h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -718,135 +2553,414 @@ golang.org/x/lint 
v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20150829230318-ea47fc708ee3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211111083644-e5c967477495/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220622184535-263ec571b305 h1:dAgbJ2SP4jD6XYfMNLVj0BF21jo2PjChrtGaAvF5M3I= +golang.org/x/net v0.0.0-20220622184535-263ec571b305/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210323180902-22b0adad7558/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 
h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211109184856-51b60fd695b3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664 h1:wEZYwx+kK+KlZ0hpvP2Ls1Xr4+RWnlzGFwPP0aiDjIU= +golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20181112210238-4b1f3b6b1646/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -854,33 +2968,105 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200203023011-6f24f261dadb/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM= -golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= 
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210102185154-773b96fafca2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= -golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= -golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= -golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= 
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.zx2c4.com/go118/netip v0.0.0-20211111135330-a4a02eeacf9d/go.mod h1:5yyfuiqVIJ7t+3MqrpTQ+QqRkMWiESiyDvPNvKYCecg= +golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= +golang.zx2c4.com/wireguard v0.0.0-20211129173154-2dd424e2d808/go.mod h1:TjUWrnD5ATh7bFvmm/ALEJZQ4ivKbETb6pmyj1vUoNI= +golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434 h1:3zl8RkJNQ8wfPRomwv/6DBbH2Ut6dgMaWTxM0ZunWnE= +golang.zx2c4.com/wireguard v0.0.0-20211209221555-9c9e7e272434/go.mod h1:TjUWrnD5ATh7bFvmm/ALEJZQ4ivKbETb6pmyj1vUoNI= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671 h1:tJAYx7pB6b5bNqi7XatStqFT2zFAxhXcGDq1R6FqqjU= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211230205640-daad0b7ba671/go.mod h1:Q2XNgour4QSkFj0BWCkVlW0HWJwQgNMsMahpSlI0Eno= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= -gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoyEHbDGPUWjIbnqdqqe1k= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -890,142 +3076,396 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= 
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0 h1:NMB9J4cCxs9xEm+1Z9QiO3eFvn7EnQj3Eo3hN6ugVlg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= 
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE= -google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200519141106-08726f379972/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto 
v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220623142657-077d458a5694 h1:itnFmgk4Ls5nT+mYO2ZK6F0DpKsGZLhB5BB9y5ZL2HA= +google.golang.org/genproto v0.0.0-20220623142657-077d458a5694/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 
v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk= +gopkg.in/errgo.v1 v1.0.1 h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso= +gopkg.in/errgo.v1 v1.0.1/go.mod h1:3NjfXwocQRYAPTq4/fzX+CwUhPRcR/azYRhj8G+LqMo= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= gopkg.in/fsnotify.v1 v1.2.1/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/httprequest.v1 v1.2.0/go.mod h1:T61ZUaJLpMnzvoJDO03ZD8yRXD4nZzBeDoW5e9sffjg= +gopkg.in/httprequest.v1 v1.2.1 h1:pEPLMdF/gjWHnKxLpuCYaHFjc8vAB2wrYjXrqDVC16E= +gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod 
h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.52.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= +gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= -gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= -gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= -gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= +gopkg.in/juju/environschema.v1 v1.0.0/go.mod h1:WTgU3KXKCVoO9bMmG/4KHzoaRvLeoxfjArpgd1MGWFA= +gopkg.in/macaroon-bakery.v2 v2.3.0 h1:b40knPgPTke1QLTE8BSYeH7+R/hiIozB1A8CTLYN0Ic= +gopkg.in/macaroon-bakery.v2 v2.3.0/go.mod h1:/8YhtPARXeRzbpEPLmRB66+gQE8/pzBBkWwg7Vz/guc= +gopkg.in/macaroon.v2 v2.1.0 h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI= +gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= -gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= +gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/olivere/elastic.v5 v5.0.86 h1:xFy6qRCGAmo5Wjx96srho9BitLhZl2fcnpuidPwduXM= +gopkg.in/olivere/elastic.v5 v5.0.86/go.mod h1:M3WNlsF+WhYn7api4D87NIflwTV/c0iVs8cqfWhK+68= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= +gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod 
h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs= +gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39 h1:Oyv66NRkI9fnsvTlaB9foJojt8Lt34vcX8SMNqsvw6U= -honnef.co/go/netdb v0.0.0-20150201073656-a416d700ae39/go.mod h1:rbNo0ST5hSazCG4rGfpHrwnwvzP1QX62WbhzD+ghGzs= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= -k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.1/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= +honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= +k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= +k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= +k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= +k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= +k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -modernc.org/httpfs v1.0.0 h1:LtuKNg6JMiaBKVQHKd6Phhvk+2GFp+pUcmDQgRjrds0= -modernc.org/httpfs v1.0.0/go.mod h1:BSkfoMUcahSijQD5J/Vu4UMOxzmEf5SNRwyXC4PJBEw= -modernc.org/libc v1.3.1 
h1:ZAAaxQZtb94hXvlPMEQybXBLLxEtJlQtVfvLkKOPZ5w= -modernc.org/libc v1.3.1/go.mod h1:f8sp9GAfEyGYh3lsRIKtBh/XwACdFvGznxm6GJmQvXk= -modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= -modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.0.1 h1:bhVo78NAdgvRD4N+b2hGnAwL5RP2+QyiEJDsX3jpeDA= -modernc.org/memory v1.0.1/go.mod h1:NSjvC08+g3MLOpcAxQbdctcThAEX4YlJ20WWHYEhvRg= -modernc.org/sqlite v1.7.4 h1:pJVbc3NLKENbO1PJ3/uH+kDeuJiTShqc8eZarwANJgU= -modernc.org/sqlite v1.7.4/go.mod h1:xse4RHCm8Fzw0COf5SJqAyiDrVeDwAQthAS1V/woNIA= -modernc.org/tcl v1.4.1 h1:8ERwg+o+EFtrXmXDOVuGGmo+EkEh8Bkokb/ybI3kXPQ= -modernc.org/tcl v1.4.1/go.mod h1:8YCvzidU9SIwkz7RZwlCWK61mhV8X9UwfkRDRp7y5e0= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= +k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +lukechampine.com/uint128 v1.1.1 h1:pnxCASz787iMf+02ssImqk6OLt+Z5QHMoZyUXR4z6JU= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0 h1:0kmRkTmqNidmu3c7BNDSdVHCxXCkWLmWmCIVX4LUboo= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6 h1:3l18poV+iUemQ98O3X5OMr97LOqlzis+ytivU4NqGhA= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod 
h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.7 h1:qzQtHhsZNpVPpeCu+aMIQldXeV1P0vRhSqCL0nOIJOA= +modernc.org/libc v1.16.7/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.17.3 h1:iE+coC5g17LtByDYDWKpR6m2Z9022YrSh3bumwOnIrI= +modernc.org/sqlite v1.17.3/go.mod h1:10hPVYar9C0kfXuTWGz8s0XtB8uAGymUy51ZzStYe3k= +modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/tcl v1.13.1 h1:npxzTwFTZYM8ghWicVIX1cRWzj7Nd8i6AqqX2p+IYao= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1 h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +mvdan.cc/gofumpt v0.1.0/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= 
+sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/info.plist b/info.plist new file mode 100644 index 0000000000000..e1267df8c1788 --- /dev/null +++ b/info.plist @@ -0,0 +1,16 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> +<key>CFBundleExecutable</key> +<string>telegraf_entry_mac</string> +<key>CFBundleIconFile</key> +<string>icon.icns</string> +<key>CFBundleIdentifier</key> +<string>com.influxdata.telegraf</string> +<key>NSHighResolutionCapable</key> +<true/> +<key>LSUIElement</key> +<true/> +</dict> +</plist> \ No newline at end of file diff --git a/internal/choice/choice.go b/internal/choice/choice.go index 33c26096ddfc1..5c178fa731730 100644 --- a/internal/choice/choice.go +++ b/internal/choice/choice.go @@ -14,7 +14,7 @@ func Contains(choice string, choices []string) bool { return false } -// CheckSContains returns an error if a choice is not one of +// Check returns an error if a choice is not one of // the available choices. func Check(choice string, available []string) error { if !Contains(choice, available) { @@ -23,7 +23,7 @@ func Check(choice string, available []string) error { return nil } -// CheckSliceContains returns an error if the choices is not a subset of +// CheckSlice returns an error if the choices is not a subset of // available. func CheckSlice(choices, available []string) error { for _, choice := range choices { diff --git a/internal/content_coding.go b/internal/content_coding.go index daefa20eea633..df572ecb0fd2e 100644 --- a/internal/content_coding.go +++ b/internal/content_coding.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "compress/gzip" + "compress/zlib" "errors" "io" ) @@ -65,7 +66,6 @@ func (r *GzipReader) Read(b []byte) (int, error) { return n, nil } return n, err - } // NewContentEncoder returns a ContentEncoder for the encoding type. @@ -73,6 +73,8 @@ func NewContentEncoder(encoding string) (ContentEncoder, error) { switch encoding { case "gzip": return NewGzipEncoder() + case "zlib": + return NewZlibEncoder() case "identity", "": return NewIdentityEncoder(), nil default: @@ -85,6 +87,8 @@ func NewContentDecoder(encoding string) (ContentDecoder, error) { switch encoding { case "gzip": return NewGzipDecoder() + case "zlib": + return NewZlibDecoder() case "identity", "": return NewIdentityDecoder(), nil default: @@ -126,6 +130,34 @@ func (e *GzipEncoder) Encode(data []byte) ([]byte, error) { return e.buf.Bytes(), nil } +type ZlibEncoder struct { + writer *zlib.Writer + buf *bytes.Buffer +} + +func NewZlibEncoder() (*ZlibEncoder, error) { + var buf bytes.Buffer + return &ZlibEncoder{ + writer: zlib.NewWriter(&buf), + buf: &buf, + }, nil +} + +func (e *ZlibEncoder) Encode(data []byte) ([]byte, error) { + e.buf.Reset() + e.writer.Reset(e.buf) + + _, err := e.writer.Write(data) + if err != nil { + return nil, err + } + err = e.writer.Close() + if err != nil { + return nil, err + } + return e.buf.Bytes(), nil +} + // IdentityEncoder is a null encoder that applies no transformation.
type IdentityEncoder struct{} @@ -170,6 +202,35 @@ func (d *GzipDecoder) Decode(data []byte) ([]byte, error) { return d.buf.Bytes(), nil } +type ZlibDecoder struct { + buf *bytes.Buffer +} + +func NewZlibDecoder() (*ZlibDecoder, error) { + return &ZlibDecoder{ + buf: new(bytes.Buffer), + }, nil +} + +func (d *ZlibDecoder) Decode(data []byte) ([]byte, error) { + d.buf.Reset() + + b := bytes.NewBuffer(data) + r, err := zlib.NewReader(b) + if err != nil { + return nil, err + } + _, err = io.Copy(d.buf, r) + if err != nil && err != io.EOF { + return nil, err + } + err = r.Close() + if err != nil { + return nil, err + } + return d.buf.Bytes(), nil +} + // IdentityDecoder is a null decoder that returns the input. type IdentityDecoder struct{} diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go index 85496df59c5b6..72e4694f90d87 100644 --- a/internal/content_coding_test.go +++ b/internal/content_coding_test.go @@ -2,7 +2,7 @@ package internal import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -46,6 +46,21 @@ func TestGzipReuse(t *testing.T) { require.Equal(t, "doody", string(actual)) } +func TestZlibEncodeDecode(t *testing.T) { + enc, err := NewZlibEncoder() + require.NoError(t, err) + dec, err := NewZlibDecoder() + require.NoError(t, err) + + payload, err := enc.Encode([]byte("howdy")) + require.NoError(t, err) + + actual, err := dec.Decode(payload) + require.NoError(t, err) + + require.Equal(t, "howdy", string(actual)) +} + func TestIdentityEncodeDecode(t *testing.T) { enc := NewIdentityEncoder() dec := NewIdentityDecoder() @@ -68,7 +83,7 @@ func TestStreamIdentityDecode(t *testing.T) { dec, err := NewStreamContentDecoder("identity", &r) require.NoError(t, err) - data, err := ioutil.ReadAll(dec) + data, err := io.ReadAll(dec) require.NoError(t, err) require.Equal(t, []byte("howdy"), data) diff --git a/internal/exec_unix.go b/internal/exec_unix.go index d41aae825d6d5..0f5d3fca037db 100644 --- a/internal/exec_unix.go +++ b/internal/exec_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package internal @@ -50,7 +51,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { // If SIGTERM was sent then treat any process error as a timeout. if termSent { - return TimeoutErr + return ErrTimeout } // Otherwise there was an error unrelated to termination. diff --git a/internal/exec_windows.go b/internal/exec_windows.go index f010bdd96756b..708051dda3a2c 100644 --- a/internal/exec_windows.go +++ b/internal/exec_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal @@ -33,7 +34,7 @@ func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { // If SIGTERM was sent then treat any process error as a timeout. if termSent { - return TimeoutErr + return ErrTimeout } // Otherwise there was an error unrelated to termination. diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index d4e7ffd8743bd..98b286d791d71 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -5,8 +5,8 @@ import ( "path/filepath" "strings" + "github.com/bmatcuk/doublestar/v3" "github.com/gobwas/glob" - "github.com/karrick/godirwalk" ) type GlobPath struct { @@ -45,42 +45,13 @@ func Compile(path string) (*GlobPath, error) { // If it's a static path, returns path. // All returned path will have the host platform separator. 
func (g *GlobPath) Match() []string { - if !g.hasMeta { - return []string{g.path} - } - if !g.HasSuperMeta { - files, _ := filepath.Glob(g.path) - return files - } - roots, err := filepath.Glob(g.rootGlob) - if err != nil { - return []string{} - } - out := []string{} - walkfn := func(path string, _ *godirwalk.Dirent) error { - if g.g.Match(path) { - out = append(out, path) - } - return nil + // This string replacement is for backwards compatibility support + // The original implementation allowed **.txt but the double star package requires **/**.txt + g.path = strings.ReplaceAll(g.path, "**/**", "**") + g.path = strings.ReplaceAll(g.path, "**", "**/**") - } - for _, root := range roots { - fileinfo, err := os.Stat(root) - if err != nil { - continue - } - if !fileinfo.IsDir() { - if g.MatchString(root) { - out = append(out, root) - } - continue - } - godirwalk.Walk(root, &godirwalk.Options{ - Callback: walkfn, - Unsorted: true, - }) - } - return out + files, _ := doublestar.Glob(g.path) + return files } // MatchString tests the path string against the glob. The path should contain @@ -113,10 +84,10 @@ func (g *GlobPath) GetRoots() []string { // hasMeta reports whether path contains any magic glob characters. func hasMeta(path string) bool { - return strings.IndexAny(path, "*?[") >= 0 + return strings.ContainsAny(path, "*?[") } // hasSuperMeta reports whether path contains any super magic glob characters (**). func hasSuperMeta(path string) bool { - return strings.Index(path, "**") >= 0 + return strings.Contains(path, "**") } diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 60562d8f8f1ae..bc286bc75419e 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -1,53 +1,70 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package globpath import ( + "os" + "path/filepath" "runtime" - "strings" "testing" "github.com/stretchr/testify/require" ) +var ( + testdataDir = getTestdataDir() +) + func TestCompileAndMatch(t *testing.T) { - dir := getTestdataDir() - // test super asterisk - g1, err := Compile(dir + "/**") - require.NoError(t, err) - // test single asterisk - g2, err := Compile(dir + "/*.log") - require.NoError(t, err) - // test no meta characters (file exists) - g3, err := Compile(dir + "/log1.log") - require.NoError(t, err) - // test file that doesn't exist - g4, err := Compile(dir + "/i_dont_exist.log") - require.NoError(t, err) - // test super asterisk that doesn't exist - g5, err := Compile(dir + "/dir_doesnt_exist/**") - require.NoError(t, err) + type test struct { + path string + matches int + } - matches := g1.Match() - require.Len(t, matches, 6) - matches = g2.Match() - require.Len(t, matches, 2) - matches = g3.Match() - require.Len(t, matches, 1) - matches = g4.Match() - require.Len(t, matches, 1) - matches = g5.Match() - require.Len(t, matches, 0) + tests := []test{ + //test super asterisk + {path: filepath.Join(testdataDir, "**"), matches: 7}, + // test single asterisk + {path: filepath.Join(testdataDir, "*.log"), matches: 3}, + // test no meta characters (file exists) + {path: filepath.Join(testdataDir, "log1.log"), matches: 1}, + // test file that doesn't exist + {path: filepath.Join(testdataDir, "i_dont_exist.log"), matches: 0}, + // test super asterisk that doesn't exist + {path: filepath.Join(testdataDir, "dir_doesnt_exist", "**"), matches: 0}, + // test 
exclamation mark creates non-matching list with a range + {path: filepath.Join(testdataDir, "log[!1-2]*"), matches: 1}, + // test caret creates non-matching list + {path: filepath.Join(testdataDir, "log[^1-2]*"), matches: 1}, + // test exclamation mark creates non-matching list without a range + {path: filepath.Join(testdataDir, "log[!2]*"), matches: 2}, + // test escaped opening bracket with exclamation mark + {path: filepath.Join(testdataDir, "log\\[!*"), matches: 1}, + // test escaped opening bracket with caret + {path: filepath.Join(testdataDir, "log\\[^*"), matches: 0}, + } + + for _, tc := range tests { + g, err := Compile(tc.path) + require.Nil(t, err) + matches := g.Match() + require.Len(t, matches, tc.matches) + } } func TestRootGlob(t *testing.T) { - dir := getTestdataDir() tests := []struct { input string output string }{ - {dir + "/**", dir + "/*"}, - {dir + "/nested?/**", dir + "/nested?/*"}, - {dir + "/ne**/nest*", dir + "/ne*"}, - {dir + "/nested?/*", ""}, + {filepath.Join(testdataDir, "**"), filepath.Join(testdataDir, "*")}, + {filepath.Join(testdataDir, "nested?", "**"), filepath.Join(testdataDir, "nested?", "*")}, + {filepath.Join(testdataDir, "ne**", "nest*"), filepath.Join(testdataDir, "ne*")}, + {filepath.Join(testdataDir, "nested?", "*"), ""}, } for _, test := range tests { @@ -57,26 +74,24 @@ func TestRootGlob(t *testing.T) { } func TestFindNestedTextFile(t *testing.T) { - dir := getTestdataDir() // test super asterisk - g1, err := Compile(dir + "/**.txt") + g1, err := Compile(filepath.Join(testdataDir, "**.txt")) require.NoError(t, err) matches := g1.Match() require.Len(t, matches, 1) } -func getTestdataDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "globpath_test.go", "testdata", 1) -} - func TestMatch_ErrPermission(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping Unix only test") + } + tests := []struct { input string expected []string }{ - {"/root/foo", []string{"/root/foo"}}, + {"/root/foo", []string(nil)}, {"/root/f*", []string(nil)}, } @@ -98,3 +113,13 @@ func TestWindowsSeparator(t *testing.T) { ok := glob.MatchString("testdata\\nested1") require.True(t, ok) } + +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") +} diff --git a/internal/globpath/testdata/log[!.log b/internal/globpath/testdata/log[!.log new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/internal/goplugin/noplugin.go b/internal/goplugin/noplugin.go index 23d8634c46520..65fcee418e388 100644 --- a/internal/goplugin/noplugin.go +++ b/internal/goplugin/noplugin.go @@ -1,9 +1,10 @@ +//go:build !goplugin // +build !goplugin package goplugin import "errors" -func LoadExternalPlugins(rootDir string) error { +func LoadExternalPlugins(_ string) error { return errors.New("go plugin support is not enabled") } diff --git a/internal/goplugin/plugin.go b/internal/goplugin/plugin.go index 7e58ec32e92c2..3af051833b6a7 100644 --- a/internal/goplugin/plugin.go +++ b/internal/goplugin/plugin.go @@ -1,3 +1,4 @@ +//go:build goplugin // +build goplugin package goplugin diff --git a/internal/http.go b/internal/http.go index 1c3dd49577557..7469b96506f14 100644 --- a/internal/http.go +++ b/internal/http.go @@ -37,7 +37,6 @@ func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) if !ok ||
subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 || subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 { - rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"") h.onError(rw) http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) @@ -73,7 +72,6 @@ func (h *genericAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request // Scheme checking authorization := req.Header.Get("Authorization") if subtle.ConstantTimeCompare([]byte(authorization), []byte(h.credentials)) != 1 { - h.onError(rw) http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) return @@ -88,24 +86,24 @@ type ErrorFunc func(rw http.ResponseWriter, code int) // IPRangeHandler returns a http handler that requires the remote address to be // in the specified network. -func IPRangeHandler(network []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler { +func IPRangeHandler(networks []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler { return func(h http.Handler) http.Handler { return &ipRangeHandler{ - network: network, - onError: onError, - next: h, + networks: networks, + onError: onError, + next: h, } } } type ipRangeHandler struct { - network []*net.IPNet - onError ErrorFunc - next http.Handler + networks []*net.IPNet + onError ErrorFunc + next http.Handler } func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - if len(h.network) == 0 { + if len(h.networks) == 0 { h.next.ServeHTTP(rw, req) return } @@ -122,8 +120,8 @@ func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { return } - for _, net := range h.network { - if net.Contains(remoteIP) { + for _, network := range h.networks { + if network.Contains(remoteIP) { h.next.ServeHTTP(rw, req) return } diff --git a/internal/internal.go b/internal/internal.go index 777128f667bf6..0c6cba5afca1e 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -2,7 +2,6 @@ package internal import ( "bufio" - "bytes" "compress/gzip" "context" "errors" @@ -19,37 +18,19 @@ import ( "syscall" "time" "unicode" - - "github.com/alecthomas/units" ) const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" var ( - TimeoutErr = errors.New("Command timed out.") - - NotImplementedError = errors.New("not implemented yet") - - VersionAlreadySetError = errors.New("version has already been set") + ErrTimeout = errors.New("command timed out") + ErrorNotImplemented = errors.New("not implemented yet") + ErrorVersionAlreadySet = errors.New("version has already been set") ) // Set via the main module var version string -// Duration just wraps time.Duration -type Duration struct { - Duration time.Duration -} - -// Size just wraps an int64 -type Size struct { - Size int64 -} - -type Number struct { - Value float64 -} - type ReadWaitCloser struct { pipeReader *io.PipeReader wg sync.WaitGroup @@ -58,9 +39,13 @@ type ReadWaitCloser struct { // SetVersion sets the telegraf agent version func SetVersion(v string) error { if version != "" { - return VersionAlreadySetError + return ErrorVersionAlreadySet } version = v + if version == "" { + version = "unknown" + } + return nil } @@ -75,72 +60,6 @@ func ProductToken() string { Version(), strings.TrimPrefix(runtime.Version(), "go")) } -// UnmarshalTOML parses the duration from the TOML config file -func (d *Duration) UnmarshalTOML(b []byte) error { - var err error - b = bytes.Trim(b, `'`) - - // see if we can directly convert it - d.Duration, 
err = time.ParseDuration(string(b)) - if err == nil { - return nil - } - - // Parse string duration, ie, "1s" - if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 { - d.Duration, err = time.ParseDuration(uq) - if err == nil { - return nil - } - } - - // First try parsing as integer seconds - sI, err := strconv.ParseInt(string(b), 10, 64) - if err == nil { - d.Duration = time.Second * time.Duration(sI) - return nil - } - // Second try parsing as float seconds - sF, err := strconv.ParseFloat(string(b), 64) - if err == nil { - d.Duration = time.Second * time.Duration(sF) - return nil - } - - return nil -} - -func (s *Size) UnmarshalTOML(b []byte) error { - var err error - b = bytes.Trim(b, `'`) - - val, err := strconv.ParseInt(string(b), 10, 64) - if err == nil { - s.Size = val - return nil - } - uq, err := strconv.Unquote(string(b)) - if err != nil { - return err - } - val, err = units.ParseStrictBytes(uq) - if err != nil { - return err - } - s.Size = val - return nil -} - -func (n *Number) UnmarshalTOML(b []byte) error { - value, err := strconv.ParseFloat(string(b), 64) - if err != nil { - return err - } - - n.Value = value - return nil -} - // ReadLines reads contents from a file and splits them by new lines. // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). func ReadLines(filename string) ([]string, error) { @@ -266,7 +185,7 @@ func AlignTime(tm time.Time, interval time.Duration) time.Time { return truncated.Add(interval) } -// Exit status takes the error from exec.Command +// ExitStatus takes the error from exec.Command // and returns the exit status and true // if error is not exit status, will return 0 and false func ExitStatus(err error) (int, bool) { @@ -382,8 +301,25 @@ func parseComponents(timestamp interface{}) (int64, int64, error) { return 0, 0, err } return integer, 0, nil + case int8: + return int64(ts), 0, nil + case int16: + return int64(ts), 0, nil + case int32: + return int64(ts), 0, nil case int64: return ts, 0, nil + case uint8: + return int64(ts), 0, nil + case uint16: + return int64(ts), 0, nil + case uint32: + return int64(ts), 0, nil + case uint64: + return int64(ts), 0, nil + case float32: + integer, fractional := math.Modf(float64(ts)) + return int64(integer), int64(fractional * 1e9), nil case float64: integer, fractional := math.Modf(ts) return int64(integer), int64(fractional * 1e9), nil @@ -417,6 +353,36 @@ func parseTime(format string, timestamp interface{}, location string) (time.Time if err != nil { return time.Unix(0, 0), err } + switch strings.ToLower(format) { + case "ansic": + format = time.ANSIC + case "unixdate": + format = time.UnixDate + case "rubydate": + format = time.RubyDate + case "rfc822": + format = time.RFC822 + case "rfc822z": + format = time.RFC822Z + case "rfc850": + format = time.RFC850 + case "rfc1123": + format = time.RFC1123 + case "rfc1123z": + format = time.RFC1123Z + case "rfc3339": + format = time.RFC3339 + case "rfc3339nano": + format = time.RFC3339Nano + case "stamp": + format = time.Stamp + case "stampmilli": + format = time.StampMilli + case "stampmicro": + format = time.StampMicro + case "stampnano": + format = time.StampNano + } return time.ParseInLocation(format, ts, loc) default: return time.Unix(0, 0), errors.New("unsupported type") diff --git a/internal/internal_test.go b/internal/internal_test.go index 25f0503ba20a8..24fdb91bb2ebc 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -5,7 +5,6 @@ import ( "compress/gzip" "crypto/rand" "io" - "io/ioutil" "log" "os/exec" 
"regexp" @@ -46,15 +45,14 @@ func TestSnakeCase(t *testing.T) { } var ( - sleepbin, _ = exec.LookPath("sleep") + sleepbin, _ = exec.LookPath("sleep") //nolint:unused // Used in skipped tests echobin, _ = exec.LookPath("echo") shell, _ = exec.LookPath("sh") ) func TestRunTimeout(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to random failures.") - } + t.Skip("Skipping test due to random failures & a data race when running test-all.") + if sleepbin == "" { t.Skip("'sleep' binary not available on OS, skipping.") } @@ -63,7 +61,7 @@ func TestRunTimeout(t *testing.T) { err := RunTimeout(cmd, time.Millisecond*20) elapsed := time.Since(start) - assert.Equal(t, TimeoutErr, err) + assert.Equal(t, ErrTimeout, err) // Verify that command gets killed in 20ms, with some breathing room assert.True(t, elapsed < time.Millisecond*75) } @@ -103,7 +101,7 @@ func TestCombinedOutputTimeout(t *testing.T) { _, err := CombinedOutputTimeout(cmd, time.Millisecond*20) elapsed := time.Since(start) - assert.Equal(t, TimeoutErr, err) + assert.Equal(t, ErrTimeout, err) // Verify that command gets killed in 20ms, with some breathing room assert.True(t, elapsed < time.Millisecond*75) } @@ -172,52 +170,6 @@ func TestRandomSleep(t *testing.T) { assert.True(t, elapsed < time.Millisecond*150) } -func TestDuration(t *testing.T) { - var d Duration - - d.UnmarshalTOML([]byte(`"1s"`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`1s`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`'1s'`)) - assert.Equal(t, time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`10`)) - assert.Equal(t, 10*time.Second, d.Duration) - - d = Duration{} - d.UnmarshalTOML([]byte(`1.5`)) - assert.Equal(t, time.Second, d.Duration) -} - -func TestSize(t *testing.T) { - var s Size - - s.UnmarshalTOML([]byte(`"1B"`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`1`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`'1'`)) - assert.Equal(t, int64(1), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`"1GB"`)) - assert.Equal(t, int64(1000*1000*1000), s.Size) - - s = Size{} - s.UnmarshalTOML([]byte(`"12GiB"`)) - assert.Equal(t, int64(12*1024*1024*1024), s.Size) -} - func TestCompressWithGzip(t *testing.T) { testData := "the quick brown fox jumps over the lazy dog" inputBuffer := bytes.NewBuffer([]byte(testData)) @@ -229,7 +181,7 @@ func TestCompressWithGzip(t *testing.T) { assert.NoError(t, err) defer gzipReader.Close() - output, err := ioutil.ReadAll(gzipReader) + output, err := io.ReadAll(gzipReader) assert.NoError(t, err) assert.Equal(t, testData, string(output)) @@ -250,7 +202,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { rc, err := CompressWithGzip(mr) assert.NoError(t, err) - n, err := io.CopyN(ioutil.Discard, rc, 10000) + n, err := io.CopyN(io.Discard, rc, 10000) assert.NoError(t, err) assert.Equal(t, int64(10000), n) @@ -258,7 +210,7 @@ func TestCompressWithGzipEarlyClose(t *testing.T) { err = rc.Close() assert.NoError(t, err) - n, err = io.CopyN(ioutil.Discard, rc, 10000) + n, err = io.CopyN(io.Discard, rc, 10000) assert.Error(t, io.EOF, err) assert.Equal(t, int64(0), n) @@ -274,7 +226,7 @@ func TestVersionAlreadySet(t *testing.T) { err = SetVersion("bar") assert.Error(t, err) - assert.IsType(t, VersionAlreadySetError, err) + assert.IsType(t, ErrorVersionAlreadySet, err) assert.Equal(t, "foo", Version()) } @@ -368,9 +320,84 @@ func TestAlignTime(t *testing.T) { func 
TestParseTimestamp(t *testing.T) { rfc3339 := func(value string) time.Time { tm, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - panic(err) - } + require.NoError(t, err) + return tm + } + ansic := func(value string) time.Time { + tm, err := time.Parse(time.ANSIC, value) + require.NoError(t, err) + return tm + } + + unixdate := func(value string) time.Time { + tm, err := time.Parse(time.UnixDate, value) + require.NoError(t, err) + return tm + } + + rubydate := func(value string) time.Time { + tm, err := time.Parse(time.RubyDate, value) + require.NoError(t, err) + return tm + } + + rfc822 := func(value string) time.Time { + tm, err := time.Parse(time.RFC822, value) + require.NoError(t, err) + return tm + } + + rfc822z := func(value string) time.Time { + tm, err := time.Parse(time.RFC822Z, value) + require.NoError(t, err) + return tm + } + + rfc850 := func(value string) time.Time { + tm, err := time.Parse(time.RFC850, value) + require.NoError(t, err) + return tm + } + + rfc1123 := func(value string) time.Time { + tm, err := time.Parse(time.RFC1123, value) + require.NoError(t, err) + return tm + } + + rfc1123z := func(value string) time.Time { + tm, err := time.Parse(time.RFC1123Z, value) + require.NoError(t, err) + return tm + } + + rfc3339nano := func(value string) time.Time { + tm, err := time.Parse(time.RFC3339Nano, value) + require.NoError(t, err) + return tm + } + + stamp := func(value string) time.Time { + tm, err := time.Parse(time.Stamp, value) + require.NoError(t, err) + return tm + } + + stampmilli := func(value string) time.Time { + tm, err := time.Parse(time.StampMilli, value) + require.NoError(t, err) + return tm + } + + stampmicro := func(value string) time.Time { + tm, err := time.Parse(time.StampMicro, value) + require.NoError(t, err) + return tm + } + + stampnano := func(value string) time.Time { + tm, err := time.Parse(time.StampNano, value) + require.NoError(t, err) return tm } @@ -468,6 +495,111 @@ func TestParseTimestamp(t *testing.T) { timestamp: "1568338208000000500", expected: rfc3339("2019-09-13T01:30:08.000000500Z"), }, + { + name: "rfc3339 test", + format: "RFC3339", + timestamp: "2018-10-26T13:30:33Z", + expected: rfc3339("2018-10-26T13:30:33Z"), + }, + + { + name: "ANSIC", + format: "ANSIC", + timestamp: "Mon Jan 2 15:04:05 2006", + expected: ansic("Mon Jan 2 15:04:05 2006"), + }, + + { + name: "UnixDate", + format: "UnixDate", + timestamp: "Mon Jan 2 15:04:05 MST 2006", + expected: unixdate("Mon Jan 2 15:04:05 MST 2006"), + location: "Local", + }, + + { + name: "RubyDate", + format: "RubyDate", + timestamp: "Mon Jan 02 15:04:05 -0700 2006", + expected: rubydate("Mon Jan 02 15:04:05 -0700 2006"), + location: "Local", + }, + + { + name: "RFC822", + format: "RFC822", + timestamp: "02 Jan 06 15:04 MST", + expected: rfc822("02 Jan 06 15:04 MST"), + location: "Local", + }, + + { + name: "RFC822Z", + format: "RFC822Z", + timestamp: "02 Jan 06 15:04 -0700", + expected: rfc822z("02 Jan 06 15:04 -0700"), + location: "Local", + }, + + { + name: "RFC850", + format: "RFC850", + timestamp: "Monday, 02-Jan-06 15:04:05 MST", + expected: rfc850("Monday, 02-Jan-06 15:04:05 MST"), + location: "Local", + }, + + { + name: "RFC1123", + format: "RFC1123", + timestamp: "Mon, 02 Jan 2006 15:04:05 MST", + expected: rfc1123("Mon, 02 Jan 2006 15:04:05 MST"), + location: "Local", + }, + + { + name: "RFC1123Z", + format: "RFC1123Z", + timestamp: "Mon, 02 Jan 2006 15:04:05 -0700", + expected: rfc1123z("Mon, 02 Jan 2006 15:04:05 -0700"), + location: "Local", + }, + + { + name:
"RFC3339Nano", + format: "RFC3339Nano", + timestamp: "2006-01-02T15:04:05.999999999-07:00", + expected: rfc3339nano("2006-01-02T15:04:05.999999999-07:00"), + location: "Local", + }, + + { + name: "Stamp", + format: "Stamp", + timestamp: "Jan 2 15:04:05", + expected: stamp("Jan 2 15:04:05"), + }, + + { + name: "StampMilli", + format: "StampMilli", + timestamp: "Jan 2 15:04:05.000", + expected: stampmilli("Jan 2 15:04:05.000"), + }, + + { + name: "StampMicro", + format: "StampMicro", + timestamp: "Jan 2 15:04:05.000000", + expected: stampmicro("Jan 2 15:04:05.000000"), + }, + + { + name: "StampNano", + format: "StampNano", + timestamp: "Jan 2 15:04:05.000000000", + expected: stampnano("Jan 2 15:04:05.000000000"), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/process/process.go b/internal/process/process.go index 3f88aac57b317..88da25168b63e 100644 --- a/internal/process/process.go +++ b/internal/process/process.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "os" "os/exec" "sync" "sync/atomic" @@ -27,13 +27,14 @@ type Process struct { name string args []string + envs []string pid int32 cancel context.CancelFunc mainLoopWg sync.WaitGroup } // New creates a new process wrapper -func New(command []string) (*Process, error) { +func New(command []string, envs []string) (*Process, error) { if len(command) == 0 { return nil, errors.New("no command") } @@ -42,6 +43,7 @@ func New(command []string) (*Process, error) { RestartDelay: 5 * time.Second, name: command[0], args: []string{}, + envs: envs, } if len(command) > 1 { @@ -86,6 +88,10 @@ func (p *Process) Stop() { func (p *Process) cmdStart() error { p.Cmd = exec.Command(p.name, p.args...) + if len(p.envs) > 0 { + p.Cmd.Env = append(os.Environ(), p.envs...) 
+ } + + var err error p.Stdin, err = p.Cmd.StdinPipe() if err != nil { return err } @@ -126,12 +132,12 @@ func (p *Process) cmdLoop(ctx context.Context) error { } p.Log.Errorf("Process %s exited: %v", p.Cmd.Path, err) - p.Log.Infof("Restarting in %s...", time.Duration(p.RestartDelay)) + p.Log.Infof("Restarting in %s...", p.RestartDelay) select { case <-ctx.Done(): return nil - case <-time.After(time.Duration(p.RestartDelay)): + case <-time.After(p.RestartDelay): // Continue the loop and restart the process if err := p.cmdStart(); err != nil { return err @@ -187,5 +193,5 @@ func isQuitting(ctx context.Context) bool { } func defaultReadPipe(r io.Reader) { - io.Copy(ioutil.Discard, r) + _, _ = io.Copy(io.Discard, r) } diff --git a/internal/process/process_posix.go b/internal/process/process_posix.go index 7b42b7da13214..8f736bc673592 100644 --- a/internal/process/process_posix.go +++ b/internal/process/process_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package process diff --git a/internal/process/process_test.go b/internal/process/process_test.go index 7a7c8c6f33fd6..d453c73804e7e 100644 --- a/internal/process/process_test.go +++ b/internal/process/process_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package process @@ -19,10 +20,14 @@ import ( // test that a restarting process resets pipes properly func TestRestartingRebindsPipes(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long running test in short mode") + } + exe, err := os.Executable() require.NoError(t, err) - p, err := New([]string{exe, "-external"}) + p, err := New([]string{exe, "-external"}, []string{"INTERNAL_PROCESS_MODE=application"}) p.RestartDelay = 100 * time.Nanosecond p.Log = testutil.Logger{} require.NoError(t, err) @@ -48,6 +53,7 @@ func TestRestartingRebindsPipes(t *testing.T) { time.Sleep(1 * time.Millisecond) } + // the mainLoopWg.Wait() call that p.Stop() makes can take multiple seconds to complete p.Stop() } @@ -56,7 +62,8 @@ var external = flag.Bool("external", false,
func externalProcess() { - wait := make(chan int, 0) + wait := make(chan int) fmt.Fprintln(os.Stdout, "started") <-wait os.Exit(2) diff --git a/internal/process/process_windows.go b/internal/process/process_windows.go index 0995d52469b07..3aefd20f4aa9c 100644 --- a/internal/process/process_windows.go +++ b/internal/process/process_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package process diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go index a167b7cb78f7e..7cfde02692cd4 100644 --- a/internal/rotate/file_writer.go +++ b/internal/rotate/file_writer.go @@ -123,10 +123,7 @@ func (w *FileWriter) openCurrent() (err error) { w.bytesWritten = fileInfo.Size() } - if err = w.rotateIfNeeded(); err != nil { - return err - } - return nil + return w.rotateIfNeeded() } func (w *FileWriter) rotateIfNeeded() error { @@ -153,11 +150,7 @@ func (w *FileWriter) rotate() (err error) { return err } - if err = w.purgeArchivesIfNeeded(); err != nil { - return err - } - - return nil + return w.purgeArchivesIfNeeded() } func (w *FileWriter) purgeArchivesIfNeeded() (err error) { diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go index ca29b9a2f45d6..b24ece5291c79 100644 --- a/internal/rotate/file_writer_test.go +++ b/internal/rotate/file_writer_test.go @@ -1,7 +1,6 @@ package rotate import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -12,93 +11,91 @@ import ( ) func TestFileWriter_NoRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationNo") - require.NoError(t, err) + tempDir := t.TempDir() writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0) require.NoError(t, err) - defer func() { writer.Close(); os.RemoveAll(tempDir) }() + t.Cleanup(func() { require.NoError(t, writer.Close()) }) _, err = writer.Write([]byte("Hello World")) require.NoError(t, err) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) } func TestFileWriter_TimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") - require.NoError(t, err) - interval, _ := time.ParseDuration("1s") + tempDir := t.TempDir() + interval, _ := time.ParseDuration("10ms") writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1) require.NoError(t, err) - defer func() { writer.Close(); os.RemoveAll(tempDir) }() + t.Cleanup(func() { require.NoError(t, writer.Close()) }) _, err = writer.Write([]byte("Hello World")) require.NoError(t, err) - time.Sleep(1 * time.Second) + time.Sleep(interval) _, err = writer.Write([]byte("Hello World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_ReopenTimeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationTime") - require.NoError(t, err) - interval, _ := time.ParseDuration("1s") + tempDir := t.TempDir() + interval, _ := time.ParseDuration("10ms") filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) - time.Sleep(1 * time.Second) + err := os.WriteFile(filePath, []byte("Hello World"), 0644) + time.Sleep(interval) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1) require.NoError(t, err) - defer func() { writer.Close(); os.RemoveAll(tempDir) }() + t.Cleanup(func() { require.NoError(t, writer.Close()) }) - files, _ := 
ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_SizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") - require.NoError(t, err) + tempDir := t.TempDir() maxSize := int64(9) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) require.NoError(t, err) - defer func() { writer.Close(); os.RemoveAll(tempDir) }() + t.Cleanup(func() { require.NoError(t, writer.Close()) }) _, err = writer.Write([]byte("Hello World")) require.NoError(t, err) _, err = writer.Write([]byte("World 2")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_ReopenSizeRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationSize") - require.NoError(t, err) + tempDir := t.TempDir() maxSize := int64(12) filePath := filepath.Join(tempDir, "test.log") - err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + err := os.WriteFile(filePath, []byte("Hello World"), 0644) assert.NoError(t, err) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) require.NoError(t, err) - defer func() { writer.Close(); os.RemoveAll(tempDir) }() + t.Cleanup(func() { require.NoError(t, writer.Close()) }) _, err = writer.Write([]byte("Hello World Again")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestFileWriter_DeleteArchives(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationDeleteArchives") - require.NoError(t, err) + if testing.Short() { + t.Skip("Skipping long test in short mode") + } + + tempDir := t.TempDir() maxSize := int64(5) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2) require.NoError(t, err) - defer func() { writer.Close(); os.RemoveAll(tempDir) }() + t.Cleanup(func() { require.NoError(t, writer.Close()) }) _, err = writer.Write([]byte("First file")) require.NoError(t, err) @@ -112,14 +109,14 @@ func TestFileWriter_DeleteArchives(t *testing.T) { _, err = writer.Write([]byte("Third file")) require.NoError(t, err) - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 3, len(files)) for _, tempFile := range files { var bytes []byte var err error path := filepath.Join(tempDir, tempFile.Name()) - if bytes, err = ioutil.ReadFile(path); err != nil { + if bytes, err = os.ReadFile(path); err != nil { t.Error(err.Error()) return } @@ -133,16 +130,13 @@ func TestFileWriter_DeleteArchives(t *testing.T) { } func TestFileWriter_CloseRotates(t *testing.T) { - tempDir, err := ioutil.TempDir("", "RotationClose") - require.NoError(t, err) - defer os.RemoveAll(tempDir) + tempDir := t.TempDir() maxSize := int64(9) writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) require.NoError(t, err) + require.NoError(t, writer.Close()) - writer.Close() - - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 1, len(files)) assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name()) } diff --git a/internal/snmp/config.go b/internal/snmp/config.go index e616e75709737..8475c084ab2d6 100644 --- a/internal/snmp/config.go +++ b/internal/snmp/config.go @@ -1,21 +1,25 @@ package snmp import ( - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" ) type ClientConfig struct { // Timeout to wait for a response. 
- Timeout internal.Duration `toml:"timeout"` - Retries int `toml:"retries"` + Timeout config.Duration `toml:"timeout"` + Retries int `toml:"retries"` // Values: 1, 2, 3 Version uint8 `toml:"version"` + // Path to mib files + Path []string `toml:"path"` + // Translator implementation + Translator string `toml:"-"` // Parameters for Version 1 & 2 Community string `toml:"community"` // Parameters for Version 2 & 3 - MaxRepetitions uint8 `toml:"max_repetitions"` + MaxRepetitions uint32 `toml:"max_repetitions"` // Parameters for Version 3 ContextName string `toml:"context_name"` diff --git a/internal/snmp/testdata/loadMibsFromPath/linkTarget/emptyFile b/internal/snmp/testdata/loadMibsFromPath/linkTarget/emptyFile new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/internal/snmp/testdata/loadMibsFromPath/root/dirOne/dirTwo/empty b/internal/snmp/testdata/loadMibsFromPath/root/dirOne/dirTwo/empty new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/internal/snmp/testdata/loadMibsFromPath/root/symlink b/internal/snmp/testdata/loadMibsFromPath/root/symlink new file mode 120000 index 0000000000000..a10e5b83c1f40 --- /dev/null +++ b/internal/snmp/testdata/loadMibsFromPath/root/symlink @@ -0,0 +1 @@ +../linkTarget/ \ No newline at end of file diff --git a/internal/snmp/testdata/mibs/testmib b/internal/snmp/testdata/mibs/testmib new file mode 100644 index 0000000000000..ce44a135b272c --- /dev/null +++ b/internal/snmp/testdata/mibs/testmib @@ -0,0 +1,22 @@ +TGTEST-MIB DEFINITIONS ::= BEGIN + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mibs OBJECT IDENTIFIER ::= { mgmt 1 } +system OBJECT IDENTIFIER ::= { mibs 1 } +systemUpTime OBJECT IDENTIFIER ::= { system 3 } +sysUpTimeInstance OBJECT IDENTIFIER ::= { systemUpTime 0 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } +snmpMIB OBJECT IDENTIFIER ::= { snmpModules 1 } +snmpMIBObjects OBJECT IDENTIFIER ::= { snmpMIB 1 } +snmpTraps OBJECT IDENTIFIER ::= { snmpMIBObjects 5 } +coldStart OBJECT IDENTIFIER ::= { snmpTraps 1 } + +END diff --git a/internal/snmp/translate.go b/internal/snmp/translate.go new file mode 100644 index 0000000000000..603dc27129e31 --- /dev/null +++ b/internal/snmp/translate.go @@ -0,0 +1,280 @@ +package snmp + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/sleepinggenius2/gosmi" + "github.com/sleepinggenius2/gosmi/types" +) + +// must init, append path for each directory, load module for every file +// or gosmi will fail without saying why +var m sync.Mutex +var once sync.Once +var cache = make(map[string]bool) + +type MibLoader interface { + // appendPath takes the path of a directory + appendPath(path string) + + // loadModule takes the name of a file in one of the + // directories. 
Basename only, no relative or absolute path + loadModule(path string) error +} + +type GosmiMibLoader struct{} + +func (*GosmiMibLoader) appendPath(path string) { + m.Lock() + defer m.Unlock() + + gosmi.AppendPath(path) +} + +func (*GosmiMibLoader) loadModule(path string) error { + m.Lock() + defer m.Unlock() + + _, err := gosmi.LoadModule(path) + return err +} + +func ClearCache() { + cache = make(map[string]bool) +} + +// LoadMibsFromPath gives all found folders to gosmi and loads every module found in those folders +func LoadMibsFromPath(paths []string, log telegraf.Logger, loader MibLoader) error { + folders, err := walkPaths(paths, log) + if err != nil { + return err + } + for _, path := range folders { + loader.appendPath(path) + modules, err := ioutil.ReadDir(path) + if err != nil { + log.Warnf("Can't read directory %q: %v", path, err) + } + + for _, info := range modules { + if info.Mode()&os.ModeSymlink != 0 { + symlink := filepath.Join(path, info.Name()) + target, err := filepath.EvalSymlinks(symlink) + if err != nil { + log.Warnf("Couldn't evaluate symbolic links for %v: %v", symlink, err) + continue + } + // replace symlink's info with the target's info + info, err = os.Lstat(target) + if err != nil { + log.Warnf("Couldn't stat target %v: %v", target, err) + continue + } + } + if info.Mode().IsRegular() { + err := loader.loadModule(info.Name()) + if err != nil { + log.Warnf("Couldn't load module %v: %v", info.Name(), err) + continue + } + } + } + } + return nil +} + +// walkPaths walks the given paths and collects all folders found +func walkPaths(paths []string, log telegraf.Logger) ([]string, error) { + once.Do(gosmi.Init) + folders := []string{} + + for _, mibPath := range paths { + // Check if we loaded that path already and skip it if so + m.Lock() + cached := cache[mibPath] + cache[mibPath] = true + m.Unlock() + if cached { + continue + } + + err := filepath.Walk(mibPath, func(path string, info os.FileInfo, err error) error { + if info == nil { + log.Warnf("No mibs found") + if os.IsNotExist(err) { + log.Warnf("MIB path doesn't exist: %q", mibPath) + } else if err != nil { + return err + } + return nil + } + + if info.Mode()&os.ModeSymlink != 0 { + target, err := filepath.EvalSymlinks(path) + if err != nil { + log.Warnf("Couldn't evaluate symbolic links for %v: %v", path, err) + } + info, err = os.Lstat(target) + if err != nil { + log.Warnf("Couldn't stat target %v: %v", target, err) + } + path = target + } + if info.IsDir() { + folders = append(folders, path) + } + + return nil + }) + if err != nil { + return folders, fmt.Errorf("couldn't walk path %q: %v", mibPath, err) + } + } + return folders, nil +} + +// The following is for snmp_trap +type MibEntry struct { + MibName string + OidText string +} + +func TrapLookup(oid string) (e MibEntry, err error) { + var givenOid types.Oid + if givenOid, err = types.OidFromString(oid); err != nil { + return e, fmt.Errorf("could not convert OID %s: %w", oid, err) + } + + // Get node name + var node gosmi.SmiNode + if node, err = gosmi.GetNodeByOID(givenOid); err != nil { + return e, err + } + e.OidText = node.Name + + // Add not found OID part + if !givenOid.Equals(node.Oid) { + e.OidText += "."
+ givenOid[len(node.Oid):].String() + } + + // Get module name + module := node.GetModule() + if module.Name != "" { + e.MibName = module.Name + } + + return e, nil +} + +// The following is for snmp + +func GetIndex(oidNum string, mibPrefix string, node gosmi.SmiNode) (col []string, tagOids map[string]struct{}, err error) { + // first attempt to get the table's tags + tagOids = map[string]struct{}{} + + // mimics grabbing INDEX {} that is returned from snmptranslate -Td MibName + for _, index := range node.GetIndex() { + //nolint:staticcheck //assignment to nil map to keep backwards compatibility + tagOids[mibPrefix+index.Name] = struct{}{} + } + + // grabs all columns from the table + // mimics grabbing everything returned from snmptable -Ch -Cl -c public 127.0.0.1 oidFullName + _, col = node.GetColumns() + + return col, tagOids, nil +} + +//nolint:revive //Too many return variables but necessary +func SnmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, node gosmi.SmiNode, err error) { + var out gosmi.SmiNode + var end string + if strings.ContainsAny(oid, "::") { + // split given oid + // for example RFC1213-MIB::sysUpTime.0 + s := strings.SplitN(oid, "::", 2) + // moduleName becomes RFC1213 + moduleName := s[0] + module, err := gosmi.GetModule(moduleName) + if err != nil { + return oid, oid, oid, oid, gosmi.SmiNode{}, err + } + if s[1] == "" { + return "", oid, oid, oid, gosmi.SmiNode{}, fmt.Errorf("cannot parse %v\n", oid) + } + // node becomes sysUpTime.0 + node := s[1] + if strings.ContainsAny(node, ".") { + s = strings.SplitN(node, ".", 2) + // node becomes sysUpTime + node = s[0] + end = "." + s[1] + } + + out, err = module.GetNode(node) + if err != nil { + return oid, oid, oid, oid, out, err + } + + if oidNum = out.RenderNumeric(); oidNum == "" { + return oid, oid, oid, oid, out, fmt.Errorf("cannot make %v numeric, please ensure all imported mibs are in the path", oid) + } + + oidNum = "." + oidNum + end + } else if strings.ContainsAny(oid, "abcdefghijklmnopqrstuvwxyz") { + //handle mixed oid ex.
.iso.2.3 + s := strings.Split(oid, ".") + for i := range s { + if strings.ContainsAny(s[i], "abcdefghijklmnopqrstuvwxyz") { + out, err = gosmi.GetNode(s[i]) + if err != nil { + return oid, oid, oid, oid, out, err + } + s[i] = out.RenderNumeric() + } + } + oidNum = strings.Join(s, ".") + out, _ = gosmi.GetNodeByOID(types.OidMustFromString(oidNum)) + } else { + out, err = gosmi.GetNodeByOID(types.OidMustFromString(oid)) + oidNum = oid + // ensure modules are loaded or node will be empty (might not error) + // do not return the err as the oid is numeric and telegraf can continue + //nolint:nilerr + if err != nil || out.Name == "iso" { + return oid, oid, oid, oid, out, nil + } + } + + tc := out.GetSubtree() + + for i := range tc { + // case where the mib doesn't have a conversion so Type struct will be nil + // prevents seg fault + if tc[i].Type == nil { + break + } + switch tc[i].Type.Name { + case "MacAddress", "PhysAddress": + conversion = "hwaddr" + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": + conversion = "ipaddr" + } + } + + oidText = out.RenderQualified() + i := strings.Index(oidText, "::") + if i == -1 { + return "", oid, oid, oid, out, fmt.Errorf("not found") + } + mibName = oidText[:i] + oidText = oidText[i+2:] + end + + return mibName, oidNum, oidText, conversion, out, nil +} diff --git a/internal/snmp/translate_test.go b/internal/snmp/translate_test.go new file mode 100644 index 0000000000000..9f22947e8ad29 --- /dev/null +++ b/internal/snmp/translate_test.go @@ -0,0 +1,153 @@ +package snmp + +import ( + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestTrapLookup(t *testing.T) { + tests := []struct { + name string + oid string + expected MibEntry + }{ + { + name: "Known trap OID", + oid: ".1.3.6.1.6.3.1.1.5.1", + expected: MibEntry{ + MibName: "TGTEST-MIB", + OidText: "coldStart", + }, + }, + { + name: "Known trap value OID", + oid: ".1.3.6.1.2.1.1.3.0", + expected: MibEntry{ + MibName: "TGTEST-MIB", + OidText: "sysUpTimeInstance", + }, + }, + { + name: "Unknown enterprise sub-OID", + oid: ".1.3.6.1.4.1.0.1.2.3", + expected: MibEntry{ + MibName: "TGTEST-MIB", + OidText: "enterprises.0.1.2.3", + }, + }, + { + name: "Unknown MIB", + oid: ".1.2.3", + expected: MibEntry{OidText: "iso.2.3"}, + }, + } + + // Load the MIBs + require.NoError(t, LoadMibsFromPath([]string{"testdata/mibs"}, testutil.Logger{}, &GosmiMibLoader{})) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Run the actual test + actual, err := TrapLookup(tt.oid) + require.NoError(t, err) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestTrapLookupFail(t *testing.T) { + tests := []struct { + name string + oid string + expected string + }{ + { + name: "New top level OID", + oid: ".3.6.1.3.0", + expected: "Could not find node for OID 3.6.1.3.0", + }, + { + name: "Malformed OID", + oid: ".1.3.dod.1.3.0", + expected: "could not convert OID .1.3.dod.1.3.0: strconv.ParseUint: parsing \"dod\": invalid syntax", + }, + } + + // Load the MIBs + require.NoError(t, LoadMibsFromPath([]string{"testdata/mibs"}, testutil.Logger{}, &GosmiMibLoader{})) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Run the actual test + _, err := TrapLookup(tt.oid) + require.EqualError(t, err, tt.expected) + }) + } +} + +type TestingMibLoader struct { + folders []string + files []string +} + +func (t *TestingMibLoader) appendPath(path string) { + t.folders = 
append(t.folders, path) +} + +func (t *TestingMibLoader) loadModule(path string) error { + t.files = append(t.files, path) + return nil +} +func TestFolderLookup(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping on windows") + } + + tests := []struct { + name string + mibPath [][]string + paths [][]string + files []string + }{ + { + name: "loading folders", + mibPath: [][]string{{"testdata", "loadMibsFromPath", "root"}}, + paths: [][]string{ + {"testdata", "loadMibsFromPath", "root"}, + {"testdata", "loadMibsFromPath", "root", "dirOne"}, + {"testdata", "loadMibsFromPath", "root", "dirOne", "dirTwo"}, + {"testdata", "loadMibsFromPath", "linkTarget"}, + }, + files: []string{"empty", "emptyFile"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + loader := TestingMibLoader{} + + var givenPath []string + for _, paths := range tt.mibPath { + rootPath := filepath.Join(paths...) + givenPath = append(givenPath, rootPath) + } + + err := LoadMibsFromPath(givenPath, testutil.Logger{}, &loader) + require.NoError(t, err) + + var folders []string + for _, pathSlice := range tt.paths { + path := filepath.Join(pathSlice...) + folders = append(folders, path) + } + require.Equal(t, folders, loader.folders) + + require.Equal(t, tt.files, loader.files) + }) + } +} diff --git a/internal/snmp/translator.go b/internal/snmp/translator.go new file mode 100644 index 0000000000000..6a0993a6d1a04 --- /dev/null +++ b/internal/snmp/translator.go @@ -0,0 +1,5 @@ +package snmp + +type TranslatorPlugin interface { + SetTranslator(name string) // Agent calls this on inputs before Init +} diff --git a/internal/snmp/wrapper.go b/internal/snmp/wrapper.go index 23a15594ed6f7..22649d549fe10 100644 --- a/internal/snmp/wrapper.go +++ b/internal/snmp/wrapper.go @@ -5,8 +5,9 @@ import ( "net/url" "strconv" "strings" + "time" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" ) // GosnmpWrapper wraps a *gosnmp.GoSNMP object so we can use it as a snmpConnection. @@ -15,54 +16,23 @@ type GosnmpWrapper struct { } // Host returns the value of GoSNMP.Target. -func (gsw GosnmpWrapper) Host() string { - return gsw.Target +func (gs GosnmpWrapper) Host() string { + return gs.Target } // Walk wraps GoSNMP.Walk() or GoSNMP.BulkWalk(), depending on whether the // connection is using SNMPv1 or newer. -// Also, if any error is encountered, it will just once reconnect and try again. -func (gsw GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { - var err error - // On error, retry once. - // Unfortunately we can't distinguish between an error returned by gosnmp, and one returned by the walk function. - for i := 0; i < 2; i++ { - if gsw.Version == gosnmp.Version1 { - err = gsw.GoSNMP.Walk(oid, fn) - } else { - err = gsw.GoSNMP.BulkWalk(oid, fn) - } - if err == nil { - return nil - } - if err := gsw.GoSNMP.Connect(); err != nil { - return fmt.Errorf("reconnecting: %w", err) - } - } - return err -} - -// Get wraps GoSNMP.GET(). -// If any error is encountered, it will just once reconnect and try again. 
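As a usage note for the new translate API above: callers load MIB modules once and then resolve numeric OIDs. A minimal sketch, with the MIB directory assumed and error handling reduced to log-and-exit:

package main

import (
	"log"

	"github.com/influxdata/telegraf/internal/snmp"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// LoadMibsFromPath caches directories it has already walked, so
	// repeated calls with the same path are cheap.
	err := snmp.LoadMibsFromPath([]string{"/usr/share/snmp/mibs"},
		testutil.Logger{}, &snmp.GosmiMibLoader{})
	if err != nil {
		log.Fatal(err)
	}

	entry, err := snmp.TrapLookup(".1.3.6.1.6.3.1.1.5.1")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s::%s", entry.MibName, entry.OidText) // e.g. SNMPv2-MIB::coldStart
}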
-func (gsw GosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { - var err error - var pkt *gosnmp.SnmpPacket - for i := 0; i < 2; i++ { - pkt, err = gsw.GoSNMP.Get(oids) - if err == nil { - return pkt, nil - } - if err := gsw.GoSNMP.Connect(); err != nil { - return nil, fmt.Errorf("reconnecting: %w", err) - } +func (gs GosnmpWrapper) Walk(oid string, fn gosnmp.WalkFunc) error { + if gs.Version == gosnmp.Version1 { + return gs.GoSNMP.Walk(oid, fn) } - return nil, err + return gs.GoSNMP.BulkWalk(oid, fn) } func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { gs := GosnmpWrapper{&gosnmp.GoSNMP{}} - gs.Timeout = s.Timeout.Duration + gs.Timeout = time.Duration(s.Timeout) gs.Retries = s.Retries @@ -112,6 +82,14 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { sp.AuthenticationProtocol = gosnmp.MD5 case "sha": sp.AuthenticationProtocol = gosnmp.SHA + case "sha224": + sp.AuthenticationProtocol = gosnmp.SHA224 + case "sha256": + sp.AuthenticationProtocol = gosnmp.SHA256 + case "sha384": + sp.AuthenticationProtocol = gosnmp.SHA384 + case "sha512": + sp.AuthenticationProtocol = gosnmp.SHA512 case "": sp.AuthenticationProtocol = gosnmp.NoAuth default: @@ -125,6 +103,14 @@ func NewWrapper(s ClientConfig) (GosnmpWrapper, error) { sp.PrivacyProtocol = gosnmp.DES case "aes": sp.PrivacyProtocol = gosnmp.AES + case "aes192": + sp.PrivacyProtocol = gosnmp.AES192 + case "aes192c": + sp.PrivacyProtocol = gosnmp.AES192C + case "aes256": + sp.PrivacyProtocol = gosnmp.AES256 + case "aes256c": + sp.PrivacyProtocol = gosnmp.AES256C case "": sp.PrivacyProtocol = gosnmp.NoPriv default: @@ -156,11 +142,14 @@ func (gs *GosnmpWrapper) SetAgent(agent string) error { return err } + // Only allow udp{4,6} and tcp{4,6}. + // Allowing ip{4,6} does not make sense as specifying a port + // requires the specification of a protocol. + // gosnmp does not handle these errors well, which is why + // they can result in cryptic errors by net.Dial. switch u.Scheme { - case "tcp": - gs.Transport = "tcp" - case "", "udp": - gs.Transport = "udp" + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6": + gs.Transport = u.Scheme default: return fmt.Errorf("unsupported scheme: %v", u.Scheme) } @@ -178,3 +167,11 @@ func (gs *GosnmpWrapper) SetAgent(agent string) error { gs.Port = uint16(port) return nil } + +func (gs GosnmpWrapper) Reconnect() error { + if gs.Conn == nil { + return gs.Connect() + } + + return nil +} diff --git a/internal/templating/template.go b/internal/templating/template.go index 235d2f2a58928..09b78e19fce66 100644 --- a/internal/templating/template.go +++ b/internal/templating/template.go @@ -59,10 +59,8 @@ func (t *Template) Apply(line string, joiner string) (string, map[string]string, field = append(field, fields[i]) case "field*": field = append(field, fields[i:]...) - break case "measurement*": measurement = append(measurement, fields[i:]...) 
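The break statements being removed from this switch were dead code: a Go switch case never falls through unless fallthrough is spelled out. A tiny standalone illustration (names invented):

package main

import "fmt"

func main() {
	for _, tag := range []string{"field*", "measurement*", "host"} {
		switch tag {
		case "field*":
			fmt.Println("fields") // the case ends here; no break needed
		case "measurement*":
			fmt.Println("measurement")
		default:
			fmt.Println("plain tag")
		}
	}
}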
- break default: tags[tag] = append(tags[tag], fields[i]) } diff --git a/internal/type_conversions.go b/internal/type_conversions.go new file mode 100644 index 0000000000000..e2506a9068de3 --- /dev/null +++ b/internal/type_conversions.go @@ -0,0 +1,200 @@ +package internal + +import ( + "fmt" + "strconv" +) + +func ToString(value interface{}) (string, error) { + switch v := value.(type) { + case string: + return v, nil + case []byte: + return string(v), nil + case int: + return strconv.FormatInt(int64(v), 10), nil + case int8: + return strconv.FormatInt(int64(v), 10), nil + case int16: + return strconv.FormatInt(int64(v), 10), nil + case int32: + return strconv.FormatInt(int64(v), 10), nil + case int64: + return strconv.FormatInt(v, 10), nil + case uint: + return strconv.FormatUint(uint64(v), 10), nil + case uint8: + return strconv.FormatUint(uint64(v), 10), nil + case uint16: + return strconv.FormatUint(uint64(v), 10), nil + case uint32: + return strconv.FormatUint(uint64(v), 10), nil + case uint64: + return strconv.FormatUint(v, 10), nil + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32), nil + case float64: + return strconv.FormatFloat(v, 'f', -1, 64), nil + case bool: + return strconv.FormatBool(v), nil + case fmt.Stringer: + return v.String(), nil + case nil: + return "", nil + } + return "", fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToFloat64(value interface{}) (float64, error) { + switch v := value.(type) { + case string: + return strconv.ParseFloat(v, 64) + case []byte: + return strconv.ParseFloat(string(v), 64) + case fmt.Stringer: + return strconv.ParseFloat(v.String(), 64) + case int: + return float64(v), nil + case int8: + return float64(v), nil + case int16: + return float64(v), nil + case int32: + return float64(v), nil + case int64: + return float64(v), nil + case uint: + return float64(v), nil + case uint8: + return float64(v), nil + case uint16: + return float64(v), nil + case uint32: + return float64(v), nil + case uint64: + return float64(v), nil + case float32: + return float64(v), nil + case float64: + return v, nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToInt64(value interface{}) (int64, error) { + switch v := value.(type) { + case string: + return strconv.ParseInt(v, 10, 64) + case []byte: + return strconv.ParseInt(string(v), 10, 64) + case fmt.Stringer: + return strconv.ParseInt(v.String(), 10, 64) + case int: + return int64(v), nil + case int8: + return int64(v), nil + case int16: + return int64(v), nil + case int32: + return int64(v), nil + case int64: + return v, nil + case uint: + return int64(v), nil + case uint8: + return int64(v), nil + case uint16: + return int64(v), nil + case uint32: + return int64(v), nil + case uint64: + return int64(v), nil + case float32: + return int64(v), nil + case float64: + return int64(v), nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToUint64(value interface{}) (uint64, error) { + switch v := value.(type) { + case string: + return strconv.ParseUint(v, 10, 64) + case []byte: + return strconv.ParseUint(string(v), 10, 64) + case fmt.Stringer: + return strconv.ParseUint(v.String(), 10, 64) + case int: + return uint64(v), nil + case int8: + return uint64(v), nil + case int16: + return uint64(v), nil + case int32: + return uint64(v), nil + case int64: + return uint64(v), nil + case uint: + return uint64(v), nil + case uint8: + return uint64(v), nil + case uint16: + return uint64(v), 
nil + case uint32: + return uint64(v), nil + case uint64: + return v, nil + case float32: + return uint64(v), nil + case float64: + return uint64(v), nil + case nil: + return 0, nil + } + return 0, fmt.Errorf("type \"%T\" unsupported", value) +} + +func ToBool(value interface{}) (bool, error) { + switch v := value.(type) { + case string: + return strconv.ParseBool(v) + case []byte: + return strconv.ParseBool(string(v)) + case fmt.Stringer: + return strconv.ParseBool(v.String()) + case int: + return v > 0, nil + case int8: + return v > 0, nil + case int16: + return v > 0, nil + case int32: + return v > 0, nil + case int64: + return v > 0, nil + case uint: + return v > 0, nil + case uint8: + return v > 0, nil + case uint16: + return v > 0, nil + case uint32: + return v > 0, nil + case uint64: + return v > 0, nil + case float32: + return v > 0, nil + case float64: + return v > 0, nil + case bool: + return v, nil + case nil: + return false, nil + } + return false, fmt.Errorf("type \"%T\" unsupported", value) +} diff --git a/internal/usage.go b/internal/usage.go index 6eff30e6b0b21..65fb378669df7 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package internal @@ -16,10 +17,14 @@ The commands & flags are: --aggregator-filter filter the aggregators to enable, separator is : --config configuration file to load --config-directory directory containing additional *.conf files + --watch-config Telegraf will restart on local config changes. Monitor changes + using either fs notifications or polling. Valid values: 'inotify' or 'poll'. + Monitoring is off by default. --plugin-directory directory containing *.so files, this directory will be searched recursively. Any Plugin found will be loaded and namespaced. --debug turn on debug logging + --deprecation-list print all deprecated plugins or plugin options. --input-filter filter the inputs to enable, separator is : --input-list print available input plugins. --output-filter filter the outputs to enable, separator is : @@ -33,9 +38,10 @@ The commands & flags are: 'processors', 'aggregators' and 'inputs' --sample-config print out full sample configuration --once enable once mode: gather metrics once, write them, and exit - --test enable test mode: gather metrics once and print them - --test-wait wait up to this many seconds for service - inputs to complete in test or once mode + --test enable test mode: gather metrics once and print them. + No outputs are executed! + --test-wait wait up to this many seconds for service inputs to complete + in test or once mode. Implies --test if not used with --once. 
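The conversion helpers introduced in internal/type_conversions.go above fan any common scalar type into the target type. A hedged usage sketch (values illustrative; note the unsigned-wrap caveat visible in the ToUint64 switch):

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	s, _ := internal.ToString(3.14)   // "3.14"
	i, _ := internal.ToInt64("42")    // 42
	b, _ := internal.ToBool(uint8(1)) // true: numeric inputs are true when > 0
	// Caveat from the switch above: negative signed integers pass through
	// uint64(v) and wrap around instead of returning an error.
	u, _ := internal.ToUint64(-1)
	fmt.Println(s, i, b, u) // 3.14 42 true 18446744073709551615
}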
--usage print usage for a plugin, ie, 'telegraf --usage mysql' --version display the version and exit @@ -45,7 +51,7 @@ Examples: telegraf config > telegraf.conf # generate config with only cpu input & influxdb output plugins defined - telegraf --input-filter cpu --output-filter influxdb config + telegraf config --input-filter cpu --output-filter influxdb # run a single telegraf collection, outputting metrics to stdout telegraf --config telegraf.conf --test @@ -57,5 +63,4 @@ Examples: telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb # run telegraf with pprof - telegraf --config telegraf.conf --pprof-addr localhost:6060 -` + telegraf --config telegraf.conf --pprof-addr localhost:6060` diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 7fee6a1f1595c..511bd5ca49d94 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package internal @@ -16,6 +17,9 @@ The commands & flags are: --aggregator-filter filter the aggregators to enable, separator is : --config configuration file to load --config-directory directory containing additional *.conf files + --watch-config Telegraf will restart on local config changes. Monitor changes + using either fs notifications or polling. Valid values: 'inotify' or 'poll'. + Monitoring is off by default. --debug turn on debug logging --input-filter filter the inputs to enable, separator is : --input-list print available input plugins. @@ -40,6 +44,8 @@ The commands & flags are: --service operate on the service (windows only) --service-name service name (windows only) --service-display-name service display name (windows only) + --service-auto-restart auto restart service on failure (windows only) + --service-restart-delay delay before service auto restart, default is 5m (windows only) Examples: @@ -69,4 +75,6 @@ Examples: # install telegraf service with custom name telegraf --service install --service-name=my-telegraf --service-display-name="My Telegraf" -` + + # install telegraf service with auto restart and restart delay of 3 minutes + telegraf --service install --service-auto-restart --service-restart-delay 3m` diff --git a/logger/event_logger.go b/logger/event_logger.go index 48b645ddedb3c..bb0672de76c5c 100644 --- a/logger/event_logger.go +++ b/logger/event_logger.go @@ -1,35 +1,42 @@ +//go:build windows +// +build windows + package logger import ( "io" + "log" "strings" "github.com/influxdata/wlog" - "github.com/kardianos/service" + "golang.org/x/sys/windows/svc/eventlog" ) const ( LogTargetEventlog = "eventlog" + eidInfo = 1 + eidWarning = 2 + eidError = 3 ) type eventLogger struct { - logger service.Logger + logger *eventlog.Log } func (t *eventLogger) Write(b []byte) (n int, err error) { loc := prefixRegex.FindIndex(b) n = len(b) if loc == nil { - err = t.logger.Info(b) + err = t.logger.Info(1, string(b)) } else if n > 2 { //skip empty log messages line := strings.Trim(string(b[loc[1]:]), " \t\r\n") switch rune(b[loc[0]]) { case 'I': - err = t.logger.Info(line) + err = t.logger.Info(eidInfo, line) case 'W': - err = t.logger.Warning(line) + err = t.logger.Warning(eidWarning, line) case 'E': - err = t.logger.Error(line) + err = t.logger.Error(eidError, line) } } @@ -37,13 +44,20 @@ func (t *eventLogger) Write(b []byte) (n int, err error) { } type eventLoggerCreator struct { - serviceLogger service.Logger + logger *eventlog.Log } func (e *eventLoggerCreator) CreateLogger(config LogConfig) (io.Writer, error) { - return 
wlog.NewWriter(&eventLogger{logger: e.serviceLogger}), nil + return wlog.NewWriter(&eventLogger{logger: e.logger}), nil } -func RegisterEventLogger(serviceLogger service.Logger) { - registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: serviceLogger}) +func RegisterEventLogger(name string) error { + eventLog, err := eventlog.Open(name) + if err != nil { + log.Printf("E! An error occurred while initializing an event logger. %s", err) + return err + } + + registerLogger(LogTargetEventlog, &eventLoggerCreator{logger: eventLog}) + return nil } diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go index f2d4eb4209e89..d268252779867 100644 --- a/logger/event_logger_test.go +++ b/logger/event_logger_test.go @@ -1,4 +1,5 @@ -//+build windows +//go:build windows +// +build windows package logger @@ -10,9 +11,9 @@ import ( "testing" "time" - "github.com/kardianos/service" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/sys/windows/svc/eventlog" ) type Levels int @@ -30,7 +31,8 @@ type Event struct { func getEventLog(t *testing.T, since time.Time) []Event { timeStr := since.UTC().Format(time.RFC3339) - cmd := exec.Command("wevtutil", "qe", "Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='Telegraf']]]") + timeStr = timeStr[:19] + cmd := exec.Command("wevtutil", "qe", "Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='telegraf']]]") var out bytes.Buffer cmd.Stdout = &out err := cmd.Run() @@ -44,7 +46,7 @@ func getEventLog(t *testing.T, since time.Time) []Event { return events.Events } -func TestEventLog(t *testing.T) { +func TestEventLogIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -67,7 +69,7 @@ func TestEventLog(t *testing.T) { assert.Contains(t, events, Event{Message: "Err message", Level: Error}) } -func TestRestrictedEventLog(t *testing.T) { +func TestRestrictedEventLogIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -91,10 +93,8 @@ func TestRestrictedEventLog(t *testing.T) { } func prepareLogger(t *testing.T) { - svc, err := service.New(nil, &service.Config{Name: "Telegraf"}) + eventLog, err := eventlog.Open("telegraf") require.NoError(t, err) - svcLogger, err := svc.SystemLogger(nil) - require.NoError(t, err) - require.NotNil(t, svcLogger) - registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: svcLogger}) + require.NotNil(t, eventLog) + registerLogger(LogTargetEventlog, &eventLoggerCreator{logger: eventLog}) } diff --git a/logger/logger.go b/logger/logger.go index a276d2e807c6c..5c0d88ebf5891 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -6,9 +6,10 @@ import ( "log" "os" "regexp" + "strings" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/wlog" ) @@ -33,15 +34,17 @@ type LogConfig struct { // logger will fallback to stderr Logfile string // will rotate when current file at the specified time interval - RotationInterval internal.Duration + RotationInterval config.Duration // will rotate when current file size exceeds this parameter. - RotationMaxSize internal.Size + RotationMaxSize config.Size // maximum rotated files to keep (older ones will be deleted) RotationMaxArchives int + // pick a timezone to use when logging. or type 'local' for local time. 
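Taken together, the LogConfig changes (config.Duration/config.Size plus the LogWithTimezone field declared just below) are wired up roughly like this, a hedged sketch with made-up paths and limits:

package main

import (
	"log"
	"time"

	"github.com/influxdata/telegraf/config"
	"github.com/influxdata/telegraf/logger"
)

func main() {
	cfg := logger.LogConfig{
		LogTarget:           logger.LogTargetFile,
		Logfile:             "/var/log/telegraf/telegraf.log", // assumed path
		RotationInterval:    config.Duration(24 * time.Hour),
		RotationMaxSize:     config.Size(10 * 1024 * 1024),
		RotationMaxArchives: 5,
		// An IANA zone name, or "local"; empty falls back to UTC.
		LogWithTimezone: "America/Chicago",
	}
	logger.SetupLogging(cfg)
	log.Printf("I! logging configured")
}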
+ LogWithTimezone string } type LoggerCreator interface { - CreateLogger(config LogConfig) (io.Writer, error) + CreateLogger(cfg LogConfig) (io.Writer, error) } var loggerRegistry map[string]LoggerCreator @@ -56,58 +59,74 @@ func registerLogger(name string, loggerCreator LoggerCreator) { type telegrafLog struct { writer io.Writer internalWriter io.Writer + timezone *time.Location } func (t *telegrafLog) Write(b []byte) (n int, err error) { var line []byte + timeToPrint := time.Now().In(t.timezone) + if !prefixRegex.Match(b) { - line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" I! "), b...) + line = append([]byte(timeToPrint.Format(time.RFC3339)+" I! "), b...) } else { - line = append([]byte(time.Now().UTC().Format(time.RFC3339)+" "), b...) + line = append([]byte(timeToPrint.Format(time.RFC3339)+" "), b...) } + return t.writer.Write(line) } func (t *telegrafLog) Close() error { - var stdErrWriter io.Writer - stdErrWriter = os.Stderr + stdErrWriter := os.Stderr // avoid closing stderr - if t.internalWriter != stdErrWriter { - closer, isCloser := t.internalWriter.(io.Closer) - if !isCloser { - return errors.New("the underlying writer cannot be closed") - } - return closer.Close() + if t.internalWriter == stdErrWriter { + return nil } - return nil + + closer, isCloser := t.internalWriter.(io.Closer) + if !isCloser { + return errors.New("the underlying writer cannot be closed") + } + return closer.Close() } // newTelegrafWriter returns a logging-wrapped writer. -func newTelegrafWriter(w io.Writer) io.Writer { +func newTelegrafWriter(w io.Writer, c LogConfig) (io.Writer, error) { + timezoneName := c.LogWithTimezone + + if strings.ToLower(timezoneName) == "local" { + timezoneName = "Local" + } + + tz, err := time.LoadLocation(timezoneName) + if err != nil { + return nil, errors.New("error while setting logging timezone: " + err.Error()) + } + return &telegrafLog{ writer: wlog.NewWriter(w), internalWriter: w, - } + timezone: tz, + }, nil } // SetupLogging configures the logging output. -func SetupLogging(config LogConfig) { - newLogWriter(config) +func SetupLogging(cfg LogConfig) { + newLogWriter(cfg) } type telegrafLogCreator struct { } -func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { +func (t *telegrafLogCreator) CreateLogger(cfg LogConfig) (io.Writer, error) { var writer, defaultWriter io.Writer defaultWriter = os.Stderr - switch config.LogTarget { + switch cfg.LogTarget { case LogTargetFile: - if config.Logfile != "" { + if cfg.Logfile != "" { var err error - if writer, err = rotate.NewFileWriter(config.Logfile, config.RotationInterval.Duration, config.RotationMaxSize.Size, config.RotationMaxArchives); err != nil { - log.Printf("E! Unable to open %s (%s), using stderr", config.Logfile, err) + if writer, err = rotate.NewFileWriter(cfg.Logfile, time.Duration(cfg.RotationInterval), int64(cfg.RotationMaxSize), cfg.RotationMaxArchives); err != nil { + log.Printf("E! Unable to open %s (%s), using stderr", cfg.Logfile, err) writer = defaultWriter } } else { @@ -116,34 +135,34 @@ func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { case LogTargetStderr, "": writer = defaultWriter default: - log.Printf("E! Unsupported logtarget: %s, using stderr", config.LogTarget) + log.Printf("E! 
Unsupported logtarget: %s, using stderr", cfg.LogTarget) writer = defaultWriter } - return newTelegrafWriter(writer), nil + return newTelegrafWriter(writer, cfg) } // Keep track what is actually set as a log output, because log package doesn't provide a getter. // It allows closing previous writer if re-set and have possibility to test what is actually set var actualLogger io.Writer -func newLogWriter(config LogConfig) io.Writer { +func newLogWriter(cfg LogConfig) io.Writer { log.SetFlags(0) - if config.Debug { + if cfg.Debug { wlog.SetLevel(wlog.DEBUG) } - if config.Quiet { + if cfg.Quiet { wlog.SetLevel(wlog.ERROR) } - if !config.Debug && !config.Quiet { + if !cfg.Debug && !cfg.Quiet { wlog.SetLevel(wlog.INFO) } var logWriter io.Writer - if logCreator, ok := loggerRegistry[config.LogTarget]; ok { - logWriter, _ = logCreator.CreateLogger(config) + if logCreator, ok := loggerRegistry[cfg.LogTarget]; ok { + logWriter, _ = logCreator.CreateLogger(cfg) } if logWriter == nil { - logWriter, _ = (&telegrafLogCreator{}).CreateLogger(config) + logWriter, _ = (&telegrafLogCreator{}).CreateLogger(cfg) } if closer, isCloser := actualLogger.(io.Closer); isCloser { diff --git a/logger/logger_test.go b/logger/logger_test.go index a5f53ca17e89b..571a29c4bef26 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -3,85 +3,84 @@ package logger import ( "bytes" "io" - "io/ioutil" "log" "os" "path/filepath" "testing" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - config := createBasicLogConfig(tmpfile.Name()) - SetupLogging(config) + cfg := createBasicLogConfig(tmpfile.Name()) + SetupLogging(cfg) log.Printf("I! TEST") log.Printf("D! TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } func TestDebugWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - config := createBasicLogConfig(tmpfile.Name()) - config.Debug = true - SetupLogging(config) + cfg := createBasicLogConfig(tmpfile.Name()) + cfg.Debug = true + SetupLogging(cfg) log.Printf("D! TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z D! TEST\n")) } func TestErrorWriteLogToFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - config := createBasicLogConfig(tmpfile.Name()) - config.Quiet = true - SetupLogging(config) + cfg := createBasicLogConfig(tmpfile.Name()) + cfg.Quiet = true + SetupLogging(cfg) log.Printf("E! TEST") log.Printf("I! TEST") // <- should be ignored - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z E! 
TEST\n")) } func TestAddDefaultLogLevel(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - config := createBasicLogConfig(tmpfile.Name()) - config.Debug = true - SetupLogging(config) + cfg := createBasicLogConfig(tmpfile.Name()) + cfg.Debug = true + SetupLogging(cfg) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } func TestWriteToTruncatedFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "") + tmpfile, err := os.CreateTemp("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - config := createBasicLogConfig(tmpfile.Name()) - config.Debug = true - SetupLogging(config) + cfg := createBasicLogConfig(tmpfile.Name()) + cfg.Debug = true + SetupLogging(cfg) log.Printf("TEST") - f, err := ioutil.ReadFile(tmpfile.Name()) + f, err := os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! TEST\n")) @@ -91,44 +90,43 @@ func TestWriteToTruncatedFile(t *testing.T) { log.Printf("SHOULD BE FIRST") - f, err = ioutil.ReadFile(tmpfile.Name()) + f, err = os.ReadFile(tmpfile.Name()) assert.NoError(t, err) assert.Equal(t, f[19:], []byte("Z I! SHOULD BE FIRST\n")) } func TestWriteToFileInRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "LogRotation") - require.NoError(t, err) - config := createBasicLogConfig(filepath.Join(tempDir, "test.log")) - config.LogTarget = LogTargetFile - config.RotationMaxSize = internal.Size{Size: int64(30)} - writer := newLogWriter(config) + tempDir := t.TempDir() + cfg := createBasicLogConfig(filepath.Join(tempDir, "test.log")) + cfg.LogTarget = LogTargetFile + cfg.RotationMaxSize = config.Size(30) + writer := newLogWriter(cfg) // Close the writer here, otherwise the temp folder cannot be deleted because the current log file is in use. closer, isCloser := writer.(io.Closer) assert.True(t, isCloser) - defer func() { closer.Close(); os.RemoveAll(tempDir) }() + t.Cleanup(func() { require.NoError(t, closer.Close()) }) log.Printf("I! TEST 1") // Writes 31 bytes, will rotate log.Printf("I! 
TEST") // Writes 29 byes, no rotation expected - files, _ := ioutil.ReadDir(tempDir) + files, _ := os.ReadDir(tempDir) assert.Equal(t, 2, len(files)) } func TestLogTargetSettings(t *testing.T) { - config := LogConfig{ + cfg := LogConfig{ LogTarget: "", Quiet: true, } - SetupLogging(config) + SetupLogging(cfg) logger, isTelegrafLogger := actualLogger.(*telegrafLog) assert.True(t, isTelegrafLogger) assert.Equal(t, logger.internalWriter, os.Stderr) - config = LogConfig{ + cfg = LogConfig{ LogTarget: "stderr", Quiet: true, } - SetupLogging(config) + SetupLogging(cfg) logger, isTelegrafLogger = actualLogger.(*telegrafLog) assert.True(t, isTelegrafLogger) assert.Equal(t, logger.internalWriter, os.Stderr) @@ -137,7 +135,10 @@ func TestLogTargetSettings(t *testing.T) { func BenchmarkTelegrafLogWrite(b *testing.B) { var msg = []byte("test") var buf bytes.Buffer - w := newTelegrafWriter(&buf) + w, err := newTelegrafWriter(&buf, LogConfig{}) + if err != nil { + panic("Unable to create log writer.") + } for i := 0; i < b.N; i++ { buf.Reset() w.Write(msg) diff --git a/metric.go b/metric.go index 6c7b1c6c5f75c..23098bb8bc71e 100644 --- a/metric.go +++ b/metric.go @@ -57,9 +57,7 @@ type Metric interface { Time() time.Time // Type returns a general type for the entire metric that describes how you - // might interpret, aggregate the values. - // - // This method may be removed in the future and its use is discouraged. + // might interpret, aggregate the values. Used by prometheus and statsd. Type() ValueType // SetName sets the metric name. @@ -122,14 +120,4 @@ type Metric interface { // Drop marks the metric as processed successfully without being written // to any output. Drop() - - // SetAggregate indicates the metric is an aggregated value. - // - // This method may be removed in the future and its use is discouraged. - SetAggregate(bool) - - // IsAggregate returns true if the Metric is an aggregate. - // - // This method may be removed in the future and its use is discouraged. - IsAggregate() bool } diff --git a/metric/metric.go b/metric/metric.go index 517645a831280..f8483459a93bf 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -15,8 +15,7 @@ type metric struct { fields []*telegraf.Field tm time.Time - tp telegraf.ValueType - aggregate bool + tp telegraf.ValueType } func New( @@ -25,7 +24,7 @@ func New( fields map[string]interface{}, tm time.Time, tp ...telegraf.ValueType, -) (telegraf.Metric, error) { +) telegraf.Metric { var vtype telegraf.ValueType if len(tp) > 0 { vtype = tp[0] @@ -61,19 +60,18 @@ func New( } } - return m, nil + return m } // FromMetric returns a deep copy of the metric with any tracking information // removed. 
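The churn in this file follows one signature change: metric.New no longer returns an error, so every call site drops its err plumbing. A minimal standalone sketch of the new shape:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	// Previously: m, err := metric.New(...); the error return is now gone.
	m := metric.New(
		"cpu",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"usage_idle": float64(99)},
		time.Now(),
	)
	fmt.Println(m.Name(), m.Fields())
}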
func FromMetric(other telegraf.Metric) telegraf.Metric { m := &metric{ - name: other.Name(), - tags: make([]*telegraf.Tag, len(other.TagList())), - fields: make([]*telegraf.Field, len(other.FieldList())), - tm: other.Time(), - tp: other.Type(), - aggregate: other.IsAggregate(), + name: other.Name(), + tags: make([]*telegraf.Tag, len(other.TagList())), + fields: make([]*telegraf.Field, len(other.FieldList())), + tm: other.Time(), + tp: other.Type(), } for i, tag := range other.TagList() { @@ -233,12 +231,11 @@ func (m *metric) SetTime(t time.Time) { func (m *metric) Copy() telegraf.Metric { m2 := &metric{ - name: m.name, - tags: make([]*telegraf.Tag, len(m.tags)), - fields: make([]*telegraf.Field, len(m.fields)), - tm: m.tm, - tp: m.tp, - aggregate: m.aggregate, + name: m.name, + tags: make([]*telegraf.Tag, len(m.tags)), + fields: make([]*telegraf.Field, len(m.fields)), + tm: m.tm, + tp: m.tp, } for i, tag := range m.tags { @@ -251,14 +248,6 @@ func (m *metric) Copy() telegraf.Metric { return m2 } -func (m *metric) SetAggregate(b bool) { - m.aggregate = true -} - -func (m *metric) IsAggregate() bool { - return m.aggregate -} - func (m *metric) HashID() uint64 { h := fnv.New64a() h.Write([]byte(m.name)) @@ -297,7 +286,7 @@ func convertField(v interface{}) interface{} { case uint: return uint64(v) case uint64: - return uint64(v) + return v case []byte: return string(v) case int32: @@ -340,7 +329,7 @@ func convertField(v interface{}) interface{} { } case *uint64: if v != nil { - return uint64(*v) + return *v } case *[]byte: if v != nil { diff --git a/metric/metric_test.go b/metric/metric_test.go index 7033d32303f16..d4d1cb11bb8ed 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -20,8 +20,7 @@ func TestNewMetric(t *testing.T) { "usage_idle": float64(99), "usage_busy": float64(1), } - m, err := New("cpu", tags, fields, now) - require.NoError(t, err) + m := New("cpu", tags, fields, now) require.Equal(t, "cpu", m.Name()) require.Equal(t, tags, m.Tags()) @@ -38,10 +37,7 @@ func baseMetric() telegraf.Metric { } now := time.Now() - m, err := New("cpu", tags, fields, now) - if err != nil { - panic(err) - } + m := New("cpu", tags, fields, now) return m } @@ -176,7 +172,7 @@ func TestTagList_Sorted(t *testing.T) { func TestEquals(t *testing.T) { now := time.Now() - m1, err := New("cpu", + m1 := New("cpu", map[string]string{ "host": "localhost", }, @@ -185,9 +181,8 @@ func TestEquals(t *testing.T) { }, now, ) - require.NoError(t, err) - m2, err := New("cpu", + m2 := New("cpu", map[string]string{ "host": "localhost", }, @@ -196,7 +191,6 @@ func TestEquals(t *testing.T) { }, now, ) - require.NoError(t, err) lhs := m1.(*metric) require.Equal(t, lhs, m2) @@ -208,7 +202,7 @@ func TestEquals(t *testing.T) { } func TestHashID(t *testing.T) { - m, _ := New( + m := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -241,7 +235,7 @@ func TestHashID(t *testing.T) { } func TestHashID_Consistency(t *testing.T) { - m, _ := New( + m := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -255,7 +249,7 @@ func TestHashID_Consistency(t *testing.T) { ) hash := m.HashID() - m2, _ := New( + m2 := New( "cpu", map[string]string{ "datacenter": "us-east-1", @@ -274,7 +268,7 @@ func TestHashID_Consistency(t *testing.T) { } func TestHashID_Delimiting(t *testing.T) { - m1, _ := New( + m1 := New( "cpu", map[string]string{ "a": "x", @@ -286,7 +280,7 @@ func TestHashID_Delimiting(t *testing.T) { }, time.Now(), ) - m2, _ := New( + m2 := New( "cpu", map[string]string{ "a": "xbycz", @@ -328,15 +322,7 
@@ func TestValueType(t *testing.T) { fields := map[string]interface{}{ "value": float64(42), } - m, err := New("cpu", tags, fields, now, telegraf.Gauge) - assert.NoError(t, err) + m := New("cpu", tags, fields, now, telegraf.Gauge) assert.Equal(t, telegraf.Gauge, m.Type()) } - -func TestCopyAggregate(t *testing.T) { - m1 := baseMetric() - m1.SetAggregate(true) - m2 := m1.Copy() - assert.True(t, m2.IsAggregate()) -} diff --git a/metric/series_grouper.go b/metric/series_grouper.go index 5dc66e11b8e00..03f110abcb429 100644 --- a/metric/series_grouper.go +++ b/metric/series_grouper.go @@ -1,10 +1,9 @@ package metric import ( - "hash/fnv" - "io" + "encoding/binary" + "hash/maphash" "sort" - "strconv" "time" "github.com/influxdata/telegraf" @@ -23,14 +22,17 @@ import ( // + cpu,host=localhost idle_time=42,usage_time=42 func NewSeriesGrouper() *SeriesGrouper { return &SeriesGrouper{ - metrics: make(map[uint64]telegraf.Metric), - ordered: []telegraf.Metric{}, + metrics: make(map[uint64]telegraf.Metric), + ordered: []telegraf.Metric{}, + hashSeed: maphash.MakeSeed(), } } type SeriesGrouper struct { metrics map[uint64]telegraf.Metric ordered []telegraf.Metric + + hashSeed maphash.Seed } // Add adds a field key and value to the series. @@ -41,46 +43,65 @@ func (g *SeriesGrouper) Add( field string, fieldValue interface{}, ) error { - var err error - id := groupID(measurement, tags, tm) - metric := g.metrics[id] - if metric == nil { - metric, err = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) - if err != nil { - return err - } - g.metrics[id] = metric - g.ordered = append(g.ordered, metric) + taglist := make([]*telegraf.Tag, 0, len(tags)) + for k, v := range tags { + taglist = append(taglist, + &telegraf.Tag{Key: k, Value: v}) + } + sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key }) + + id := groupID(g.hashSeed, measurement, taglist, tm) + m := g.metrics[id] + if m == nil { + m = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) + g.metrics[id] = m + g.ordered = append(g.ordered, m) } else { - metric.AddField(field, fieldValue) + m.AddField(field, fieldValue) } return nil } +// AddMetric adds a metric to the series, merging with any previous matching metrics. +func (g *SeriesGrouper) AddMetric( + metric telegraf.Metric, +) { + id := groupID(g.hashSeed, metric.Name(), metric.TagList(), metric.Time()) + m := g.metrics[id] + if m == nil { + m = metric.Copy() + g.metrics[id] = m + g.ordered = append(g.ordered, m) + } else { + for _, f := range metric.FieldList() { + m.AddField(f.Key, f.Value) + } + } +} + // Metrics returns the metrics grouped by series and time. 
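Usage of the grouper stays as its doc comment describes: fields added under the same measurement, tag set, and timestamp merge into one metric. A small sketch (errors ignored for brevity):

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

func main() {
	g := metric.NewSeriesGrouper()
	now := time.Now()
	tags := map[string]string{"host": "localhost"}

	// Same series key, so both fields land on a single metric.
	_ = g.Add("cpu", tags, now, "idle_time", 42)
	_ = g.Add("cpu", tags, now, "usage_time", 42)

	for _, m := range g.Metrics() {
		fmt.Println(m.Name(), m.Fields()) // cpu map[idle_time:42 usage_time:42]
	}
}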
func (g *SeriesGrouper) Metrics() []telegraf.Metric { return g.ordered } -func groupID(measurement string, tags map[string]string, tm time.Time) uint64 { - h := fnv.New64a() - h.Write([]byte(measurement)) - h.Write([]byte("\n")) +func groupID(seed maphash.Seed, measurement string, taglist []*telegraf.Tag, tm time.Time) uint64 { + var mh maphash.Hash + mh.SetSeed(seed) + + mh.WriteString(measurement) + mh.WriteByte(0) - taglist := make([]*telegraf.Tag, 0, len(tags)) - for k, v := range tags { - taglist = append(taglist, - &telegraf.Tag{Key: k, Value: v}) - } - sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key }) for _, tag := range taglist { - h.Write([]byte(tag.Key)) - h.Write([]byte("\n")) - h.Write([]byte(tag.Value)) - h.Write([]byte("\n")) + mh.WriteString(tag.Key) + mh.WriteByte(0) + mh.WriteString(tag.Value) + mh.WriteByte(0) } - h.Write([]byte("\n")) + mh.WriteByte(0) + + var tsBuf [8]byte + binary.BigEndian.PutUint64(tsBuf[:], uint64(tm.UnixNano())) + mh.Write(tsBuf[:]) - io.WriteString(h, strconv.FormatInt(tm.UnixNano(), 10)) - return h.Sum64() + return mh.Sum64() } diff --git a/metric/series_grouper_test.go b/metric/series_grouper_test.go new file mode 100644 index 0000000000000..eee338a41d130 --- /dev/null +++ b/metric/series_grouper_test.go @@ -0,0 +1,37 @@ +package metric + +import ( + "hash/maphash" + "testing" + "time" +) + +var m = New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f1": 1, + "f2": 2, + "f3": 3, + "f4": 4, + "f5": 5, + "f6": 6, + "f7": 7, + "f8": 8, + }, + time.Now(), +) + +var result uint64 + +var hashSeed = maphash.MakeSeed() + +func BenchmarkGroupID(b *testing.B) { + for n := 0; n < b.N; n++ { + result = groupID(hashSeed, m.Name(), m.TagList(), m.Time()) + } +} diff --git a/metric/tracking.go b/metric/tracking.go index e370d9f2a7ccc..2f46b4b05f3e3 100644 --- a/metric/tracking.go +++ b/metric/tracking.go @@ -18,7 +18,7 @@ func WithTracking(metric telegraf.Metric, fn NotifyFunc) (telegraf.Metric, teleg return newTrackingMetric(metric, fn) } -// WithBatchTracking adds tracking to the metrics and registers the notify +// WithGroupTracking adds tracking to the metrics and registers the notify // function to be called when processing is complete. func WithGroupTracking(metric []telegraf.Metric, fn NotifyFunc) ([]telegraf.Metric, telegraf.TrackingID) { return newTrackingMetricGroup(metric, fn) @@ -117,7 +117,6 @@ func newTrackingMetricGroup(group []telegraf.Metric, fn NotifyFunc) ([]telegraf. d: d, } group[i] = dm - } if finalizer != nil { runtime.SetFinalizer(d, finalizer) diff --git a/metric/tracking_test.go b/metric/tracking_test.go index 0ca1ca4daa4bc..4d89a32c18623 100644 --- a/metric/tracking_test.go +++ b/metric/tracking_test.go @@ -16,10 +16,7 @@ func mustMetric( tm time.Time, tp ...telegraf.ValueType, ) telegraf.Metric { - m, err := New(name, tags, fields, tm, tp...) - if err != nil { - panic("mustMetric") - } + m := New(name, tags, fields, tm, tp...) 
return m } @@ -78,12 +75,13 @@ func TestTracking(t *testing.T) { { name: "accept", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m.Accept() @@ -93,12 +91,13 @@ func TestTracking(t *testing.T) { { name: "reject", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m.Reject() @@ -108,12 +107,13 @@ func TestTracking(t *testing.T) { { name: "accept copy", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() @@ -125,12 +125,13 @@ func TestTracking(t *testing.T) { { name: "copy with accept and done", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() @@ -142,12 +143,13 @@ func TestTracking(t *testing.T) { { name: "copy with mixed delivery", metric: mustMetric( - "cpu", + "memory", map[string]string{}, map[string]interface{}{ "value": 42, }, time.Unix(0, 0), + telegraf.Gauge, ), actions: func(m telegraf.Metric) { m2 := m.Copy() diff --git a/models/buffer.go b/models/buffer.go index 9cc1a3d889f38..1e6ef10fd21f5 100644 --- a/models/buffer.go +++ b/models/buffer.go @@ -220,17 +220,6 @@ func (b *Buffer) Reject(batch []telegraf.Metric) { b.BufferSize.Set(int64(b.length())) } -// dist returns the distance between two indexes. Because this data structure -// uses a half open range the arguments must both either left side or right -// side pairs. -func (b *Buffer) dist(begin, end int) int { - if begin <= end { - return end - begin - } else { - return b.cap - begin + end - } -} - // next returns the next index with wrapping. func (b *Buffer) next(index int) int { index++ @@ -240,22 +229,13 @@ func (b *Buffer) next(index int) int { return index } -// next returns the index that is count newer with wrapping. +// nextby returns the index that is count newer with wrapping. func (b *Buffer) nextby(index, count int) int { index += count index %= b.cap return index } -// next returns the prev index with wrapping. -func (b *Buffer) prev(index int) int { - index-- - if index < 0 { - return b.cap - 1 - } - return index -} - // prevby returns the index that is count older with wrapping. 
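The surviving index helpers treat the buffer as a ring: moving by count wraps modulo the capacity. A standalone illustration of the arithmetic (capacity of 5 assumed; prevby's body is paraphrased, since only its first line is shown here):

package main

import "fmt"

func main() {
	const capacity = 5

	nextby := func(index, count int) int {
		return (index + count) % capacity
	}
	prevby := func(index, count int) int {
		index = (index - count) % capacity
		if index < 0 {
			index += capacity // wrap negative results back into [0, capacity)
		}
		return index
	}

	fmt.Println(nextby(4, 2)) // 1
	fmt.Println(prevby(1, 2)) // 4
}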
func (b *Buffer) prevby(index, count int) int { index -= count diff --git a/models/buffer_test.go b/models/buffer_test.go index 9aef94fb86585..d830ac91c6dd9 100644 --- a/models/buffer_test.go +++ b/models/buffer_test.go @@ -34,7 +34,7 @@ func Metric() telegraf.Metric { } func MetricTime(sec int64) telegraf.Metric { - m, err := metric.New( + m := metric.New( "cpu", map[string]string{}, map[string]interface{}{ @@ -42,9 +42,6 @@ func MetricTime(sec int64) telegraf.Metric { }, time.Unix(sec, 0), ) - if err != nil { - panic(err) - } return m } diff --git a/models/filter.go b/models/filter.go index 13627daad3434..8103c23173297 100644 --- a/models/filter.go +++ b/models/filter.go @@ -54,41 +54,41 @@ func (f *Filter) Compile() error { var err error f.nameDrop, err = filter.Compile(f.NameDrop) if err != nil { - return fmt.Errorf("Error compiling 'namedrop', %s", err) + return fmt.Errorf("error compiling 'namedrop', %s", err) } f.namePass, err = filter.Compile(f.NamePass) if err != nil { - return fmt.Errorf("Error compiling 'namepass', %s", err) + return fmt.Errorf("error compiling 'namepass', %s", err) } f.fieldDrop, err = filter.Compile(f.FieldDrop) if err != nil { - return fmt.Errorf("Error compiling 'fielddrop', %s", err) + return fmt.Errorf("error compiling 'fielddrop', %s", err) } f.fieldPass, err = filter.Compile(f.FieldPass) if err != nil { - return fmt.Errorf("Error compiling 'fieldpass', %s", err) + return fmt.Errorf("error compiling 'fieldpass', %s", err) } f.tagExclude, err = filter.Compile(f.TagExclude) if err != nil { - return fmt.Errorf("Error compiling 'tagexclude', %s", err) + return fmt.Errorf("error compiling 'tagexclude', %s", err) } f.tagInclude, err = filter.Compile(f.TagInclude) if err != nil { - return fmt.Errorf("Error compiling 'taginclude', %s", err) + return fmt.Errorf("error compiling 'taginclude', %s", err) } for i := range f.TagDrop { f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter) if err != nil { - return fmt.Errorf("Error compiling 'tagdrop', %s", err) + return fmt.Errorf("error compiling 'tagdrop', %s", err) } } for i := range f.TagPass { f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter) if err != nil { - return fmt.Errorf("Error compiling 'tagpass', %s", err) + return fmt.Errorf("error compiling 'tagpass', %s", err) } } return nil @@ -132,17 +132,11 @@ func (f *Filter) IsActive() bool { // based on the drop/pass filter parameters func (f *Filter) shouldNamePass(key string) bool { pass := func(f *Filter) bool { - if f.namePass.Match(key) { - return true - } - return false + return f.namePass.Match(key) } drop := func(f *Filter) bool { - if f.nameDrop.Match(key) { - return false - } - return true + return !f.nameDrop.Match(key) } if f.namePass != nil && f.nameDrop != nil { diff --git a/models/filter_test.go b/models/filter_test.go index d241244b9d704..aa32e095163c4 100644 --- a/models/filter_test.go +++ b/models/filter_test.go @@ -15,11 +15,10 @@ func TestFilter_ApplyEmpty(t *testing.T) { require.NoError(t, f.Compile()) require.False(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) } @@ -37,11 +36,10 @@ func TestFilter_ApplyTagsDontPass(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{"cpu": "cpu-total"}, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) 
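For orientation, the Filter API exercised by these tests composes like this, a hedged sketch with invented match patterns:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/models"
)

func main() {
	f := models.Filter{
		NameDrop:   []string{"mem*"}, // drop metrics whose name matches
		TagExclude: []string{"host"}, // strip matching tags from survivors
	}
	if err := f.Compile(); err != nil {
		panic(err)
	}

	m := metric.New("cpu",
		map[string]string{"host": "localhost"},
		map[string]interface{}{"value": int64(1)},
		time.Now())

	if f.Select(m) { // name passes: "cpu" does not match "mem*"
		f.Modify(m) // tag filtering happens here
		fmt.Println(m.Name(), m.Tags()) // cpu map[]
	}
}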
require.False(t, f.Select(m)) } @@ -53,14 +51,13 @@ func TestFilter_ApplyDeleteFields(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{ "value": int64(1), "value2": int64(2), }, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) f.Modify(m) require.Equal(t, map[string]interface{}{"value2": int64(2)}, m.Fields()) @@ -74,14 +71,13 @@ func TestFilter_ApplyDeleteAllFields(t *testing.T) { require.NoError(t, f.Compile()) require.True(t, f.IsActive()) - m, err := metric.New("m", + m := metric.New("m", map[string]string{}, map[string]interface{}{ "value": int64(1), "value2": int64(2), }, time.Now()) - require.NoError(t, err) require.True(t, f.Select(m)) f.Modify(m) require.Len(t, m.FieldList(), 0) @@ -332,14 +328,13 @@ func TestFilter_TagDrop(t *testing.T) { } func TestFilter_FilterTagsNoMatches(t *testing.T) { - m, err := metric.New("m", + m := metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f := Filter{ TagExclude: []string{"nomatch"}, } @@ -361,14 +356,13 @@ func TestFilter_FilterTagsNoMatches(t *testing.T) { } func TestFilter_FilterTagsMatches(t *testing.T) { - m, err := metric.New("m", + m := metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f := Filter{ TagExclude: []string{"ho*"}, } @@ -379,14 +373,13 @@ func TestFilter_FilterTagsMatches(t *testing.T) { "mytag": "foobar", }, m.Tags()) - m, err = metric.New("m", + m = metric.New("m", map[string]string{ "host": "localhost", "mytag": "foobar", }, map[string]interface{}{"value": int64(1)}, time.Now()) - require.NoError(t, err) f = Filter{ TagInclude: []string{"my*"}, } @@ -402,7 +395,6 @@ func TestFilter_FilterTagsMatches(t *testing.T) { // both parameters were defined // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterNamePassAndDrop(t *testing.T) { - inputData := []string{"name1", "name2", "name3", "name4"} expectedResult := []bool{false, true, false, false} @@ -422,7 +414,6 @@ func TestFilter_FilterNamePassAndDrop(t *testing.T) { // both parameters were defined // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterFieldPassAndDrop(t *testing.T) { - inputData := []string{"field1", "field2", "field3", "field4"} expectedResult := []bool{false, true, false, false} @@ -479,7 +470,6 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) { for i, tag := range inputData { require.Equal(t, f.shouldTagsPass(tag), expectedResult[i]) } - } func BenchmarkFilter(b *testing.B) { diff --git a/models/log.go b/models/log.go index c0b52a812d924..c401d1b4601c2 100644 --- a/models/log.go +++ b/models/log.go @@ -4,6 +4,7 @@ import ( "log" "reflect" + "github.com/fatih/color" "github.com/influxdata/telegraf" ) @@ -79,7 +80,7 @@ func logName(pluginType, name, alias string) string { return pluginType + "." 
+ name + "::" + alias } -func SetLoggerOnPlugin(i interface{}, log telegraf.Logger) { +func SetLoggerOnPlugin(i interface{}, logger telegraf.Logger) { valI := reflect.ValueOf(i) if valI.Type().Kind() != reflect.Ptr { @@ -94,12 +95,42 @@ func SetLoggerOnPlugin(i interface{}, log telegraf.Logger) { switch field.Type().String() { case "telegraf.Logger": if field.CanSet() { - field.Set(reflect.ValueOf(log)) + field.Set(reflect.ValueOf(logger)) } default: - log.Debugf("Plugin %q defines a 'Log' field on its struct of an unexpected type %q. Expected telegraf.Logger", + logger.Debugf("Plugin %q defines a 'Log' field on its struct of an unexpected type %q. Expected telegraf.Logger", valI.Type().Name(), field.Type().String()) } +} + +func PrintPluginDeprecationNotice(level telegraf.Escalation, name string, info telegraf.DeprecationInfo) { + var prefix string + + switch level { + case telegraf.Warn: + prefix = "W! " + color.YellowString("DeprecationWarning") + case telegraf.Error: + prefix = "E! " + color.RedString("DeprecationError") + } + + log.Printf( + "%s: Plugin %q deprecated since version %s and will be removed in %s: %s", + prefix, name, info.Since, info.RemovalIn, info.Notice, + ) +} + +func PrintOptionDeprecationNotice(level telegraf.Escalation, plugin, option string, info telegraf.DeprecationInfo) { + var prefix string + + switch level { + case telegraf.Warn: + prefix = "W! " + color.YellowString("DeprecationWarning") + case telegraf.Error: + prefix = "E! " + color.RedString("DeprecationError") + } - return + log.Printf( + "%s: Option %q of plugin %q deprecated since version %s and will be removed in %s: %s", + prefix, option, plugin, info.Since, info.RemovalIn, info.Notice, + ) } diff --git a/models/makemetric.go b/models/makemetric.go index 29ef5f452acf2..b0ce905c4a228 100644 --- a/models/makemetric.go +++ b/models/makemetric.go @@ -4,7 +4,7 @@ import ( "github.com/influxdata/telegraf" ) -// Makemetric applies new metric plugin and agent measurement and tag +// makemetric applies new metric plugin and agent measurement and tag // settings. 
func makemetric( metric telegraf.Metric, diff --git a/models/running_aggregator.go b/models/running_aggregator.go index cbfb9889b87e5..b1dc621bafac2 100644 --- a/models/running_aggregator.go +++ b/models/running_aggregator.go @@ -108,19 +108,15 @@ func (r *RunningAggregator) UpdateWindow(start, until time.Time) { r.log.Debugf("Updated aggregation range [%s, %s]", start, until) } -func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { +func (r *RunningAggregator) MakeMetric(telegrafMetric telegraf.Metric) telegraf.Metric { m := makemetric( - metric, + telegrafMetric, r.Config.NameOverride, r.Config.MeasurementPrefix, r.Config.MeasurementSuffix, r.Config.Tags, nil) - if m != nil { - m.SetAggregate(true) - } - r.MetricsPushed.Incr(1) return m diff --git a/models/running_input.go b/models/running_input.go index 70a4c2ee3a70f..16f4bd10bc11e 100644 --- a/models/running_input.go +++ b/models/running_input.go @@ -60,6 +60,7 @@ type InputConfig struct { Alias string Interval time.Duration CollectionJitter time.Duration + CollectionOffset time.Duration Precision time.Duration NameOverride string diff --git a/models/running_input_test.go b/models/running_input_test.go index ff3747116f6ca..8f9390f53b730 100644 --- a/models/running_input_test.go +++ b/models/running_input_test.go @@ -23,17 +23,16 @@ func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) { require.NoError(t, ri.Config.Filter.Compile()) ri.SetDefaultTags(map[string]string{"a": "x", "b": "y"}) - m, err := metric.New("cpu", + m := metric.New("cpu", map[string]string{}, map[string]interface{}{ "value": 42, }, now) - require.NoError(t, err) actual := ri.MakeMetric(m) - expected, err := metric.New("cpu", + expected := metric.New("cpu", map[string]string{ "b": "y", }, @@ -41,7 +40,6 @@ func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) { "value": 42, }, now) - require.NoError(t, err) testutil.RequireMetricEqual(t, expected, actual) } @@ -52,13 +50,12 @@ func TestMakeMetricNoFields(t *testing.T) { Name: "TestRunningInput", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{}, now, telegraf.Untyped) m = ri.MakeMetric(m) - require.NoError(t, err) assert.Nil(t, m) } @@ -69,7 +66,7 @@ func TestMakeMetricNilFields(t *testing.T) { Name: "TestRunningInput", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), @@ -77,17 +74,15 @@ func TestMakeMetricNilFields(t *testing.T) { }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int(101), }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -110,7 +105,7 @@ func TestMakeMetricWithPluginTags(t *testing.T) { telegraf.Untyped) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{ "foo": "bar", }, @@ -119,7 +114,6 @@ func TestMakeMetricWithPluginTags(t *testing.T) { }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -135,7 +129,7 @@ func TestMakeMetricFilteredOut(t *testing.T) { assert.NoError(t, ri.Config.Filter.Compile()) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), @@ -143,7 +137,6 @@ func TestMakeMetricFilteredOut(t *testing.T) { now, telegraf.Untyped) m = ri.MakeMetric(m) - require.NoError(t, err) 
assert.Nil(t, m) } @@ -164,7 +157,7 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { now, telegraf.Untyped) m = ri.MakeMetric(m) - expected, err := metric.New("RITest", + expected := metric.New("RITest", map[string]string{ "foo": "bar", }, @@ -173,7 +166,6 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -184,23 +176,21 @@ func TestMakeMetricNameOverride(t *testing.T) { NameOverride: "foobar", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("foobar", + expected := metric.New("foobar", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -211,23 +201,21 @@ func TestMakeMetricNamePrefix(t *testing.T) { MeasurementPrefix: "foobar_", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("foobar_RITest", + expected := metric.New("foobar_RITest", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -238,23 +226,21 @@ func TestMakeMetricNameSuffix(t *testing.T) { MeasurementSuffix: "_foobar", }) - m, err := metric.New("RITest", + m := metric.New("RITest", map[string]string{}, map[string]interface{}{ "value": int64(101), }, now, telegraf.Untyped) - require.NoError(t, err) m = ri.MakeMetric(m) - expected, err := metric.New("RITest_foobar", + expected := metric.New("RITest_foobar", nil, map[string]interface{}{ "value": 101, }, now, ) - require.NoError(t, err) require.Equal(t, expected, m) } @@ -289,6 +275,6 @@ func TestMetricErrorCounters(t *testing.T) { type testInput struct{} -func (t *testInput) Description() string { return "" } -func (t *testInput) SampleConfig() string { return "" } -func (t *testInput) Gather(acc telegraf.Accumulator) error { return nil } +func (t *testInput) Description() string { return "" } +func (t *testInput) SampleConfig() string { return "" } +func (t *testInput) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/models/running_output.go b/models/running_output.go index 894ae011c986d..6f5f8c0a84bad 100644 --- a/models/running_output.go +++ b/models/running_output.go @@ -11,10 +11,10 @@ import ( const ( // Default size of metrics batch size. - DEFAULT_METRIC_BATCH_SIZE = 1000 + DefaultMetricBatchSize = 1000 // Default number of metrics kept. It should be a multiple of batch size. 
- DEFAULT_METRIC_BUFFER_LIMIT = 10000 + DefaultMetricBufferLimit = 10000 ) // OutputConfig containing name and filter @@ -56,7 +56,6 @@ type RunningOutput struct { } func NewRunningOutput( - name string, output telegraf.Output, config *OutputConfig, batchSize int, @@ -78,13 +77,13 @@ func NewRunningOutput( bufferLimit = config.MetricBufferLimit } if bufferLimit == 0 { - bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT + bufferLimit = DefaultMetricBufferLimit } if config.MetricBatchSize > 0 { batchSize = config.MetricBatchSize } if batchSize == 0 { - batchSize = DEFAULT_METRIC_BATCH_SIZE + batchSize = DefaultMetricBatchSize } ro := &RunningOutput{ @@ -114,8 +113,8 @@ func (r *RunningOutput) LogName() string { return logName("outputs", r.Config.Name, r.Config.Alias) } -func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) { - ro.MetricsFiltered.Incr(1) +func (r *RunningOutput) metricFiltered(metric telegraf.Metric) { + r.MetricsFiltered.Incr(1) metric.Drop() } @@ -125,7 +124,6 @@ func (r *RunningOutput) Init() error { if err != nil { return err } - } return nil } @@ -133,45 +131,45 @@ func (r *RunningOutput) Init() error { // AddMetric adds a metric to the output. // // Takes ownership of metric -func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { - if ok := ro.Config.Filter.Select(metric); !ok { - ro.metricFiltered(metric) +func (r *RunningOutput) AddMetric(metric telegraf.Metric) { + if ok := r.Config.Filter.Select(metric); !ok { + r.metricFiltered(metric) return } - ro.Config.Filter.Modify(metric) + r.Config.Filter.Modify(metric) if len(metric.FieldList()) == 0 { - ro.metricFiltered(metric) + r.metricFiltered(metric) return } - if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { - ro.aggMutex.Lock() + if output, ok := r.Output.(telegraf.AggregatingOutput); ok { + r.aggMutex.Lock() output.Add(metric) - ro.aggMutex.Unlock() + r.aggMutex.Unlock() return } - if len(ro.Config.NameOverride) > 0 { - metric.SetName(ro.Config.NameOverride) + if len(r.Config.NameOverride) > 0 { + metric.SetName(r.Config.NameOverride) } - if len(ro.Config.NamePrefix) > 0 { - metric.AddPrefix(ro.Config.NamePrefix) + if len(r.Config.NamePrefix) > 0 { + metric.AddPrefix(r.Config.NamePrefix) } - if len(ro.Config.NameSuffix) > 0 { - metric.AddSuffix(ro.Config.NameSuffix) + if len(r.Config.NameSuffix) > 0 { + metric.AddSuffix(r.Config.NameSuffix) } - dropped := ro.buffer.Add(metric) - atomic.AddInt64(&ro.droppedMetrics, int64(dropped)) + dropped := r.buffer.Add(metric) + atomic.AddInt64(&r.droppedMetrics, int64(dropped)) - count := atomic.AddInt64(&ro.newMetricsCount, 1) - if count == int64(ro.MetricBatchSize) { - atomic.StoreInt64(&ro.newMetricsCount, 0) + count := atomic.AddInt64(&r.newMetricsCount, 1) + if count == int64(r.MetricBatchSize) { + atomic.StoreInt64(&r.newMetricsCount, 0) select { - case ro.BatchReady <- time.Now(): + case r.BatchReady <- time.Now(): default: } } @@ -179,50 +177,50 @@ func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { // Write writes all metrics to the output, stopping when all have been sent on // or error. -func (ro *RunningOutput) Write() error { - if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { - ro.aggMutex.Lock() +func (r *RunningOutput) Write() error { + if output, ok := r.Output.(telegraf.AggregatingOutput); ok { + r.aggMutex.Lock() metrics := output.Push() - ro.buffer.Add(metrics...) + r.buffer.Add(metrics...) 
output.Reset() - ro.aggMutex.Unlock() + r.aggMutex.Unlock() } - atomic.StoreInt64(&ro.newMetricsCount, 0) + atomic.StoreInt64(&r.newMetricsCount, 0) // Only process the metrics in the buffer now. Metrics added while we are // writing will be sent on the next call. - nBuffer := ro.buffer.Len() - nBatches := nBuffer/ro.MetricBatchSize + 1 + nBuffer := r.buffer.Len() + nBatches := nBuffer/r.MetricBatchSize + 1 for i := 0; i < nBatches; i++ { - batch := ro.buffer.Batch(ro.MetricBatchSize) + batch := r.buffer.Batch(r.MetricBatchSize) if len(batch) == 0 { break } - err := ro.write(batch) + err := r.write(batch) if err != nil { - ro.buffer.Reject(batch) + r.buffer.Reject(batch) return err } - ro.buffer.Accept(batch) + r.buffer.Accept(batch) } return nil } // WriteBatch writes a single batch of metrics to the output. -func (ro *RunningOutput) WriteBatch() error { - batch := ro.buffer.Batch(ro.MetricBatchSize) +func (r *RunningOutput) WriteBatch() error { + batch := r.buffer.Batch(r.MetricBatchSize) if len(batch) == 0 { return nil } - err := ro.write(batch) + err := r.write(batch) if err != nil { - ro.buffer.Reject(batch) + r.buffer.Reject(batch) return err } - ro.buffer.Accept(batch) + r.buffer.Accept(batch) return nil } diff --git a/models/running_output_test.go b/models/running_output_test.go index 38f79f9db397d..8e8d9a995fdf8 100644 --- a/models/running_output_test.go +++ b/models/running_output_test.go @@ -29,14 +29,6 @@ var next5 = []telegraf.Metric{ testutil.TestMetric(101, "metric10"), } -func reverse(metrics []telegraf.Metric) []telegraf.Metric { - result := make([]telegraf.Metric, 0, len(metrics)) - for i := len(metrics) - 1; i >= 0; i-- { - result = append(result, metrics[i]) - } - return result -} - // Benchmark adding metrics. func BenchmarkRunningOutputAddWrite(b *testing.B) { conf := &OutputConfig{ @@ -44,7 +36,7 @@ func BenchmarkRunningOutputAddWrite(b *testing.B) { } m := &perfOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -59,7 +51,7 @@ func BenchmarkRunningOutputAddWriteEvery100(b *testing.B) { } m := &perfOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -77,7 +69,7 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) { m := &perfOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for n := 0; n < b.N; n++ { ro.AddMetric(testutil.TestMetric(101, "metric1")) @@ -94,7 +86,7 @@ func TestRunningOutput_DropFilter(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -119,7 +111,7 @@ func TestRunningOutput_PassFilter(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -144,7 +136,7 @@ func TestRunningOutput_TagIncludeNoMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, 
m.Metrics(), 0) @@ -165,7 +157,7 @@ func TestRunningOutput_TagExcludeMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -186,7 +178,7 @@ func TestRunningOutput_TagExcludeNoMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -207,7 +199,7 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) { assert.NoError(t, conf.Filter.Compile()) m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -225,7 +217,7 @@ func TestRunningOutput_NameOverride(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -243,7 +235,7 @@ func TestRunningOutput_NamePrefix(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -261,7 +253,7 @@ func TestRunningOutput_NameSuffix(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) ro.AddMetric(testutil.TestMetric(101, "metric1")) assert.Len(t, m.Metrics(), 0) @@ -279,7 +271,7 @@ func TestRunningOutputDefault(t *testing.T) { } m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) + ro := NewRunningOutput(m, conf, 1000, 10000) for _, metric := range first5 { ro.AddMetric(metric) @@ -301,7 +293,7 @@ func TestRunningOutputWriteFail(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 4, 12) + ro := NewRunningOutput(m, conf, 4, 12) // Fill buffer to limit twice for _, metric := range first5 { @@ -334,7 +326,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 100, 1000) + ro := NewRunningOutput(m, conf, 100, 1000) // add 5 metrics for _, metric := range first5 { @@ -372,7 +364,7 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 5, 100) + ro := NewRunningOutput(m, conf, 5, 100) // add 5 metrics for _, metric := range first5 { @@ -436,7 +428,7 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { m := &mockOutput{} m.failWrite = true - ro := NewRunningOutput("test", m, conf, 5, 1000) + ro := NewRunningOutput(m, conf, 5, 1000) // add 5 metrics for _, metric := range first5 { @@ -470,7 +462,6 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { func TestInternalMetrics(t *testing.T) { _ = NewRunningOutput( - "test_internal", &mockOutput{}, &OutputConfig{ Filter: Filter{}, @@ -541,16 +532,14 @@ func (m *mockOutput) Write(metrics []telegraf.Metric) error { m.Lock() defer m.Unlock() if m.failWrite { - return fmt.Errorf("Failed Write!") + return fmt.Errorf("failed write") } if m.metrics == nil { m.metrics = []telegraf.Metric{} } - for _, metric := range metrics { - 
m.metrics = append(m.metrics, metric) - } + m.metrics = append(m.metrics, metrics...) return nil } @@ -581,9 +570,9 @@ func (m *perfOutput) SampleConfig() string { return "" } -func (m *perfOutput) Write(metrics []telegraf.Metric) error { +func (m *perfOutput) Write(_ []telegraf.Metric) error { if m.failWrite { - return fmt.Errorf("Failed Write!") + return fmt.Errorf("failed write") } return nil } diff --git a/models/running_parsers.go b/models/running_parsers.go new file mode 100644 index 0000000000000..a7d98bbf8b291 --- /dev/null +++ b/models/running_parsers.go @@ -0,0 +1,97 @@ +package models + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" +) + +type RunningParser struct { + Parser telegraf.Parser + Config *ParserConfig + log telegraf.Logger + + MetricsParsed selfstat.Stat + ParseTime selfstat.Stat +} + +func NewRunningParser(parser telegraf.Parser, config *ParserConfig) *RunningParser { + tags := map[string]string{"type": config.DataFormat} + if config.Alias != "" { + tags["alias"] = config.Alias + } + + parserErrorsRegister := selfstat.Register("parser", "errors", tags) + logger := NewLogger("parsers", config.DataFormat+"::"+config.Parent, config.Alias) + logger.OnErr(func() { + parserErrorsRegister.Incr(1) + }) + SetLoggerOnPlugin(parser, logger) + + return &RunningParser{ + Parser: parser, + Config: config, + MetricsParsed: selfstat.Register( + "parser", + "metrics_parsed", + tags, + ), + ParseTime: selfstat.Register( + "parser", + "parse_time_ns", + tags, + ), + log: logger, + } +} + +// ParserConfig is the common config for all parsers. +type ParserConfig struct { + Parent string + Alias string + DataFormat string + DefaultTags map[string]string +} + +func (r *RunningParser) LogName() string { + return logName("parsers", r.Config.DataFormat+"::"+r.Config.Parent, r.Config.Alias) +} + +func (r *RunningParser) Init() error { + if p, ok := r.Parser.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + +func (r *RunningParser) Parse(buf []byte) ([]telegraf.Metric, error) { + start := time.Now() + m, err := r.Parser.Parse(buf) + elapsed := time.Since(start) + r.ParseTime.Incr(elapsed.Nanoseconds()) + r.MetricsParsed.Incr(int64(len(m))) + + return m, err +} + +func (r *RunningParser) ParseLine(line string) (telegraf.Metric, error) { + start := time.Now() + m, err := r.Parser.ParseLine(line) + elapsed := time.Since(start) + r.ParseTime.Incr(elapsed.Nanoseconds()) + r.MetricsParsed.Incr(1) + + return m, err +} + +func (r *RunningParser) SetDefaultTags(tags map[string]string) { + r.Parser.SetDefaultTags(tags) +} + +func (r *RunningParser) Log() telegraf.Logger { + return r.log +} diff --git a/models/running_processor.go b/models/running_processor.go index 1bd2d0f6ed0c7..0e43857418872 100644 --- a/models/running_processor.go +++ b/models/running_processor.go @@ -20,7 +20,7 @@ func (rp RunningProcessors) Len() int { return len(rp) } func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] } func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order } -// FilterConfig containing a name and filter +// ProcessorConfig containing a name and filter type ProcessorConfig struct { Name string Alias string @@ -52,8 +52,8 @@ func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) { metric.Drop() } -func (r *RunningProcessor) Init() error { - if p, ok := r.Processor.(telegraf.Initializer); ok { +func (rp *RunningProcessor) Init() error { 
+	if p, ok := rp.Processor.(telegraf.Initializer); ok {
 		err := p.Init()
 		if err != nil {
 			return err
@@ -62,39 +62,39 @@ func (r *RunningProcessor) Init() error {
 	return nil
 }
 
-func (r *RunningProcessor) Log() telegraf.Logger {
-	return r.log
+func (rp *RunningProcessor) Log() telegraf.Logger {
+	return rp.log
 }
 
-func (r *RunningProcessor) LogName() string {
-	return logName("processors", r.Config.Name, r.Config.Alias)
+func (rp *RunningProcessor) LogName() string {
+	return logName("processors", rp.Config.Name, rp.Config.Alias)
 }
 
-func (r *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric {
+func (rp *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric {
 	return metric
 }
 
-func (r *RunningProcessor) Start(acc telegraf.Accumulator) error {
-	return r.Processor.Start(acc)
+func (rp *RunningProcessor) Start(acc telegraf.Accumulator) error {
+	return rp.Processor.Start(acc)
 }
 
-func (r *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
-	if ok := r.Config.Filter.Select(m); !ok {
+func (rp *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) error {
+	if ok := rp.Config.Filter.Select(m); !ok {
 		// pass downstream
 		acc.AddMetric(m)
 		return nil
 	}
 
-	r.Config.Filter.Modify(m)
+	rp.Config.Filter.Modify(m)
 	if len(m.FieldList()) == 0 {
 		// drop metric
-		r.metricFiltered(m)
+		rp.metricFiltered(m)
 		return nil
 	}
 
-	return r.Processor.Add(m, acc)
+	return rp.Processor.Add(m, acc)
 }
 
-func (r *RunningProcessor) Stop() {
-	r.Processor.Stop()
+func (rp *RunningProcessor) Stop() {
+	rp.Processor.Stop()
 }
diff --git a/parser.go b/parser.go
new file mode 100644
index 0000000000000..1112fa2118d35
--- /dev/null
+++ b/parser.go
@@ -0,0 +1,39 @@
+package telegraf
+
+// Parser is an interface defining functions that a parser plugin must satisfy.
+type Parser interface {
+	// Parse takes a byte buffer separated by newlines
+	// ie, `cpu.usage.idle 90\ncpu.usage.busy 10`
+	// and parses it into telegraf metrics
+	//
+	// Must be thread-safe.
+	Parse(buf []byte) ([]Metric, error)
+
+	// ParseLine takes a single string metric
+	// ie, "cpu.usage.idle 90"
+	// and parses it into a telegraf metric.
+	//
+	// Must be thread-safe.
+	ParseLine(line string) (Metric, error)
+
+	// SetDefaultTags tells the parser to add all of the given tags
+	// to each parsed metric.
+	// NOTE: do _not_ modify the map after you've passed it here!!
+	SetDefaultTags(tags map[string]string)
+}
+
+type ParserFunc func() (Parser, error)
+
+// ParserInput is an interface for input plugins that are able to parse
+// arbitrary data formats.
+type ParserInput interface {
+	// SetParser sets the parser function for the interface
+	SetParser(parser Parser)
+}
+
+// ParserFuncInput is an interface for input plugins that are able to parse
+// arbitrary data formats.
+type ParserFuncInput interface {
+	// SetParserFunc sets a factory that returns a new parser.
+	SetParserFunc(fn ParserFunc)
+}
diff --git a/plugin.go b/plugin.go
index 0793fbb061115..d20d057b51e5d 100644
--- a/plugin.go
+++ b/plugin.go
@@ -1,5 +1,39 @@
 package telegraf
 
+var Debug bool
+
+// Escalation level for the plugin or option
+type Escalation int
+
+func (e Escalation) String() string {
+	switch e {
+	case Warn:
+		return "WARN"
+	case Error:
+		return "ERROR"
+	}
+	return "NONE"
+}
+
+const (
+	// None means no deprecation
+	None Escalation = iota
+	// Warn means deprecated but still within the grace period
+	Warn
+	// Error means deprecated and beyond grace period
+	Error
+)
+
+// DeprecationInfo contains information for marking a plugin deprecated.
+type DeprecationInfo struct {
+	// Since specifies the version since when the plugin is deprecated
+	Since string
+	// RemovalIn optionally specifies the version when the plugin is scheduled for removal
+	RemovalIn string
+	// Notice for the user on suggested replacements etc.
+	Notice string
+}
+
 // Initializer is an interface that all plugin types: Inputs, Outputs,
 // Processors, and Aggregators can optionally implement to initialize the
 // plugin.
@@ -14,14 +48,11 @@ type Initializer interface {
 // not part of the interface, but will receive an injected logger if it's set.
 // eg: Log telegraf.Logger `toml:"-"`
 type PluginDescriber interface {
-	// SampleConfig returns the default configuration of the Processor
+	// SampleConfig returns the default configuration of the Plugin
 	SampleConfig() string
-
-	// Description returns a one-sentence description on the Processor
-	Description() string
 }
 
-// Logger defines an interface for logging.
+// Logger defines a plugin-related interface for logging.
 type Logger interface {
 	// Errorf logs an error message, patterned after log.Printf.
 	Errorf(format string, args ...interface{})
diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go
index eabfaa4bf8460..c3a6f274b426d 100644
--- a/plugins/aggregators/all/all.go
+++ b/plugins/aggregators/all/all.go
@@ -1,10 +1,14 @@
 package all
 
 import (
+	// Blank imports for plugins to register themselves
 	_ "github.com/influxdata/telegraf/plugins/aggregators/basicstats"
+	_ "github.com/influxdata/telegraf/plugins/aggregators/derivative"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/final"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/histogram"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/merge"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/minmax"
+	_ "github.com/influxdata/telegraf/plugins/aggregators/quantile"
+	_ "github.com/influxdata/telegraf/plugins/aggregators/starlark"
 	_ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter"
 )
diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md
index 8fef0c6f4886a..e6715bb48351a 100644
--- a/plugins/aggregators/basicstats/README.md
+++ b/plugins/aggregators/basicstats/README.md
@@ -1,11 +1,12 @@
 # BasicStats Aggregator Plugin
 
-The BasicStats aggregator plugin give us count,diff,max,min,mean,non_negative_diff,sum,s2(variance), stdev for a set of values,
-emitting the aggregate every `period` seconds.
+The BasicStats aggregator plugin gives us count, diff, max, min, mean,
+non_negative_diff, sum, s2(variance), stdev for a set of values, emitting the
+aggregate every `period` seconds.
 
-### Configuration:
+## Configuration
 
-```toml
+```toml @sample.conf
 # Keep the aggregate basicstats of each metric passing through.
 [[aggregators.basicstats]]
   ## The period on which to flush & clear the aggregator.
@@ -16,38 +17,41 @@ emitting the aggregate every `period` seconds. drop_original = false ## Configures which basic stats to push as fields - # stats = ["count","diff","min","max","mean","non_negative_diff","stdev","s2","sum"] + # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","stdev","s2","sum","interval"] ``` - stats - - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility. - - If empty array, no stats are aggregated + - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility. + - If empty array, no stats are aggregated -### Measurements & Fields: +## Measurements & Fields - measurement1 - - field1_count - - field1_diff (difference) - - field1_max - - field1_min - - field1_mean - - field1_non_negative_diff (non-negative difference) - - field1_sum - - field1_s2 (variance) - - field1_stdev (standard deviation) - -### Tags: + - field1_count + - field1_diff (difference) + - field1_rate (rate per second) + - field1_max + - field1_min + - field1_mean + - field1_non_negative_diff (non-negative difference) + - field1_non_negative_rate (non-negative rate per second) + - field1_sum + - field1_s2 (variance) + - field1_stdev (standard deviation) + - field1_interval (interval in nanoseconds) + +## Tags No tags are applied by this aggregator. -### Example Output: +## Example Output -``` +```shell $ telegraf --config telegraf.conf --quiet system,host=tars load1=1 1475583980000000000 system,host=tars load1=1 1475583990000000000 -system,host=tars load1_count=2,load1_diff=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000 +system,host=tars load1_count=2,load1_diff=0,load1_rate=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0,load1_interval=10000000000i 1475584010000000000 system,host=tars load1=1 1475584020000000000 system,host=tars load1=3 1475584030000000000 -system,host=tars load1_count=2,load1_diff=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000 +system,host=tars load1_count=2,load1_diff=2,load1_rate=0.2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162,load1_interval=10000000000i 1475584010000000000 ``` diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 4e62ee31123a4..7e912fe48aa21 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -1,12 +1,19 @@ +//go:generate ../../../tools/readme_config_includer/generator package basicstats import ( + _ "embed" "math" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/aggregators" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type BasicStats struct { Stats []string `toml:"stats"` Log telegraf.Logger @@ -16,15 +23,18 @@ type BasicStats struct { } type configuredStats struct { - count bool - min bool - max bool - mean bool - variance bool - stdev bool - sum bool - diff bool - non_negative_diff bool + count bool + min bool + max bool + mean bool + variance bool + stdev bool + sum bool + diff bool + nonNegativeDiff bool + rate bool + nonNegativeRate bool + interval bool } func NewBasicStats() *BasicStats { @@ -40,36 +50,23 @@ type aggregate struct { } type basicstats struct { - count float64 - min float64 - max float64 - sum float64 - mean float64 - diff float64 - M2 float64 //intermediate value for variance/stdev - LAST float64 //intermediate value for diff + count float64 + min float64 + max float64 + sum float64 + mean float64 + diff float64 + rate float64 + interval time.Duration + M2 float64 //intermediate value for variance/stdev + LAST float64 //intermediate value for diff + TIME time.Time //intermediate value for rate } -var sampleConfig = ` - ## The period on which to flush & clear the aggregator. - period = "30s" - - ## If true, the original metric will be dropped by the - ## aggregator and will not get sent to the output plugins. - drop_original = false - - ## Configures which basic stats to push as fields - # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] -` - func (*BasicStats) SampleConfig() string { return sampleConfig } -func (*BasicStats) Description() string { - return "Keep the aggregate basicstats of each metric passing through." -} - func (b *BasicStats) Add(in telegraf.Metric) { id := in.HashID() if _, ok := b.cache[id]; !ok { @@ -88,8 +85,10 @@ func (b *BasicStats) Add(in telegraf.Metric) { mean: fv, sum: fv, diff: 0.0, + rate: 0.0, M2: 0.0, LAST: fv, + TIME: in.Time(), } } } @@ -100,14 +99,17 @@ func (b *BasicStats) Add(in telegraf.Metric) { if _, ok := b.cache[id].fields[field.Key]; !ok { // hit an uncached field of a cached metric b.cache[id].fields[field.Key] = basicstats{ - count: 1, - min: fv, - max: fv, - mean: fv, - sum: fv, - diff: 0.0, - M2: 0.0, - LAST: fv, + count: 1, + min: fv, + max: fv, + mean: fv, + sum: fv, + diff: 0.0, + rate: 0.0, + interval: 0, + M2: 0.0, + LAST: fv, + TIME: in.Time(), } continue } @@ -117,7 +119,7 @@ func (b *BasicStats) Add(in telegraf.Metric) { //variable initialization x := fv mean := tmp.mean - M2 := tmp.M2 + m2 := tmp.M2 //counter compute n := tmp.count + 1 tmp.count = n @@ -126,8 +128,8 @@ func (b *BasicStats) Add(in telegraf.Metric) { mean = mean + delta/n tmp.mean = mean //variance/stdev compute - M2 = M2 + delta*(x-mean) - tmp.M2 = M2 + m2 = m2 + delta*(x-mean) + tmp.M2 = m2 //max/min compute if fv < tmp.min { tmp.min = fv @@ -138,6 +140,12 @@ func (b *BasicStats) Add(in telegraf.Metric) { tmp.sum += fv //diff compute tmp.diff = fv - tmp.LAST + //interval compute + tmp.interval = in.Time().Sub(tmp.TIME) + //rate compute + if !in.Time().Equal(tmp.TIME) { + tmp.rate = tmp.diff / tmp.interval.Seconds() + } //store final data b.cache[id].fields[field.Key] = tmp } @@ -149,7 +157,6 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) { for _, aggregate := range b.cache { fields := map[string]interface{}{} for k, v := range aggregate.fields { - if b.statsConfig.count { fields[k+"_count"] = v.count } @@ -179,10 +186,18 @@ func (b *BasicStats) Push(acc telegraf.Accumulator) { if b.statsConfig.diff { fields[k+"_diff"] = v.diff } - if b.statsConfig.non_negative_diff && v.diff >= 0 { + if 
b.statsConfig.nonNegativeDiff && v.diff >= 0 { fields[k+"_non_negative_diff"] = v.diff } - + if b.statsConfig.rate { + fields[k+"_rate"] = v.rate + } + if b.statsConfig.nonNegativeRate && v.diff >= 0 { + fields[k+"_non_negative_rate"] = v.rate + } + if b.statsConfig.interval { + fields[k+"_interval"] = v.interval.Nanoseconds() + } } //if count == 1 StdDev = infinite => so I won't send data } @@ -216,8 +231,13 @@ func (b *BasicStats) parseStats() *configuredStats { case "diff": parsed.diff = true case "non_negative_diff": - parsed.non_negative_diff = true - + parsed.nonNegativeDiff = true + case "rate": + parsed.rate = true + case "non_negative_rate": + parsed.nonNegativeRate = true + case "interval": + parsed.interval = true default: b.Log.Warnf("Unrecognized basic stat %q, ignoring", name) } @@ -229,14 +249,16 @@ func (b *BasicStats) parseStats() *configuredStats { func (b *BasicStats) getConfiguredStats() { if b.Stats == nil { b.statsConfig = &configuredStats{ - count: true, - min: true, - max: true, - mean: true, - variance: true, - stdev: true, - sum: false, - non_negative_diff: false, + count: true, + min: true, + max: true, + mean: true, + variance: true, + stdev: true, + sum: false, + nonNegativeDiff: false, + rate: false, + nonNegativeRate: false, } } else { b.statsConfig = b.parseStats() diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index c5a093840abc7..3f08624978446 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -5,12 +5,13 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -19,9 +20,9 @@ var m1, _ = metric.New("m1", "d": float64(2), "g": int64(3), }, - time.Now(), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -34,7 +35,7 @@ var m2, _ = metric.New("m1", "andme": true, "g": int64(1), }, - time.Now(), + time.Date(2000, 1, 1, 0, 0, 0, 1e6, time.UTC), ) func BenchmarkApply(b *testing.B) { @@ -184,7 +185,6 @@ func TestBasicStatsDifferentPeriods(t *testing.T) { // Test only aggregating count func TestBasicStatsWithOnlyCount(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"count"} aggregator.Log = testutil.Logger{} @@ -213,7 +213,6 @@ func TestBasicStatsWithOnlyCount(t *testing.T) { // Test only aggregating minimum func TestBasicStatsWithOnlyMin(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"min"} aggregator.Log = testutil.Logger{} @@ -242,7 +241,6 @@ func TestBasicStatsWithOnlyMin(t *testing.T) { // Test only aggregating maximum func TestBasicStatsWithOnlyMax(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"max"} aggregator.Log = testutil.Logger{} @@ -271,7 +269,6 @@ func TestBasicStatsWithOnlyMax(t *testing.T) { // Test only aggregating mean func TestBasicStatsWithOnlyMean(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"mean"} aggregator.Log = testutil.Logger{} @@ -300,7 +297,6 @@ func TestBasicStatsWithOnlyMean(t *testing.T) { // Test only aggregating sum func TestBasicStatsWithOnlySum(t *testing.T) { - aggregator := 
NewBasicStats() aggregator.Stats = []string{"sum"} aggregator.Log = testutil.Logger{} @@ -331,29 +327,28 @@ func TestBasicStatsWithOnlySum(t *testing.T) { // implementations of sum were calculated from mean and count, which // e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8. func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { - - var sum1, _ = metric.New("m1", + var sum1 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), }, time.Now(), ) - var sum2, _ = metric.New("m1", + var sum2 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), }, time.Now(), ) - var sum3, _ = metric.New("m1", + var sum3 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(5), }, time.Now(), ) - var sum4, _ = metric.New("m1", + var sum4 = metric.New("m1", map[string]string{}, map[string]interface{}{ "a": int64(1), @@ -383,7 +378,6 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { // Test only aggregating variance func TestBasicStatsWithOnlyVariance(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"s2"} aggregator.Log = testutil.Logger{} @@ -410,7 +404,6 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) { // Test only aggregating standard deviation func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"stdev"} aggregator.Log = testutil.Logger{} @@ -437,7 +430,6 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { // Test only aggregating minimum and maximum func TestBasicStatsWithMinAndMax(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"min", "max"} aggregator.Log = testutil.Logger{} @@ -473,7 +465,6 @@ func TestBasicStatsWithMinAndMax(t *testing.T) { // Test only aggregating diff func TestBasicStatsWithDiff(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"diff"} aggregator.Log = testutil.Logger{} @@ -498,9 +489,80 @@ func TestBasicStatsWithDiff(t *testing.T) { acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) } +func TestBasicStatsWithRate(t *testing.T) { + aggregator := NewBasicStats() + aggregator.Stats = []string{"rate"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + expectedFields := map[string]interface{}{ + "a_rate": float64(0), + "b_rate": float64(2000), + "c_rate": float64(2000), + "d_rate": float64(4000), + "g_rate": float64(-2000), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +func TestBasicStatsWithNonNegativeRate(t *testing.T) { + aggregator := NewBasicStats() + aggregator.Stats = []string{"non_negative_rate"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_non_negative_rate": float64(0), + "b_non_negative_rate": float64(2000), + "c_non_negative_rate": float64(2000), + "d_non_negative_rate": float64(4000), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} +func TestBasicStatsWithInterval(t *testing.T) { + aggregator := NewBasicStats() + aggregator.Stats = []string{"interval"} + aggregator.Log = testutil.Logger{} + 
aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_interval": int64(time.Millisecond), + "b_interval": int64(time.Millisecond), + "c_interval": int64(time.Millisecond), + "d_interval": int64(time.Millisecond), + "g_interval": int64(time.Millisecond), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + // Test only aggregating non_negative_diff func TestBasicStatsWithNonNegativeDiff(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"non_negative_diff"} aggregator.Log = testutil.Logger{} @@ -591,7 +653,6 @@ func TestBasicStatsWithAllStats(t *testing.T) { // Test that if an empty array is passed, no points are pushed func TestBasicStatsWithNoStats(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{} aggregator.Log = testutil.Logger{} @@ -608,7 +669,6 @@ func TestBasicStatsWithNoStats(t *testing.T) { // Test that if an unknown stat is configured, it doesn't explode func TestBasicStatsWithUnknownStat(t *testing.T) { - aggregator := NewBasicStats() aggregator.Stats = []string{"crazy"} aggregator.Log = testutil.Logger{} @@ -628,7 +688,6 @@ func TestBasicStatsWithUnknownStat(t *testing.T) { // otherwise user's working systems will suddenly (and surprisingly) start // capturing sum without their input. func TestBasicStatsWithDefaultStats(t *testing.T) { - aggregator := NewBasicStats() aggregator.Log = testutil.Logger{} aggregator.getConfiguredStats() @@ -639,11 +698,11 @@ func TestBasicStatsWithDefaultStats(t *testing.T) { acc := testutil.Accumulator{} aggregator.Push(&acc) - assert.True(t, acc.HasField("m1", "a_count")) - assert.True(t, acc.HasField("m1", "a_min")) - assert.True(t, acc.HasField("m1", "a_max")) - assert.True(t, acc.HasField("m1", "a_mean")) - assert.True(t, acc.HasField("m1", "a_stdev")) - assert.True(t, acc.HasField("m1", "a_s2")) - assert.False(t, acc.HasField("m1", "a_sum")) + require.True(t, acc.HasField("m1", "a_count")) + require.True(t, acc.HasField("m1", "a_min")) + require.True(t, acc.HasField("m1", "a_max")) + require.True(t, acc.HasField("m1", "a_mean")) + require.True(t, acc.HasField("m1", "a_stdev")) + require.True(t, acc.HasField("m1", "a_s2")) + require.False(t, acc.HasField("m1", "a_sum")) } diff --git a/plugins/aggregators/basicstats/sample.conf b/plugins/aggregators/basicstats/sample.conf new file mode 100644 index 0000000000000..8c18e6d2ffa75 --- /dev/null +++ b/plugins/aggregators/basicstats/sample.conf @@ -0,0 +1,11 @@ +# Keep the aggregate basicstats of each metric passing through. +[[aggregators.basicstats]] + ## The period on which to flush & clear the aggregator. + period = "30s" + + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. 
+  drop_original = false
+
+  ## Configures which basic stats to push as fields
+  # stats = ["count","diff","rate","min","max","mean","non_negative_diff","non_negative_rate","stdev","s2","sum","interval"]
diff --git a/plugins/aggregators/deprecations.go b/plugins/aggregators/deprecations.go
new file mode 100644
index 0000000000000..dd2302e0255c3
--- /dev/null
+++ b/plugins/aggregators/deprecations.go
@@ -0,0 +1,6 @@
+package aggregators
+
+import "github.com/influxdata/telegraf"
+
+// Deprecations lists the deprecated plugins
+var Deprecations = map[string]telegraf.DeprecationInfo{}
diff --git a/plugins/aggregators/derivative/README.md b/plugins/aggregators/derivative/README.md
new file mode 100644
index 0000000000000..d8c43180c0fb6
--- /dev/null
+++ b/plugins/aggregators/derivative/README.md
@@ -0,0 +1,217 @@
+# Derivative Aggregator Plugin
+
+The Derivative Aggregator Plugin estimates the derivative for all fields of the
+aggregated metrics.
+
+## Configuration
+
+```toml @sample.conf
+# Calculates a derivative for every field.
+[[aggregators.derivative]]
+  ## The period in which to flush the aggregator.
+  period = "30s"
+  ##
+  ## Suffix to append for the resulting derivative field.
+  # suffix = "_rate"
+  ##
+  ## Field to use for the quotient when computing the derivative.
+  ## When using a field as the derivation parameter the name of that field will
+  ## be used for the resulting derivative, e.g. *fieldname_by_parameter*.
+  ## By default the timestamps of the metrics are used and the suffix is omitted.
+  # variable = ""
+  ##
+  ## Maximum number of roll-overs in case only one measurement is found during a period.
+  # max_roll_over = 10
+```
+
+This aggregator will estimate a derivative for each field of a metric, which is
+contained in both the first and last metric of the aggregation interval.
+Without further configuration the derivative will be calculated with respect to
+the time difference between these two measurements in seconds.
+The following formula is applied for every field:
+
+```text
+derivative = (value_last - value_first) / (time_last - time_first)
+```
+
+The resulting derivative will be named `<fieldname>_rate` if no `suffix` is
+configured.
+
+To calculate a derivative for every field use
+
+```toml
+[[aggregators.derivative]]
+  ## Specific Derivative Aggregator Arguments:
+
+  ## Configure a custom derivation variable. Timestamp is used if none is given.
+  # variable = ""
+
+  ## Suffix to add to the field name for the derivative name.
+  # suffix = "_rate"
+
+  ## Roll-Over last measurement to first measurement of next period
+  # max_roll_over = 10
+
+  ## General Aggregator Arguments:
+
+  ## calculate derivative every 30 seconds
+  period = "30s"
+```
+
+## Time Derivatives
+
+In its default configuration it determines the first and last measurement of
+the period. From these measurements the time difference in seconds is
+calculated. This time difference is then used to divide the difference of each
+field using the following formula:
+
+```text
+derivative = (value_last - value_first) / (time_last - time_first)
+```
+
+For each field the derivative is emitted with a naming pattern
+`<fieldname>_rate`.
+
+## Custom Derivation Variable
+
+The plugin supports using a field of the aggregated measurements as derivation
+variable in the denominator. This variable is assumed to be a monotonically
+increasing value. In this feature the following formula is used:
+
+```text
+derivative = (value_last - value_first) / (variable_last - variable_first)
+```
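+
+As a quick illustration (the field name `uptime_s` is hypothetical and only
+used for this sketch), a metric carrying a monotonically increasing `uptime_s`
+field could be used as the derivation variable like this:
+
+```toml
+[[aggregators.derivative]]
+  period = "30s"
+  ## hypothetical field, assumed to increase monotonically
+  variable = "uptime_s"
+  ## explicit suffix, so a field "bytes" is emitted as "bytes_by_uptime_s"
+  suffix = "_by_uptime_s"
+```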
+
+**Make sure the specified variable is not filtered and exists in the metrics
+passed to this aggregator!**
+
+When using a custom derivation variable, you should change the `suffix` of the
+derivative name. See the next section on [customizing the derivative
+name](#customize-the-derivative-name) for details.
+
+## Customize the Derivative Name
+
+The derivatives generated by the aggregator are named `<fieldname>_rate`,
+i.e. they are composed of the field name and a suffix `_rate`. You can
+configure the suffix to be used by changing the `suffix` parameter.
+
+## Roll-Over to next Period
+
+Calculating the derivative for a period requires at least two distinct
+measurements during that period. Whether those are available depends on the
+configuration of the aggregator `period` and the agent `interval`. By default
+the last measurement is used as first measurement in the next aggregation
+period. This enables a continuous calculation of the derivative. If within the
+next period an earlier timestamp is encountered this measurement will replace
+the roll-over metric. A main benefit of this roll-over is the ability to cope
+with multiple "quiet" periods, where no new measurement is pushed to the
+aggregator. The roll-over will take place at most `max_roll_over` times.
+
+### Example of Roll-Over
+
+Let us assume we have an input plugin that generates a measurement with a
+single metric "test" every 2 seconds. Let this metric increase the first 10
+seconds from 0.0 to 10.0 and then decrease the next 10 seconds from 10.0 to 0.0:
+
+| timestamp | value |
+|-----------|-------|
+| 0 | 0.0 |
+| 2 | 2.0 |
+| 4 | 4.0 |
+| 6 | 6.0 |
+| 8 | 8.0 |
+| 10 | 10.0 |
+| 12 | 8.0 |
+| 14 | 6.0 |
+| 16 | 4.0 |
+| 18 | 2.0 |
+| 20 | 0.0 |
+
+To avoid thinking about border values, we consider periods to be inclusive at
+the start but exclusive in the end. Using `period = "10s"` and `max_roll_over =
+0` we would get the following aggregates:
+
+| timestamp | value | aggregate | explanation |
+|-----------|-------|-----------|--------------|
+| 0 | 0.0 |
+| 2 | 2.0 |
+| 4 | 4.0 |
+| 6 | 6.0 |
+| 8 | 8.0 |
+||| 1.0 | (8.0 - 0.0) / (8 - 0) |
+| 10 | 10.0 |
+| 12 | 8.0 |
+| 14 | 6.0 |
+| 16 | 4.0 |
+| 18 | 2.0 |
+||| -1.0 | (2.0 - 10.0) / (18 - 10)
+| 20 | 0.0 |
+
+If we now decrease the period with `period = "2s"`, no derivative could be
+calculated since there would be only one measurement for each period. The
+aggregator will emit the log message `Same first and last event for "test",
+skipping.`. This changes if we use `max_roll_over = 1`, since now the end
+measurement of a period is taken as the start of the next period.
+
+| timestamp | value | aggregate | explanation |
+|-----------|-------|-----------|--------------|
+| 0 | 0.0 |
+| 2 | 2.0 | 1.0 | (2.0 - 0.0) / (2 - 0) |
+| 4 | 4.0 | 1.0 | (4.0 - 2.0) / (4 - 2) |
+| 6 | 6.0 | 1.0 | (6.0 - 4.0) / (6 - 4) |
+| 8 | 8.0 | 1.0 | (8.0 - 6.0) / (8 - 6) |
+| 10 | 10.0 | 1.0 | (10.0 - 8.0) / (10 - 8) |
+| 12 | 8.0 | -1.0 | (8.0 - 10.0) / (12 - 10) |
+| 14 | 6.0 | -1.0 | (6.0 - 8.0) / (14 - 12) |
+| 16 | 4.0 | -1.0 | (4.0 - 6.0) / (16 - 14) |
+| 18 | 2.0 | -1.0 | (2.0 - 4.0) / (18 - 16) |
+| 20 | 0.0 | -1.0 | (0.0 - 2.0) / (20 - 18) |
+
+The default `max_roll_over = 10` allows for multiple periods without
+measurements either due to configuration or missing input.
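+
+As a minimal sketch of the settings used in the table above (assuming, as in
+the example, that the input delivers one measurement every 2 seconds), the
+corresponding aggregator section would be:
+
+```toml
+[[aggregators.derivative]]
+  ## one incoming measurement per period, as in the example above
+  period = "2s"
+  ## roll the last measurement over into the next period exactly once
+  max_roll_over = 1
+```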
+
+There may be a slight difference in the calculation when using `max_roll_over`
+compared to running without. To illustrate this, let us compare the derivatives
+for `period = "7s"`.
+
+| timestamp | value | `max_roll_over = 0` | `max_roll_over = 1` |
+|-----------|-------|-----------|--------------|
+| 0 | 0.0 |
+| 2 | 2.0 |
+| 4 | 4.0 |
+| 6 | 6.0 |
+||| 1.0 | 1.0 |
+| 8 | 8.0 |
+| 10 | 10.0 |
+| 12 | 8.0 |
+||| 0.0 | 0.33... |
+| 14 | 6.0 |
+| 16 | 4.0 |
+| 18 | 2.0 |
+| 20 | 0.0 |
+||| -1.0 | -1.0 |
+
+The difference stems from the change of the value between periods, e.g. from 6.0
+to 8.0 between the first and second period. Those changes are omitted with
+`max_roll_over = 0` but are respected with `max_roll_over = 1`. That there are
+no more differences in the calculated derivatives is due to the example data,
+which has constant derivatives during the first and last period, even when
+including the gap between the periods. Using `max_roll_over` with a value
+greater than 0 may be important if you need to detect changes between periods,
+e.g. when you have very few measurements in a period or quasi-constant metrics
+with only occasional changes.
+
+### Tags
+
+No tags are applied by this aggregator.
+Existing tags are passed through the aggregator untouched.
+
+## Example Output
+
+```text
+net bytes_recv=15409i,packets_recv=164i,bytes_sent=16649i,packets_sent=120i 1508843640000000000
+net bytes_recv=73987i,packets_recv=364i,bytes_sent=87328i,packets_sent=452i 1508843660000000000
+net bytes_recv_by_packets_recv=292.89 1508843660000000000
+net packets_sent_rate=16.6,bytes_sent_rate=3533.95 1508843660000000000
+net bytes_sent_by_packet=292.89 1508843660000000000
+```
diff --git a/plugins/aggregators/derivative/derivative.go b/plugins/aggregators/derivative/derivative.go
new file mode 100644
index 0000000000000..3e86d71322c38
--- /dev/null
+++ b/plugins/aggregators/derivative/derivative.go
@@ -0,0 +1,184 @@
+//go:generate ../../../tools/readme_config_includer/generator
+package derivative
+
+import (
+	_ "embed"
+	"strings"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/aggregators"
+)
+
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + +type Derivative struct { + Variable string `toml:"variable"` + Suffix string `toml:"suffix"` + MaxRollOver uint `toml:"max_roll_over"` + Log telegraf.Logger `toml:"-"` + cache map[uint64]*aggregate +} + +type aggregate struct { + first *event + last *event + name string + tags map[string]string + rollOver uint +} + +type event struct { + fields map[string]float64 + time time.Time +} + +const defaultSuffix = "_rate" + +func NewDerivative() *Derivative { + derivative := &Derivative{Suffix: defaultSuffix, MaxRollOver: 10} + derivative.cache = make(map[uint64]*aggregate) + derivative.Reset() + return derivative +} + +func (*Derivative) SampleConfig() string { + return sampleConfig +} + +func (d *Derivative) Add(in telegraf.Metric) { + id := in.HashID() + current, ok := d.cache[id] + if !ok { + // hit an uncached metric, create caches for first time: + d.cache[id] = newAggregate(in) + return + } + if current.first.time.After(in.Time()) { + current.first = newEvent(in) + current.rollOver = 0 + } else if current.first.time.Equal(in.Time()) { + upsertConvertedFields(in.Fields(), current.first.fields) + current.rollOver = 0 + } + if current.last.time.Before(in.Time()) { + current.last = newEvent(in) + current.rollOver = 0 + } else if current.last.time.Equal(in.Time()) { + upsertConvertedFields(in.Fields(), current.last.fields) + current.rollOver = 0 + } +} + +func newAggregate(in telegraf.Metric) *aggregate { + event := newEvent(in) + return &aggregate{ + name: in.Name(), + tags: in.Tags(), + first: event, + last: event, + rollOver: 0, + } +} + +func newEvent(in telegraf.Metric) *event { + return &event{ + fields: extractConvertedFields(in), + time: in.Time(), + } +} + +func extractConvertedFields(in telegraf.Metric) map[string]float64 { + fields := make(map[string]float64, len(in.Fields())) + upsertConvertedFields(in.Fields(), fields) + return fields +} + +func upsertConvertedFields(source map[string]interface{}, target map[string]float64) { + for k, v := range source { + if value, ok := convert(v); ok { + target[k] = value + } + } +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + } + return 0, false +} + +func (d *Derivative) Push(acc telegraf.Accumulator) { + for _, aggregate := range d.cache { + if aggregate.first == aggregate.last { + d.Log.Debugf("Same first and last event for %q, skipping.", aggregate.name) + continue + } + var denominator float64 + denominator = aggregate.last.time.Sub(aggregate.first.time).Seconds() + if len(d.Variable) > 0 { + var first float64 + var last float64 + var found bool + if first, found = aggregate.first.fields[d.Variable]; !found { + d.Log.Debugf("Did not find %q in first event for %q.", d.Variable, aggregate.name) + continue + } + if last, found = aggregate.last.fields[d.Variable]; !found { + d.Log.Debugf("Did not find %q in last event for %q.", d.Variable, aggregate.name) + continue + } + denominator = last - first + } + if denominator == 0 { + d.Log.Debugf("Got difference 0 in denominator for %q, skipping.", aggregate.name) + continue + } + derivatives := make(map[string]interface{}) + for key, start := range aggregate.first.fields { + if key == d.Variable { + // Skip derivation variable + continue + } + if end, ok := aggregate.last.fields[key]; ok { + d.Log.Debugf("Adding derivative %q to %q.", key+d.Suffix, aggregate.name) + derivatives[key+d.Suffix] = (end - 
start) / denominator + } + } + acc.AddFields(aggregate.name, derivatives, aggregate.tags) + } +} + +func (d *Derivative) Reset() { + for id, aggregate := range d.cache { + if aggregate.rollOver < d.MaxRollOver { + aggregate.first = aggregate.last + aggregate.rollOver = aggregate.rollOver + 1 + d.cache[id] = aggregate + d.Log.Debugf("Roll-Over %q for the %d time.", aggregate.name, aggregate.rollOver) + } else { + delete(d.cache, id) + d.Log.Debugf("Removed %q from cache.", aggregate.name) + } + } +} + +func (d *Derivative) Init() error { + d.Suffix = strings.TrimSpace(d.Suffix) + d.Variable = strings.TrimSpace(d.Variable) + return nil +} + +func init() { + aggregators.Add("derivative", func() telegraf.Aggregator { + return NewDerivative() + }) +} diff --git a/plugins/aggregators/derivative/derivative_test.go b/plugins/aggregators/derivative/derivative_test.go new file mode 100644 index 0000000000000..e0c91767018ef --- /dev/null +++ b/plugins/aggregators/derivative/derivative_test.go @@ -0,0 +1,416 @@ +package derivative + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" +) + +var start = metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "increasing": int64(0), + "decreasing": int64(100), + "unchanged": int64(42), + "ignored": "strings are not supported", + "parameter": float64(0.0), + }, + time.Now(), +) + +var finish = metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "increasing": int64(1000), + "decreasing": int64(0), + "unchanged": int64(42), + "ignored": "strings are not supported", + "parameter": float64(10.0), + }, + time.Now().Add(time.Second), +) + +func TestTwoFullEventsWithParameter(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + derivative.Add(start) + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestTwoFullEventsWithParameterReverseSequence(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + derivative.Add(finish) + derivative.Add(start) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestTwoFullEventsWithoutParameter(t *testing.T) { + acc := testutil.Accumulator{} + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + startTime := time.Now() + duration, _ := time.ParseDuration("2s") + endTime := startTime.Add(duration) + + first := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + 
"value": int64(10), + }, + startTime, + ) + last := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(20), + }, + endTime, + ) + + derivative.Add(first) + derivative.Add(last) + derivative.Push(&acc) + + acc.AssertContainsFields(t, + "One Field", + map[string]interface{}{ + "value_rate": float64(5), + }, + ) +} + +func TestTwoFullEventsInSeperatePushes(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: " parameter", + Suffix: "_wrt_parameter", + MaxRollOver: 10, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + derivative.Add(start) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + acc.ClearMetrics() + + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_wrt_parameter": 100.0, + "decreasing_wrt_parameter": -10.0, + "unchanged_wrt_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestTwoFullEventsInSeperatePushesWithSeveralRollOvers(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_wrt_parameter", + MaxRollOver: 10, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + derivative.Add(start) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + derivative.Push(&acc) + derivative.Push(&acc) + derivative.Push(&acc) + + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "increasing_wrt_parameter": 100.0, + "decreasing_wrt_parameter": -10.0, + "unchanged_wrt_parameter": 0.0, + } + + acc.AssertContainsFields(t, "TestMetric", expectedFields) +} + +func TestTwoFullEventsInSeperatePushesWithOutRollOver(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + MaxRollOver: 0, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + derivative.Add(start) + // This test relies on RunningAggregator always callining Reset after Push + // to remove the first metric after max-rollover of 0 has been reached. 
+ derivative.Push(&acc) + derivative.Reset() + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + acc.ClearMetrics() + derivative.Add(finish) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") +} + +func TestIgnoresMissingVariable(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + noParameter := metric.New("TestMetric", + map[string]string{"state": "no_parameter"}, + map[string]interface{}{ + "increasing": int64(100), + "decreasing": int64(0), + "unchanged": int64(42), + }, + time.Now(), + ) + + derivative.Add(noParameter) + derivative.Push(&acc) + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") + + acc.ClearMetrics() + derivative.Add(noParameter) + derivative.Add(start) + derivative.Add(noParameter) + derivative.Add(finish) + derivative.Add(noParameter) + derivative.Push(&acc) + expectedFields := map[string]interface{}{ + "increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestMergesDifferentMetricsWithSameHash(t *testing.T) { + acc := testutil.Accumulator{} + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + startTime := time.Now() + duration, _ := time.ParseDuration("2s") + endTime := startTime.Add(duration) + part1 := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{"field1": int64(10)}, + startTime, + ) + part2 := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{"field2": int64(20)}, + startTime, + ) + final := metric.New("TestMetric", + map[string]string{"state": "full"}, + map[string]interface{}{ + "field1": int64(30), + "field2": int64(30), + }, + endTime, + ) + + derivative.Add(part1) + derivative.Push(&acc) + derivative.Add(part2) + derivative.Push(&acc) + derivative.Add(final) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ + "field1_rate": 10.0, + "field2_rate": 5.0, + } + expectedTags := map[string]string{ + "state": "full", + } + + acc.AssertContainsTaggedFields(t, "TestMetric", expectedFields, expectedTags) +} + +func TestDropsAggregatesOnMaxRollOver(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + MaxRollOver: 1, + cache: make(map[uint64]*aggregate), + } + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + derivative.Add(start) + derivative.Push(&acc) + derivative.Reset() + derivative.Push(&acc) + derivative.Reset() + derivative.Add(finish) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertDoesNotContainMeasurement(t, "TestMetric") +} + +func TestAddMetricsResetsRollOver(t *testing.T) { + acc := testutil.Accumulator{} + derivative := &Derivative{ + Variable: "parameter", + Suffix: "_by_parameter", + MaxRollOver: 1, + cache: make(map[uint64]*aggregate), + Log: testutil.Logger{}, + } + err := derivative.Init() + require.NoError(t, err) + + derivative.Add(start) + derivative.Push(&acc) + derivative.Reset() + derivative.Add(start) + derivative.Reset() + derivative.Add(finish) + derivative.Push(&acc) + + expectedFields := map[string]interface{}{ +
"increasing_by_parameter": 100.0, + "decreasing_by_parameter": -10.0, + "unchanged_by_parameter": 0.0, + } + + acc.AssertContainsFields(t, "TestMetric", expectedFields) +} + +func TestCalculatesCorrectDerivativeOnTwoConsecutivePeriods(t *testing.T) { + acc := testutil.Accumulator{} + period, _ := time.ParseDuration("10s") + derivative := NewDerivative() + derivative.Log = testutil.Logger{} + err := derivative.Init() + require.NoError(t, err) + + startTime := time.Now() + first := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(10), + }, + startTime, + ) + derivative.Add(first) + derivative.Push(&acc) + derivative.Reset() + + second := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(20), + }, + startTime.Add(period), + ) + derivative.Add(second) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertContainsFields(t, "One Field", map[string]interface{}{ + "value_rate": 1.0, + }) + + acc.ClearMetrics() + third := metric.New("One Field", + map[string]string{}, + map[string]interface{}{ + "value": int64(40), + }, + startTime.Add(period).Add(period), + ) + derivative.Add(third) + derivative.Push(&acc) + derivative.Reset() + + acc.AssertContainsFields(t, "One Field", map[string]interface{}{ + "value_rate": 2.0, + }) +} diff --git a/plugins/aggregators/derivative/sample.conf b/plugins/aggregators/derivative/sample.conf new file mode 100644 index 0000000000000..d43f523c25f49 --- /dev/null +++ b/plugins/aggregators/derivative/sample.conf @@ -0,0 +1,16 @@ +# Calculates a derivative for every field. +[[aggregators.derivative]] + ## The period in which to flush the aggregator. + period = "30s" + ## + ## Suffix to append for the resulting derivative field. + # suffix = "_rate" + ## + ## Field to use for the quotient when computing the derivative. + ## When using a field as the derivation parameter the name of that field will + ## be used for the resulting derivative, e.g. *fieldname_by_parameter*. + ## By default the timestamps of the metrics are used and the suffix is omitted. + # variable = "" + ## + ## Maximum number of roll-overs in case only one measurement is found during a period. + # max_roll_over = 10 diff --git a/plugins/aggregators/final/README.md b/plugins/aggregators/final/README.md index 444746d784349..4bf6800ff0b77 100644 --- a/plugins/aggregators/final/README.md +++ b/plugins/aggregators/final/README.md @@ -11,9 +11,10 @@ discrete time series such as procstat, cgroup, kubernetes etc. When a series has not been updated within the time defined in `series_timeout`, the last metric is emitted with the `_final` appended. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Report the final metric of a series [[aggregators.final]] ## The period on which to flush & clear the aggregator. period = "30s" @@ -25,20 +26,21 @@ When a series has not been updated within the time defined in series_timeout = "5m" ``` -### Metrics +## Metrics Measurement and tags are unchanged, fields are emitted with the suffix `_final`. 
-### Example Output +## Example Output -``` +```text counter,host=bar i_final=3,j_final=6 1554281635115090133 counter,host=foo i_final=3,j_final=6 1554281635112992012 ``` Original input: -``` + +```text counter,host=bar i=1,j=4 1554281633101153300 counter,host=foo i=1,j=4 1554281633099323601 counter,host=bar i=2,j=5 1554281634107980073 diff --git a/plugins/aggregators/final/final.go b/plugins/aggregators/final/final.go index 53ad0a47c9d95..c67695a36d0a7 100644 --- a/plugins/aggregators/final/final.go +++ b/plugins/aggregators/final/final.go @@ -1,26 +1,21 @@ +//go:generate ../../../tools/readme_config_includer/generator package final import ( + _ "embed" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/aggregators" ) -var sampleConfig = ` - ## The period on which to flush & clear the aggregator. - period = "30s" - ## If true, the original metric will be dropped by the - ## aggregator and will not get sent to the output plugins. - drop_original = false - - ## The time that a series is not updated until considering it final. - series_timeout = "5m" -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string type Final struct { - SeriesTimeout internal.Duration `toml:"series_timeout"` + SeriesTimeout config.Duration `toml:"series_timeout"` // The last metric for all series which are active metricCache map[uint64]telegraf.Metric @@ -28,19 +23,15 @@ type Final struct { func NewFinal() *Final { return &Final{ - SeriesTimeout: internal.Duration{Duration: 5 * time.Minute}, + SeriesTimeout: config.Duration(5 * time.Minute), metricCache: make(map[uint64]telegraf.Metric), } } -func (m *Final) SampleConfig() string { +func (*Final) SampleConfig() string { return sampleConfig } -func (m *Final) Description() string { - return "Report the final metric of a series" -} - func (m *Final) Add(in telegraf.Metric) { id := in.HashID() m.metricCache[id] = in @@ -51,7 +42,7 @@ func (m *Final) Push(acc telegraf.Accumulator) { acc.SetPrecision(time.Nanosecond) for id, metric := range m.metricCache { - if time.Since(metric.Time()) > m.SeriesTimeout.Duration { + if time.Since(metric.Time()) > time.Duration(m.SeriesTimeout) { fields := map[string]interface{}{} for _, field := range metric.FieldList() { fields[field.Key+"_final"] = field.Value diff --git a/plugins/aggregators/final/final_test.go b/plugins/aggregators/final/final_test.go index 1b3367fa5b3ad..6b0c6e8e38c24 100644 --- a/plugins/aggregators/final/final_test.go +++ b/plugins/aggregators/final/final_test.go @@ -5,7 +5,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" ) @@ -15,15 +15,15 @@ func TestSimple(t *testing.T) { final := NewFinal() tags := map[string]string{"foo": "bar"} - m1, _ := metric.New("m1", + m1 := metric.New("m1", tags, map[string]interface{}{"a": int64(1)}, time.Unix(1530939936, 0)) - m2, _ := metric.New("m1", + m2 := metric.New("m1", tags, map[string]interface{}{"a": int64(2)}, time.Unix(1530939937, 0)) - m3, _ := metric.New("m1", + m3 := metric.New("m1", tags, map[string]interface{}{"a": int64(3)}, time.Unix(1530939938, 0)) @@ -52,15 +52,15 @@ func TestTwoTags(t *testing.T) { tags1 := map[string]string{"foo": "bar"} tags2 := map[string]string{"foo": "baz"} - m1, _ := 
metric.New("m1", + m1 := metric.New("m1", tags1, map[string]interface{}{"a": int64(1)}, time.Unix(1530939936, 0)) - m2, _ := metric.New("m1", + m2 := metric.New("m1", tags2, map[string]interface{}{"a": int64(2)}, time.Unix(1530939937, 0)) - m3, _ := metric.New("m1", + m3 := metric.New("m1", tags1, map[string]interface{}{"a": int64(3)}, time.Unix(1530939938, 0)) @@ -93,24 +93,24 @@ func TestTwoTags(t *testing.T) { func TestLongDifference(t *testing.T) { acc := testutil.Accumulator{} final := NewFinal() - final.SeriesTimeout = internal.Duration{Duration: 30 * time.Second} + final.SeriesTimeout = config.Duration(30 * time.Second) tags := map[string]string{"foo": "bar"} now := time.Now() - m1, _ := metric.New("m", + m1 := metric.New("m", tags, map[string]interface{}{"a": int64(1)}, now.Add(time.Second*-290)) - m2, _ := metric.New("m", + m2 := metric.New("m", tags, map[string]interface{}{"a": int64(2)}, now.Add(time.Second*-275)) - m3, _ := metric.New("m", + m3 := metric.New("m", tags, map[string]interface{}{"a": int64(3)}, now.Add(time.Second*-100)) - m4, _ := metric.New("m", + m4 := metric.New("m", tags, map[string]interface{}{"a": int64(4)}, now.Add(time.Second*-20)) diff --git a/plugins/aggregators/final/sample.conf b/plugins/aggregators/final/sample.conf new file mode 100644 index 0000000000000..e9226c8ce175b --- /dev/null +++ b/plugins/aggregators/final/sample.conf @@ -0,0 +1,10 @@ +# Report the final metric of a series +[[aggregators.final]] + ## The period on which to flush & clear the aggregator. + period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## The time that a series is not updated until considering it final. + series_timeout = "5m" diff --git a/plugins/aggregators/histogram/README.md b/plugins/aggregators/histogram/README.md index f0b6c15b11804..c9309d4e48136 100644 --- a/plugins/aggregators/histogram/README.md +++ b/plugins/aggregators/histogram/README.md @@ -4,29 +4,33 @@ The histogram aggregator plugin creates histograms containing the counts of field values within a range. If `cumulative` is set to true, values added to a bucket are also added to the -larger buckets in the distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg). -Otherwise, values are added to only one bucket, which creates an [ordinary histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg) +larger buckets in the distribution. This creates a [cumulative histogram][1]. +Otherwise, values are added to only one bucket, which creates an [ordinary +histogram][1] Like other Telegraf aggregators, the metric is emitted every `period` seconds. By default bucket counts are not reset between periods and will be non-strictly -increasing while Telegraf is running. This behavior can be changed by setting the -`reset` parameter to true. +increasing while Telegraf is running. This behavior can be changed by setting +the `reset` parameter to true. -#### Design +[1]: https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg -Each metric is passed to the aggregator and this aggregator searches -histogram buckets for those fields, which have been specified in the -config. 
If buckets are found, the aggregator will increment +1 to the appropriate +## Design + +Each metric is passed to the aggregator and this aggregator searches histogram +buckets for those fields, which have been specified in the config. If buckets +are found, the aggregator will increment +1 to the appropriate bucket. Otherwise, it will be added to the `+Inf` bucket. Every `period` seconds this data will be forwarded to the outputs. -The algorithm of hit counting to buckets was implemented on the base -of the algorithm which is implemented in the Prometheus -[client](https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go). +The algorithm of hit counting to buckets was implemented on the base of the +algorithm which is implemented in the Prometheus [client][2]. + +[2]: https://github.com/prometheus/client_golang/blob/master/prometheus/histogram.go -### Configuration +## Configuration -```toml +```toml @sample.conf # Configuration for aggregate histogram metrics [[aggregators.histogram]] ## The period in which to flush the aggregator. @@ -44,6 +48,14 @@ of the algorithm which is implemented in the Prometheus ## Defaults to true. cumulative = true + ## Expiration interval for each histogram. The histogram will be expired if + ## there are no changes in any buckets for this time interval. 0 == no expiration. + # expiration_interval = "0m" + + ## If true, aggregated histogram are pushed to output only if it was updated since + ## previous push. Defaults to false. + # push_only_on_update = false + ## Example config that aggregates all fields of the metric. # [[aggregators.histogram.config]] # ## Right borders of buckets (with +Inf implicitly added). @@ -69,44 +81,44 @@ option. Optionally, if `fields` is set only the fields listed will be aggregated. If `fields` is not set all fields are aggregated. The `buckets` option contains a list of floats which specify the bucket -boundaries. Each float value defines the inclusive upper (right) bound of the bucket. -The `+Inf` bucket is added automatically and does not need to be defined. -(For left boundaries, these specified bucket borders and `-Inf` will be used). +boundaries. Each float value defines the inclusive upper (right) bound of the +bucket. The `+Inf` bucket is added automatically and does not need to be +defined. (For left boundaries, these specified bucket borders and `-Inf` will +be used). -### Measurements & Fields: +## Measurements & Fields The postfix `bucket` will be added to each field key. - measurement1 - - field1_bucket - - field2_bucket + - field1_bucket + - field2_bucket -### Tags: +### Tags -* `cumulative = true` (default): - * `le`: Right bucket border. It means that the metric value is less than or +- `cumulative = true` (default): + - `le`: Right bucket border. It means that the metric value is less than or equal to the value of this tag. If a metric value is sorted into a bucket, it is also sorted into all larger buckets. As a result, the value of `_bucket` is rising with rising `le` value. When `le` is `+Inf`, the bucket value is the count of all metrics, because all metric values are less than or equal to positive infinity. -* `cumulative = false`: - * `gt`: Left bucket border. It means that the metric value is greater than +- `cumulative = false`: + - `gt`: Left bucket border. It means that the metric value is greater than (and not equal to) the value of this tag. - * `le`: Right bucket border. It means that the metric value is less than or + - `le`: Right bucket border. 
It means that the metric value is less than or equal to the value of this tag. - * As both `gt` and `le` are present, each metric is sorted in only exactly - one bucket. + - As both `gt` and `le` are present, each metric is sorted in only exactly + one bucket. - -### Example Output: +## Example Output Let assume we have the buckets [0, 10, 50, 100] and the following field values for `usage_idle`: [50, 7, 99, 12] With `cumulative = true`: -``` +```text cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 # 7, 12 @@ -116,7 +128,7 @@ cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=4i 1486998330000000000 # With `cumulative = false`: -``` +```text cpu,cpu=cpu1,host=localhost,gt=-Inf,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none cpu,cpu=cpu1,host=localhost,gt=0.0,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 cpu,cpu=cpu1,host=localhost,gt=10.0,le=50.0 usage_idle_bucket=1i 1486998330000000000 # 12 diff --git a/plugins/aggregators/histogram/histogram.go b/plugins/aggregators/histogram/histogram.go index dab524d62782e..bacab1a1c1d0e 100644 --- a/plugins/aggregators/histogram/histogram.go +++ b/plugins/aggregators/histogram/histogram.go @@ -1,13 +1,21 @@ +//go:generate ../../../tools/readme_config_includer/generator package histogram import ( + _ "embed" "sort" "strconv" + "time" "github.com/influxdata/telegraf" + telegrafConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/aggregators" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // bucketRightTag is the tag, which contains right bucket border const bucketRightTag = "le" @@ -22,9 +30,11 @@ const bucketNegInf = "-Inf" // HistogramAggregator is aggregator with histogram configs and particular histograms for defined metrics type HistogramAggregator struct { - Configs []config `toml:"config"` - ResetBuckets bool `toml:"reset"` - Cumulative bool `toml:"cumulative"` + Configs []config `toml:"config"` + ResetBuckets bool `toml:"reset"` + Cumulative bool `toml:"cumulative"` + ExpirationInterval telegrafConfig.Duration `toml:"expiration_interval"` + PushOnlyOnUpdate bool `toml:"push_only_on_update"` buckets bucketsByMetrics cache map[uint64]metricHistogramCollection @@ -51,6 +61,8 @@ type metricHistogramCollection struct { histogramCollection map[string]counts name string tags map[string]string + expireTime time.Time + updated bool } // counts is the number of hits in the bucket @@ -63,6 +75,8 @@ type groupedByCountFields struct { fieldsWithCount map[string]int64 } +var timeNow = time.Now + // NewHistogramAggregator creates new histogram aggregator func NewHistogramAggregator() *HistogramAggregator { h := &HistogramAggregator{ @@ -74,51 +88,14 @@ func NewHistogramAggregator() *HistogramAggregator { return h } -var sampleConfig = ` - ## The period in which to flush the aggregator. - period = "30s" - - ## If true, the original metric will be dropped by the - ## aggregator and will not get sent to the output plugins. - drop_original = false - - ## If true, the histogram will be reset on flush instead - ## of accumulating the results. - reset = false - - ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. - ## Defaults to true. 
- cumulative = true - - ## Example config that aggregates all fields of the metric. - # [[aggregators.histogram.config]] - # ## Right borders of buckets (with +Inf implicitly added). - # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] - # ## The name of metric. - # measurement_name = "cpu" - - ## Example config that aggregates only specific fields of the metric. - # [[aggregators.histogram.config]] - # ## Right borders of buckets (with +Inf implicitly added). - # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] - # ## The name of metric. - # measurement_name = "diskio" - # ## The concrete fields of metric - # fields = ["io_time", "read_time", "write_time"] -` - -// SampleConfig returns sample of config -func (h *HistogramAggregator) SampleConfig() string { +func (*HistogramAggregator) SampleConfig() string { return sampleConfig } -// Description returns description of aggregator plugin -func (h *HistogramAggregator) Description() string { - return "Create aggregate histograms." -} - // Add adds new hit to the buckets func (h *HistogramAggregator) Add(in telegraf.Metric) { + addTime := timeNow() + bucketsByField := make(map[string][]float64) for field := range in.Fields() { buckets := h.getBuckets(in.Name(), field) @@ -151,6 +128,10 @@ func (h *HistogramAggregator) Add(in telegraf.Metric) { index := sort.SearchFloat64s(buckets, value) agr.histogramCollection[field][index]++ } + if h.ExpirationInterval != 0 { + agr.expireTime = addTime.Add(time.Duration(h.ExpirationInterval)) + } + agr.updated = true } } @@ -160,8 +141,18 @@ func (h *HistogramAggregator) Add(in telegraf.Metric) { // Push returns histogram values for metrics func (h *HistogramAggregator) Push(acc telegraf.Accumulator) { metricsWithGroupedFields := []groupedByCountFields{} + now := timeNow() - for _, aggregate := range h.cache { + for id, aggregate := range h.cache { + if h.ExpirationInterval != 0 && now.After(aggregate.expireTime) { + delete(h.cache, id) + continue + } + if h.PushOnlyOnUpdate && !h.cache[id].updated { + continue + } + aggregate.updated = false + h.cache[id] = aggregate for field, counts := range aggregate.histogramCollection { h.groupFieldsByBuckets(&metricsWithGroupedFields, aggregate.name, field, copyTags(aggregate.tags), counts) } diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index dfb3f5d12dfa8..dc78587b4a185 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -5,27 +5,35 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + telegrafConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) type fields map[string]interface{} type tags map[string]string // NewTestHistogram creates new test histogram aggregation with specified config -func NewTestHistogram(cfg []config, reset bool, cumulative bool) telegraf.Aggregator { +func NewTestHistogram(cfg []config, reset bool, cumulative bool, pushOnlyOnUpdate bool) telegraf.Aggregator { + return NewTestHistogramWithExpirationInterval(cfg, reset, cumulative, pushOnlyOnUpdate, 0) +} + +func NewTestHistogramWithExpirationInterval(cfg []config, reset bool, cumulative bool, pushOnlyOnUpdate bool, expirationInterval telegrafConfig.Duration) telegraf.Aggregator { htm := NewHistogramAggregator() htm.Configs = cfg htm.ResetBuckets = reset 
htm.Cumulative = cumulative + htm.ExpirationInterval = expirationInterval + htm.PushOnlyOnUpdate = pushOnlyOnUpdate return htm } // firstMetric1 is the first test metric -var firstMetric1, _ = metric.New( +var firstMetric1 = metric.New( "first_metric_name", tags{}, fields{ @@ -36,7 +44,7 @@ var firstMetric1, _ = metric.New( ) // firstMetric1 is the first test metric with other value -var firstMetric2, _ = metric.New( +var firstMetric2 = metric.New( "first_metric_name", tags{}, fields{ @@ -47,7 +55,7 @@ var firstMetric2, _ = metric.New( ) // secondMetric is the second metric -var secondMetric, _ = metric.New( +var secondMetric = metric.New( "second_metric_name", tags{}, fields{ @@ -73,7 +81,7 @@ func BenchmarkApply(b *testing.B) { func TestHistogram(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg, false, true) + histogram := NewTestHistogram(cfg, false, true, false) acc := &testutil.Accumulator{} @@ -82,9 +90,7 @@ func TestHistogram(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"}) @@ -93,11 +99,47 @@ func TestHistogram(t *testing.T) { assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: bucketPosInf}) } +// TestHistogram tests metrics for one period, for one field and push only on histogram update +func TestHistogramPushOnUpdate(t *testing.T) { + var cfg []config + cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + histogram := NewTestHistogram(cfg, false, true, true) + + acc := &testutil.Accumulator{} + + histogram.Add(firstMetric1) + histogram.Reset() + histogram.Add(firstMetric2) + histogram.Push(acc) + + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: bucketPosInf}) + + acc.ClearMetrics() + histogram.Push(acc) + require.Len(t, acc.Metrics, 0, "Incorrect number of metrics") + histogram.Add(firstMetric2) + histogram.Push(acc) + + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) + 
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(3)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(3)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(3)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(3)}, tags{bucketRightTag: bucketPosInf}) +} + // TestHistogramNonCumulative tests metrics for one period and for one field func TestHistogramNonCumulative(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg, false, false) + histogram := NewTestHistogram(cfg, false, false, false) acc := &testutil.Accumulator{} @@ -106,9 +148,7 @@ func TestHistogramNonCumulative(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketLeftTag: "10", bucketRightTag: "20"}) @@ -121,7 +161,7 @@ func TestHistogramNonCumulative(t *testing.T) { func TestHistogramWithReset(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg, true, true) + histogram := NewTestHistogram(cfg, true, true, false) acc := &testutil.Accumulator{} @@ -130,9 +170,7 @@ func TestHistogramWithReset(t *testing.T) { histogram.Add(firstMetric2) histogram.Push(acc) - if len(acc.Metrics) != 6 { - assert.Fail(t, "Incorrect number of metrics") - } + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "20"}) @@ -146,7 +184,7 @@ func TestHistogramWithAllFields(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}}) cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}) - histogram := NewTestHistogram(cfg, false, true) + histogram := NewTestHistogram(cfg, false, true, false) acc := &testutil.Accumulator{} @@ -155,10 +193,7 @@ func TestHistogramWithAllFields(t *testing.T) { histogram.Add(secondMetric) histogram.Push(acc) - if len(acc.Metrics) != 12 { - assert.Fail(t, "Incorrect number of metrics") - } - + require.Len(t, acc.Metrics, 12, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "15.5"}) 
assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"}) @@ -179,7 +214,7 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}}) cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}) - histogram := NewTestHistogram(cfg, false, false) + histogram := NewTestHistogram(cfg, false, false, false) acc := &testutil.Accumulator{} @@ -188,10 +223,7 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { histogram.Add(secondMetric) histogram.Push(acc) - if len(acc.Metrics) != 12 { - assert.Fail(t, "Incorrect number of metrics") - } - + require.Len(t, acc.Metrics, 12, "Incorrect number of metrics") assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "15.5"}) assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "15.5", bucketRightTag: "20"}) @@ -210,10 +242,9 @@ func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { // TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates // getting added in different periods) for all fields func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { - var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg, false, true) + histogram := NewTestHistogram(cfg, false, true, false) acc := &testutil.Accumulator{} histogram.Add(firstMetric1) @@ -242,7 +273,7 @@ func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { func TestWrongBucketsOrder(t *testing.T) { defer func() { if r := recover(); r != nil { - assert.Equal( + require.Equal( t, "histogram buckets must be in increasing order: 90.00 >= 20.00, metrics: first_metric_name, field: a", fmt.Sprint(r), @@ -252,10 +283,41 @@ func TestWrongBucketsOrder(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg, false, true) + histogram := NewTestHistogram(cfg, false, true, false) histogram.Add(firstMetric2) } +// TestHistogram tests two metrics getting added and metric expiration +func TestHistogramMetricExpiration(t *testing.T) { + currentTime := time.Unix(10, 0) + timeNow = func() time.Time { + return currentTime + } + defer func() { + timeNow = time.Now + }() + + var cfg []config + cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}) + histogram := NewTestHistogramWithExpirationInterval(cfg, false, true, false, telegrafConfig.Duration(30)) + + acc := &testutil.Accumulator{} + + histogram.Add(firstMetric1) + currentTime = time.Unix(41, 0) + histogram.Add(secondMetric) + histogram.Push(acc) + + require.Len(t, acc.Metrics, 6, "Incorrect number of metrics") + assertContainsTaggedField(t, acc, 
"second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "4"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "23"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: bucketPosInf}) +} + // assertContainsTaggedField is help functions to test histogram data func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, tags map[string]string) { acc.Lock() @@ -292,12 +354,9 @@ func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricNa } // check fields with their counts - if assert.Equal(t, fields, checkedMetric.Fields) { - return - } - - assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", checkedMetric.Fields, metricName)) + require.Equal(t, fields, checkedMetric.Fields) + return } - assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) + require.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) } diff --git a/plugins/aggregators/histogram/sample.conf b/plugins/aggregators/histogram/sample.conf new file mode 100644 index 0000000000000..4b2543fd612e8 --- /dev/null +++ b/plugins/aggregators/histogram/sample.conf @@ -0,0 +1,40 @@ +# Configuration for aggregate histogram metrics +[[aggregators.histogram]] + ## The period in which to flush the aggregator. + period = "30s" + + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## If true, the histogram will be reset on flush instead + ## of accumulating the results. + reset = false + + ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. + ## Defaults to true. + cumulative = true + + ## Expiration interval for each histogram. The histogram will be expired if + ## there are no changes in any buckets for this time interval. 0 == no expiration. + # expiration_interval = "0m" + + ## If true, aggregated histogram are pushed to output only if it was updated since + ## previous push. Defaults to false. + # push_only_on_update = false + + ## Example config that aggregates all fields of the metric. + # [[aggregators.histogram.config]] + # ## Right borders of buckets (with +Inf implicitly added). + # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] + # ## The name of metric. + # measurement_name = "cpu" + + ## Example config that aggregates only specific fields of the metric. + # [[aggregators.histogram.config]] + # ## Right borders of buckets (with +Inf implicitly added). + # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] + # ## The name of metric. 
+ # measurement_name = "diskio" + # ## The concrete fields of metric + # fields = ["io_time", "read_time", "write_time"] diff --git a/plugins/aggregators/merge/README.md b/plugins/aggregators/merge/README.md index 89f7f0983c692..0011a57f1b84b 100644 --- a/plugins/aggregators/merge/README.md +++ b/plugins/aggregators/merge/README.md @@ -1,4 +1,4 @@ -# Merge Aggregator +# Merge Aggregator Plugin Merge metrics together into a metric with multiple fields into the most memory and network transfer efficient form. @@ -7,16 +7,17 @@ Use this plugin when fields are split over multiple metrics, with the same measurement, tag set and timestamp. By merging into a single metric they can be handled more efficiently by the output. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Merge metrics into multifield metrics by series key [[aggregators.merge]] ## If true, the original metric will be dropped by the ## aggregator and will not get sent to the output plugins. drop_original = true ``` -### Example +## Example ```diff - cpu,host=localhost usage_time=42 1567562620000000000 diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go index 083c8fd3e6b0a..a9735f26c16cd 100644 --- a/plugins/aggregators/merge/merge.go +++ b/plugins/aggregators/merge/merge.go @@ -1,6 +1,8 @@ -package seriesgrouper +//go:generate ../../../tools/readme_config_includer/generator +package merge import ( + _ "embed" "time" "github.com/influxdata/telegraf" @@ -8,18 +10,16 @@ import ( "github.com/influxdata/telegraf/plugins/aggregators" ) -const ( - description = "Merge metrics into multifield metrics by series key" - sampleConfig = ` - ## If true, the original metric will be dropped by the - ## aggregator and will not get sent to the output plugins. - drop_original = true -` -) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string type Merge struct { grouper *metric.SeriesGrouper - log telegraf.Logger +} + +func (*Merge) SampleConfig() string { + return sampleConfig } func (a *Merge) Init() error { @@ -27,22 +27,8 @@ func (a *Merge) Init() error { return nil } -func (a *Merge) Description() string { - return description -} - -func (a *Merge) SampleConfig() string { - return sampleConfig -} - func (a *Merge) Add(m telegraf.Metric) { - tags := m.Tags() - for _, field := range m.FieldList() { - err := a.grouper.Add(m.Name(), tags, m.Time(), field.Key, field.Value) - if err != nil { - a.log.Errorf("Error adding metric: %v", err) - } - } + a.grouper.AddMetric(m) } func (a *Merge) Push(acc telegraf.Accumulator) { diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go index 2f2703c8f4b7c..0569b03f0c3d7 100644 --- a/plugins/aggregators/merge/merge_test.go +++ b/plugins/aggregators/merge/merge_test.go @@ -1,12 +1,14 @@ -package seriesgrouper +package merge import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSimple(t *testing.T) { @@ -184,3 +186,70 @@ func TestReset(t *testing.T) { testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) } + +var m1 = metric.New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f1": 1, + "f2": 2, + "f3": 3, + "f4": 4, + "f5": 5, + "f6": 6, + "f7": 7, + "f8": 8, + }, + time.Now(), +) +var m2 = metric.New( + "mymetric", + map[string]string{ + "host": "host.example.com", + "mykey": "myvalue", + "another key": "another value", + }, + map[string]interface{}{ + "f8": 8, + "f9": 9, + "f10": 10, + "f11": 11, + "f12": 12, + "f13": 13, + "f14": 14, + "f15": 15, + "f16": 16, + }, + m1.Time(), +) + +func BenchmarkMergeOne(b *testing.B) { + var merger Merge + err := merger.Init() + require.NoError(b, err) + var acc testutil.NopAccumulator + + for n := 0; n < b.N; n++ { + merger.Reset() + merger.Add(m1) + merger.Push(&acc) + } +} + +func BenchmarkMergeTwo(b *testing.B) { + var merger Merge + err := merger.Init() + require.NoError(b, err) + var acc testutil.NopAccumulator + + for n := 0; n < b.N; n++ { + merger.Reset() + merger.Add(m1) + merger.Add(m2) + merger.Push(&acc) + } +} diff --git a/plugins/aggregators/merge/sample.conf b/plugins/aggregators/merge/sample.conf new file mode 100644 index 0000000000000..146b52633a61f --- /dev/null +++ b/plugins/aggregators/merge/sample.conf @@ -0,0 +1,5 @@ +# Merge metrics into multifield metrics by series key +[[aggregators.merge]] + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = true diff --git a/plugins/aggregators/minmax/README.md b/plugins/aggregators/minmax/README.md index f7405b78cbe9d..84cc1c0bb324f 100644 --- a/plugins/aggregators/minmax/README.md +++ b/plugins/aggregators/minmax/README.md @@ -3,9 +3,9 @@ The minmax aggregator plugin aggregates min & max values of each field it sees, emitting the aggrate every `period` seconds. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Keep the aggregate min/max of each metric passing through. [[aggregators.minmax]] ## General Aggregator Arguments: @@ -16,19 +16,19 @@ emitting the aggrate every `period` seconds. 
drop_original = false ``` -### Measurements & Fields: +## Measurements & Fields - measurement1 - - field1_max - - field1_min + - field1_max + - field1_min -### Tags: +## Tags No tags are applied by this aggregator. -### Example Output: +## Example Output -``` +```shell $ telegraf --config telegraf.conf --quiet system,host=tars load1=1.72 1475583980000000000 system,host=tars load1=1.6 1475583990000000000 diff --git a/plugins/aggregators/minmax/minmax.go b/plugins/aggregators/minmax/minmax.go index fb0e72f65e945..076fae73c982d 100644 --- a/plugins/aggregators/minmax/minmax.go +++ b/plugins/aggregators/minmax/minmax.go @@ -1,10 +1,17 @@ +//go:generate ../../../tools/readme_config_includer/generator package minmax import ( + _ "embed" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/aggregators" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type MinMax struct { cache map[uint64]aggregate } @@ -26,23 +33,10 @@ type minmax struct { max float64 } -var sampleConfig = ` - ## General Aggregator Arguments: - ## The period on which to flush & clear the aggregator. - period = "30s" - ## If true, the original metric will be dropped by the - ## aggregator and will not get sent to the output plugins. - drop_original = false -` - -func (m *MinMax) SampleConfig() string { +func (*MinMax) SampleConfig() string { return sampleConfig } -func (m *MinMax) Description() string { - return "Keep the aggregate min/max of each metric passing through." -} - func (m *MinMax) Add(in telegraf.Metric) { id := in.HashID() if _, ok := m.cache[id]; !ok { diff --git a/plugins/aggregators/minmax/minmax_test.go b/plugins/aggregators/minmax/minmax_test.go index e7c3cf4eb2024..7835d95e9c72e 100644 --- a/plugins/aggregators/minmax/minmax_test.go +++ b/plugins/aggregators/minmax/minmax_test.go @@ -8,7 +8,7 @@ import ( "github.com/influxdata/telegraf/testutil" ) -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), @@ -24,7 +24,7 @@ var m1, _ = metric.New("m1", }, time.Now(), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "a": int64(1), diff --git a/plugins/aggregators/minmax/sample.conf b/plugins/aggregators/minmax/sample.conf new file mode 100644 index 0000000000000..f2316cc68bcfa --- /dev/null +++ b/plugins/aggregators/minmax/sample.conf @@ -0,0 +1,8 @@ +# Keep the aggregate min/max of each metric passing through. +[[aggregators.minmax]] + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false diff --git a/plugins/aggregators/quantile/README.md b/plugins/aggregators/quantile/README.md new file mode 100644 index 0000000000000..2c0a25ed21a38 --- /dev/null +++ b/plugins/aggregators/quantile/README.md @@ -0,0 +1,136 @@ +# Quantile Aggregator Plugin + +The quantile aggregator plugin aggregates specified quantiles for each numeric +field per metric it sees and emits the quantiles every `period`. + +## Configuration + +```toml @sample.conf +# Keep the aggregate quantiles of each metric passing through. +[[aggregators.quantile]] + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. 
+ period = "30s" + + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## Quantiles to output in the range [0,1] + # quantiles = [0.25, 0.5, 0.75] + + ## Type of aggregation algorithm + ## Supported are: + ## "t-digest" -- approximation using centroids, can cope with large number of samples + ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) + ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) + ## NOTE: Do not use "exact" algorithms with large number of samples + ## to not impair performance or memory consumption! + # algorithm = "t-digest" + + ## Compression for approximation (t-digest). The value needs to be + ## greater or equal to 1.0. Smaller values will result in more + ## performance but less accuracy. + # compression = 100.0 +``` + +## Algorithm types + +### t-digest + +Proposed by [Dunning & Ertl (2019)][tdigest_paper], this type uses a +special data structure to cluster data. These clusters are later used +to approximate the requested quantiles. The bounds of the approximation +can be controlled by the `compression` setting where smaller values +result in higher performance but less accuracy. + +Due to its incremental nature, this algorithm can handle large +numbers of samples efficiently. It is recommended for applications +where exact quantile calculation isn't required. + +For implementation details see the underlying [golang library][tdigest_lib]. + +### exact R7 and R8 + +These algorithms compute quantiles as described in [Hyndman & Fan +(1996)][hyndman_fan]. The R7 variant is used in Excel and NumPy. The R8 +variant is recommended by Hyndman & Fan due to its independence from the +underlying sample distribution. + +These algorithms save all data for the aggregation `period`. They require a lot +of memory when used with a large number of series or a large number of +samples. They are slower than the `t-digest` algorithm and are recommended only +to be used with a small number of samples and series. + +## Benchmark (linux/amd64) + +The benchmark was performed by adding 100 metrics with six numeric +(and two non-numeric) fields to the aggregator and then deriving the +aggregation result. + +| algorithm | # quantiles | avg. runtime | +| :------------ | -------------:| -------------:| +| t-digest | 3 | 376372 ns/op | +| exact R7 | 3 | 9782946 ns/op | +| exact R8 | 3 | 9158205 ns/op | +| t-digest | 100 | 899204 ns/op | +| exact R7 | 100 | 7868816 ns/op | +| exact R8 | 100 | 8099612 ns/op | + +## Measurements + +Measurement names are passed through this aggregator. + +### Fields + +For all numeric fields (int32/64, uint32/64 and float32/64) new *quantile* +fields are aggregated in the form `<fieldname>_<quantile*100>`. Other field +types (e.g. boolean, string) are ignored and dropped from the output.
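The suffix appended to each field is computed from the quantile value itself; a tiny runnable sketch of the naming rule (the `fmt.Sprintf("_%03d", int(q*100.0))` expression is taken from `Init()` in the quantile.go diff further below):

```go
package main

import "fmt"

func main() {
	// Naming rule from Init() in quantile.go: the suffix is "_%03d" of
	// quantile*100, truncated to an integer.
	for _, q := range []float64{0.25, 0.5, 0.75, 0.999} {
		fmt.Printf("%v -> _%03d\n", q, int(q*100.0)) // 0.25 -> _025 ... 0.999 -> _099
	}
}
```

Because of the truncation, quantiles that differ only past the second decimal place (e.g. 0.99 and 0.999) map to the same field name; the duplicate check in `Init()` only rejects exactly equal quantiles.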
+ +For example passing in the following metric as *input*: + +- somemetric + - average_response_ms (float64) + - minimum_response_ms (float64) + - maximum_response_ms (float64) + - status (string) + - ok (boolean) + +and the default setting for `quantiles` you get the following *output* + +- somemetric + - average_response_ms_025 (float64) + - average_response_ms_050 (float64) + - average_response_ms_075 (float64) + - minimum_response_ms_025 (float64) + - minimum_response_ms_050 (float64) + - minimum_response_ms_075 (float64) + - maximum_response_ms_025 (float64) + - maximum_response_ms_050 (float64) + - maximum_response_ms_075 (float64) + +The `status` and `ok` fields are dropped because they are not numeric. Note +that the number of resulting fields scales with the number of `quantiles` +specified. + +### Tags + +Tags are passed through to the output by this aggregator. + +### Example Output + +```text +cpu,cpu=cpu-total,host=Hugin usage_user=10.814851731872487,usage_system=2.1679541490155687,usage_irq=1.046598554697342,usage_steal=0,usage_guest_nice=0,usage_idle=85.79616247197244,usage_nice=0,usage_iowait=0,usage_softirq=0.1744330924495688,usage_guest=0 1608288360000000000 +cpu,cpu=cpu-total,host=Hugin usage_guest=0,usage_system=2.1601016518428664,usage_iowait=0.02541296060990694,usage_irq=1.0165184243964942,usage_softirq=0.1778907242693666,usage_steal=0,usage_guest_nice=0,usage_user=9.275730622616953,usage_idle=87.34434561626493,usage_nice=0 1608288370000000000 +cpu,cpu=cpu-total,host=Hugin usage_idle=85.78199052131747,usage_nice=0,usage_irq=1.0476428036915637,usage_guest=0,usage_guest_nice=0,usage_system=1.995510102269591,usage_iowait=0,usage_softirq=0.1995510102269662,usage_steal=0,usage_user=10.975305562484735 1608288380000000000 +cpu,cpu=cpu-total,host=Hugin usage_guest_nice_075=0,usage_user_050=10.814851731872487,usage_guest_075=0,usage_steal_025=0,usage_irq_025=1.031558489546918,usage_irq_075=1.0471206791944527,usage_iowait_025=0,usage_guest_050=0,usage_guest_nice_050=0,usage_nice_075=0,usage_iowait_050=0,usage_system_050=2.1601016518428664,usage_irq_050=1.046598554697342,usage_guest_nice_025=0,usage_idle_050=85.79616247197244,usage_softirq_075=0.1887208672481664,usage_steal_075=0,usage_system_025=2.0778058770562287,usage_system_075=2.1640279004292173,usage_softirq_050=0.1778907242693666,usage_nice_050=0,usage_iowait_075=0.01270648030495347,usage_user_075=10.895078647178611,usage_nice_025=0,usage_steal_050=0,usage_user_025=10.04529117724472,usage_idle_025=85.78907649664495,usage_idle_075=86.57025404411868,usage_softirq_025=0.1761619083594677,usage_guest_025=0 1608288390000000000 +``` + +## References + +- Dunning & Ertl: "Computing Extremely Accurate Quantiles Using t-Digests", arXiv:1902.04023 (2019) [pdf][tdigest_paper] +- Hyndman & Fan: "Sample Quantiles in Statistical Packages", The American Statistician, vol. 50, pp. 
361-365 (1996) [pdf][hyndman_fan] + +[tdigest_paper]: https://arxiv.org/abs/1902.04023 +[tdigest_lib]: https://github.com/caio/go-tdigest +[hyndman_fan]: http://www.maths.usyd.edu.au/u/UG/SM/STAT3022/r/current/Misc/Sample%20Quantiles%20in%20Statistical%20Packages.pdf diff --git a/plugins/aggregators/quantile/algorithms.go b/plugins/aggregators/quantile/algorithms.go new file mode 100644 index 0000000000000..e6d73507a1155 --- /dev/null +++ b/plugins/aggregators/quantile/algorithms.go @@ -0,0 +1,108 @@ +package quantile + +import ( + "math" + "sort" + + "github.com/caio/go-tdigest" +) + +type algorithm interface { + Add(value float64) error + Quantile(q float64) float64 +} + +func newTDigest(compression float64) (algorithm, error) { + return tdigest.New(tdigest.Compression(compression)) +} + +type exactAlgorithmR7 struct { + xs []float64 + sorted bool +} + +func newExactR7(_ float64) (algorithm, error) { + return &exactAlgorithmR7{xs: make([]float64, 0, 100), sorted: false}, nil +} + +func (e *exactAlgorithmR7) Add(value float64) error { + e.xs = append(e.xs, value) + e.sorted = false + + return nil +} + +func (e *exactAlgorithmR7) Quantile(q float64) float64 { + size := len(e.xs) + + // No information + if len(e.xs) == 0 { + return math.NaN() + } + + // Sort the array if necessary + if !e.sorted { + sort.Float64s(e.xs) + e.sorted = true + } + + // Get the quantile index and the fraction to the neighbor + // Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R7 + // Same as Excel and Numpy. + n := q * (float64(size) - 1) + i, gamma := math.Modf(n) + j := int(i) + if j < 0 { + return e.xs[0] + } + if j >= size-1 { // clamp: interpolating at j == size-1 would read xs[j+1] out of range (e.g. q = 1.0) + return e.xs[size-1] + } + // Linear interpolation + return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j]) +} + +type exactAlgorithmR8 struct { + xs []float64 + sorted bool +} + +func newExactR8(_ float64) (algorithm, error) { + return &exactAlgorithmR8{xs: make([]float64, 0, 100), sorted: false}, nil +} + +func (e *exactAlgorithmR8) Add(value float64) error { + e.xs = append(e.xs, value) + e.sorted = false + + return nil +} + +func (e *exactAlgorithmR8) Quantile(q float64) float64 { + size := len(e.xs) + + // No information + if size == 0 { + return math.NaN() + } + + // Sort the array if necessary + if !e.sorted { + sort.Float64s(e.xs) + e.sorted = true + } + + // Get the quantile index and the fraction to the neighbor + // Hyndman & Fan; Sample Quantiles in Statistical Packages; The American Statistician vol 50; pp 361-365; 1996 -- R8 + n := q*(float64(size)+1.0/3.0) - (2.0 / 3.0) // Indices are zero-based here but one-based in the paper + i, gamma := math.Modf(n) + j := int(i) + if j < 0 { + return e.xs[0] + } + if j >= size-1 { // clamp: xs[j+1] would be out of range for q close to 1 + return e.xs[size-1] + } + // Linear interpolation + return e.xs[j] + gamma*(e.xs[j+1]-e.xs[j]) +} diff --git a/plugins/aggregators/quantile/quantile.go b/plugins/aggregators/quantile/quantile.go new file mode 100644 index 0000000000000..551b6772e0d04 --- /dev/null +++ b/plugins/aggregators/quantile/quantile.go @@ -0,0 +1,140 @@ +//go:generate ../../../tools/readme_config_includer/generator +package quantile + +import ( + _ "embed" + "fmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + +type Quantile struct { + Quantiles []float64 `toml:"quantiles"` + Compression float64 `toml:"compression"` + AlgorithmType string `toml:"algorithm"` + + newAlgorithm newAlgorithmFunc + + cache map[uint64]aggregate + suffixes []string +} + +type aggregate struct { + name string + fields map[string]algorithm + tags map[string]string +} + +type newAlgorithmFunc func(compression float64) (algorithm, error) + +func (*Quantile) SampleConfig() string { + return sampleConfig +} + +func (q *Quantile) Add(in telegraf.Metric) { + id := in.HashID() + if cached, ok := q.cache[id]; ok { + fields := in.Fields() + for k, algo := range cached.fields { + if field, ok := fields[k]; ok { + if v, isconvertible := convert(field); isconvertible { + algo.Add(v) + } + } + } + return + } + + // New metric, setup cache and init algorithm + a := aggregate{ + name: in.Name(), + tags: in.Tags(), + fields: make(map[string]algorithm), + } + for k, field := range in.Fields() { + if v, isconvertible := convert(field); isconvertible { + // This should never error out as we tested it in Init() + algo, _ := q.newAlgorithm(q.Compression) + algo.Add(v) + a.fields[k] = algo + } + } + q.cache[id] = a +} + +func (q *Quantile) Push(acc telegraf.Accumulator) { + for _, aggregate := range q.cache { + fields := map[string]interface{}{} + for k, algo := range aggregate.fields { + for i, qtl := range q.Quantiles { + fields[k+q.suffixes[i]] = algo.Quantile(qtl) + } + } + acc.AddFields(aggregate.name, fields, aggregate.tags) + } +} + +func (q *Quantile) Reset() { + q.cache = make(map[uint64]aggregate) +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + default: + return 0, false + } +} + +func (q *Quantile) Init() error { + switch q.AlgorithmType { + case "t-digest", "": + q.newAlgorithm = newTDigest + case "exact R7": + q.newAlgorithm = newExactR7 + case "exact R8": + q.newAlgorithm = newExactR8 + default: + return fmt.Errorf("unknown algorithm type %q", q.AlgorithmType) + } + if _, err := q.newAlgorithm(q.Compression); err != nil { + return fmt.Errorf("cannot create %q algorithm: %v", q.AlgorithmType, err) + } + + if len(q.Quantiles) == 0 { + q.Quantiles = []float64{0.25, 0.5, 0.75} + } + + duplicates := make(map[float64]bool) + q.suffixes = make([]string, len(q.Quantiles)) + for i, qtl := range q.Quantiles { + if qtl < 0.0 || qtl > 1.0 { + return fmt.Errorf("quantile %v out of range", qtl) + } + if _, found := duplicates[qtl]; found { + return fmt.Errorf("duplicate quantile %v", qtl) + } + duplicates[qtl] = true + q.suffixes[i] = fmt.Sprintf("_%03d", int(qtl*100.0)) + } + + q.Reset() + + return nil +} + +func init() { + aggregators.Add("quantile", func() telegraf.Aggregator { + return &Quantile{Compression: 100} + }) +} diff --git a/plugins/aggregators/quantile/quantile_test.go b/plugins/aggregators/quantile/quantile_test.go new file mode 100644 index 0000000000000..4095f0c5837be --- /dev/null +++ b/plugins/aggregators/quantile/quantile_test.go @@ -0,0 +1,635 @@ +package quantile + +import ( + "math/rand" + "testing" + "time" + + "github.com/google/go-cmp/cmp/cmpopts" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConfigInvalidAlgorithm(t *testing.T) { + q := Quantile{AlgorithmType: "a strange one"} + err := q.Init() + require.Error(t, err) + 
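// The error message should name the unsupported algorithm type, making misconfigurations easy to spot. +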
require.Contains(t, err.Error(), "unknown algorithm type") +} + +func TestConfigInvalidCompression(t *testing.T) { + q := Quantile{Compression: 0, AlgorithmType: "t-digest"} + err := q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "cannot create \"t-digest\" algorithm") +} + +func TestConfigInvalidQuantiles(t *testing.T) { + q := Quantile{Compression: 100, Quantiles: []float64{-0.5}} + err := q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "quantile -0.5 out of range") + + q = Quantile{Compression: 100, Quantiles: []float64{1.5}} + err = q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "quantile 1.5 out of range") + + q = Quantile{Compression: 100, Quantiles: []float64{0.1, 0.2, 0.3, 0.1}} + err = q.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "duplicate quantile") +} + +func TestSingleMetricTDigest(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.75, + "a_050": 49.50, + "a_075": 74.25, + "b_025": 24.75, + "b_050": 49.50, + "b_075": 74.25, + "c_025": 24.75, + "c_050": 49.50, + "c_075": 74.25, + "d_025": 24.75, + "d_050": 49.50, + "d_075": 74.25, + "e_025": 24.75, + "e_050": 49.50, + "e_075": 74.25, + "f_025": 24.75, + "f_050": 49.50, + "f_075": 74.25, + "g_025": 0.2475, + "g_050": 0.4950, + "g_075": 0.7425, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsTDigest(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.75, "a_050": 49.50, "a_075": 74.25, + "b_025": 24.75, "b_050": 49.50, "b_075": 74.25, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 49.50, "a_050": 99.00, "a_075": 148.50, + "b_025": 49.50, "b_050": 99.00, "b_075": 148.50, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, 
expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort) +} + +func TestSingleMetricExactR7(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.75, + "a_050": 49.50, + "a_075": 74.25, + "b_025": 24.75, + "b_050": 49.50, + "b_075": 74.25, + "c_025": 24.75, + "c_050": 49.50, + "c_075": 74.25, + "d_025": 24.75, + "d_050": 49.50, + "d_075": 74.25, + "e_025": 24.75, + "e_050": 49.50, + "e_075": 74.25, + "f_025": 24.75, + "f_050": 49.50, + "f_075": 74.25, + "g_025": 0.2475, + "g_050": 0.4950, + "g_075": 0.7425, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsExactR7(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.75, "a_050": 49.50, "a_075": 74.25, + "b_025": 24.75, "b_050": 49.50, "b_075": 74.25, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 49.50, "a_050": 99.00, "a_075": 148.50, + "b_025": 49.50, "b_050": 99.00, "b_075": 148.50, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort) +} + +func TestSingleMetricExactR8(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a_025": 24.417, + "a_050": 49.500, + "a_075": 74.583, + "b_025": 24.417, + "b_050": 49.500, + "b_075": 74.583, + "c_025": 24.417, + "c_050": 49.500, + "c_075": 74.583, + "d_025": 24.417, + "d_050": 49.500, + "d_075": 74.583, + "e_025": 24.417, + "e_050": 49.500, + "e_075": 74.583, + "f_025": 24.417, + "f_050": 49.500, + "f_075": 74.583, + "g_025": 0.24417, + "g_050": 0.49500, + "g_075": 
0.74583, + }, + time.Now(), + ), + } + + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int32(i), + "b": int64(i), + "c": uint32(i), + "d": uint64(i), + "e": float32(i), + "f": float64(i), + "g": float64(i) / 100.0, + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon) +} + +func TestMultipleMetricsExactR8(t *testing.T) { + acc := testutil.Accumulator{} + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{ + "a_025": 24.417, "a_050": 49.500, "a_075": 74.583, + "b_025": 24.417, "b_050": 49.500, "b_075": 74.583, + }, + time.Now(), + ), + testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{ + "a_025": 48.833, "a_050": 99.000, "a_075": 149.167, + "b_025": 48.833, "b_050": 99.000, "b_075": 149.167, + }, + time.Now(), + ), + } + + metricsA := make([]telegraf.Metric, 100) + metricsB := make([]telegraf.Metric, 100) + for i := range metricsA { + metricsA[i] = testutil.MustMetric( + "test", + map[string]string{"series": "foo"}, + map[string]interface{}{"a": int64(i), "b": float64(i), "x1": "string", "x2": true}, + time.Now(), + ) + } + for i := range metricsB { + metricsB[i] = testutil.MustMetric( + "test", + map[string]string{"series": "bar"}, + map[string]interface{}{"a": int64(2 * i), "b": float64(2 * i), "x1": "string", "x2": true}, + time.Now(), + ) + } + + for _, m := range metricsA { + q.Add(m) + } + for _, m := range metricsB { + q.Add(m) + } + q.Push(&acc) + + epsilon := cmpopts.EquateApprox(0, 1e-3) + sort := testutil.SortMetrics() + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), epsilon, sort) +} + +func BenchmarkDefaultTDigest(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{Compression: 100} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultTDigest100Q(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{Compression: 100, Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR7(b *testing.B) { + 
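// Benchmark the exact R7 algorithm with the plugin's default quantile set (0.25, 0.5, 0.75). +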
metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{AlgorithmType: "exact R7"} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR7100Q(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{AlgorithmType: "exact R7", Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR8(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + + q := Quantile{AlgorithmType: "exact R8"} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} + +func BenchmarkDefaultExactR8100Q(b *testing.B) { + metrics := make([]telegraf.Metric, 100) + for i := range metrics { + metrics[i] = testutil.MustMetric( + "test", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": rand.Int31(), + "b": rand.Int63(), + "c": rand.Uint32(), + "d": rand.Uint64(), + "e": rand.Float32(), + "f": rand.Float64(), + "x1": "string", + "x2": true, + }, + time.Now(), + ) + } + quantiles := make([]float64, 100) + for i := range quantiles { + quantiles[i] = 0.01 * float64(i) + } + + q := Quantile{AlgorithmType: "exact R8", Quantiles: quantiles} + err := q.Init() + require.NoError(b, err) + + acc := testutil.Accumulator{} + for n := 0; n < b.N; n++ { + for _, m := range metrics { + q.Add(m) + } + q.Push(&acc) + } +} diff --git a/plugins/aggregators/quantile/sample.conf b/plugins/aggregators/quantile/sample.conf new file mode 100644 index 0000000000000..ee4d47eaea6ab --- /dev/null +++ b/plugins/aggregators/quantile/sample.conf @@ -0,0 +1,26 @@ +# Keep the aggregate quantiles of each metric passing through. +[[aggregators.quantile]] + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. 
+ drop_original = false + + ## Quantiles to output in the range [0,1] + # quantiles = [0.25, 0.5, 0.75] + + ## Type of aggregation algorithm + ## Supported are: + ## "t-digest" -- approximation using centroids, can cope with a large number of samples + ## "exact R7" -- exact computation also used by Excel or NumPy (Hyndman & Fan 1996 R7) + ## "exact R8" -- exact computation (Hyndman & Fan 1996 R8) + ## NOTE: Do not use the "exact" algorithms with a large number of samples + ## as performance and memory consumption will suffer! + # algorithm = "t-digest" + + ## Compression for approximation (t-digest). The value needs to be + ## greater than or equal to 1.0. Smaller values will result in better + ## performance but less accuracy. + # compression = 100.0 diff --git a/plugins/aggregators/starlark/README.md b/plugins/aggregators/starlark/README.md new file mode 100644 index 0000000000000..923c27289844a --- /dev/null +++ b/plugins/aggregators/starlark/README.md @@ -0,0 +1,119 @@ +# Starlark Aggregator Plugin + +The `starlark` aggregator lets you implement a custom aggregator plugin with a +Starlark script. The Starlark script needs to be composed of the three methods +defined in the Aggregator plugin interface, which are `add`, `push` and `reset`. + +The Starlark Aggregator plugin calls the Starlark function `add` to add the +metrics to the aggregator, then calls the Starlark function `push` to push the +resulting metrics into the accumulator, and finally calls the Starlark function +`reset` to reset the entire state of the plugin. + +The Starlark functions can use the global variable `state` to temporarily keep the +metrics to aggregate. + +The Starlark language is a dialect of Python, and will be familiar to those who +have experience with the Python language. However, there are major +[differences](#python-differences). Existing +Python code is unlikely to work unmodified. The execution environment is +sandboxed, and it is not possible to do I/O operations such as reading from +files or sockets. + +The **[Starlark specification][]** has details about the syntax and available +functions. + +## Configuration + +```toml @sample.conf +# Aggregate metrics using a Starlark script +[[aggregators.starlark]] + ## The Starlark source can be set as a string in this configuration file, or + ## by referencing a file containing the script. Only one source or script + ## should be set at once. + ## + ## Source of the Starlark script. + source = ''' +state = {} + +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +''' + + ## File containing a Starlark script. + # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [aggregators.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true +``` + +## Usage + +The Starlark code should contain a function called `add` that takes a metric as +argument. The function will be called with each metric to add, and doesn't +return anything. + +```python +def add(metric): + state["last"] = metric +``` + +The Starlark code should also contain a function called `push` that takes no +arguments. The function will be called to compute the aggregation, and +returns the metrics to push to the accumulator. + +```python +def push(): + return state.get("last") +``` + +The Starlark code should also contain a function called `reset` that takes no +arguments. The function will be called to reset the plugin, and doesn't +return anything.
+ +```python +def reset(): + state.clear() +``` + +For a list of available types and functions that can be used in the code, see +the [Starlark specification][]. + +## Python Differences + +Refer to the section [Python +Differences](../../processors/starlark/README.md#python-differences) of the +documentation about the Starlark processor. + +## Libraries available + +Refer to the section [Libraries +available](../../processors/starlark/README.md#libraries-available) of the +documentation about the Starlark processor. + +## Common Questions + +Refer to the section [Common +Questions](../../processors/starlark/README.md#common-questions) of the +documentation about the Starlark processor. + +## Examples + +- [minmax](testdata/min_max.star) - A minmax aggregator implemented with a Starlark script. +- [merge](testdata/merge.star) - A merge aggregator implemented with a Starlark script. + +[All examples](testdata) are in the testdata folder. + +Open a Pull Request to add any other useful Starlark examples. + +[Starlark specification]: https://github.com/google/starlark-go/blob/d1966c6b9fcd/doc/spec.md +[dict]: https://github.com/google/starlark-go/blob/d1966c6b9fcd/doc/spec.md#dictionaries diff --git a/plugins/aggregators/starlark/sample.conf b/plugins/aggregators/starlark/sample.conf new file mode 100644 index 0000000000000..92af9b47a34a7 --- /dev/null +++ b/plugins/aggregators/starlark/sample.conf @@ -0,0 +1,29 @@ +# Aggregate metrics using a Starlark script +[[aggregators.starlark]] + ## The Starlark source can be set as a string in this configuration file, or + ## by referencing a file containing the script. Only one source or script + ## should be set at once. + ## + ## Source of the Starlark script. + source = ''' +state = {} + +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +''' + + ## File containing a Starlark script. + # script = "/usr/local/bin/myscript.star" + + ## The constants of the Starlark script. + # [aggregators.starlark.constants] + # max_size = 10 + # threshold = 0.75 + # default_name = "Julia" + # debug_mode = true diff --git a/plugins/aggregators/starlark/starlark.go b/plugins/aggregators/starlark/starlark.go new file mode 100644 index 0000000000000..5c536fa33768a --- /dev/null +++ b/plugins/aggregators/starlark/starlark.go @@ -0,0 +1,115 @@ +//go:generate ../../../tools/readme_config_includer/generator +package starlark + +import ( + _ "embed" + + "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" + common "github.com/influxdata/telegraf/plugins/common/starlark" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +type Starlark struct { + common.StarlarkCommon +} + +func (*Starlark) SampleConfig() string { + return sampleConfig +} + +func (s *Starlark) Init() error { + // Execute source + err := s.StarlarkCommon.Init() + if err != nil { + return err + } + + // The source should define an add function. + err = s.AddFunction("add", &common.Metric{}) + if err != nil { + return err + } + + // The source should define a push function. + err = s.AddFunction("push") + if err != nil { + return err + } + + // The source should define a reset function.
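+ // Like add and push above, it is resolved once at Init time and then called at the end of each period to clear the script's state.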
+ err = s.AddFunction("reset") + if err != nil { + return err + } + + return nil +} + +func (s *Starlark) Add(metric telegraf.Metric) { + parameters, found := s.GetParameters("add") + if !found { + s.Log.Errorf("The parameters of the add function could not be found") + return + } + parameters[0].(*common.Metric).Wrap(metric) + + _, err := s.Call("add") + if err != nil { + s.LogError(err) + } +} + +func (s *Starlark) Push(acc telegraf.Accumulator) { + rv, err := s.Call("push") + if err != nil { + s.LogError(err) + acc.AddError(err) + return + } + + switch rv := rv.(type) { + case *starlark.List: + iter := rv.Iterate() + defer iter.Done() + var v starlark.Value + for iter.Next(&v) { + switch v := v.(type) { + case *common.Metric: + m := v.Unwrap() + acc.AddMetric(m) + default: + s.Log.Errorf("Invalid type returned in list: %s", v.Type()) + } + } + case *common.Metric: + m := rv.Unwrap() + acc.AddMetric(m) + case starlark.NoneType: + default: + s.Log.Errorf("Invalid type returned: %T", rv) + } +} + +func (s *Starlark) Reset() { + _, err := s.Call("reset") + if err != nil { + s.LogError(err) + } +} + +// init initializes starlark aggregator plugin +func init() { + aggregators.Add("starlark", func() telegraf.Aggregator { + return &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + }, + } + }) +} diff --git a/plugins/aggregators/starlark/starlark_test.go b/plugins/aggregators/starlark/starlark_test.go new file mode 100644 index 0000000000000..a45f9e84cd515 --- /dev/null +++ b/plugins/aggregators/starlark/starlark_test.go @@ -0,0 +1,432 @@ +package starlark + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + common "github.com/influxdata/telegraf/plugins/common/starlark" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var m1 = metric.New("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(1), + "c": int64(1), + "d": int64(1), + "e": int64(1), + "f": int64(2), + "g": int64(2), + "h": int64(2), + "i": int64(2), + "j": int64(3), + }, + time.Now(), +) +var m2 = metric.New("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(3), + "c": int64(3), + "d": int64(3), + "e": int64(3), + "f": int64(1), + "g": int64(1), + "h": int64(1), + "i": int64(1), + "j": int64(1), + "k": int64(200), + "l": int64(200), + "ignoreme": "string", + "andme": true, + }, + time.Now(), +) + +func BenchmarkApply(b *testing.B) { + minmax, _ := newMinMax() + + for n := 0; n < b.N; n++ { + minmax.Add(m1) + minmax.Add(m2) + } +} + +// Test two metrics getting added. 
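+// The min_max.star script keeps a running minimum and maximum per numeric field and emits them as <field>_min and <field>_max.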
+func TestMinMaxWithPeriod(t *testing.T) { + acc := testutil.Accumulator{} + minmax, err := newMinMax() + require.NoError(t, err) + + minmax.Add(m1) + minmax.Add(m2) + minmax.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(3), + "b_min": int64(1), + "c_max": int64(3), + "c_min": int64(1), + "d_max": int64(3), + "d_min": int64(1), + "e_max": int64(3), + "e_min": int64(1), + "f_max": int64(2), + "f_min": int64(1), + "g_max": int64(2), + "g_min": int64(1), + "h_max": int64(2), + "h_min": int64(1), + "i_max": int64(2), + "i_min": int64(1), + "j_max": int64(3), + "j_min": int64(1), + "k_max": int64(200), + "k_min": int64(200), + "l_max": int64(200), + "l_min": int64(200), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +// Test two metrics getting added with a push/reset in between (simulates +// getting added in different periods.) +func TestMinMaxDifferentPeriods(t *testing.T) { + acc := testutil.Accumulator{} + minmax, err := newMinMax() + require.NoError(t, err) + minmax.Add(m1) + minmax.Push(&acc) + expectedFields := map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(1), + "b_min": int64(1), + "c_max": int64(1), + "c_min": int64(1), + "d_max": int64(1), + "d_min": int64(1), + "e_max": int64(1), + "e_min": int64(1), + "f_max": int64(2), + "f_min": int64(2), + "g_max": int64(2), + "g_min": int64(2), + "h_max": int64(2), + "h_min": int64(2), + "i_max": int64(2), + "i_min": int64(2), + "j_max": int64(3), + "j_min": int64(3), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) + + acc.ClearMetrics() + minmax.Reset() + minmax.Add(m2) + minmax.Push(&acc) + expectedFields = map[string]interface{}{ + "a_max": int64(1), + "a_min": int64(1), + "b_max": int64(3), + "b_min": int64(3), + "c_max": int64(3), + "c_min": int64(3), + "d_max": int64(3), + "d_min": int64(3), + "e_max": int64(3), + "e_min": int64(3), + "f_max": int64(1), + "f_min": int64(1), + "g_max": int64(1), + "g_min": int64(1), + "h_max": int64(1), + "h_min": int64(1), + "i_max": int64(1), + "i_min": int64(1), + "j_max": int64(1), + "j_min": int64(1), + "k_max": int64(200), + "k_min": int64(200), + "l_max": int64(200), + "l_min": int64(200), + } + expectedTags = map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +func newMinMax() (*Starlark, error) { + return newStarlarkFromScript("testdata/min_max.star") +} + +func TestSimple(t *testing.T) { + plugin, err := newMerge() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestNanosecondPrecision(t *testing.T) { + plugin, err := newMerge() + + require.NoError(t, err) 
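+ // Add two metrics with the same name, tags and timestamp; merge.star should combine their fields into a single metric.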
+ + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + acc.SetPrecision(time.Second) + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 1), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestReset(t *testing.T) { + plugin, err := newMerge() + + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + plugin.Reset() + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func newMerge() (*Starlark, error) { + return newStarlarkFromScript("testdata/merge.star") +} + +func TestLastFromSource(t *testing.T) { + acc := testutil.Accumulator{} + plugin, err := newStarlarkFromSource(` +state = {} +def add(metric): + state["last"] = metric + +def push(): + return state.get("last") + +def reset(): + state.clear() +`) + require.NoError(t, err) + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu2", + }, + map[string]interface{}{ + "time_idle": 31, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + plugin.Push(&acc) + expectedFields := map[string]interface{}{ + "time_idle": int64(31), + } + expectedTags := map[string]string{ + "cpu": "cpu2", + } + acc.AssertContainsTaggedFields(t, "cpu", expectedFields, expectedTags) + plugin.Reset() +} + +func newStarlarkFromSource(source string) (*Starlark, error) { + plugin := &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + Log: testutil.Logger{}, + Source: source, + }, + } + err := plugin.Init() + if err != nil { + return nil, err + } + return plugin, nil +} + +func newStarlarkFromScript(script string) (*Starlark, error) { + plugin := &Starlark{ + StarlarkCommon: common.StarlarkCommon{ + StarlarkLoadFunc: common.LoadFunc, + Log: testutil.Logger{}, + Script: script, + }, + } + err := plugin.Init() + if err != nil { + return nil, err + } + return plugin, nil +} diff --git a/plugins/aggregators/starlark/testdata/merge.star b/plugins/aggregators/starlark/testdata/merge.star new file mode 100644 index 0000000000000..77c5148ca9f76 --- /dev/null +++ 
b/plugins/aggregators/starlark/testdata/merge.star @@ -0,0 +1,31 @@ +# Example of a merge aggregator implemented with a starlark script. +load('time.star', 'time') +state = {} +def add(metric): + metrics = state.get("metrics") + if metrics == None: + metrics = {} + state["metrics"] = metrics + state["ordered"] = [] + gId = groupID(metric) + m = metrics.get(gId) + if m == None: + m = deepcopy(metric) + metrics[gId] = m + state["ordered"].append(m) + else: + for k, v in metric.fields.items(): + m.fields[k] = v + +def push(): + return state.get("ordered") + +def reset(): + state.clear() + +def groupID(metric): + key = metric.name + "-" + for k, v in metric.tags.items(): + key = key + k + "-" + v + "-" + key = key + "-" + str(metric.time) + return hash(key) \ No newline at end of file diff --git a/plugins/aggregators/starlark/testdata/min_max.star b/plugins/aggregators/starlark/testdata/min_max.star new file mode 100644 index 0000000000000..f8b23355c8e51 --- /dev/null +++ b/plugins/aggregators/starlark/testdata/min_max.star @@ -0,0 +1,53 @@ +# Example of a min_max aggregator implemented with a starlark script. + +supported_types = (["int", "float"]) +state = {} +def add(metric): + gId = groupID(metric) + aggregate = state.get(gId) + if aggregate == None: + aggregate = { + "name": metric.name, + "tags": metric.tags, + "fields": {} + } + for k, v in metric.fields.items(): + if type(v) in supported_types: + aggregate["fields"][k] = { + "min": v, + "max": v, + } + state[gId] = aggregate + else: + for k, v in metric.fields.items(): + if type(v) in supported_types: + min_max = aggregate["fields"].get(k) + if min_max == None: + aggregate["fields"][k] = { + "min": v, + "max": v, + } + elif v < min_max["min"]: + aggregate["fields"][k]["min"] = v + elif v > min_max["max"]: + aggregate["fields"][k]["max"] = v + +def push(): + metrics = [] + for a in state: + fields = {} + for k in state[a]["fields"]: + fields[k + "_min"] = state[a]["fields"][k]["min"] + fields[k + "_max"] = state[a]["fields"][k]["max"] + m = Metric(state[a]["name"], state[a]["tags"], fields) + metrics.append(m) + return metrics + +def reset(): + state.clear() + +def groupID(metric): + key = metric.name + "-" + for k, v in metric.tags.items(): + key = key + k + "-" + v + return hash(key) \ No newline at end of file diff --git a/plugins/aggregators/valuecounter/README.md b/plugins/aggregators/valuecounter/README.md index ef68e0f4e57ca..fbb278e669fd3 100644 --- a/plugins/aggregators/valuecounter/README.md +++ b/plugins/aggregators/valuecounter/README.md @@ -15,9 +15,10 @@ Counting fields with a high number of potential values may produce significant amounts of new fields and memory usage, take care to only count fields with a limited set of values. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Count the occurrence of values in fields. [[aggregators.valuecounter]] ## General Aggregator Arguments: ## The period on which to flush & clear the aggregator. @@ -29,22 +30,23 @@ limited set of values. fields = ["status"] ``` -### Measurements & Fields: +### Measurements & Fields - measurement1 - - field_value1 - - field_value2 + - field_value1 + - field_value2 -### Tags: +### Tags No tags are applied by this aggregator. -### Example Output: +## Example Output Example for parsing a HTTP access log. 
telegraf.conf: -``` + +```toml [[inputs.logparser]] files = ["/tmp/tst.log"] [inputs.logparser.grok] @@ -57,13 +59,14 @@ telegraf.conf: ``` /tmp/tst.log -``` + +```text /some/path 200 /some/path 401 /some/path 200 ``` -``` +```shell $ telegraf --config telegraf.conf --quiet access,url=/some/path,path=/tmp/tst.log,host=localhost.localdomain response="200" 1511948755991487011 diff --git a/plugins/aggregators/valuecounter/sample.conf b/plugins/aggregators/valuecounter/sample.conf new file mode 100644 index 0000000000000..375f7f01ed40b --- /dev/null +++ b/plugins/aggregators/valuecounter/sample.conf @@ -0,0 +1,10 @@ +# Count the occurrence of values in fields. +[[aggregators.valuecounter]] + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + ## The fields for which the values will be counted + fields = ["status"] diff --git a/plugins/aggregators/valuecounter/valuecounter.go b/plugins/aggregators/valuecounter/valuecounter.go index a25c9dcaf68bf..0ddbe43b16b21 100644 --- a/plugins/aggregators/valuecounter/valuecounter.go +++ b/plugins/aggregators/valuecounter/valuecounter.go @@ -1,12 +1,18 @@ +//go:generate ../../../tools/readme_config_includer/generator package valuecounter import ( + _ "embed" "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/aggregators" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type aggregate struct { name string tags map[string]string @@ -27,27 +33,10 @@ func NewValueCounter() telegraf.Aggregator { return vc } -var sampleConfig = ` - ## General Aggregator Arguments: - ## The period on which to flush & clear the aggregator. - period = "30s" - ## If true, the original metric will be dropped by the - ## aggregator and will not get sent to the output plugins. - drop_original = false - ## The fields for which the values will be counted - fields = [] -` - -// SampleConfig generates a sample config for the ValueCounter plugin -func (vc *ValueCounter) SampleConfig() string { +func (*ValueCounter) SampleConfig() string { return sampleConfig } -// Description returns the description of the ValueCounter plugin -func (vc *ValueCounter) Description() string { - return "Count the occurrence of values in fields." 
-} - // Add is run on every metric which passes the plugin func (vc *ValueCounter) Add(in telegraf.Metric) { id := in.HashID() diff --git a/plugins/aggregators/valuecounter/valuecounter_test.go b/plugins/aggregators/valuecounter/valuecounter_test.go index 8cec5f36653c4..75aa6deb01bf4 100644 --- a/plugins/aggregators/valuecounter/valuecounter_test.go +++ b/plugins/aggregators/valuecounter/valuecounter_test.go @@ -19,7 +19,7 @@ func NewTestValueCounter(fields []string) telegraf.Aggregator { return vc } -var m1, _ = metric.New("m1", +var m1 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "status": 200, @@ -28,7 +28,7 @@ var m1, _ = metric.New("m1", time.Now(), ) -var m2, _ = metric.New("m1", +var m2 = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ "status": "OK", diff --git a/plugins/common/auth/basic_auth.go b/plugins/common/auth/basic_auth.go new file mode 100644 index 0000000000000..6f92de809cee6 --- /dev/null +++ b/plugins/common/auth/basic_auth.go @@ -0,0 +1,23 @@ +package auth + +import ( + "crypto/subtle" + "net/http" +) + +type BasicAuth struct { + Username string `toml:"username"` + Password string `toml:"password"` +} + +func (b *BasicAuth) Verify(r *http.Request) bool { + if b.Username == "" && b.Password == "" { + return true + } + + username, password, ok := r.BasicAuth() + + usernameComparison := subtle.ConstantTimeCompare([]byte(username), []byte(b.Username)) == 1 + passwordComparison := subtle.ConstantTimeCompare([]byte(password), []byte(b.Password)) == 1 + return ok && usernameComparison && passwordComparison +} diff --git a/plugins/common/auth/basic_auth_test.go b/plugins/common/auth/basic_auth_test.go new file mode 100644 index 0000000000000..781f36ab87af4 --- /dev/null +++ b/plugins/common/auth/basic_auth_test.go @@ -0,0 +1,33 @@ +package auth + +import ( + "github.com/stretchr/testify/require" + "net/http/httptest" + "testing" +) + +func TestBasicAuth_VerifyWithCredentials(t *testing.T) { + auth := BasicAuth{"username", "password"} + + r := httptest.NewRequest("GET", "/github", nil) + r.SetBasicAuth(auth.Username, auth.Password) + + require.True(t, auth.Verify(r)) +} + +func TestBasicAuth_VerifyWithoutCredentials(t *testing.T) { + auth := BasicAuth{} + + r := httptest.NewRequest("GET", "/github", nil) + + require.True(t, auth.Verify(r)) +} + +func TestBasicAuth_VerifyWithInvalidCredentials(t *testing.T) { + auth := BasicAuth{"username", "password"} + + r := httptest.NewRequest("GET", "/github", nil) + r.SetBasicAuth("wrong-username", "wrong-password") + + require.False(t, auth.Verify(r)) +} diff --git a/plugins/common/cookie/cookie.go b/plugins/common/cookie/cookie.go new file mode 100644 index 0000000000000..63dee4858af03 --- /dev/null +++ b/plugins/common/cookie/cookie.go @@ -0,0 +1,121 @@ +package cookie + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/cookiejar" + "strings" + "sync" + "time" + + clockutil "github.com/benbjohnson/clock" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" +) + +type CookieAuthConfig struct { + URL string `toml:"cookie_auth_url"` + Method string `toml:"cookie_auth_method"` + + Headers map[string]string `toml:"cookie_auth_headers"` + + // HTTP Basic Auth Credentials + Username string `toml:"cookie_auth_username"` + Password string `toml:"cookie_auth_password"` + + Body string `toml:"cookie_auth_body"` + Renewal config.Duration `toml:"cookie_auth_renewal"` + + client *http.Client + wg sync.WaitGroup +} + +func (c *CookieAuthConfig) Start(client 
*http.Client, log telegraf.Logger, clock clockutil.Clock) (err error) { + if err = c.initializeClient(client); err != nil { + return err + } + + // continual auth renewal if set + if c.Renewal > 0 { + ticker := clock.Ticker(time.Duration(c.Renewal)) + // this context is used in the tests only, it is to cancel the goroutine + go c.authRenewal(context.Background(), ticker, log) + } + + return nil +} + +func (c *CookieAuthConfig) initializeClient(client *http.Client) (err error) { + c.client = client + + if c.Method == "" { + c.Method = http.MethodPost + } + + // add cookie jar to HTTP client + if c.client.Jar, err = cookiejar.New(nil); err != nil { + return err + } + + return c.auth() +} + +func (c *CookieAuthConfig) authRenewal(ctx context.Context, ticker *clockutil.Ticker, log telegraf.Logger) { + for { + select { + case <-ctx.Done(): + c.wg.Done() + return + case <-ticker.C: + if err := c.auth(); err != nil && log != nil { + log.Errorf("renewal failed for %q: %v", c.URL, err) + } + } + } +} + +func (c *CookieAuthConfig) auth() error { + var body io.ReadCloser + if c.Body != "" { + body = io.NopCloser(strings.NewReader(c.Body)) + defer body.Close() + } + + req, err := http.NewRequest(c.Method, c.URL, body) + if err != nil { + return err + } + + if c.Username != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + for k, v := range c.Headers { + if strings.ToLower(k) == "host" { + req.Host = v + } else { + req.Header.Add(k, v) + } + } + + resp, err := c.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if _, err = io.Copy(io.Discard, resp.Body); err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("cookie auth renewal received status code: %v (%v)", + resp.StatusCode, + http.StatusText(resp.StatusCode), + ) + } + + return nil +} diff --git a/plugins/common/cookie/cookie_test.go b/plugins/common/cookie/cookie_test.go new file mode 100644 index 0000000000000..c1c7ce294d0f5 --- /dev/null +++ b/plugins/common/cookie/cookie_test.go @@ -0,0 +1,271 @@ +package cookie + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + "time" + + clockutil "github.com/benbjohnson/clock" + "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const ( + reqUser = "testUser" + reqPasswd = "testPassword" + reqBody = "a body" + reqHeaderKey = "hello" + reqHeaderVal = "world" + + authEndpointNoCreds = "/auth" + authEndpointWithBasicAuth = "/authWithCreds" + authEndpointWithBasicAuthOnlyUsername = "/authWithCredsUser" + authEndpointWithBody = "/authWithBody" + authEndpointWithHeader = "/authWithHeader" +) + +var fakeCookie = &http.Cookie{ + Name: "test-cookie", + Value: "this is an auth cookie", +} + +type fakeServer struct { + *httptest.Server + *int32 +} + +func newFakeServer(t *testing.T) fakeServer { + var c int32 + return fakeServer{ + Server: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + authed := func() { + atomic.AddInt32(&c, 1) // increment auth counter + http.SetCookie(w, fakeCookie) // set fake cookie + } + switch r.URL.Path { + case authEndpointNoCreds: + authed() + case authEndpointWithHeader: + if !cmp.Equal(r.Header.Get(reqHeaderKey), reqHeaderVal) { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + case authEndpointWithBody: + body, err := io.ReadAll(r.Body) + require.NoError(t, err) + if !cmp.Equal([]byte(reqBody), body) 
{ + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + case authEndpointWithBasicAuth: + u, p, ok := r.BasicAuth() + if !ok || u != reqUser || p != reqPasswd { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + case authEndpointWithBasicAuthOnlyUsername: + u, p, ok := r.BasicAuth() + if !ok || u != reqUser || p != "" { + w.WriteHeader(http.StatusUnauthorized) + return + } + authed() + default: + // ensure cookie exists on request + if _, err := r.Cookie(fakeCookie.Name); err != nil { + w.WriteHeader(http.StatusForbidden) + return + } + _, _ = w.Write([]byte("good test response")) + } + })), + int32: &c, + } +} + +func (s fakeServer) checkResp(t *testing.T, expCode int) { + t.Helper() + resp, err := s.Client().Get(s.URL + "/endpoint") + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, expCode, resp.StatusCode) + + if expCode == http.StatusOK { + require.Len(t, resp.Request.Cookies(), 1) + require.Equal(t, "test-cookie", resp.Request.Cookies()[0].Name) + } +} + +func (s fakeServer) checkAuthCount(t *testing.T, atLeast int32) { + t.Helper() + require.GreaterOrEqual(t, atomic.LoadInt32(s.int32), atLeast) +} + +func TestAuthConfig_Start(t *testing.T) { + const ( + renewal = 50 * time.Millisecond + renewalCheck = 5 * renewal + ) + type fields struct { + Method string + Username string + Password string + Body string + Headers map[string]string + } + type args struct { + renewal time.Duration + endpoint string + } + tests := []struct { + name string + fields fields + args args + wantErr error + firstAuthCount int32 + lastAuthCount int32 + firstHTTPResponse int + lastHTTPResponse int + }{ + { + name: "success no creds, no body, default method", + args: args{ + renewal: renewal, + endpoint: authEndpointNoCreds, + }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, + }, + { + name: "success no creds, no body, default method, header set", + args: args{ + renewal: renewal, + endpoint: authEndpointWithHeader, + }, + fields: fields{ + Headers: map[string]string{reqHeaderKey: reqHeaderVal}, + }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, + }, + { + name: "success with creds, no body", + fields: fields{ + Method: http.MethodPost, + Username: reqUser, + Password: reqPasswd, + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBasicAuth, + }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, + }, + { + name: "failure with bad creds", + fields: fields{ + Method: http.MethodPost, + Username: reqUser, + Password: "a bad password", + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBasicAuth, + }, + wantErr: fmt.Errorf("cookie auth renewal received status code: 401 (Unauthorized)"), + firstAuthCount: 0, + lastAuthCount: 0, + firstHTTPResponse: http.StatusForbidden, + lastHTTPResponse: http.StatusForbidden, + }, + { + name: "success with no creds, with good body", + fields: fields{ + Method: http.MethodPost, + Body: reqBody, + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBody, + }, + firstAuthCount: 1, + lastAuthCount: 3, + firstHTTPResponse: http.StatusOK, + lastHTTPResponse: http.StatusOK, + }, + { + name: "failure with bad body", + fields: fields{ + Method: http.MethodPost, + Body: "a bad body", + }, + args: args{ + renewal: renewal, + endpoint: authEndpointWithBody, + }, + wantErr: fmt.Errorf("cookie auth renewal 
received status code: 401 (Unauthorized)"), + firstAuthCount: 0, + lastAuthCount: 0, + firstHTTPResponse: http.StatusForbidden, + lastHTTPResponse: http.StatusForbidden, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + srv := newFakeServer(t) + c := &CookieAuthConfig{ + URL: srv.URL + tt.args.endpoint, + Method: tt.fields.Method, + Username: tt.fields.Username, + Password: tt.fields.Password, + Body: tt.fields.Body, + Headers: tt.fields.Headers, + Renewal: config.Duration(tt.args.renewal), + } + if err := c.initializeClient(srv.Client()); tt.wantErr != nil { + require.EqualError(t, err, tt.wantErr.Error()) + } else { + require.NoError(t, err) + } + mock := clockutil.NewMock() + ticker := mock.Ticker(time.Duration(c.Renewal)) + defer ticker.Stop() + + c.wg.Add(1) + ctx, cancel := context.WithCancel(context.Background()) + go c.authRenewal(ctx, ticker, testutil.Logger{Name: "cookie_auth"}) + + srv.checkAuthCount(t, tt.firstAuthCount) + srv.checkResp(t, tt.firstHTTPResponse) + mock.Add(renewalCheck) + + // Ensure that the auth renewal goroutine has completed + require.Eventually(t, func() bool { return atomic.LoadInt32(srv.int32) >= tt.lastAuthCount }, time.Second, 10*time.Millisecond) + + cancel() + c.wg.Wait() + srv.checkAuthCount(t, tt.lastAuthCount) + srv.checkResp(t, tt.lastHTTPResponse) + + srv.Close() + }) + } +} diff --git a/plugins/common/encoding/decoder_reader.go b/plugins/common/encoding/decoder_reader.go index 79bf11ed5a94b..586865cf71511 100644 --- a/plugins/common/encoding/decoder_reader.go +++ b/plugins/common/encoding/decoder_reader.go @@ -23,7 +23,7 @@ type Decoder struct { transform.Transformer // This forces external creators of Decoders to use names in struct - // initializers, allowing for future extendibility without having to break + // initializers, allowing for future extensibility without having to break // code. _ struct{} } diff --git a/plugins/common/encoding/decoder_test.go b/plugins/common/encoding/decoder_test.go index 87115318ad0ed..b8e19af9cea43 100644 --- a/plugins/common/encoding/decoder_test.go +++ b/plugins/common/encoding/decoder_test.go @@ -2,7 +2,7 @@ package encoding import ( "bytes" - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/require" @@ -66,7 +66,7 @@ func TestDecoder(t *testing.T) { require.NoError(t, err) buf := bytes.NewBuffer(tt.input) r := decoder.Reader(buf) - actual, err := ioutil.ReadAll(r) + actual, err := io.ReadAll(r) if tt.expectedErr { require.Error(t, err) return diff --git a/plugins/common/http/config.go b/plugins/common/http/config.go new file mode 100644 index 0000000000000..aad17fd6ed0fc --- /dev/null +++ b/plugins/common/http/config.go @@ -0,0 +1,70 @@ +package httpconfig + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/benbjohnson/clock" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/cookie" + oauthConfig "github.com/influxdata/telegraf/plugins/common/oauth" + "github.com/influxdata/telegraf/plugins/common/proxy" + "github.com/influxdata/telegraf/plugins/common/tls" +) + +// Common HTTP client struct. 
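+// It bundles the timeout, connection-pool, proxy, TLS, OAuth2 and cookie-auth +// settings shared by HTTP-based plugins; CreateClient below turns them into a +// ready-to-use *http.Client. A minimal usage sketch (illustrative values, not +// part of this patch): +// +// cfg := HTTPClientConfig{Timeout: config.Duration(10 * time.Second)} +// client, err := cfg.CreateClient(context.Background(), log) +//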
+type HTTPClientConfig struct { + Timeout config.Duration `toml:"timeout"` + IdleConnTimeout config.Duration `toml:"idle_conn_timeout"` + MaxIdleConns int `toml:"max_idle_conn"` + MaxIdleConnsPerHost int `toml:"max_idle_conn_per_host"` + + proxy.HTTPProxy + tls.ClientConfig + oauthConfig.OAuth2Config + cookie.CookieAuthConfig +} + +func (h *HTTPClientConfig) CreateClient(ctx context.Context, log telegraf.Logger) (*http.Client, error) { + tlsCfg, err := h.ClientConfig.TLSConfig() + if err != nil { + return nil, fmt.Errorf("failed to set TLS config: %w", err) + } + + prox, err := h.HTTPProxy.Proxy() + if err != nil { + return nil, fmt.Errorf("failed to set proxy: %w", err) + } + + transport := &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: prox, + IdleConnTimeout: time.Duration(h.IdleConnTimeout), + MaxIdleConns: h.MaxIdleConns, + MaxIdleConnsPerHost: h.MaxIdleConnsPerHost, + } + + timeout := h.Timeout + if timeout == 0 { + timeout = config.Duration(time.Second * 5) + } + + client := &http.Client{ + Transport: transport, + Timeout: time.Duration(timeout), + } + + client = h.OAuth2Config.CreateOauth2Client(ctx, client) + + if h.CookieAuthConfig.URL != "" { + if err := h.CookieAuthConfig.Start(client, log, clock.New()); err != nil { + return nil, err + } + } + + return client, nil +} diff --git a/plugins/common/kafka/config.go b/plugins/common/kafka/config.go new file mode 100644 index 0000000000000..127a0a383fd59 --- /dev/null +++ b/plugins/common/kafka/config.go @@ -0,0 +1,95 @@ +package kafka + +import ( + "github.com/Shopify/sarama" + "github.com/influxdata/telegraf/plugins/common/tls" +) + +// ReadConfig for kafka clients meaning to read from Kafka. +type ReadConfig struct { + Config +} + +// SetConfig on the sarama.Config object from the ReadConfig struct. +func (k *ReadConfig) SetConfig(config *sarama.Config) error { + config.Consumer.Return.Errors = true + + return k.Config.SetConfig(config) +} + +// WriteConfig for kafka clients meaning to write to kafka +type WriteConfig struct { + Config + + RequiredAcks int `toml:"required_acks"` + MaxRetry int `toml:"max_retry"` + MaxMessageBytes int `toml:"max_message_bytes"` + IdempotentWrites bool `toml:"idempotent_writes"` +} + +// SetConfig on the sarama.Config object from the WriteConfig struct. +func (k *WriteConfig) SetConfig(config *sarama.Config) error { + config.Producer.Return.Successes = true + config.Producer.Idempotent = k.IdempotentWrites + config.Producer.Retry.Max = k.MaxRetry + if k.MaxMessageBytes > 0 { + config.Producer.MaxMessageBytes = k.MaxMessageBytes + } + config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) + if config.Producer.Idempotent { + config.Net.MaxOpenRequests = 1 + } + return k.Config.SetConfig(config) +} + +// Config common to all Kafka clients. +type Config struct { + SASLAuth + tls.ClientConfig + + Version string `toml:"version"` + ClientID string `toml:"client_id"` + CompressionCodec int `toml:"compression_codec"` + + EnableTLS *bool `toml:"enable_tls" deprecated:"1.17.0;option is ignored"` + + // Disable full metadata fetching + MetadataFull *bool `toml:"metadata_full"` +} + +// SetConfig on the sarama.Config object from the Config struct. 
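+// It applies the Kafka version, client ID, compression codec, TLS and +// metadata settings, then delegates to SetSASLConfig for authentication. +// A minimal sketch (illustrative values, not part of this patch): +// +// cfg := &Config{Version: "2.6.0", ClientID: "telegraf"} +// err := cfg.SetConfig(sarama.NewConfig()) +//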
+func (k *Config) SetConfig(config *sarama.Config) error { + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + + config.Version = version + } + + if k.ClientID != "" { + config.ClientID = k.ClientID + } else { + config.ClientID = "Telegraf" + } + + config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) + + tlsConfig, err := k.ClientConfig.TLSConfig() + if err != nil { + return err + } + + if tlsConfig != nil { + config.Net.TLS.Config = tlsConfig + config.Net.TLS.Enable = true + } + + if k.MetadataFull != nil { + // Defaults to true in Sarama + config.Metadata.Full = *k.MetadataFull + } + + return k.SetSASLConfig(config) +} diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go index e565aea5813ce..06ab64dab34be 100644 --- a/plugins/common/kafka/sasl.go +++ b/plugins/common/kafka/sasl.go @@ -105,5 +105,4 @@ func gssapiAuthType(authType string) int { default: return 0 } - } diff --git a/plugins/common/kafka/scram_client.go b/plugins/common/kafka/scram_client.go index f6aa9d6c4e285..765e76e96f7e1 100644 --- a/plugins/common/kafka/scram_client.go +++ b/plugins/common/kafka/scram_client.go @@ -27,8 +27,7 @@ func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { } func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { - response, err = x.ClientConversation.Step(challenge) - return + return x.ClientConversation.Step(challenge) } func (x *XDGSCRAMClient) Done() bool { diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go index a7f99023be1ba..84aae8fe8557b 100644 --- a/plugins/common/logrus/hook.go +++ b/plugins/common/logrus/hook.go @@ -1,8 +1,8 @@ package logrus import ( - "io/ioutil" - "log" + "io" + "log" //nolint:revive // Allow exceptional but valid use of log here. "strings" "sync" @@ -14,12 +14,12 @@ var once sync.Once type LogHook struct { } -// Install a logging hook into the logrus standard logger, diverting all logs +// InstallHook installs a logging hook into the logrus standard logger, diverting all logs // through the Telegraf logger at debug level. This is useful for libraries // that directly log to the logrus system without providing an override method. 
func InstallHook() { once.Do(func() { - logrus.SetOutput(ioutil.Discard) + logrus.SetOutput(io.Discard) logrus.AddHook(&LogHook{}) }) } diff --git a/plugins/common/oauth/config.go b/plugins/common/oauth/config.go new file mode 100644 index 0000000000000..aa42a7a65569a --- /dev/null +++ b/plugins/common/oauth/config.go @@ -0,0 +1,32 @@ +package oauth + +import ( + "context" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +type OAuth2Config struct { + // OAuth2 Credentials + ClientID string `toml:"client_id"` + ClientSecret string `toml:"client_secret"` + TokenURL string `toml:"token_url"` + Scopes []string `toml:"scopes"` +} + +func (o *OAuth2Config) CreateOauth2Client(ctx context.Context, client *http.Client) *http.Client { + if o.ClientID != "" && o.ClientSecret != "" && o.TokenURL != "" { + oauthConfig := clientcredentials.Config{ + ClientID: o.ClientID, + ClientSecret: o.ClientSecret, + TokenURL: o.TokenURL, + Scopes: o.Scopes, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, client) + client = oauthConfig.Client(ctx) + } + + return client +} diff --git a/plugins/processors/reverse_dns/parallel/ordered.go b/plugins/common/parallel/ordered.go similarity index 100% rename from plugins/processors/reverse_dns/parallel/ordered.go rename to plugins/common/parallel/ordered.go diff --git a/plugins/processors/reverse_dns/parallel/parallel.go b/plugins/common/parallel/parallel.go similarity index 100% rename from plugins/processors/reverse_dns/parallel/parallel.go rename to plugins/common/parallel/parallel.go diff --git a/plugins/processors/reverse_dns/parallel/parallel_test.go b/plugins/common/parallel/parallel_test.go similarity index 89% rename from plugins/processors/reverse_dns/parallel/parallel_test.go rename to plugins/common/parallel/parallel_test.go index 0d2839a24f4cd..1e2eaccb98654 100644 --- a/plugins/processors/reverse_dns/parallel/parallel_test.go +++ b/plugins/common/parallel/parallel_test.go @@ -7,7 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/processors/reverse_dns/parallel" + "github.com/influxdata/telegraf/plugins/common/parallel" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -18,14 +18,13 @@ func TestOrderedJobsStayOrdered(t *testing.T) { p := parallel.NewOrdered(acc, jobFunc, 10000, 10) now := time.Now() for i := 0; i < 20000; i++ { - m, err := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": i, }, now, ) - require.NoError(t, err) now = now.Add(1) p.Enqueue(m) } @@ -51,14 +50,13 @@ func TestUnorderedJobsDontDropAnyJobs(t *testing.T) { expectedTotal := 0 for i := 0; i < 20000; i++ { expectedTotal += i - m, err := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": i, }, now, ) - require.NoError(t, err) now = now.Add(1) p.Enqueue(m) } @@ -79,7 +77,7 @@ func BenchmarkOrdered(b *testing.B) { p := parallel.NewOrdered(acc, jobFunc, 10000, 10) - m, _ := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": 1, @@ -99,7 +97,7 @@ func BenchmarkUnordered(b *testing.B) { p := parallel.NewUnordered(acc, jobFunc, 10) - m, _ := metric.New("test", + m := metric.New("test", map[string]string{}, map[string]interface{}{ "val": 1, diff --git a/plugins/processors/reverse_dns/parallel/unordered.go b/plugins/common/parallel/unordered.go similarity index 100% rename from 
plugins/processors/reverse_dns/parallel/unordered.go
rename to plugins/common/parallel/unordered.go
diff --git a/plugins/common/proxy/connect.go b/plugins/common/proxy/connect.go
new file mode 100644
index 0000000000000..6c95ddf95e050
--- /dev/null
+++ b/plugins/common/proxy/connect.go
@@ -0,0 +1,140 @@
+package proxy
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+
+	netProxy "golang.org/x/net/proxy"
+)
+
+// httpConnectProxy proxies TCP traffic (and only TCP) over an HTTP tunnel using the CONNECT method.
+type httpConnectProxy struct {
+	forward netProxy.Dialer
+	url     *url.URL
+}
+
+func (c *httpConnectProxy) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
+	// Prevent using UDP
+	if network == "udp" {
+		return nil, fmt.Errorf("cannot proxy %q traffic over HTTP CONNECT", network)
+	}
+
+	var proxyConn net.Conn
+	var err error
+	if dialer, ok := c.forward.(netProxy.ContextDialer); ok {
+		proxyConn, err = dialer.DialContext(ctx, "tcp", c.url.Host)
+	} else {
+		shim := contextDialerShim{c.forward}
+		proxyConn, err = shim.DialContext(ctx, "tcp", c.url.Host)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Add and strip http:// to extract authority portion of the URL
+	// since CONNECT doesn't use a full URL. The request header would
+	// look something like: "CONNECT www.influxdata.com:443 HTTP/1.1"
+	requestURL, err := url.Parse("http://" + addr)
+	if err != nil {
+		if err := proxyConn.Close(); err != nil {
+			return nil, err
+		}
+		return nil, err
+	}
+	requestURL.Scheme = ""
+
+	// Build HTTP CONNECT request
+	req, err := http.NewRequest(http.MethodConnect, requestURL.String(), nil)
+	if err != nil {
+		if err := proxyConn.Close(); err != nil {
+			return nil, err
+		}
+		return nil, err
+	}
+	req.Close = false
+	if password, hasAuth := c.url.User.Password(); hasAuth {
+		req.SetBasicAuth(c.url.User.Username(), password)
+	}
+
+	err = req.Write(proxyConn)
+	if err != nil {
+		if err := proxyConn.Close(); err != nil {
+			return nil, err
+		}
+		return nil, err
+	}
+
+	resp, err := http.ReadResponse(bufio.NewReader(proxyConn), req)
+	if err != nil {
+		if err := proxyConn.Close(); err != nil {
+			return nil, err
+		}
+		return nil, err
+	}
+	if err := resp.Body.Close(); err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode != 200 {
+		if err := proxyConn.Close(); err != nil {
+			return nil, err
+		}
+		return nil, fmt.Errorf("failed to connect to proxy: %q", resp.Status)
+	}
+
+	return proxyConn, nil
+}
+
+func (c *httpConnectProxy) Dial(network, addr string) (net.Conn, error) {
+	return c.DialContext(context.Background(), network, addr)
+}
+
+func newHTTPConnectProxy(proxyURL *url.URL, forward netProxy.Dialer) (netProxy.Dialer, error) {
+	return &httpConnectProxy{forward, proxyURL}, nil
+}
+
+func init() {
+	// Register new proxy types
+	netProxy.RegisterDialerType("http", newHTTPConnectProxy)
+	netProxy.RegisterDialerType("https", newHTTPConnectProxy)
+}
+
+// contextDialerShim allows cancellation of the dial from a context even if the underlying
+// dialer does not implement `proxy.ContextDialer`. In practice this should rarely run:
+// all of the standard library dialers implement `proxy.ContextDialer`, so the shim only
+// matters if a new proxy type without that interface is registered.
+type contextDialerShim struct { + dialer netProxy.Dialer +} + +func (cd *contextDialerShim) Dial(network, addr string) (net.Conn, error) { + return cd.dialer.Dial(network, addr) +} + +func (cd *contextDialerShim) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + + go func() { + conn, err = cd.dialer.Dial(network, addr) + close(done) + if conn != nil && ctx.Err() != nil { + _ = conn.Close() + } + }() + + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + + return conn, err +} diff --git a/plugins/common/proxy/dialer.go b/plugins/common/proxy/dialer.go new file mode 100644 index 0000000000000..844d12ac73596 --- /dev/null +++ b/plugins/common/proxy/dialer.go @@ -0,0 +1,37 @@ +package proxy + +import ( + "context" + "net" + "time" + + netProxy "golang.org/x/net/proxy" +) + +type ProxiedDialer struct { + dialer netProxy.Dialer +} + +func (pd *ProxiedDialer) Dial(network, addr string) (net.Conn, error) { + return pd.dialer.Dial(network, addr) +} + +func (pd *ProxiedDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + if contextDialer, ok := pd.dialer.(netProxy.ContextDialer); ok { + return contextDialer.DialContext(ctx, network, addr) + } + + contextDialer := contextDialerShim{pd.dialer} + return contextDialer.DialContext(ctx, network, addr) +} + +func (pd *ProxiedDialer) DialTimeout(network, addr string, timeout time.Duration) (net.Conn, error) { + ctx := context.Background() + if timeout.Seconds() != 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + return pd.DialContext(ctx, network, addr) +} diff --git a/plugins/common/proxy/proxy.go b/plugins/common/proxy/proxy.go new file mode 100644 index 0000000000000..39823ef0fcd57 --- /dev/null +++ b/plugins/common/proxy/proxy.go @@ -0,0 +1,57 @@ +package proxy + +import ( + "fmt" + "net/http" + "net/url" + + "golang.org/x/net/proxy" +) + +type HTTPProxy struct { + UseSystemProxy bool `toml:"use_system_proxy"` + HTTPProxyURL string `toml:"http_proxy_url"` +} + +type proxyFunc func(req *http.Request) (*url.URL, error) + +func (p *HTTPProxy) Proxy() (proxyFunc, error) { + if p.UseSystemProxy { + return http.ProxyFromEnvironment, nil + } else if len(p.HTTPProxyURL) > 0 { + address, err := url.Parse(p.HTTPProxyURL) + if err != nil { + return nil, fmt.Errorf("error parsing proxy url %q: %w", p.HTTPProxyURL, err) + } + return http.ProxyURL(address), nil + } + + return nil, nil +} + +type TCPProxy struct { + UseProxy bool `toml:"use_proxy"` + ProxyURL string `toml:"proxy_url"` +} + +func (p *TCPProxy) Proxy() (*ProxiedDialer, error) { + var dialer proxy.Dialer + if p.UseProxy { + if len(p.ProxyURL) > 0 { + parsed, err := url.Parse(p.ProxyURL) + if err != nil { + return nil, fmt.Errorf("error parsing proxy url %q: %w", p.ProxyURL, err) + } + + if dialer, err = proxy.FromURL(parsed, proxy.Direct); err != nil { + return nil, err + } + } else { + dialer = proxy.FromEnvironment() + } + } else { + dialer = proxy.Direct + } + + return &ProxiedDialer{dialer}, nil +} diff --git a/plugins/common/proxy/socks5.go b/plugins/common/proxy/socks5.go new file mode 100644 index 0000000000000..e69dd5f3294d1 --- /dev/null +++ b/plugins/common/proxy/socks5.go @@ -0,0 +1,22 @@ +package proxy + +import ( + "golang.org/x/net/proxy" +) + +type Socks5ProxyConfig struct { + Socks5ProxyEnabled bool `toml:"socks5_enabled"` + Socks5ProxyAddress string `toml:"socks5_address"` + 
Socks5ProxyUsername string `toml:"socks5_username"` + Socks5ProxyPassword string `toml:"socks5_password"` +} + +func (c *Socks5ProxyConfig) GetDialer() (proxy.Dialer, error) { + var auth *proxy.Auth + if c.Socks5ProxyPassword != "" || c.Socks5ProxyUsername != "" { + auth = new(proxy.Auth) + auth.User = c.Socks5ProxyUsername + auth.Password = c.Socks5ProxyPassword + } + return proxy.SOCKS5("tcp", c.Socks5ProxyAddress, auth, proxy.Direct) +} diff --git a/plugins/common/proxy/socks5_test.go b/plugins/common/proxy/socks5_test.go new file mode 100644 index 0000000000000..33fa4b4aab724 --- /dev/null +++ b/plugins/common/proxy/socks5_test.go @@ -0,0 +1,70 @@ +package proxy + +import ( + "net" + "testing" + "time" + + "github.com/armon/go-socks5" + "github.com/stretchr/testify/require" +) + +func TestSocks5ProxyConfigIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + const ( + proxyAddress = "0.0.0.0:12345" + proxyUsername = "user" + proxyPassword = "password" + ) + + l, err := net.Listen("tcp", "0.0.0.0:0") + require.NoError(t, err) + + server, err := socks5.New(&socks5.Config{ + AuthMethods: []socks5.Authenticator{socks5.UserPassAuthenticator{ + Credentials: socks5.StaticCredentials{ + proxyUsername: proxyPassword, + }, + }}, + }) + require.NoError(t, err) + + go func() { require.NoError(t, server.ListenAndServe("tcp", proxyAddress)) }() + + conf := Socks5ProxyConfig{ + Socks5ProxyEnabled: true, + Socks5ProxyAddress: proxyAddress, + Socks5ProxyUsername: proxyUsername, + Socks5ProxyPassword: proxyPassword, + } + dialer, err := conf.GetDialer() + require.NoError(t, err) + + var proxyConn net.Conn + for i := 0; i < 10; i++ { + proxyConn, err = dialer.Dial("tcp", l.Addr().String()) + if err == nil { + break + } + time.Sleep(10 * time.Millisecond) + } + require.NotNil(t, proxyConn) + defer func() { require.NoError(t, proxyConn.Close()) }() + + serverConn, err := l.Accept() + require.NoError(t, err) + defer func() { require.NoError(t, serverConn.Close()) }() + + writePayload := []byte("test") + _, err = proxyConn.Write(writePayload) + require.NoError(t, err) + + receivePayload := make([]byte, 4) + _, err = serverConn.Read(receivePayload) + require.NoError(t, err) + + require.Equal(t, writePayload, receivePayload) +} diff --git a/plugins/common/shim/README.md b/plugins/common/shim/README.md index 5453c90a4d548..e58249608ae48 100644 --- a/plugins/common/shim/README.md +++ b/plugins/common/shim/README.md @@ -4,6 +4,7 @@ The goal of this _shim_ is to make it trivial to extract an internal input, processor, or output plugin from the main Telegraf repo out to a stand-alone repo. This allows anyone to build and run it as a separate app using one of the execd plugins: + - [inputs.execd](/plugins/inputs/execd) - [processors.execd](/plugins/processors/execd) - [outputs.execd](/plugins/outputs/execd) @@ -56,8 +57,8 @@ execd plugins: Refer to the execd plugin readmes for more information. -## Congratulations! +## Congratulations You've done it! Consider publishing your plugin to github and open a Pull Request back to the Telegraf repo letting us know about the availability of your -[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md). \ No newline at end of file +[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md). 
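For reference, a minimal stand-alone main.go of the kind the shim README above describes might look like the following sketch. The external plugin import path and its registration are assumptions for illustration only; the shim calls mirror the example under plugins/common/shim/example in this patch.

package main

import (
	"flag"
	"fmt"
	"os"
	"time"

	"github.com/influxdata/telegraf/plugins/common/shim"

	// Hypothetical external plugin; importing it for side effects
	// registers the input via inputs.Add in its init function.
	_ "github.com/example/myplugin"
)

func main() {
	pollInterval := flag.Duration("poll_interval", 1*time.Second, "how often to send metrics")
	configFile := flag.String("config", "", "path to the config file for this plugin")
	flag.Parse()

	// The shim runs a single plugin; with no config file, the imported plugin is used.
	shimLayer := shim.New()
	if err := shimLayer.LoadConfig(configFile); err != nil {
		fmt.Fprintf(os.Stderr, "Err loading input: %s\n", err)
		os.Exit(1)
	}
	// Run until stdin closes or a termination signal arrives.
	if err := shimLayer.Run(*pollInterval); err != nil {
		fmt.Fprintf(os.Stderr, "Err: %s\n", err)
		os.Exit(1)
	}
}

Built as its own module, a binary like this can then be wired into Telegraf through one of the execd plugins listed in the README.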
diff --git a/plugins/common/shim/config.go b/plugins/common/shim/config.go index 439ec90a16283..ad08e08ea803d 100644 --- a/plugins/common/shim/config.go +++ b/plugins/common/shim/config.go @@ -3,8 +3,7 @@ package shim import ( "errors" "fmt" - "io/ioutil" - "log" + "log" //nolint:revive // Allow exceptional but valid use of log here. "os" "github.com/BurntSushi/toml" @@ -34,15 +33,15 @@ func (s *Shim) LoadConfig(filePath *string) error { } if conf.Input != nil { if err = s.AddInput(conf.Input); err != nil { - return fmt.Errorf("Failed to add Input: %w", err) + return fmt.Errorf("failed to add Input: %w", err) } } else if conf.Processor != nil { if err = s.AddStreamingProcessor(conf.Processor); err != nil { - return fmt.Errorf("Failed to add Processor: %w", err) + return fmt.Errorf("failed to add Processor: %w", err) } } else if conf.Output != nil { if err = s.AddOutput(conf.Output); err != nil { - return fmt.Errorf("Failed to add Output: %w", err) + return fmt.Errorf("failed to add Output: %w", err) } } return nil @@ -53,14 +52,12 @@ func LoadConfig(filePath *string) (loaded loadedConfig, err error) { var data string conf := config{} if filePath != nil && *filePath != "" { - - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return loadedConfig{}, err } data = expandEnvVars(b) - } else { conf, err = DefaultImportedPlugins() if err != nil { diff --git a/plugins/common/shim/config_test.go b/plugins/common/shim/config_test.go index 97d2004200b44..69c18394ae274 100644 --- a/plugins/common/shim/config_test.go +++ b/plugins/common/shim/config_test.go @@ -5,16 +5,19 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" tgConfig "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/processors" - "github.com/stretchr/testify/require" ) func TestLoadConfig(t *testing.T) { - os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") - os.Setenv("SECRET_VALUE", `test"\test`) + err := os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") + require.NoError(t, err) + err = os.Setenv("SECRET_VALUE", `test"\test`) + require.NoError(t, err) inputs.Add("test", func() telegraf.Input { return &serviceInput{} @@ -31,16 +34,6 @@ func TestLoadConfig(t *testing.T) { require.Equal(t, `test"\test`, inp.SecretValue) } -func TestDefaultImportedPluginsSelfRegisters(t *testing.T) { - inputs.Add("test", func() telegraf.Input { - return &testInput{} - }) - - cfg, err := LoadConfig(nil) - require.NoError(t, err) - require.Equal(t, "test", cfg.Input.Description()) -} - func TestLoadingSpecialTypes(t *testing.T) { inputs.Add("test", func() telegraf.Input { return &testDurationInput{} @@ -54,6 +47,7 @@ func TestLoadingSpecialTypes(t *testing.T) { require.EqualValues(t, 3*time.Second, inp.Duration) require.EqualValues(t, 3*1000*1000, inp.Size) + require.EqualValues(t, 52, inp.Hex) } func TestLoadingProcessorWithConfig(t *testing.T) { @@ -72,6 +66,7 @@ func TestLoadingProcessorWithConfig(t *testing.T) { type testDurationInput struct { Duration tgConfig.Duration `toml:"duration"` Size tgConfig.Size `toml:"size"` + Hex int64 `toml:"hex"` } func (i *testDurationInput) SampleConfig() string { @@ -81,7 +76,7 @@ func (i *testDurationInput) SampleConfig() string { func (i *testDurationInput) Description() string { return "" } -func (i *testDurationInput) Gather(acc telegraf.Accumulator) error { +func (i *testDurationInput) Gather(_ telegraf.Accumulator) error { return nil } diff --git 
a/plugins/common/shim/example/cmd/main.go b/plugins/common/shim/example/cmd/main.go index 4f51f7f878fb3..27c5b8741adc3 100644 --- a/plugins/common/shim/example/cmd/main.go +++ b/plugins/common/shim/example/cmd/main.go @@ -13,7 +13,7 @@ import ( ) var pollInterval = flag.Duration("poll_interval", 1*time.Second, "how often to send metrics") -var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "how often to send metrics") +var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "set to true to disable polling. You want to use this when you are sending metrics on your own schedule") var configFile = flag.String("config", "", "path to the config file for this plugin") var err error @@ -21,7 +21,7 @@ var err error // // However, if you want to do all your config in code, you can like so: // -// // initialize your plugin with any settngs you want +// // initialize your plugin with any settings you want // myInput := &mypluginname.MyPlugin{ // DefaultSettingHere: 3, // } @@ -30,7 +30,7 @@ var err error // // shim.AddInput(myInput) // -// // now the shim.Run() call as below. +// // now the shim.Run() call as below. Note the shim is only intended to run a single plugin. // func main() { // parse command line options @@ -40,20 +40,20 @@ func main() { } // create the shim. This is what will run your plugins. - shim := shim.New() + shimLayer := shim.New() // If no config is specified, all imported plugins are loaded. - // otherwise follow what the config asks for. + // otherwise, follow what the config asks for. // Check for settings from a config toml file, // (or just use whatever plugins were imported above) - err = shim.LoadConfig(configFile) + err = shimLayer.LoadConfig(configFile) if err != nil { fmt.Fprintf(os.Stderr, "Err loading input: %s\n", err) os.Exit(1) } - // run the input plugin(s) until stdin closes or we receive a termination signal - if err := shim.Run(*pollInterval); err != nil { + // run a single plugin until stdin closes or we receive a termination signal + if err := shimLayer.Run(*pollInterval); err != nil { fmt.Fprintf(os.Stderr, "Err: %s\n", err) os.Exit(1) } diff --git a/plugins/common/shim/goshim.go b/plugins/common/shim/goshim.go index 7be139194520f..ad03cff22d79e 100644 --- a/plugins/common/shim/goshim.go +++ b/plugins/common/shim/goshim.go @@ -84,13 +84,13 @@ func (s *Shim) Run(pollInterval time.Duration) error { if err != nil { return fmt.Errorf("RunProcessor error: %w", err) } - } else if s.Output != nil { + } else if s.Output != nil { //nolint:revive // Not simplifying here to stay in the structure for better understanding the code err := s.RunOutput() if err != nil { return fmt.Errorf("RunOutput error: %w", err) } } else { - return fmt.Errorf("Nothing to run") + return fmt.Errorf("nothing to run") } return nil @@ -102,7 +102,7 @@ func hasQuit(ctx context.Context) bool { func (s *Shim) writeProcessedMetrics() error { serializer := influx.NewSerializer() - for { + for { //nolint:gosimple // for-select used on purpose select { case m, open := <-s.metricCh: if !open { @@ -113,7 +113,10 @@ func (s *Shim) writeProcessedMetrics() error { return fmt.Errorf("failed to serialize metric: %s", err) } // Write this to stdout - fmt.Fprint(s.stdout, string(b)) + _, err = fmt.Fprint(s.stdout, string(b)) + if err != nil { + return fmt.Errorf("failed to write metric: %s", err) + } } } } diff --git a/plugins/common/shim/goshim_test.go b/plugins/common/shim/goshim_test.go index 080a513ade250..1011f9e77293f 100644 --- a/plugins/common/shim/goshim_test.go +++ 
b/plugins/common/shim/goshim_test.go
@@ -8,8 +8,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/influxdata/telegraf"
 	"github.com/stretchr/testify/require"
+
+	"github.com/influxdata/telegraf"
 )
 
 func TestShimSetsUpLogger(t *testing.T) {
@@ -18,7 +19,8 @@ func TestShimSetsUpLogger(t *testing.T) {
 	runErroringInputPlugin(t, 40*time.Second, stdinReader, nil, stderrWriter)
 
-	stdinWriter.Write([]byte("\n"))
+	_, err := stdinWriter.Write([]byte("\n"))
+	require.NoError(t, err)
 
 	// <-metricProcessed
 
@@ -27,7 +29,8 @@
 	require.NoError(t, err)
 	require.Contains(t, out, "Error in plugin: intentional")
 
-	stdinWriter.Close()
+	err = stdinWriter.Close()
+	require.NoError(t, err)
 }
 
 func runErroringInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdout, stderr io.Writer) (metricProcessed chan bool, exited chan bool) {
@@ -46,7 +49,8 @@ func runErroringInputPlugin(t *testing.T, interval time.Duration, stdin io.Reade
 		shim.stderr = stderr
 		log.SetOutput(stderr)
 	}
-	shim.AddInput(inp)
+	err := shim.AddInput(inp)
+	require.NoError(t, err)
 	go func() {
 		err := shim.Run(interval)
 		require.NoError(t, err)
@@ -62,16 +66,12 @@ func (i *erroringInput) SampleConfig() string {
 	return ""
 }
 
-func (i *erroringInput) Description() string {
-	return ""
-}
-
 func (i *erroringInput) Gather(acc telegraf.Accumulator) error {
 	acc.AddError(errors.New("intentional"))
 	return nil
 }
 
-func (i *erroringInput) Start(acc telegraf.Accumulator) error {
+func (i *erroringInput) Start(_ telegraf.Accumulator) error {
 	return nil
 }
diff --git a/plugins/common/shim/input_test.go b/plugins/common/shim/input_test.go
index 32f97d5924bc5..26d164e54c3e7 100644
--- a/plugins/common/shim/input_test.go
+++ b/plugins/common/shim/input_test.go
@@ -3,7 +3,6 @@ package shim
 import (
 	"bufio"
 	"io"
-	"io/ioutil"
 	"strings"
 	"testing"
 	"time"
@@ -35,7 +34,8 @@ func TestInputShimStdinSignalingWorks(t *testing.T) {
 
 	metricProcessed, exited := runInputPlugin(t, 40*time.Second, stdinReader, stdoutWriter, nil)
 
-	stdinWriter.Write([]byte("\n"))
+	_, err := stdinWriter.Write([]byte("\n"))
+	require.NoError(t, err)
 
 	<-metricProcessed
 
@@ -44,8 +44,11 @@ func TestInputShimStdinSignalingWorks(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out)
 
-	stdinWriter.Close()
-	go ioutil.ReadAll(r)
+	err = stdinWriter.Close()
+	require.NoError(t, err)
+	go func() {
+		_, _ = io.ReadAll(r)
+	}()
 	// check that it exits cleanly
 	<-exited
 }
@@ -67,7 +70,8 @@ func runInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdou
 	if stderr != nil {
 		shim.stderr = stderr
 	}
-	shim.AddInput(inp)
+	err := shim.AddInput(inp)
+	require.NoError(t, err)
 	go func() {
 		err := shim.Run(interval)
 		require.NoError(t, err)
@@ -100,7 +104,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (i *testInput) Start(acc telegraf.Accumulator) error {
+func (i *testInput) Start(_ telegraf.Accumulator) error {
 	return nil
 }
@@ -133,7 +137,7 @@ func (i *serviceInput) Gather(acc telegraf.Accumulator) error {
 	return nil
 }
 
-func (i *serviceInput) Start(acc telegraf.Accumulator) error {
+func (i *serviceInput) Start(_ telegraf.Accumulator) error {
 	return nil
 }
diff --git a/plugins/common/shim/logger.go b/plugins/common/shim/logger.go
index 88db63ab7d58c..74bfbfdef2019 100644
--- a/plugins/common/shim/logger.go
+++ b/plugins/common/shim/logger.go
@@ -2,7 +2,7 @@ package shim
 
 import (
 	"fmt"
-	"log"
+	"log" //nolint:revive // Allow exceptional but valid use of log here.
 	"os"
 	"reflect"
 
@@ -66,7 +66,7 @@ func (l *Logger) Info(args ...interface{}) {
 // setLoggerOnPlugin injects the logger into the plugin,
 // if it defines Log telegraf.Logger. This is sort of like SetLogger but using
 // reflection instead of forcing the plugin author to define the function for it.
-func setLoggerOnPlugin(i interface{}, log telegraf.Logger) {
+func setLoggerOnPlugin(i interface{}, logger telegraf.Logger) {
 	valI := reflect.ValueOf(i)
 
 	if valI.Type().Kind() != reflect.Ptr {
@@ -78,12 +78,9 @@
 		return
 	}
 
-	switch field.Type().String() {
-	case "telegraf.Logger":
+	if field.Type().String() == "telegraf.Logger" {
 		if field.CanSet() {
-			field.Set(reflect.ValueOf(log))
+			field.Set(reflect.ValueOf(logger))
 		}
 	}
-
-	return
 }
diff --git a/plugins/common/shim/output_test.go b/plugins/common/shim/output_test.go
index 5a74d59edb240..468ae28e05eee 100644
--- a/plugins/common/shim/output_test.go
+++ b/plugins/common/shim/output_test.go
@@ -34,7 +34,7 @@ func TestOutputShim(t *testing.T) {
 
 	serializer, _ := serializers.NewInfluxSerializer()
 
-	m, _ := metric.New("thing",
+	m := metric.New("thing",
 		map[string]string{
 			"a": "b",
 		},
diff --git a/plugins/common/shim/processor.go b/plugins/common/shim/processor.go
index 33dceba872759..d8f660b360cd6 100644
--- a/plugins/common/shim/processor.go
+++ b/plugins/common/shim/processor.go
@@ -1,14 +1,13 @@
 package shim
 
 import (
-	"bufio"
 	"fmt"
 	"sync"
 	"time"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/agent"
-	"github.com/influxdata/telegraf/plugins/parsers"
+	"github.com/influxdata/telegraf/plugins/parsers/influx"
 	"github.com/influxdata/telegraf/plugins/processors"
 )
 
@@ -37,12 +36,7 @@ func (s *Shim) RunProcessor() error {
 	acc := agent.NewAccumulator(s, s.metricCh)
 	acc.SetPrecision(time.Nanosecond)
 
-	parser, err := parsers.NewInfluxParser()
-	if err != nil {
-		return fmt.Errorf("Failed to create new parser: %w", err)
-	}
-
-	err = s.Processor.Start(acc)
+	err := s.Processor.Start(acc)
 	if err != nil {
 		return fmt.Errorf("failed to start processor: %w", err)
 	}
@@ -54,13 +48,21 @@ func (s *Shim) RunProcessor() error {
 		wg.Done()
 	}()
 
-	scanner := bufio.NewScanner(s.stdin)
-	for scanner.Scan() {
-		m, err := parser.ParseLine(scanner.Text())
+	parser := influx.NewStreamParser(s.stdin)
+	for {
+		m, err := parser.Next()
 		if err != nil {
-			fmt.Fprintf(s.stderr, "Failed to parse metric: %s\b", err)
+			if err == influx.EOF {
+				break // stream ended
+			}
+			if parseErr, isParseError := err.(*influx.ParseError); isParseError {
+				fmt.Fprintf(s.stderr, "Failed to parse metric: %s\n", parseErr)
+				continue
+			}
+			fmt.Fprintf(s.stderr, "Failure during reading stdin: %s\n", err)
 			continue
 		}
+
 		s.Processor.Add(m, acc)
 	}
diff --git a/plugins/common/shim/processor_test.go b/plugins/common/shim/processor_test.go
index b4cf01ae0236f..b7b1739aee02a 100644
--- a/plugins/common/shim/processor_test.go
+++ b/plugins/common/shim/processor_test.go
@@ -3,20 +3,35 @@ package shim
 import (
 	"bufio"
 	"io"
-	"io/ioutil"
+	"math/rand"
 	"sync"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/require"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/metric"
 	"github.com/influxdata/telegraf/plugins/parsers"
 	"github.com/influxdata/telegraf/plugins/serializers"
-	"github.com/stretchr/testify/require"
 )
 
 func TestProcessorShim(t *testing.T) {
-	p := &testProcessor{}
+	testSendAndReceive(t, "f1", "fv1")
+}
+
+func TestProcessorShimWithLargerThanDefaultScannerBufferSize(t *testing.T) {
+	letters := 
[]rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]rune, bufio.MaxScanTokenSize*2) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + + testSendAndReceive(t, "f1", string(b)) +} + +func testSendAndReceive(t *testing.T, fieldKey string, fieldValue string) { + p := &testProcessor{"hi", "mom"} stdinReader, stdinWriter := io.Pipe() stdoutReader, stdoutWriter := io.Pipe() @@ -40,12 +55,13 @@ func TestProcessorShim(t *testing.T) { serializer, _ := serializers.NewInfluxSerializer() parser, _ := parsers.NewInfluxParser() - m, _ := metric.New("thing", + m := metric.New("thing", map[string]string{ "a": "b", }, map[string]interface{}{ - "v": 1, + "v": 1, + fieldKey: fieldValue, }, time.Now(), ) @@ -62,19 +78,26 @@ func TestProcessorShim(t *testing.T) { mOut, err := parser.ParseLine(out) require.NoError(t, err) - val, ok := mOut.GetTag("hi") + val, ok := mOut.GetTag(p.tagName) require.True(t, ok) - require.Equal(t, "mom", val) - - go ioutil.ReadAll(r) + require.Equal(t, p.tagValue, val) + val2, ok := mOut.Fields()[fieldKey] + require.True(t, ok) + require.Equal(t, fieldValue, val2) + go func() { + _, _ = io.ReadAll(r) + }() wg.Wait() } -type testProcessor struct{} +type testProcessor struct { + tagName string + tagValue string +} func (p *testProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { - for _, metric := range in { - metric.AddTag("hi", "mom") + for _, m := range in { + m.AddTag(p.tagName, p.tagValue) } return in } diff --git a/plugins/common/shim/testdata/special.conf b/plugins/common/shim/testdata/special.conf index c324b638497c5..53af78620701d 100644 --- a/plugins/common/shim/testdata/special.conf +++ b/plugins/common/shim/testdata/special.conf @@ -1,4 +1,5 @@ # testing custom field types [[inputs.test]] duration = "3s" - size = "3MB" \ No newline at end of file + size = "3MB" + hex = 0x34 \ No newline at end of file diff --git a/plugins/processors/starlark/builtins.go b/plugins/common/starlark/builtins.go similarity index 75% rename from plugins/processors/starlark/builtins.go rename to plugins/common/starlark/builtins.go index 4eda39b7d8d12..9bca11af77837 100644 --- a/plugins/processors/starlark/builtins.go +++ b/plugins/common/starlark/builtins.go @@ -5,25 +5,49 @@ import ( "sort" "time" - "github.com/influxdata/telegraf/metric" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf/metric" ) -func newMetric(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { - var name starlark.String - if err := starlark.UnpackPositionalArgs("Metric", args, kwargs, 1, &name); err != nil { +func newMetric(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + var ( + name starlark.String + tags, fields starlark.Value + ) + if err := starlark.UnpackArgs("Metric", args, kwargs, "name", &name, "tags?", &tags, "fields?", &fields); err != nil { return nil, err } - m, err := metric.New(string(name), nil, nil, time.Now()) + allFields, err := toFields(fields) + if err != nil { + return nil, err + } + allTags, err := toTags(tags) if err != nil { return nil, err } + m := metric.New(string(name), allTags, allFields, time.Now()) + return &Metric{metric: m}, nil } -func deepcopy(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func toString(value starlark.Value, errorMsg string) (string, error) { + if value, ok := value.(starlark.String); ok { + return string(value), nil + } + return "", 
fmt.Errorf(errorMsg, value) +} + +func items(value starlark.Value, errorMsg string) ([]starlark.Tuple, error) { + if iter, ok := value.(starlark.IterableMapping); ok { + return iter.Items(), nil + } + return nil, fmt.Errorf(errorMsg, value) +} + +func deepcopy(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var sm *Metric if err := starlark.UnpackPositionalArgs("deepcopy", args, kwargs, 1, &sm); err != nil { return nil, err @@ -34,6 +58,19 @@ func deepcopy(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, return &Metric{metric: dup}, nil } +// catch(f) evaluates f() and returns its evaluation error message +// if it failed or None if it succeeded. +func catch(thread *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + var fn starlark.Callable + if err := starlark.UnpackArgs("catch", args, kwargs, "fn", &fn); err != nil { + return nil, err + } + if _, err := starlark.Call(thread, fn, nil, nil); err != nil { + return starlark.String(err.Error()), nil + } + return starlark.None, nil +} + type builtinMethod func(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) func builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) { @@ -58,16 +95,10 @@ func builtinAttrNames(methods map[string]builtinMethod) []string { return names } -// nameErr returns an error message of the form "name: msg" -// where name is b.Name() and msg is a string or error. -func nameErr(b *starlark.Builtin, msg interface{}) error { - return fmt.Errorf("%s: %v", b.Name(), msg) -} - // --- dictionary methods --- // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear -func dict_clear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictClear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -79,7 +110,7 @@ func dict_clear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop -func dict_pop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictPop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var k, d starlark.Value if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -99,7 +130,7 @@ func dict_pop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·popitem -func dict_popitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictPopitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -111,7 +142,7 @@ func dict_popitem(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tu } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get -func dict_get(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { 
+func dictGet(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var key, dflt starlark.Value if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -127,7 +158,7 @@ func dict_get(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault -func dict_setdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictSetdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var key, dflt starlark.Value = nil, starlark.None if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &dflt); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) @@ -148,7 +179,7 @@ func dict_setdefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictUpdate(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { // Unpack the arguments if len(args) > 1 { return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args)) @@ -178,14 +209,13 @@ func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tup iter2 := starlark.Iterate(pair) if iter2 == nil { return nil, fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) - } defer iter2.Done() - len := starlark.Len(pair) - if len < 0 { + length := starlark.Len(pair) + if length < 0 { return nil, fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type()) - } else if len != 2 { - return nil, fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, len) + } else if length != 2 { + return nil, fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, length) } var k, v starlark.Value iter2.Next(&k) @@ -221,7 +251,7 @@ func dict_update(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tup } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items -func dict_items(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictItems(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -234,7 +264,7 @@ func dict_items(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tupl } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys -func dict_keys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { +func dictKeys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } @@ -248,7 +278,7 @@ func dict_keys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update -func dict_values(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { 
+func dictValues(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } diff --git a/plugins/processors/starlark/field_dict.go b/plugins/common/starlark/field_dict.go similarity index 64% rename from plugins/processors/starlark/field_dict.go rename to plugins/common/starlark/field_dict.go index e0c0349b617a1..8b09a045be8e9 100644 --- a/plugins/processors/starlark/field_dict.go +++ b/plugins/common/starlark/field_dict.go @@ -3,10 +3,12 @@ package starlark import ( "errors" "fmt" + "reflect" "strings" - "github.com/influxdata/telegraf" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" ) // FieldDict is a starlark.Value for the metric fields. It is heavily based on the @@ -17,17 +19,17 @@ type FieldDict struct { func (d FieldDict) String() string { buf := new(strings.Builder) - buf.WriteString("{") + buf.WriteString("{") //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep := "" for _, item := range d.Items() { k, v := item[0], item[1] - buf.WriteString(sep) - buf.WriteString(k.String()) - buf.WriteString(": ") - buf.WriteString(v.String()) + buf.WriteString(sep) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(k.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(": ") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(v.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep = ", " } - buf.WriteString("}") + buf.WriteString("}") //nolint:revive // from builder.go: "It returns the length of r and a nil error." return buf.String() } @@ -58,15 +60,15 @@ func (d FieldDict) Attr(name string) (starlark.Value, error) { } var FieldDictMethods = map[string]builtinMethod{ - "clear": dict_clear, - "get": dict_get, - "items": dict_items, - "keys": dict_keys, - "pop": dict_pop, - "popitem": dict_popitem, - "setdefault": dict_setdefault, - "update": dict_update, - "values": dict_values, + "clear": dictClear, + "get": dictGet, + "items": dictItems, + "keys": dictKeys, + "pop": dictPop, + "popitem": dictPopitem, + "setdefault": dictSetdefault, + "update": dictUpdate, + "values": dictValues, } // Get implements the starlark.Mapping interface. @@ -174,12 +176,13 @@ func (d FieldDict) Delete(k starlark.Value) (v starlark.Value, found bool, err e sv, err := asStarlarkValue(value) return sv, ok, err } + return starlark.None, false, nil } return starlark.None, false, errors.New("key must be of type 'str'") } -// Items implements the starlark.Mapping interface. +// Iterate implements the starlark.Iterator interface. func (d FieldDict) Iterate() starlark.Iterator { d.fieldIterCount++ return &FieldIterator{Metric: d.Metric, fields: d.metric.FieldList()} @@ -210,17 +213,44 @@ func (i *FieldIterator) Done() { // AsStarlarkValue converts a field value to a starlark.Value. 
func asStarlarkValue(value interface{}) (starlark.Value, error) { - switch v := value.(type) { - case float64: - return starlark.Float(v), nil - case int64: - return starlark.MakeInt64(v), nil - case uint64: - return starlark.MakeUint64(v), nil - case string: - return starlark.String(v), nil - case bool: - return starlark.Bool(v), nil + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Slice: + length := v.Len() + array := make([]starlark.Value, length) + for i := 0; i < length; i++ { + sVal, err := asStarlarkValue(v.Index(i).Interface()) + if err != nil { + return starlark.None, err + } + array[i] = sVal + } + return starlark.NewList(array), nil + case reflect.Map: + dict := starlark.NewDict(v.Len()) + iter := v.MapRange() + for iter.Next() { + sKey, err := asStarlarkValue(iter.Key().Interface()) + if err != nil { + return starlark.None, err + } + sValue, err := asStarlarkValue(iter.Value().Interface()) + if err != nil { + return starlark.None, err + } + dict.SetKey(sKey, sValue) + } + return dict, nil + case reflect.Float32, reflect.Float64: + return starlark.Float(v.Float()), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return starlark.MakeInt64(v.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return starlark.MakeUint64(v.Uint()), nil + case reflect.String: + return starlark.String(v.String()), nil + case reflect.Bool: + return starlark.Bool(v.Bool()), nil } return starlark.None, errors.New("invalid type") @@ -245,3 +275,27 @@ func asGoValue(value interface{}) (interface{}, error) { return nil, errors.New("invalid starlark type") } + +// ToFields converts a starlark.Value to a map of values. +func toFields(value starlark.Value) (map[string]interface{}, error) { + if value == nil { + return nil, nil + } + items, err := items(value, "The type %T is unsupported as type of collection of fields") + if err != nil { + return nil, err + } + result := make(map[string]interface{}, len(items)) + for _, item := range items { + key, err := toString(item[0], "The type %T is unsupported as type of key for fields") + if err != nil { + return nil, err + } + value, err := asGoValue(item[1]) + if err != nil { + return nil, err + } + result[key] = value + } + return result, nil +} diff --git a/plugins/common/starlark/logging.go b/plugins/common/starlark/logging.go new file mode 100644 index 0000000000000..35efa6a7effba --- /dev/null +++ b/plugins/common/starlark/logging.go @@ -0,0 +1,47 @@ +package starlark + +import ( + "errors" + "fmt" + + "github.com/influxdata/telegraf" + "go.starlark.net/starlark" + "go.starlark.net/starlarkstruct" +) + +// Builds a module that defines all the supported logging functions which will log using the provided logger +func LogModule(logger telegraf.Logger) *starlarkstruct.Module { + var logFunc = func(t *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { + return log(b, args, kwargs, logger) + } + return &starlarkstruct.Module{ + Name: "log", + Members: starlark.StringDict{ + "debug": starlark.NewBuiltin("log.debug", logFunc), + "info": starlark.NewBuiltin("log.info", logFunc), + "warn": starlark.NewBuiltin("log.warn", logFunc), + "error": starlark.NewBuiltin("log.error", logFunc), + }, + } +} + +// Logs the provided message according to the level chosen +func log(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple, logger telegraf.Logger) (starlark.Value, error) { + var msg starlark.String + if err 
:= starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &msg); err != nil { + return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) + } + switch b.Name() { + case "log.debug": + logger.Debug(string(msg)) + case "log.info": + logger.Info(string(msg)) + case "log.warn": + logger.Warn(string(msg)) + case "log.error": + logger.Error(string(msg)) + default: + return nil, errors.New("method " + b.Name() + " is unknown") + } + return starlark.None, nil +} diff --git a/plugins/processors/starlark/metric.go b/plugins/common/starlark/metric.go similarity index 73% rename from plugins/processors/starlark/metric.go rename to plugins/common/starlark/metric.go index 031d24ad69635..989c345765cff 100644 --- a/plugins/processors/starlark/metric.go +++ b/plugins/common/starlark/metric.go @@ -6,8 +6,9 @@ import ( "strings" "time" - "github.com/influxdata/telegraf" "go.starlark.net/starlark" + + "github.com/influxdata/telegraf" ) type Metric struct { @@ -36,15 +37,15 @@ func (m *Metric) Unwrap() telegraf.Metric { // it behaves more like the repr function would in Python. func (m *Metric) String() string { buf := new(strings.Builder) - buf.WriteString("Metric(") - buf.WriteString(m.Name().String()) - buf.WriteString(", tags=") - buf.WriteString(m.Tags().String()) - buf.WriteString(", fields=") - buf.WriteString(m.Fields().String()) - buf.WriteString(", time=") - buf.WriteString(m.Time().String()) - buf.WriteString(")") + buf.WriteString("Metric(") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Name().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", tags=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Tags().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", fields=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Fields().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(", time=") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(m.Time().String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(")") //nolint:revive // from builder.go: "It returns the length of r and a nil error." 
 	return buf.String()
 }
diff --git a/plugins/common/starlark/starlark.go b/plugins/common/starlark/starlark.go
new file mode 100644
index 0000000000000..5f365519871d0
--- /dev/null
+++ b/plugins/common/starlark/starlark.go
@@ -0,0 +1,182 @@
+package starlark //nolint - Needed to avoid getting import-shadowing: The name 'starlark' shadows an import name (revive)
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/influxdata/telegraf"
+	"go.starlark.net/lib/math"
+	"go.starlark.net/lib/time"
+	"go.starlark.net/resolve"
+	"go.starlark.net/starlark"
+	"go.starlark.net/starlarkjson"
+)
+
+type StarlarkCommon struct {
+	Source    string                 `toml:"source"`
+	Script    string                 `toml:"script"`
+	Constants map[string]interface{} `toml:"constants"`
+
+	Log              telegraf.Logger `toml:"-"`
+	StarlarkLoadFunc func(module string, logger telegraf.Logger) (starlark.StringDict, error)
+
+	thread     *starlark.Thread
+	globals    starlark.StringDict
+	functions  map[string]*starlark.Function
+	parameters map[string]starlark.Tuple
+}
+
+func (s *StarlarkCommon) Init() error {
+	if s.Source == "" && s.Script == "" {
+		return errors.New("one of source or script must be set")
+	}
+	if s.Source != "" && s.Script != "" {
+		return errors.New("source and script cannot both be set")
+	}
+
+	s.thread = &starlark.Thread{
+		Print: func(_ *starlark.Thread, msg string) { s.Log.Debug(msg) },
+		Load: func(thread *starlark.Thread, module string) (starlark.StringDict, error) {
+			return s.StarlarkLoadFunc(module, s.Log)
+		},
+	}
+
+	builtins := starlark.StringDict{}
+	builtins["Metric"] = starlark.NewBuiltin("Metric", newMetric)
+	builtins["deepcopy"] = starlark.NewBuiltin("deepcopy", deepcopy)
+	builtins["catch"] = starlark.NewBuiltin("catch", catch)
+	err := s.addConstants(&builtins)
+	if err != nil {
+		return err
+	}
+
+	program, err := s.sourceProgram(builtins, "")
+	if err != nil {
+		return err
+	}
+
+	// Execute source
+	globals, err := program.Init(s.thread, builtins)
+	if err != nil {
+		return err
+	}
+	// Make a shared state available to the apply function
+	globals["state"] = starlark.NewDict(0)
+
+	// Freeze the global state. This prevents modifications to the processor
+	// state and prevents scripts from containing errors storing tracking
+	// metrics. Tasks that require global state will not be possible due to
+	// this, so maybe we should relax this in the future.
+	globals.Freeze()
+
+	s.globals = globals
+	s.functions = make(map[string]*starlark.Function)
+	s.parameters = make(map[string]starlark.Tuple)
+	return nil
+}
+
+func (s *StarlarkCommon) GetParameters(name string) (starlark.Tuple, bool) {
+	parameters, found := s.parameters[name]
+	return parameters, found
+}
+
+func (s *StarlarkCommon) AddFunction(name string, params ...starlark.Value) error {
+	globalFn, found := s.globals[name]
+	if !found {
+		return fmt.Errorf("%s is not defined", name)
+	}
+
+	fn, found := globalFn.(*starlark.Function)
+	if !found {
+		return fmt.Errorf("%s is not a function", name)
+	}
+
+	if fn.NumParams() != len(params) {
+		return fmt.Errorf("%s function must take %d parameter(s)", name, len(params))
+	}
+	p := make(starlark.Tuple, len(params))
+	for i, param := range params {
+		p[i] = param
+	}
+	s.functions[name] = fn
+	s.parameters[name] = params
+	return nil
+}
+
+// Add all the constants defined in the plugin as constants of the script
+func (s *StarlarkCommon) addConstants(builtins *starlark.StringDict) error {
+	for key, val := range s.Constants {
+		sVal, err := asStarlarkValue(val)
+		if err != nil {
+			return fmt.Errorf("converting type %T failed: %v", val, err)
+		}
+		(*builtins)[key] = sVal
+	}
+	return nil
+}
+
+func (s *StarlarkCommon) sourceProgram(builtins starlark.StringDict, filename string) (*starlark.Program, error) {
+	var src interface{}
+	if s.Source != "" {
+		src = s.Source
+	}
+	_, program, err := starlark.SourceProgram(s.Script, src, builtins.Has)
+	return program, err
+}
+
+// Call calls the function corresponding to the given name.
+func (s *StarlarkCommon) Call(name string) (starlark.Value, error) {
+	fn, ok := s.functions[name]
+	if !ok {
+		return nil, fmt.Errorf("function %q does not exist", name)
+	}
+	args, ok := s.parameters[name]
+	if !ok {
+		return nil, fmt.Errorf("params for function %q do not exist", name)
+	}
+	return starlark.Call(s.thread, fn, args, nil)
+}
+
+func (s *StarlarkCommon) LogError(err error) {
+	if evalErr, ok := err.(*starlark.EvalError); ok {
+		for _, line := range strings.Split(evalErr.Backtrace(), "\n") {
+			s.Log.Error(line)
+		}
+	} else {
+		s.Log.Error(err)
+	}
+}
+
+func LoadFunc(module string, logger telegraf.Logger) (starlark.StringDict, error) {
+	switch module {
+	case "json.star":
+		return starlark.StringDict{
+			"json": starlarkjson.Module,
+		}, nil
+	case "logging.star":
+		return starlark.StringDict{
+			"log": LogModule(logger),
+		}, nil
+	case "math.star":
+		return starlark.StringDict{
+			"math": math.Module,
+		}, nil
+	case "time.star":
+		return starlark.StringDict{
+			"time": time.Module,
+		}, nil
+	default:
+		return nil, errors.New("module " + module + " is not available")
+	}
+}
+
+func init() {
+	// https://github.com/bazelbuild/starlark/issues/20
+	resolve.AllowNestedDef = true
+	resolve.AllowLambda = true
+	resolve.AllowFloat = true
+	resolve.AllowSet = true
+	resolve.AllowGlobalReassign = true
+	resolve.AllowRecursion = true
+}
diff --git a/plugins/processors/starlark/tag_dict.go b/plugins/common/starlark/tag_dict.go
similarity index 70%
rename from plugins/processors/starlark/tag_dict.go
rename to plugins/common/starlark/tag_dict.go
index 3d95264382db5..56ee0f6551d81 100644
--- a/plugins/processors/starlark/tag_dict.go
+++ b/plugins/common/starlark/tag_dict.go
@@ -5,8 +5,9 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/influxdata/telegraf"
 	"go.starlark.net/starlark"
+
+	"github.com/influxdata/telegraf"
 )
 
 // TagDict is a starlark.Value for the metric tags.
It is heavily based on the @@ -17,17 +18,17 @@ type TagDict struct { func (d TagDict) String() string { buf := new(strings.Builder) - buf.WriteString("{") + buf.WriteString("{") //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep := "" for _, item := range d.Items() { k, v := item[0], item[1] - buf.WriteString(sep) - buf.WriteString(k.String()) - buf.WriteString(": ") - buf.WriteString(v.String()) + buf.WriteString(sep) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(k.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(": ") //nolint:revive // from builder.go: "It returns the length of r and a nil error." + buf.WriteString(v.String()) //nolint:revive // from builder.go: "It returns the length of r and a nil error." sep = ", " } - buf.WriteString("}") + buf.WriteString("}") //nolint:revive // from builder.go: "It returns the length of r and a nil error." return buf.String() } @@ -58,15 +59,15 @@ func (d TagDict) Attr(name string) (starlark.Value, error) { } var TagDictMethods = map[string]builtinMethod{ - "clear": dict_clear, - "get": dict_get, - "items": dict_items, - "keys": dict_keys, - "pop": dict_pop, - "popitem": dict_popitem, - "setdefault": dict_setdefault, - "update": dict_update, - "values": dict_values, + "clear": dictClear, + "get": dictGet, + "items": dictItems, + "keys": dictKeys, + "pop": dictPop, + "popitem": dictPopitem, + "setdefault": dictSetdefault, + "update": dictUpdate, + "values": dictValues, } // Get implements the starlark.Mapping interface. @@ -162,12 +163,13 @@ func (d TagDict) Delete(k starlark.Value) (v starlark.Value, found bool, err err v := starlark.String(value) return v, ok, err } + return starlark.None, false, nil } return starlark.None, false, errors.New("key must be of type 'str'") } -// Items implements the starlark.Mapping interface. +// Iterate implements the starlark.Iterator interface. func (d TagDict) Iterate() starlark.Iterator { d.tagIterCount++ return &TagIterator{Metric: d.Metric, tags: d.metric.TagList()} @@ -195,3 +197,27 @@ func (i *TagIterator) Next(p *starlark.Value) bool { func (i *TagIterator) Done() { i.tagIterCount-- } + +// ToTags converts a starlark.Value to a map of string. +func toTags(value starlark.Value) (map[string]string, error) { + if value == nil { + return nil, nil + } + items, err := items(value, "The type %T is unsupported as type of collection of tags") + if err != nil { + return nil, err + } + result := make(map[string]string, len(items)) + for _, item := range items { + key, err := toString(item[0], "The type %T is unsupported as type of key for tags") + if err != nil { + return nil, err + } + value, err := toString(item[1], "The type %T is unsupported as type of value for tags") + if err != nil { + return nil, err + } + result[key] = value + } + return result, nil +} diff --git a/plugins/common/tls/config.go b/plugins/common/tls/config.go index 59fbc49526745..558900d0719d5 100644 --- a/plugins/common/tls/config.go +++ b/plugins/common/tls/config.go @@ -4,8 +4,10 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" "strings" + + "github.com/influxdata/telegraf/internal/choice" ) // ClientConfig represents the standard client TLS config. 
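For context on the hunk below, which adds the tls_key_pwd, tls_server_name, and tls_allowed_dns_names options: a plugin consuming the extended ClientConfig might look like this sketch. The file paths and the HTTP transport wiring are assumptions for illustration, not part of this patch.

package example

import (
	"net/http"

	"github.com/influxdata/telegraf/plugins/common/tls"
)

// newTLSClient sketches how a plugin could build an HTTP client from the
// common ClientConfig, including the new tls_server_name (SNI) override.
func newTLSClient() (*http.Client, error) {
	cfg := tls.ClientConfig{
		TLSCA:      "/etc/telegraf/ca.pem",          // assumed example path
		TLSCert:    "/etc/telegraf/client-cert.pem", // assumed example path
		TLSKey:     "/etc/telegraf/client-key.pem",  // assumed example path
		ServerName: "telegraf.example.com",          // becomes tls.Config.ServerName
	}
	tlsCfg, err := cfg.TLSConfig() // returns nil, nil when no TLS-related field is set
	if err != nil {
		return nil, err
	}
	return &http.Client{Transport: &http.Transport{TLSClientConfig: tlsCfg}}, nil
}

Setting ServerName is useful when connecting to a host by IP address while the server's certificate carries a DNS name, since it controls both SNI and certificate verification.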
@@ -13,22 +15,25 @@ type ClientConfig struct { TLSCA string `toml:"tls_ca"` TLSCert string `toml:"tls_cert"` TLSKey string `toml:"tls_key"` + TLSKeyPwd string `toml:"tls_key_pwd"` InsecureSkipVerify bool `toml:"insecure_skip_verify"` + ServerName string `toml:"tls_server_name"` - // Deprecated in 1.7; use TLS variables above - SSLCA string `toml:"ssl_ca"` - SSLCert string `toml:"ssl_cert"` - SSLKey string `toml:"ssl_key"` + SSLCA string `toml:"ssl_ca" deprecated:"1.7.0;use 'tls_ca' instead"` + SSLCert string `toml:"ssl_cert" deprecated:"1.7.0;use 'tls_cert' instead"` + SSLKey string `toml:"ssl_key" deprecated:"1.7.0;use 'tls_key' instead"` } // ServerConfig represents the standard server TLS config. type ServerConfig struct { - TLSCert string `toml:"tls_cert"` - TLSKey string `toml:"tls_key"` - TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"` - TLSCipherSuites []string `toml:"tls_cipher_suites"` - TLSMinVersion string `toml:"tls_min_version"` - TLSMaxVersion string `toml:"tls_max_version"` + TLSCert string `toml:"tls_cert"` + TLSKey string `toml:"tls_key"` + TLSKeyPwd string `toml:"tls_key_pwd"` + TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"` + TLSCipherSuites []string `toml:"tls_cipher_suites"` + TLSMinVersion string `toml:"tls_min_version"` + TLSMaxVersion string `toml:"tls_max_version"` + TLSAllowedDNSNames []string `toml:"tls_allowed_dns_names"` } // TLSConfig returns a tls.Config, may be nil without error if TLS is not @@ -45,11 +50,14 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) { c.TLSKey = c.SSLKey } - // TODO: return default tls.Config; plugins should not call if they don't - // want TLS, this will require using another option to determine. In the - // case of an HTTP plugin, you could use `https`. Other plugins may need - // the dedicated option `TLSEnable`. - if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify { + // This check returns a nil (aka, "use the default") + // tls.Config if no field is set that would have an effect on + // a TLS connection. That is, any of: + // * client certificate settings, + // * peer certificate authorities, + // * disabled security, or + // * an SNI server name. + if c.TLSCA == "" && c.TLSKey == "" && c.TLSCert == "" && !c.InsecureSkipVerify && c.ServerName == "" { return nil, nil } @@ -73,6 +81,10 @@ func (c *ClientConfig) TLSConfig() (*tls.Config, error) { } } + if c.ServerName != "" { + tlsConfig.ServerName = c.ServerName + } + return tlsConfig, nil } @@ -133,19 +145,24 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { "tls min version %q can't be greater than tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion) } + // Since clientAuth is tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + // there must be certs to validate. 
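+	// The DNS-name check below is likewise opt-in: the extra
+	// VerifyPeerCertificate hook is only installed when both an allowed-CA
+	// list and an allowed-DNS-name list are configured, so existing
+	// configurations keep their current verification behavior.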
+ if len(c.TLSAllowedCACerts) > 0 && len(c.TLSAllowedDNSNames) > 0 { + tlsConfig.VerifyPeerCertificate = c.verifyPeerCertificate + } + return tlsConfig, nil } func makeCertPool(certFiles []string) (*x509.CertPool, error) { pool := x509.NewCertPool() for _, certFile := range certFiles { - pem, err := ioutil.ReadFile(certFile) + pem, err := os.ReadFile(certFile) if err != nil { return nil, fmt.Errorf( "could not read certificate %q: %v", certFile, err) } - ok := pool.AppendCertsFromPEM(pem) - if !ok { + if !pool.AppendCertsFromPEM(pem) { return nil, fmt.Errorf( "could not parse any PEM certificates %q: %v", certFile, err) } @@ -164,3 +181,20 @@ func loadCertificate(config *tls.Config, certFile, keyFile string) error { config.BuildNameToCertificate() return nil } + +func (c *ServerConfig) verifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // The certificate chain is client + intermediate + root. + // Let's review the client certificate. + cert, err := x509.ParseCertificate(rawCerts[0]) + if err != nil { + return fmt.Errorf("could not validate peer certificate: %v", err) + } + + for _, name := range cert.DNSNames { + if choice.Contains(name, c.TLSAllowedDNSNames) { + return nil + } + } + + return fmt.Errorf("peer certificate not in allowed DNS Name list: %v", cert.DNSNames) +} diff --git a/plugins/common/tls/config_test.go b/plugins/common/tls/config_test.go index 93656087dfd55..123523bb54f05 100644 --- a/plugins/common/tls/config_test.go +++ b/plugins/common/tls/config_test.go @@ -6,9 +6,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var pki = testutil.NewPKI("../../../testutil/pki") @@ -33,6 +34,15 @@ func TestClientConfig(t *testing.T) { TLSKey: pki.ClientKeyPath(), }, }, + { + name: "success with tls key password set", + client: tls.ClientConfig{ + TLSCA: pki.CACertPath(), + TLSCert: pki.ClientCertPath(), + TLSKey: pki.ClientKeyPath(), + TLSKeyPwd: "", + }, + }, { name: "invalid ca", client: tls.ClientConfig{ @@ -86,6 +96,14 @@ func TestClientConfig(t *testing.T) { SSLKey: pki.ClientKeyPath(), }, }, + { + name: "set SNI server name", + client: tls.ClientConfig{ + ServerName: "foo.example.com", + }, + expNil: false, + expErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -119,9 +137,22 @@ func TestServerConfig(t *testing.T) { }, { name: "success", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + TLSAllowedDNSNames: []string{"localhost", "127.0.0.1"}, + TLSMinVersion: pki.TLSMinVersion(), + TLSMaxVersion: pki.TLSMaxVersion(), + }, + }, + { + name: "success with tls key password set", server: tls.ServerConfig{ TLSCert: pki.ServerCertPath(), TLSKey: pki.ServerKeyPath(), + TLSKeyPwd: "", TLSAllowedCACerts: []string{pki.CACertPath()}, TLSCipherSuites: []string{pki.CipherSuite()}, TLSMinVersion: pki.TLSMinVersion(), @@ -285,9 +316,10 @@ func TestConnect(t *testing.T) { } serverConfig := tls.ServerConfig{ - TLSCert: pki.ServerCertPath(), - TLSKey: pki.ServerKeyPath(), - TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSAllowedDNSNames: []string{"localhost", "127.0.0.1"}, } serverTLSConfig, err := 
serverConfig.TLSConfig()
@@ -313,5 +345,50 @@ func TestConnect(t *testing.T) {
 
 	resp, err := client.Get(ts.URL)
 	require.NoError(t, err)
+
+	defer resp.Body.Close()
 	require.Equal(t, 200, resp.StatusCode)
 }
+
+func TestConnectWrongDNS(t *testing.T) {
+	clientConfig := tls.ClientConfig{
+		TLSCA:   pki.CACertPath(),
+		TLSCert: pki.ClientCertPath(),
+		TLSKey:  pki.ClientKeyPath(),
+	}
+
+	serverConfig := tls.ServerConfig{
+		TLSCert:            pki.ServerCertPath(),
+		TLSKey:             pki.ServerKeyPath(),
+		TLSAllowedCACerts:  []string{pki.CACertPath()},
+		TLSAllowedDNSNames: []string{"localhos", "127.0.0.2"},
+	}
+
+	serverTLSConfig, err := serverConfig.TLSConfig()
+	require.NoError(t, err)
+
+	ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+	}))
+	ts.TLS = serverTLSConfig
+
+	ts.StartTLS()
+	defer ts.Close()
+
+	clientTLSConfig, err := clientConfig.TLSConfig()
+	require.NoError(t, err)
+
+	client := http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: clientTLSConfig,
+		},
+		Timeout: 10 * time.Second,
+	}
+
+	resp, err := client.Get(ts.URL)
+	require.Error(t, err)
+	if resp != nil {
+		err = resp.Body.Close()
+		require.NoError(t, err)
+	}
+}
diff --git a/plugins/common/tls/utils.go b/plugins/common/tls/utils.go
index ddc12d2c1e5e3..65388640f7dd8 100644
--- a/plugins/common/tls/utils.go
+++ b/plugins/common/tls/utils.go
@@ -10,11 +10,11 @@ func ParseCiphers(ciphers []string) ([]uint16, error) {
 	suites := []uint16{}
 
 	for _, cipher := range ciphers {
-		if v, ok := tlsCipherMap[cipher]; ok {
-			suites = append(suites, v)
-		} else {
+		v, ok := tlsCipherMap[cipher]
+		if !ok {
 			return nil, fmt.Errorf("unsupported cipher %q", cipher)
 		}
+		suites = append(suites, v)
 	}
 
 	return suites, nil
diff --git a/plugins/inputs/activemq/README.md b/plugins/inputs/activemq/README.md
index aba5a7f83ec27..f2a27625bb512 100644
--- a/plugins/inputs/activemq/README.md
+++ b/plugins/inputs/activemq/README.md
@@ -1,11 +1,12 @@
 # ActiveMQ Input Plugin
 
-This plugin gather queues, topics & subscribers metrics using ActiveMQ Console API.
+This plugin gathers queues, topics & subscribers metrics using ActiveMQ Console
+API.
 
-### Configuration:
+## Configuration
 
-```toml
-# Description
+```toml @sample.conf
+# Gather ActiveMQ metrics
 [[inputs.activemq]]
   ## ActiveMQ WebConsole URL
   url = "http://127.0.0.1:8161"
@@ -33,9 +34,10 @@ This plugin gather queues, topics & subscribers metrics using ActiveMQ Console A
   # insecure_skip_verify = false
 ```
 
-### Metrics
+## Metrics
 
-Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API.
+Every effort was made to preserve the names based on the XML response from the
+ActiveMQ Console API.
- activemq_queues - tags: @@ -47,7 +49,7 @@ Every effort was made to preserve the names based on the XML response from the A - consumer_count - enqueue_count - dequeue_count -+ activemq_topics +- activemq_topics - tags: - name - source @@ -74,9 +76,9 @@ Every effort was made to preserve the names based on the XML response from the A - enqueue_counter - dequeue_counter -### Example Output +## Example Output -``` +```shell activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000 activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000 activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000 diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go index f7847f83d8d04..11dcb78f4f64a 100644 --- a/plugins/inputs/activemq/activemq.go +++ b/plugins/inputs/activemq/activemq.go @@ -1,9 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package activemq import ( + _ "embed" "encoding/xml" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -12,19 +14,23 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type ActiveMQ struct { - Server string `toml:"server"` - Port int `toml:"port"` - URL string `toml:"url"` - Username string `toml:"username"` - Password string `toml:"password"` - Webadmin string `toml:"webadmin"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Server string `toml:"server" deprecated:"1.11.0;use 'url' instead"` + Port int `toml:"port" deprecated:"1.11.0;use 'url' instead"` + URL string `toml:"url"` + Username string `toml:"username"` + Password string `toml:"password"` + Webadmin string `toml:"webadmin"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -49,9 +55,9 @@ type Subscribers struct { type Subscriber struct { XMLName xml.Name `xml:"subscriber"` - ClientId string `xml:"clientId,attr"` + ClientID string `xml:"clientId,attr"` SubscriptionName string `xml:"subscriptionName,attr"` - ConnectionId string `xml:"connectionId,attr"` + ConnectionID string `xml:"connectionId,attr"` DestinationName string `xml:"destinationName,attr"` Selector string `xml:"selector,attr"` Active string `xml:"active,attr"` @@ -82,42 +88,7 @@ type Stats struct { DequeueCounter int `xml:"dequeueCounter,attr"` } -var sampleConfig = ` - ## ActiveMQ WebConsole URL - url = "http://127.0.0.1:8161" - - ## Required ActiveMQ Endpoint - ## deprecated in 1.11; use the url option - # server = "127.0.0.1" - # port = 8161 - - ## Credentials for basic HTTP authentication - # username = "admin" - # password = "admin" - - ## Required ActiveMQ webadmin root path - # webadmin = "admin" - - ## Maximum time to receive response. 
- # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - ` - -func (a *ActiveMQ) Description() string { - return "Gather ActiveMQ metrics" -} - -func (a *ActiveMQ) SampleConfig() string { - return sampleConfig -} - -func (a *ActiveMQ) createHttpClient() (*http.Client, error) { +func (a *ActiveMQ) createHTTPClient() (*http.Client, error) { tlsCfg, err := a.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -127,15 +98,19 @@ func (a *ActiveMQ) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: a.ResponseTimeout.Duration, + Timeout: time.Duration(a.ResponseTimeout), } return client, nil } +func (*ActiveMQ) SampleConfig() string { + return sampleConfig +} + func (a *ActiveMQ) Init() error { - if a.ResponseTimeout.Duration < time.Second { - a.ResponseTimeout.Duration = time.Second * 5 + if a.ResponseTimeout < config.Duration(time.Second) { + a.ResponseTimeout = config.Duration(time.Second * 5) } var err error @@ -157,7 +132,7 @@ func (a *ActiveMQ) Init() error { a.baseURL = u - a.client, err = a.createHttpClient() + a.client, err = a.createHTTPClient() if err != nil { return err } @@ -184,7 +159,7 @@ func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) { return nil, fmt.Errorf("GET %s returned status %q", u, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) { @@ -228,9 +203,9 @@ func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscriber records := make(map[string]interface{}) tags := make(map[string]string) - tags["client_id"] = subscriber.ClientId + tags["client_id"] = subscriber.ClientID tags["subscription_name"] = subscriber.SubscriptionName - tags["connection_id"] = subscriber.ConnectionId + tags["connection_id"] = subscriber.ConnectionID tags["destination_name"] = subscriber.DestinationName tags["selector"] = subscriber.Selector tags["active"] = subscriber.Active diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go index 407a381775adc..1e733a4eed201 100644 --- a/plugins/inputs/activemq/activemq_test.go +++ b/plugins/inputs/activemq/activemq_test.go @@ -11,7 +11,6 @@ import ( ) func TestGatherQueuesMetrics(t *testing.T) { - s := ` @@ -31,7 +30,7 @@ func TestGatherQueuesMetrics(t *testing.T) { queues := Queues{} - xml.Unmarshal([]byte(s), &queues) + require.NoError(t, xml.Unmarshal([]byte(s), &queues)) records := make(map[string]interface{}) tags := make(map[string]string) @@ -50,14 +49,13 @@ func TestGatherQueuesMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 - activeMQ.Init() + require.NoError(t, activeMQ.Init()) activeMQ.GatherQueuesMetrics(&acc, queues) acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags) } func TestGatherTopicsMetrics(t *testing.T) { - s := ` @@ -78,7 +76,7 @@ func TestGatherTopicsMetrics(t *testing.T) { topics := Topics{} - xml.Unmarshal([]byte(s), &topics) + require.NoError(t, xml.Unmarshal([]byte(s), &topics)) records := make(map[string]interface{}) tags := make(map[string]string) @@ -97,14 +95,13 @@ func TestGatherTopicsMetrics(t *testing.T) { activeMQ := new(ActiveMQ) activeMQ.Server = "localhost" activeMQ.Port = 8161 - activeMQ.Init() + require.NoError(t, 
activeMQ.Init())
 
 	activeMQ.GatherTopicsMetrics(&acc, topics)
 	acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags)
 }
 
 func TestGatherSubscribersMetrics(t *testing.T) {
-
 	s := `
@@ -113,7 +110,7 @@ func TestGatherSubscribersMetrics(t *testing.T) {
 
 	subscribers := Subscribers{}
-	xml.Unmarshal([]byte(s), &subscribers)
+	require.NoError(t, xml.Unmarshal([]byte(s), &subscribers))
 
 	records := make(map[string]interface{})
 	tags := make(map[string]string)
@@ -138,7 +135,7 @@ func TestGatherSubscribersMetrics(t *testing.T) {
 	activeMQ := new(ActiveMQ)
 	activeMQ.Server = "localhost"
 	activeMQ.Port = 8161
-	activeMQ.Init()
+	require.NoError(t, activeMQ.Init())
 
 	activeMQ.GatherSubscribersMetrics(&acc, subscribers)
 	acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags)
@@ -152,13 +149,16 @@ func TestURLs(t *testing.T) {
 		switch r.URL.Path {
 		case "/admin/xml/queues.jsp":
 			w.WriteHeader(http.StatusOK)
-			w.Write([]byte(""))
+			_, err := w.Write([]byte(""))
+			require.NoError(t, err)
 		case "/admin/xml/topics.jsp":
 			w.WriteHeader(http.StatusOK)
-			w.Write([]byte(""))
+			_, err := w.Write([]byte(""))
+			require.NoError(t, err)
 		case "/admin/xml/subscribers.jsp":
 			w.WriteHeader(http.StatusOK)
-			w.Write([]byte(""))
+			_, err := w.Write([]byte(""))
+			require.NoError(t, err)
 		default:
 			w.WriteHeader(http.StatusNotFound)
 			t.Fatalf("unexpected path: " + r.URL.Path)
diff --git a/plugins/inputs/activemq/sample.conf b/plugins/inputs/activemq/sample.conf
new file mode 100644
index 0000000000000..b66eb8beee7f5
--- /dev/null
+++ b/plugins/inputs/activemq/sample.conf
@@ -0,0 +1,26 @@
+# Gather ActiveMQ metrics
+[[inputs.activemq]]
+  ## ActiveMQ WebConsole URL
+  url = "http://127.0.0.1:8161"
+
+  ## Required ActiveMQ Endpoint
+  ## deprecated in 1.11; use the url option
+  # server = "192.168.50.10"
+  # port = 8161
+
+  ## Credentials for basic HTTP authentication
+  # username = "admin"
+  # password = "admin"
+
+  ## Required ActiveMQ webadmin root path
+  # webadmin = "admin"
+
+  ## Maximum time to receive response.
+  # response_timeout = "5s"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
diff --git a/plugins/inputs/aerospike/README.md b/plugins/inputs/aerospike/README.md
index 66fbbe12ec8f0..44d5e6ad26d6b 100644
--- a/plugins/inputs/aerospike/README.md
+++ b/plugins/inputs/aerospike/README.md
@@ -1,16 +1,19 @@
 # Aerospike Input Plugin
 
-The aerospike plugin queries aerospike server(s) and get node statistics & stats for
-all the configured namespaces.
+The aerospike plugin queries aerospike server(s) and gets node statistics & stats
+for all the configured namespaces.
 
-For what the measurements mean, please consult the [Aerospike Metrics Reference Docs](http://www.aerospike.com/docs/reference/metrics).
+For what the measurements mean, please consult the [Aerospike Metrics Reference
+Docs](http://www.aerospike.com/docs/reference/metrics).
 
-The metric names, to make it less complicated in querying, have replaced all `-` with `_` as Aerospike metrics come in both forms (no idea why).
+The metric names, to make it less complicated in querying, have replaced all `-`
+with `_` as Aerospike metrics come in both forms (no idea why).
 
 All metrics are attempted to be cast to integers, then booleans, then strings.
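A condensed sketch of that casting order, modeled on the `parseAerospikeValue` helper this patch adds to aerospike.go further below (`castValue` is a hypothetical name; the real helper additionally special-cases hex-like fields such as `node_name` via `protectedHexFields`):

```go
package example

import "strconv"

// castValue mirrors the parse precedence: signed int first, then unsigned
// int (for values above MaxInt64), then bool, then float, falling back to
// the raw string when nothing else matches.
func castValue(v string) interface{} {
	if parsed, err := strconv.ParseInt(v, 10, 64); err == nil {
		return parsed
	}
	if parsed, err := strconv.ParseUint(v, 10, 64); err == nil {
		return parsed
	}
	if parsed, err := strconv.ParseBool(v); err == nil {
		return parsed
	}
	if parsed, err := strconv.ParseFloat(v, 32); err == nil {
		return parsed
	}
	return v // leave as string
}
```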
-### Configuration:
-```toml
+## Configuration
+
+```toml @sample.conf
 # Read stats from aerospike server(s)
 [[inputs.aerospike]]
   ## Aerospike servers to connect to (with port)
@@ -26,20 +29,20 @@ All metrics are attempted to be cast to integers, then booleans, then strings.
   # tls_ca = "/etc/telegraf/ca.pem"
   # tls_cert = "/etc/telegraf/cert.pem"
   # tls_key = "/etc/telegraf/key.pem"
+  # tls_name = "tlsname"
   ## If false, skip chain & host verification
   # insecure_skip_verify = true
-
+
   # Feature Options
   # Add namespace variable to limit the namespaces executed on
   # Leave blank to do all
   # disable_query_namespaces = true # default false
   # namespaces = ["namespace1", "namespace2"]
 
-  # Enable set level telmetry
+  # Enable set level telemetry
   # query_sets = true # default: false
   # Add namespace set combinations to limit sets executed on
-  # Leave blank to do all
-  # sets = ["namespace1/set1", "namespace1/set2"]
+  # Leave blank to do all sets
   # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
 
   # Histograms
@@ -48,71 +51,71 @@ All metrics are attempted to be cast to integers, then booleans, then strings.
 
   # by default, aerospike produces a 100 bucket histogram
   # this is not great for most graphing tools, this will allow
-  # the ability to squash this to a smaller number of buckets 
-  # To have a balanced histogram, the number of buckets chosen 
+  # the ability to squash this to a smaller number of buckets
+  # To have a balanced histogram, the number of buckets chosen
   # should divide evenly into 100.
   # num_histogram_buckets = 100 # default: 10
-
-
 ```
 
-### Measurements:
+## Metrics
 
 The aerospike metrics are under a few measurement names:
 
 ***aerospike_node***: These are the aerospike **node** measurements, which
 are available from the aerospike `statistics` command.
 
-  ie,
-	```
-	telnet localhost 3003
-	statistics
-	...
-	```
+```text
+	telnet localhost 3003
+	statistics
+	...
+```
 
 ***aerospike_namespace***: These are aerospike namespace measurements, which
 are available from the aerospike `namespace/<namespace_name>` command.
 
-  ie,
-	```
-	telnet localhost 3003
-	namespaces
-	<namespace_1>;<namespace_2>;etc.
-	namespace/<namespace_name>
-	...
-	```
+```text
+	telnet localhost 3003
+	namespaces
+	<namespace_1>;<namespace_2>;etc.
+	namespace/<namespace_name>
+	...
+```
+
 ***aerospike_set***: These are aerospike set measurements, which
 are available from the aerospike `sets/<namespace_name>/<set_name>` command.
 
-  ie,
-	```
-	telnet localhost 3003
-	sets
-	sets/<namespace_name>
-	sets/<namespace_name>/<set_name>
-	...
-	```
-***aerospike_histogram_ttl***: These are aerospike ttl hisogram measurements, which
-is available from the aerospike `histogram:namespace=<namespace_name>;[set=<set_name>;]type=ttl` command.
-
-  ie,
-	```
-	telnet localhost 3003
-	histogram:namespace=<namespace_name>;type=ttl
-	histogram:namespace=<namespace_name>;[set=<set_name>;]type=ttl
-	...
-	```
-***aerospike_histogram_object_size_linear***: These are aerospike object size linear histogram measurements, which is available from the aerospike `histogram:namespace=<namespace_name>;[set=<set_name>;]type=object_size_linear` command.
-
-  ie,
-	```
-	telnet localhost 3003
-	histogram:namespace=<namespace_name>;type=object_size_linear
-	histogram:namespace=<namespace_name>;[set=<set_name>;]type=object_size_linear
-	...
-	```
-
-### Tags:
+```text
+	telnet localhost 3003
+	sets
+	sets/<namespace_name>
+	sets/<namespace_name>/<set_name>
+	...
+```
+
+***aerospike_histogram_ttl***: These are aerospike ttl histogram measurements,
+which are available from the aerospike
+`histogram:namespace=<namespace_name>;[set=<set_name>;]type=ttl` command.
+
+```text
+	telnet localhost 3003
+	histogram:namespace=<namespace_name>;type=ttl
+	histogram:namespace=<namespace_name>;[set=<set_name>;]type=ttl
+	...
+```
+
+***aerospike_histogram_object_size_linear***: These are aerospike object size
+linear histogram measurements, which are available from the aerospike
+`histogram:namespace=<namespace_name>;[set=<set_name>;]type=object_size_linear`
+command.
+
+```text
+	telnet localhost 3003
+	histogram:namespace=<namespace_name>;type=object_size_linear
+	histogram:namespace=<namespace_name>;[set=<set_name>;]type=object_size_linear
+	...
+```
+
+### Tags
 
 All measurements have tags:
 
@@ -129,13 +132,14 @@ Set metrics have tags:
 - set_name
 
 Histogram metrics have tags:
+
 - namespace_name
 - set_name (optional)
 - type
 
-### Example Output:
+## Example Output
 
-```
+```shell
 % telegraf --input-filter aerospike --test
 > aerospike_node,aerospike_host=localhost:3000,node_name="BB9020011AC4202" batch_error=0i,batch_index_complete=0i,batch_index_created_buffers=0i,batch_index_destroyed_buffers=0i,batch_index_error=0i,batch_index_huge_buffers=0i,batch_index_initiate=0i,batch_index_queue="0:0,0:0,0:0,0:0",batch_index_timeout=0i,batch_index_unused_buffers=0i,batch_initiate=0i,batch_queue=0i,batch_timeout=0i,client_connections=6i,cluster_integrity=true,cluster_key="8AF422E05281249E",cluster_size=1i,delete_queue=0i,demarshal_error=0i,early_tsvc_batch_sub_error=0i,early_tsvc_client_error=0i,early_tsvc_udf_sub_error=0i,fabric_connections=16i,fabric_msgs_rcvd=0i,fabric_msgs_sent=0i,heartbeat_connections=0i,heartbeat_received_foreign=0i,heartbeat_received_self=0i,info_complete=47i,info_queue=0i,migrate_allowed=true,migrate_partitions_remaining=0i,migrate_progress_recv=0i,migrate_progress_send=0i,objects=0i,paxos_principal="BB9020011AC4202",proxy_in_progress=0i,proxy_retry=0i,query_long_running=0i,query_short_running=0i,reaped_fds=0i,record_refs=0i,rw_in_progress=0i,scans_active=0i,sindex_gc_activity_dur=0i,sindex_gc_garbage_cleaned=0i,sindex_gc_garbage_found=0i,sindex_gc_inactivity_dur=0i,sindex_gc_list_creation_time=0i,sindex_gc_list_deletion_time=0i,sindex_gc_locktimedout=0i,sindex_gc_objects_validated=0i,sindex_ucgarbage_found=0i,sub_objects=0i,system_free_mem_pct=92i,system_swapping=false,tsvc_queue=0i,uptime=1457i 1468923222000000000
 > aerospike_namespace,aerospike_host=localhost:3000,namespace=test,node_name="BB9020011AC4202"
allow_nonxdr_writes=true,allow_xdr_writes=true,available_bin_names=32768i,batch_sub_proxy_complete=0i,batch_sub_proxy_error=0i,batch_sub_proxy_timeout=0i,batch_sub_read_error=0i,batch_sub_read_not_found=0i,batch_sub_read_success=0i,batch_sub_read_timeout=0i,batch_sub_tsvc_error=0i,batch_sub_tsvc_timeout=0i,client_delete_error=0i,client_delete_not_found=0i,client_delete_success=0i,client_delete_timeout=0i,client_lang_delete_success=0i,client_lang_error=0i,client_lang_read_success=0i,client_lang_write_success=0i,client_proxy_complete=0i,client_proxy_error=0i,client_proxy_timeout=0i,client_read_error=0i,client_read_not_found=0i,client_read_success=0i,client_read_timeout=0i,client_tsvc_error=0i,client_tsvc_timeout=0i,client_udf_complete=0i,client_udf_error=0i,client_udf_timeout=0i,client_write_error=0i,client_write_success=0i,client_write_timeout=0i,cold_start_evict_ttl=4294967295i,conflict_resolution_policy="generation",current_time=206619222i,data_in_index=false,default_ttl=432000i,device_available_pct=99i,device_free_pct=100i,device_total_bytes=4294967296i,device_used_bytes=0i,disallow_null_setname=false,enable_benchmarks_batch_sub=false,enable_benchmarks_read=false,enable_benchmarks_storage=false,enable_benchmarks_udf=false,enable_benchmarks_udf_sub=false,enable_benchmarks_write=false,enable_hist_proxy=false,enable_xdr=false,evict_hist_buckets=10000i,evict_tenths_pct=5i,evict_ttl=0i,evicted_objects=0i,expired_objects=0i,fail_generation=0i,fail_key_busy=0i,fail_record_too_big=0i,fail_xdr_forbidden=0i,geo2dsphere_within.earth_radius_meters=6371000i,geo2dsphere_within.level_mod=1i,geo2dsphere_within.max_cells=12i,geo2dsphere_within.max_level=30i,geo2dsphere_within.min_level=1i,geo2dsphere_within.strict=true,geo_region_query_cells=0i,geo_region_query_falsepos=0i,geo_region_query_points=0i,geo_region_query_reqs=0i,high_water_disk_pct=50i,high_water_memory_pct=60i,hwm_breached=false,ldt_enabled=false,ldt_gc_rate=0i,ldt_page_size=8192i,master_objects=0i,master_sub_objects=0i,max_ttl=315360000i,max_void_time=0i,memory_free_pct=100i,memory_size=1073741824i,memory_used_bytes=0i,memory_used_data_bytes=0i,memory_used_index_bytes=0i,memory_used_sindex_bytes=0i,migrate_order=5i,migrate_record_receives=0i,migrate_record_retransmits=0i,migrate_records_skipped=0i,migrate_records_transmitted=0i,migrate_rx_instances=0i,migrate_rx_partitions_active=0i,migrate_rx_partitions_initial=0i,migrate_rx_partitions_remaining=0i,migrate_sleep=1i,migrate_tx_instances=0i,migrate_tx_partitions_active=0i,migrate_tx_partitions_imbalance=0i,migrate_tx_partitions_initial=0i,migrate_tx_partitions_remaining=0i,non_expirable_objects=0i,ns_forward_xdr_writes=false,nsup_cycle_duration=0i,nsup_cycle_sleep_pct=0i,objects=0i,prole_objects=0i,prole_sub_objects=0i,query_agg=0i,query_agg_abort=0i,query_agg_avg_rec_count=0i,query_agg_error=0i,query_agg_success=0i,query_fail=0i,query_long_queue_full=0i,query_long_reqs=0i,query_lookup_abort=0i,query_lookup_avg_rec_count=0i,query_lookup_error=0i,query_lookup_success=0i,query_lookups=0i,query_reqs=0i,query_short_queue_full=0i,query_short_reqs=0i,query_udf_bg_failure=0i,query_udf_bg_success=0i,read_consistency_level_override="off",repl_factor=1i,scan_aggr_abort=0i,scan_aggr_complete=0i,scan_aggr_error=0i,scan_basic_abort=0i,scan_basic_complete=0i,scan_basic_error=0i,scan_udf_bg_abort=0i,scan_udf_bg_complete=0i,scan_udf_bg_error=0i,set_deleted_objects=0i,sets_enable_xdr=true,sindex.data_max_memory="ULONG_MAX",sindex.num_partitions=32i,single_bin=false,stop_writes=false,stop_writes_pct=90i,storag
e_engine="device",storage_engine.cold_start_empty=false,storage_engine.data_in_memory=true,storage_engine.defrag_lwm_pct=50i,storage_engine.defrag_queue_min=0i,storage_engine.defrag_sleep=1000i,storage_engine.defrag_startup_minimum=10i,storage_engine.disable_odirect=false,storage_engine.enable_osync=false,storage_engine.file="/opt/aerospike/data/test.dat",storage_engine.filesize=4294967296i,storage_engine.flush_max_ms=1000i,storage_engine.fsync_max_sec=0i,storage_engine.max_write_cache=67108864i,storage_engine.min_avail_pct=5i,storage_engine.post_write_queue=0i,storage_engine.scheduler_mode="null",storage_engine.write_block_size=1048576i,storage_engine.write_threads=1i,sub_objects=0i,udf_sub_lang_delete_success=0i,udf_sub_lang_error=0i,udf_sub_lang_read_success=0i,udf_sub_lang_write_success=0i,udf_sub_tsvc_error=0i,udf_sub_tsvc_timeout=0i,udf_sub_udf_complete=0i,udf_sub_udf_error=0i,udf_sub_udf_timeout=0i,write_commit_level_override="off",xdr_write_error=0i,xdr_write_success=0i,xdr_write_timeout=0i,{test}_query_hist_track_back=300i,{test}_query_hist_track_slice=10i,{test}_query_hist_track_thresholds="1,8,64",{test}_read_hist_track_back=300i,{test}_read_hist_track_slice=10i,{test}_read_hist_track_thresholds="1,8,64",{test}_udf_hist_track_back=300i,{test}_udf_hist_track_slice=10i,{test}_udf_hist_track_thresholds="1,8,64",{test}_write_hist_track_back=300i,{test}_write_hist_track_slice=10i,{test}_write_hist_track_thresholds="1,8,64" 1468923222000000000 diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 7ab15d18168f7..bd5724c3ebd22 100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -1,30 +1,36 @@ +//go:generate ../../../tools/readme_config_includer/generator package aerospike import ( "crypto/tls" + _ "embed" "fmt" "math" - "net" "strconv" "strings" "sync" "time" + as "github.com/aerospike/aerospike-client-go/v5" + "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - - as "github.com/aerospike/aerospike-client-go" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Aerospike struct { Servers []string `toml:"servers"` Username string `toml:"username"` Password string `toml:"password"` - EnableTLS bool `toml:"enable_tls"` - EnableSSL bool `toml:"enable_ssl"` // deprecated in 1.7; use enable_tls + EnableTLS bool `toml:"enable_tls"` + EnableSSL bool `toml:"enable_ssl" deprecated:"1.7.0;use 'enable_tls' instead"` + TLSName string `toml:"tls_name"` tlsint.ClientConfig initialized bool @@ -42,51 +48,16 @@ type Aerospike struct { NumberHistogramBuckets int `toml:"num_histogram_buckets"` } -var sampleConfig = ` - ## Aerospike servers to connect to (with port) - ## This plugin will query all namespaces the aerospike - ## server has configured and get stats for them. 
- servers = ["localhost:3000"] - - # username = "telegraf" - # password = "pa$$word" - - ## Optional TLS Config - # enable_tls = false - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## If false, skip chain & host verification - # insecure_skip_verify = true - - # Feature Options - # Add namespace variable to limit the namespaces executed on - # Leave blank to do all - # disable_query_namespaces = true # default false - # namespaces = ["namespace1", "namespace2"] - - # Enable set level telmetry - # query_sets = true # default: false - # Add namespace set combinations to limit sets executed on - # Leave blank to do all sets - # sets = ["namespace1/set1", "namespace1/set2", "namespace3"] - - # Histograms - # enable_ttl_histogram = true # default: false - # enable_object_size_linear_histogram = true # default: false - - # by default, aerospike produces a 100 bucket histogram - # this is not great for most graphing tools, this will allow - # the ability to squash this to a smaller number of buckets - # num_histogram_buckets = 100 # default: 10 -` - -func (a *Aerospike) SampleConfig() string { - return sampleConfig +// On the random chance a hex value is all digits +// these are fields that can contain hex and should always be strings +var protectedHexFields = map[string]bool{ + "node_name": true, + "cluster_key": true, + "paxos_principal": true, } -func (a *Aerospike) Description() string { - return "Read stats from aerospike server(s)" +func (*Aerospike) SampleConfig() string { + return sampleConfig } func (a *Aerospike) Gather(acc telegraf.Accumulator) error { @@ -111,7 +82,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { } if len(a.Servers) == 0 { - return a.gatherServer("127.0.0.1:3000", acc) + return a.gatherServer(acc, "127.0.0.1:3000") } var wg sync.WaitGroup @@ -119,7 +90,7 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { for _, server := range a.Servers { go func(serv string) { defer wg.Done() - acc.AddError(a.gatherServer(serv, acc)) + acc.AddError(a.gatherServer(acc, serv)) }(server) } @@ -127,36 +98,37 @@ func (a *Aerospike) Gather(acc telegraf.Accumulator) error { return nil } -func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) error { - host, port, err := net.SplitHostPort(hostPort) - if err != nil { - return err - } - - iport, err := strconv.Atoi(port) - if err != nil { - iport = 3000 - } - +func (a *Aerospike) gatherServer(acc telegraf.Accumulator, hostPort string) error { policy := as.NewClientPolicy() policy.User = a.Username policy.Password = a.Password policy.TlsConfig = a.tlsConfig - c, err := as.NewClientWithPolicy(policy, host, iport) + asHosts, err := as.NewHosts(hostPort) + if err != nil { + return err + } + if a.TLSName != "" && (a.EnableTLS || a.EnableSSL) { + for _, asHost := range asHosts { + asHost.TLSName = a.TLSName + } + } + c, err := as.NewClientWithPolicyAndHost(policy, asHosts...) 
if err != nil { return err } + asInfoPolicy := as.NewInfoPolicy() defer c.Close() nodes := c.GetNodes() for _, n := range nodes { - stats, err := a.getNodeInfo(n) + nodeHost := n.GetHost().String() + stats, err := a.getNodeInfo(n, asInfoPolicy) if err != nil { return err } - a.parseNodeInfo(stats, hostPort, n.GetName(), acc) + a.parseNodeInfo(acc, stats, nodeHost, n.GetName()) - namespaces, err := a.getNamespaces(n) + namespaces, err := a.getNamespaces(n, asInfoPolicy) if err != nil { return err } @@ -164,22 +136,21 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro if !a.DisableQueryNamespaces { // Query Namespaces for _, namespace := range namespaces { - stats, err = a.getNamespaceInfo(namespace, n) + stats, err = a.getNamespaceInfo(namespace, n, asInfoPolicy) if err != nil { continue - } else { - a.parseNamespaceInfo(stats, hostPort, namespace, n.GetName(), acc) } + a.parseNamespaceInfo(acc, stats, nodeHost, namespace, n.GetName()) if a.EnableTTLHistogram { - err = a.getTTLHistogram(hostPort, namespace, "", n, acc) + err = a.getTTLHistogram(acc, nodeHost, namespace, "", n, asInfoPolicy) if err != nil { continue } } if a.EnableObjectSizeLinearHistogram { - err = a.getObjectSizeLinearHistogram(hostPort, namespace, "", n, acc) + err = a.getObjectSizeLinearHistogram(acc, nodeHost, namespace, "", n, asInfoPolicy) if err != nil { continue } @@ -188,28 +159,26 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro } if a.QuerySets { - namespaceSets, err := a.getSets(n) + namespaceSets, err := a.getSets(n, asInfoPolicy) if err == nil { for _, namespaceSet := range namespaceSets { namespace, set := splitNamespaceSet(namespaceSet) - - stats, err := a.getSetInfo(namespaceSet, n) + stats, err := a.getSetInfo(namespaceSet, n, asInfoPolicy) if err != nil { continue - } else { - a.parseSetInfo(stats, hostPort, namespaceSet, n.GetName(), acc) } + a.parseSetInfo(acc, stats, nodeHost, namespaceSet, n.GetName()) if a.EnableTTLHistogram { - err = a.getTTLHistogram(hostPort, namespace, set, n, acc) + err = a.getTTLHistogram(acc, nodeHost, namespace, set, n, asInfoPolicy) if err != nil { continue } } if a.EnableObjectSizeLinearHistogram { - err = a.getObjectSizeLinearHistogram(hostPort, namespace, set, n, acc) + err = a.getObjectSizeLinearHistogram(acc, nodeHost, namespace, set, n, asInfoPolicy) if err != nil { continue } @@ -221,8 +190,8 @@ func (a *Aerospike) gatherServer(hostPort string, acc telegraf.Accumulator) erro return nil } -func (a *Aerospike) getNodeInfo(n *as.Node) (map[string]string, error) { - stats, err := as.RequestNodeStats(n) +func (a *Aerospike) getNodeInfo(n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { + stats, err := n.RequestInfo(infoPolicy, "statistics") if err != nil { return nil, err } @@ -230,26 +199,28 @@ func (a *Aerospike) getNodeInfo(n *as.Node) (map[string]string, error) { return stats, nil } -func (a *Aerospike) parseNodeInfo(stats map[string]string, hostPort string, nodeName string, acc telegraf.Accumulator) { - tags := map[string]string{ +func (a *Aerospike) parseNodeInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, nodeName string) { + nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, } - fields := make(map[string]interface{}) - - for k, v := range stats { - val := parseValue(v) - fields[strings.Replace(k, "-", "_", -1)] = val + nFields := make(map[string]interface{}) + stat := strings.Split(stats["statistics"], ";") + for _, pair := range 
stat { + parts := strings.Split(pair, "=") + if len(parts) < 2 { + continue + } + key := strings.ReplaceAll(parts[0], "-", "_") + nFields[key] = parseAerospikeValue(key, parts[1]) } - acc.AddFields("aerospike_node", fields, tags, time.Now()) - - return + acc.AddFields("aerospike_node", nFields, nTags, time.Now()) } -func (a *Aerospike) getNamespaces(n *as.Node) ([]string, error) { +func (a *Aerospike) getNamespaces(n *as.Node, infoPolicy *as.InfoPolicy) ([]string, error) { var namespaces []string if len(a.Namespaces) <= 0 { - info, err := as.RequestNodeInfo(n, "namespaces") + info, err := n.RequestInfo(infoPolicy, "namespaces") if err != nil { return namespaces, err } @@ -261,16 +232,15 @@ func (a *Aerospike) getNamespaces(n *as.Node) ([]string, error) { return namespaces, nil } -func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node) (map[string]string, error) { - stats, err := as.RequestNodeInfo(n, "namespace/"+namespace) +func (a *Aerospike) getNamespaceInfo(namespace string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { + stats, err := n.RequestInfo(infoPolicy, "namespace/"+namespace) if err != nil { return nil, err } return stats, err } -func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, namespace string, nodeName string, acc telegraf.Accumulator) { - +func (a *Aerospike) parseNamespaceInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, namespace string, nodeName string) { nTags := map[string]string{ "aerospike_host": hostPort, "node_name": nodeName, @@ -284,23 +254,20 @@ func (a *Aerospike) parseNamespaceInfo(stats map[string]string, hostPort string, if len(parts) < 2 { continue } - val := parseValue(parts[1]) - nFields[strings.Replace(parts[0], "-", "_", -1)] = val + key := strings.ReplaceAll(parts[0], "-", "_") + nFields[key] = parseAerospikeValue(key, parts[1]) } acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) - - return } -func (a *Aerospike) getSets(n *as.Node) ([]string, error) { +func (a *Aerospike) getSets(n *as.Node, infoPolicy *as.InfoPolicy) ([]string, error) { var namespaceSets []string // Gather all sets if len(a.Sets) <= 0 { - stats, err := as.RequestNodeInfo(n, "sets") + stats, err := n.RequestInfo(infoPolicy, "sets") if err != nil { return namespaceSets, err } - stat := strings.Split(stats["sets"], ";") for _, setStats := range stat { // setInfo is "ns=test:set=foo:objects=1:tombstones=0" @@ -330,16 +297,15 @@ func (a *Aerospike) getSets(n *as.Node) ([]string, error) { return namespaceSets, nil } -func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node) (map[string]string, error) { - stats, err := as.RequestNodeInfo(n, "sets/"+namespaceSet) +func (a *Aerospike) getSetInfo(namespaceSet string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { + stats, err := n.RequestInfo(infoPolicy, "sets/"+namespaceSet) if err != nil { return nil, err } return stats, nil } -func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, namespaceSet string, nodeName string, acc telegraf.Accumulator) { - +func (a *Aerospike) parseSetInfo(acc telegraf.Accumulator, stats map[string]string, hostPort string, namespaceSet string, nodeName string) { stat := strings.Split( strings.TrimSuffix( stats[fmt.Sprintf("sets/%s", namespaceSet)], ";"), ":") @@ -355,36 +321,37 @@ func (a *Aerospike) parseSetInfo(stats map[string]string, hostPort string, names continue } - val := parseValue(pieces[1]) - nFields[strings.Replace(pieces[0], "-", "_", -1)] = val + key := 
strings.ReplaceAll(pieces[0], "-", "_") + nFields[key] = parseAerospikeValue(key, pieces[1]) } acc.AddFields("aerospike_set", nFields, nTags, time.Now()) - - return } -func (a *Aerospike) getTTLHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error { - stats, err := a.getHistogram(namespace, set, "ttl", n) +func (a *Aerospike) getTTLHistogram(acc telegraf.Accumulator, hostPort string, namespace string, set string, n *as.Node, infoPolicy *as.InfoPolicy) error { + stats, err := a.getHistogram(namespace, set, "ttl", n, infoPolicy) if err != nil { return err } - a.parseHistogram(stats, hostPort, namespace, set, "ttl", n.GetName(), acc) + + nTags := createTags(hostPort, n.GetName(), namespace, set) + a.parseHistogram(acc, stats, nTags, "ttl") return nil } -func (a *Aerospike) getObjectSizeLinearHistogram(hostPort string, namespace string, set string, n *as.Node, acc telegraf.Accumulator) error { - - stats, err := a.getHistogram(namespace, set, "object-size-linear", n) +func (a *Aerospike) getObjectSizeLinearHistogram(acc telegraf.Accumulator, hostPort string, namespace string, set string, n *as.Node, infoPolicy *as.InfoPolicy) error { + stats, err := a.getHistogram(namespace, set, "object-size-linear", n, infoPolicy) if err != nil { return err } - a.parseHistogram(stats, hostPort, namespace, set, "object-size-linear", n.GetName(), acc) + + nTags := createTags(hostPort, n.GetName(), namespace, set) + a.parseHistogram(acc, stats, nTags, "object-size-linear") return nil } -func (a *Aerospike) getHistogram(namespace string, set string, histogramType string, n *as.Node) (map[string]string, error) { +func (a *Aerospike) getHistogram(namespace string, set string, histogramType string, n *as.Node, infoPolicy *as.InfoPolicy) (map[string]string, error) { var queryArg string if len(set) > 0 { queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v;set=%v", histogramType, namespace, set) @@ -392,26 +359,14 @@ func (a *Aerospike) getHistogram(namespace string, set string, histogramType str queryArg = fmt.Sprintf("histogram:type=%s;namespace=%v", histogramType, namespace) } - stats, err := as.RequestNodeInfo(n, queryArg) + stats, err := n.RequestInfo(infoPolicy, queryArg) if err != nil { return nil, err } return stats, nil - } -func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, namespace string, set string, histogramType string, nodeName string, acc telegraf.Accumulator) { - - nTags := map[string]string{ - "aerospike_host": hostPort, - "node_name": nodeName, - "namespace": namespace, - } - - if len(set) > 0 { - nTags["set"] = set - } - +func (a *Aerospike) parseHistogram(acc telegraf.Accumulator, stats map[string]string, nTags map[string]string, histogramType string) { nFields := make(map[string]interface{}) for _, stat := range stats { @@ -424,10 +379,10 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam if pieces[0] == "buckets" { buckets := strings.Split(pieces[1], ",") - // Normalize incase of less buckets than expected + // Normalize in case of less buckets than expected numRecordsPerBucket := 1 if len(buckets) > a.NumberHistogramBuckets { - numRecordsPerBucket = int(math.Ceil((float64(len(buckets)) / float64(a.NumberHistogramBuckets)))) + numRecordsPerBucket = int(math.Ceil(float64(len(buckets)) / float64(a.NumberHistogramBuckets))) } bucketCount := 0 @@ -436,7 +391,7 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam for i, bucket := range buckets { // Sum records 
and increment bucket collection counter if bucketCount < numRecordsPerBucket { - bucketSum = bucketSum + parseValue(bucket).(int64) + bucketSum = bucketSum + parseAerospikeValue("", bucket).(int64) bucketCount++ } @@ -454,40 +409,46 @@ func (a *Aerospike) parseHistogram(stats map[string]string, hostPort string, nam nFields[strconv.Itoa(bucketName)] = bucketSum } } - } } } - acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.Replace(histogramType, "-", "_", -1)), nFields, nTags, time.Now()) - - return + acc.AddFields(fmt.Sprintf("aerospike_histogram_%v", strings.ReplaceAll(histogramType, "-", "_")), nFields, nTags, time.Now()) } -func splitNamespaceSet(namespaceSet string) (string, string) { +func splitNamespaceSet(namespaceSet string) (namespace string, set string) { split := strings.Split(namespaceSet, "/") return split[0], split[1] } -func parseValue(v string) interface{} { - if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { +func parseAerospikeValue(key string, v string) interface{} { + if protectedHexFields[key] { + return v + } else if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { return parsed } else if parsed, err := strconv.ParseUint(v, 10, 64); err == nil { return parsed } else if parsed, err := strconv.ParseBool(v); err == nil { return parsed + } else if parsed, err := strconv.ParseFloat(v, 32); err == nil { + return parsed } else { // leave as string return v } } -func copyTags(m map[string]string) map[string]string { - out := make(map[string]string) - for k, v := range m { - out[k] = v +func createTags(hostPort string, nodeName string, namespace string, set string) map[string]string { + nTags := map[string]string{ + "aerospike_host": hostPort, + "node_name": nodeName, + "namespace": namespace, + } + + if len(set) > 0 { + nTags["set"] = set } - return out + return nTags } func init() { diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index ee69f0049f401..d80860a977dbf 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -1,21 +1,43 @@ package aerospike import ( + "fmt" + "strconv" "testing" - as "github.com/aerospike/aerospike-client-go" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + as "github.com/aerospike/aerospike-client-go/v5" "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf/testutil" ) -func TestAerospikeStatistics(t *testing.T) { +const servicePort = "3000" + +func launchTestServer(t *testing.T) *testutil.Container { + container := testutil.Container{ + Image: "aerospike:ce-6.0.0.1", + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForLog("migrations: complete"), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + + return &container +} + +func TestAerospikeStatisticsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } + container := launchTestServer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + a := &Aerospike{ - Servers: []string{testutil.GetLocalHost() + ":3000"}, + Servers: []string{fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort])}, } var acc testutil.Accumulator @@ -23,25 +45,29 @@ func TestAerospikeStatistics(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, 
acc.HasTag("aerospike_node", "node_name")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasTag("aerospike_namespace", "node_name")) - assert.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasTag("aerospike_node", "node_name")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasTag("aerospike_namespace", "node_name")) + require.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) namespaceName := acc.TagValue("aerospike_namespace", "namespace") - assert.Equal(t, namespaceName, "test") - + require.Equal(t, "test", namespaceName) } -func TestAerospikeStatisticsPartialErr(t *testing.T) { +func TestAerospikeStatisticsPartialErrIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } + container := launchTestServer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + a := &Aerospike{ Servers: []string{ - testutil.GetLocalHost() + ":3000", + fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]), testutil.GetLocalHost() + ":9999", }, } @@ -51,21 +77,26 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) { require.Error(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasInt64Field("aerospike_node", "batch_index_error")) namespaceName := acc.TagSetValue("aerospike_namespace", "namespace") - assert.Equal(t, namespaceName, "test") + require.Equal(t, "test", namespaceName) } -func TestSelectNamepsaces(t *testing.T) { +func TestSelectNamespacesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping aerospike integration tests.") } + container := launchTestServer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + // Select nonexistent namespace a := &Aerospike{ - Servers: []string{testutil.GetLocalHost() + ":3000"}, + Servers: []string{fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort])}, Namespaces: []string{"notTest"}, } @@ -74,32 +105,37 @@ func TestSelectNamepsaces(t *testing.T) { err := acc.GatherError(a.Gather) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("aerospike_node")) - assert.True(t, acc.HasTag("aerospike_node", "node_name")) - assert.True(t, acc.HasMeasurement("aerospike_namespace")) - assert.True(t, acc.HasTag("aerospike_namespace", "node_name")) + require.True(t, acc.HasMeasurement("aerospike_node")) + require.True(t, acc.HasTag("aerospike_node", "node_name")) + require.True(t, acc.HasMeasurement("aerospike_namespace")) + require.True(t, acc.HasTag("aerospike_namespace", "node_name")) // Expect only 1 namespace count := 0 for _, p := range acc.Metrics { if p.Measurement == "aerospike_namespace" { - count += 1 + count++ } } - assert.Equal(t, count, 1) + require.Equal(t, 1, count) // expect namespace to have no fields as nonexistent - assert.False(t, acc.HasInt64Field("aerospke_namespace", "appeals_tx_remaining")) + require.False(t, acc.HasInt64Field("aerospke_namespace", "appeals_tx_remaining")) } -func TestDisableQueryNamespaces(t *testing.T) { +func TestDisableQueryNamespacesIntegration(t *testing.T) { if testing.Short() 
{
 		t.Skip("Skipping aerospike integration tests.")
 	}
+	container := launchTestServer(t)
+	defer func() {
+		require.NoError(t, container.Terminate(), "terminating container failed")
+	}()
+
 	a := &Aerospike{
 		Servers: []string{
-			testutil.GetLocalHost() + ":3000",
+			fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
 		},
 		DisableQueryNamespaces: true,
 	}
@@ -108,48 +144,57 @@ func TestDisableQueryNamespaces(t *testing.T) {
 	err := acc.GatherError(a.Gather)
 	require.NoError(t, err)
-	assert.True(t, acc.HasMeasurement("aerospike_node"))
-	assert.False(t, acc.HasMeasurement("aerospike_namespace"))
+	require.True(t, acc.HasMeasurement("aerospike_node"))
+	require.False(t, acc.HasMeasurement("aerospike_namespace"))
 
 	a.DisableQueryNamespaces = false
 	err = acc.GatherError(a.Gather)
 	require.NoError(t, err)
-	assert.True(t, acc.HasMeasurement("aerospike_node"))
-	assert.True(t, acc.HasMeasurement("aerospike_namespace"))
+	require.True(t, acc.HasMeasurement("aerospike_node"))
+	require.True(t, acc.HasMeasurement("aerospike_namespace"))
 }
 
-func TestQuerySets(t *testing.T) {
+func TestQuerySetsIntegration(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping aerospike integration tests.")
 	}
+	container := launchTestServer(t)
+	defer func() {
+		require.NoError(t, container.Terminate(), "terminating container failed")
+	}()
+
+	portInt, err := strconv.Atoi(container.Ports[servicePort])
+	require.NoError(t, err)
+
 	// create a set
 	// test is the default namespace from aerospike
 	policy := as.NewClientPolicy()
-	client, err := as.NewClientWithPolicy(policy, testutil.GetLocalHost(), 3000)
+	client, errAs := as.NewClientWithPolicy(policy, container.Address, portInt)
+	require.NoError(t, errAs)
 
-	key, err := as.NewKey("test", "foo", 123)
-	require.NoError(t, err)
+	key, errAs := as.NewKey("test", "foo", 123)
+	require.NoError(t, errAs)
 	bins := as.BinMap{
 		"e":  2,
 		"pi": 3,
 	}
-	err = client.Add(nil, key, bins)
-	require.NoError(t, err)
+	errAs = client.Add(nil, key, bins)
+	require.NoError(t, errAs)
 
-	key, err = as.NewKey("test", "bar", 1234)
-	require.NoError(t, err)
+	key, errAs = as.NewKey("test", "bar", 1234)
+	require.NoError(t, errAs)
 	bins = as.BinMap{
 		"e":  2,
 		"pi": 3,
 	}
-	err = client.Add(nil, key, bins)
-	require.NoError(t, err)
+	errAs = client.Add(nil, key, bins)
+	require.NoError(t, errAs)
 
 	a := &Aerospike{
 		Servers: []string{
-			testutil.GetLocalHost() + ":3000",
+			fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
 		},
 		QuerySets:              true,
 		DisableQueryNamespaces: true,
 	}
@@ -159,46 +204,54 @@ func TestQuerySets(t *testing.T) {
 	err = acc.GatherError(a.Gather)
 	require.NoError(t, err)
 
-	assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo"))
-	assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar"))
-
-	assert.True(t, acc.HasMeasurement("aerospike_set"))
-	assert.True(t, acc.HasTag("aerospike_set", "set"))
-	assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
+	require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo"))
+	require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar"))
+	require.True(t, acc.HasMeasurement("aerospike_set"))
+	require.True(t, acc.HasTag("aerospike_set", "set"))
+	require.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
 }
 
-func TestSelectQuerySets(t *testing.T) {
+func TestSelectQuerySetsIntegration(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping aerospike integration tests.")
 	}
+	container := launchTestServer(t)
+	defer func() {
+		require.NoError(t, container.Terminate(), "terminating container failed")
+	}()
+
+	portInt, err := strconv.Atoi(container.Ports[servicePort])
+	require.NoError(t, err)
+
 	// create a set
 	// test is the default namespace from aerospike
 	policy := as.NewClientPolicy()
-	client, err := as.NewClientWithPolicy(policy, testutil.GetLocalHost(), 3000)
+	client, errAs := as.NewClientWithPolicy(policy, container.Address, portInt)
+	require.NoError(t, errAs)
 
-	key, err := as.NewKey("test", "foo", 123)
-	require.NoError(t, err)
+	key, errAs := as.NewKey("test", "foo", 123)
+	require.NoError(t, errAs)
 	bins := as.BinMap{
 		"e":  2,
 		"pi": 3,
 	}
-	err = client.Add(nil, key, bins)
-	require.NoError(t, err)
+	errAs = client.Add(nil, key, bins)
+	require.NoError(t, errAs)
 
-	key, err = as.NewKey("test", "bar", 1234)
-	require.NoError(t, err)
+	key, errAs = as.NewKey("test", "bar", 1234)
+	require.NoError(t, errAs)
 	bins = as.BinMap{
 		"e":  2,
 		"pi": 3,
 	}
-	err = client.Add(nil, key, bins)
-	require.NoError(t, err)
+	errAs = client.Add(nil, key, bins)
+	require.NoError(t, errAs)
 
 	a := &Aerospike{
 		Servers: []string{
-			testutil.GetLocalHost() + ":3000",
+			fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
 		},
 		QuerySets: true,
 		Sets:      []string{"test/foo"},
 	}
@@ -209,22 +262,27 @@ func TestSelectQuerySets(t *testing.T) {
 	err = acc.GatherError(a.Gather)
 	require.NoError(t, err)
 
-	assert.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo"))
-	assert.False(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar"))
-
-	assert.True(t, acc.HasMeasurement("aerospike_set"))
-	assert.True(t, acc.HasTag("aerospike_set", "set"))
-	assert.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
+	require.True(t, FindTagValue(&acc, "aerospike_set", "set", "test/foo"))
+	require.False(t, FindTagValue(&acc, "aerospike_set", "set", "test/bar"))
+	require.True(t, acc.HasMeasurement("aerospike_set"))
+	require.True(t, acc.HasTag("aerospike_set", "set"))
+	require.True(t, acc.HasInt64Field("aerospike_set", "memory_data_bytes"))
 }
 
-func TestDisableTTLHistogram(t *testing.T) {
+func TestDisableTTLHistogramIntegration(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping aerospike integration tests.")
 	}
+
+	container := launchTestServer(t)
+	defer func() {
+		require.NoError(t, container.Terminate(), "terminating container failed")
+	}()
+
 	a := &Aerospike{
 		Servers: []string{
-			testutil.GetLocalHost() + ":3000",
+			fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
 		},
 		QuerySets:          true,
 		EnableTTLHistogram: false,
 	}
@@ -236,43 +294,22 @@ func TestDisableTTLHistogram(t *testing.T) {
 	err := acc.GatherError(a.Gather)
 	require.NoError(t, err)
 
-	assert.False(t, acc.HasMeasurement("aerospike_histogram_ttl"))
+	require.False(t, acc.HasMeasurement("aerospike_histogram_ttl"))
 }
-func TestTTLHistogram(t *testing.T) {
+
+func TestDisableObjectSizeLinearHistogramIntegration(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping aerospike integration tests.")
-	} else {
-		t.Skip("Skipping, only passes if the aerospike db has been running for at least 1 hour")
-	}
-	a := &Aerospike{
-		Servers: []string{
-			testutil.GetLocalHost() + ":3000",
-		},
-		QuerySets:          true,
-		EnableTTLHistogram: true,
 	}
-	/*
-		Produces histogram
-		Measurment exists
-		Has appropriate tags (node name etc)
-		Has appropriate keys (time:value)
-		may be able to leverage histogram plugin
-	*/
-	var acc testutil.Accumulator
-	err := acc.GatherError(a.Gather)
-	require.NoError(t, err)
-	assert.True(t, acc.HasMeasurement("aerospike_histogram_ttl"))
-	assert.True(t, FindTagValue(&acc, "aerospike_histogram_ttl", "namespace", "test"))
+	container := launchTestServer(t)
+	defer func() {
+		require.NoError(t, container.Terminate(), "terminating container failed")
+	}()
 
-}
-func TestDisableObjectSizeLinearHistogram(t *testing.T) {
-	if testing.Short() {
-		t.Skip("Skipping aerospike integration tests.")
-	}
 	a := &Aerospike{
 		Servers: []string{
-			testutil.GetLocalHost() + ":3000",
+			fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]),
 		},
 		QuerySets:                       true,
 		EnableObjectSizeLinearHistogram: false,
 	}
@@ -284,34 +321,7 @@ func TestDisableObjectSizeLinearHistogram(t *testing.T) {
 	err := acc.GatherError(a.Gather)
 	require.NoError(t, err)
 
-	assert.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear"))
-}
-func TestObjectSizeLinearHistogram(t *testing.T) {
-
-	if testing.Short() {
-		t.Skip("Skipping aerospike integration tests.")
-	} else {
-		t.Skip("Skipping, only passes if the aerospike db has been running for at least 1 hour")
-	}
-	a := &Aerospike{
-		Servers: []string{
-			testutil.GetLocalHost() + ":3000",
-		},
-		QuerySets:                       true,
-		EnableObjectSizeLinearHistogram: true,
-	}
-	/*
-		Produces histogram
-		Measurment exists
-		Has appropriate tags (node name etc)
-		Has appropriate keys (time:value)
-
-	*/
-	var acc testutil.Accumulator
-	err := acc.GatherError(a.Gather)
-	require.NoError(t, err)
-	assert.True(t, acc.HasMeasurement("aerospike_histogram_object_size_linear"))
-	assert.True(t, FindTagValue(&acc, "aerospike_histogram_object_size_linear", "namespace", "test"))
+	require.False(t, acc.HasMeasurement("aerospike_histogram_object_size_linear"))
 }
 
 func TestParseNodeInfo(t *testing.T) {
@@ -319,9 +329,7 @@ func TestParseNodeInfo(t *testing.T) {
 	var acc testutil.Accumulator
 
 	stats := map[string]string{
-		"early_tsvc_from_proxy_error": "0",
-		"cluster_principal":           "BB9020012AC4202",
-		"cluster_is_member":           "true",
+		"statistics": "early_tsvc_from_proxy_error=0;cluster_principal=BB9020012AC4202;cluster_is_member=true",
 	}
 
 	expectedFields := map[string]interface{}{
@@ -335,7 +343,7 @@ func TestParseNodeInfo(t *testing.T) {
 		"node_name": "TestNodeName",
 	}
 
-	a.parseNodeInfo(stats, "127.0.0.1:3000", "TestNodeName", &acc)
+	a.parseNodeInfo(&acc, stats, "127.0.0.1:3000", "TestNodeName")
 
 	acc.AssertContainsTaggedFields(t, "aerospike_node", expectedFields, expectedTags)
 }
@@ -361,7 +369,7 @@ func TestParseNamespaceInfo(t *testing.T) {
 		"namespace": "test",
 	}
 
-	a.parseNamespaceInfo(stats, "127.0.0.1:3000", "test", "TestNodeName", &acc)
+	a.parseNamespaceInfo(&acc, stats, "127.0.0.1:3000", "test", "TestNodeName")
 
 	acc.AssertContainsTaggedFields(t, "aerospike_namespace", expectedFields, expectedTags)
 }
@@ -385,7 +393,7 @@ func TestParseSetInfo(t *testing.T) {
 		"node_name": "TestNodeName",
 		"set":       "test/foo",
 	}
-	a.parseSetInfo(stats, "127.0.0.1:3000", "test/foo", "TestNodeName", &acc)
+	a.parseSetInfo(&acc, stats, "127.0.0.1:3000", "test/foo", "TestNodeName")
 
 	acc.AssertContainsTaggedFields(t, "aerospike_set", expectedFields, expectedTags)
 }
@@ -417,10 +425,11 @@ func TestParseHistogramSet(t *testing.T) {
 		"set":       "foo",
 	}
 
-	a.parseHistogram(stats, "127.0.0.1:3000", "test", "foo", "object-size-linear", "TestNodeName", &acc)
+	nTags := createTags("127.0.0.1:3000", "TestNodeName", "test", "foo")
+	a.parseHistogram(&acc, stats, nTags, "object-size-linear")
 
 	acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags)
-
 }
+
 func TestParseHistogramNamespace(t *testing.T) {
 	a := &Aerospike{
 		NumberHistogramBuckets: 10,
@@ -448,25 +457,34 @@ func TestParseHistogramNamespace(t *testing.T) {
 		"namespace": "test",
 	}
 
-	a.parseHistogram(stats, "127.0.0.1:3000", "test", "", "object-size-linear", "TestNodeName", &acc)
+	nTags := createTags("127.0.0.1:3000", "TestNodeName", "test", "")
+	a.parseHistogram(&acc, stats, nTags, "object-size-linear")
 
 	acc.AssertContainsTaggedFields(t, "aerospike_histogram_object_size_linear", expectedFields, expectedTags)
-
 }
+
 func TestAerospikeParseValue(t *testing.T) {
 	// uint64 with value bigger than int64 max
-	val := parseValue("18446744041841121751")
+	val := parseAerospikeValue("", "18446744041841121751")
 	require.Equal(t, uint64(18446744041841121751), val)
 
-	val = parseValue("true")
+	val = parseAerospikeValue("", "true")
 	require.Equal(t, true, val)
 
 	// int values
-	val = parseValue("42")
-	require.Equal(t, val, int64(42), "must be parsed as int")
+	val = parseAerospikeValue("", "42")
+	require.Equal(t, int64(42), val, "must be parsed as an int64")
 
 	// string values
-	val = parseValue("BB977942A2CA502")
-	require.Equal(t, val, `BB977942A2CA502`, "must be left as string")
+	val = parseAerospikeValue("", "BB977942A2CA502")
+	require.Equal(t, `BB977942A2CA502`, val, "must be left as a string")
+
+	// all-digit value with an unprotected key: parsed as a number
+	val = parseAerospikeValue("", "1992929191")
+	require.Equal(t, int64(1992929191), val, "must be parsed as an int64")
+
+	// all-digit value with a protected key: left untouched
+	val = parseAerospikeValue("node_name", "1992929191")
+	require.Equal(t, `1992929191`, val, "must be left as a string")
 }
 
 func FindTagValue(acc *testutil.Accumulator, measurement string, key string, value string) bool {
@@ -476,7 +494,6 @@ func FindTagValue(acc *testutil.Accumulator, measurement string, key string, val
 			if ok && v == value {
 				return true
 			}
-
 		}
 	}
 	return false
diff --git a/plugins/inputs/aerospike/sample.conf b/plugins/inputs/aerospike/sample.conf
new file mode 100644
index 0000000000000..00e867dcd68ca
--- /dev/null
+++ b/plugins/inputs/aerospike/sample.conf
@@ -0,0 +1,41 @@
+# Read stats from aerospike server(s)
+[[inputs.aerospike]]
+  ## Aerospike servers to connect to (with port)
+  ## This plugin will query all namespaces the aerospike
+  ## server has configured and get stats for them.
+  servers = ["localhost:3000"]
+
+  # username = "telegraf"
+  # password = "pa$$word"
+
+  ## Optional TLS Config
+  # enable_tls = false
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  # tls_name = "tlsname"
+  ## If false, skip chain & host verification
+  # insecure_skip_verify = true
+
+  # Feature Options
+  # Add namespace variable to limit the namespaces executed on
+  # Leave blank to do all
+  # disable_query_namespaces = true # default false
+  # namespaces = ["namespace1", "namespace2"]
+
+  # Enable set level telemetry
+  # query_sets = true # default: false
+  # Add namespace set combinations to limit sets executed on
+  # Leave blank to do all sets
+  # sets = ["namespace1/set1", "namespace1/set2", "namespace3"]
+
+  # Histograms
+  # enable_ttl_histogram = true # default: false
+  # enable_object_size_linear_histogram = true # default: false
+
+  # by default, aerospike produces a 100 bucket histogram
+  # this is not great for most graphing tools, this will allow
+  # the ability to squash this to a smaller number of buckets
+  # To have a balanced histogram, the number of buckets chosen
+  # should divide evenly into 100.
+  # num_histogram_buckets = 100 # default: 10
diff --git a/plugins/inputs/aliyuncms/README.md b/plugins/inputs/aliyuncms/README.md
new file mode 100644
index 0000000000000..8b25d70404088
--- /dev/null
+++ b/plugins/inputs/aliyuncms/README.md
@@ -0,0 +1,158 @@
+# Alibaba (Aliyun) CloudMonitor Service Statistics Input Plugin
+
+Hereafter we use `Aliyun` instead of `Alibaba`, as it is the default naming
+across the web console and docs.
+
+This plugin will pull Metric Statistics from Aliyun CMS.
+
+## Aliyun Authentication
+
+This plugin uses an [AccessKey][1] credential for authentication with the
+Aliyun OpenAPI endpoint. The plugin will attempt to authenticate in the
+following order:
+
+1. Ram RoleARN credential if `access_key_id`, `access_key_secret`, `role_arn`, `role_session_name` are specified
+2. AccessKey STS token credential if `access_key_id`, `access_key_secret`, `access_key_sts_token` are specified
+3. AccessKey credential if `access_key_id`, `access_key_secret` are specified
+4. Ecs Ram Role credential if `role_name` is specified
+5. RSA keypair credential if `private_key`, `public_key_id` are specified
+6. Environment variables credential
+7. Instance metadata credential
+
+[1]: https://www.alibabacloud.com/help/doc-detail/53045.htm?spm=a2c63.p38356.b99.127.5cba21fdt5MJKr&parentId=28572
+
+## Configuration
+
+```toml @sample.conf
+# Pull Metric Statistics from Aliyun CMS
+[[inputs.aliyuncms]]
+  ## Aliyun Credentials
+  ## Credentials are loaded in the following order
+  ## 1) Ram RoleArn credential
+  ## 2) AccessKey STS token credential
+  ## 3) AccessKey credential
+  ## 4) Ecs Ram Role credential
+  ## 5) RSA keypair credential
+  ## 6) Environment variables credential
+  ## 7) Instance metadata credential
+
+  # access_key_id = ""
+  # access_key_secret = ""
+  # access_key_sts_token = ""
+  # role_arn = ""
+  # role_session_name = ""
+  # private_key = ""
+  # public_key_id = ""
+  # role_name = ""
+
+  ## Specify the ali cloud regions to be queried for metrics and objects discovery
+  ## If not set, all supported regions (see below) are covered. This can put a significant
+  ## load on the API, so the recommendation is to limit the list as much as possible.
+  ## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
+  ## Default supported regions are:
+  ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
+  ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
+  ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
+  ##
+  ## From the discovery perspective this sets the scope for object discovery; the discovered info
+  ## can be used to enrich the metrics with object attributes/tags. Discovery is not supported for
+  ## all projects (if not supported, it will be reported on start - for example for the 'acs_cdn'
+  ## project: 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"')
+  ## Currently, discovery is supported for the following projects:
+  ## - acs_ecs_dashboard
+  ## - acs_rds_dashboard
+  ## - acs_slb_dashboard
+  ## - acs_vpc_eip
+  regions = ["cn-hongkong"]
+
+  # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all
+  # metrics are made available at the 1 minute period. Some are collected at
+  # 3 minute, 5 minute, or larger intervals.
+  # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
+  # Note that if a period is configured that is smaller than the minimum for a
+  # particular metric, that metric will not be returned by the Aliyun OpenAPI
+  # and will not be collected by Telegraf.
+  #
+  ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
+  period = "5m"
+
+  ## Collection Delay (required - must account for metrics availability via AliyunCMS API)
+  delay = "1m"
+
+  ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+  ## gaps or overlap in pulled data
+  interval = "5m"
+
+  ## Metric Statistic Project (required)
+  project = "acs_slb_dashboard"
+
+  ## Maximum requests per second, default value is 200
+  ratelimit = 200
+
+  ## How often the discovery API call is executed (default 1m)
+  #discovery_interval = "1m"
+
+  ## Metrics to Pull (Required)
+  [[inputs.aliyuncms.metrics]]
+    ## Metric names to be requested,
+    ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+    names = ["InstanceActiveConnection", "InstanceNewConnection"]
+
+    ## Dimension filters for the metric (optional).
+    ## These allow additional metric dimensions to be requested. If a dimension is not specified,
+    ## the data may be returned as-is or aggregated - it depends on the particular metric; details:
+    ## https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+    ##
+    ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled).
+    ## Values specified here are added to the list of discovered objects.
+    ## You can specify either a single dimension:
+    #dimensions = '{"instanceId": "p-example"}'
+
+    ## Or you can specify several dimensions at once:
+    #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+
+    ## Enrichment tags, can be added from discovery (if supported)
+    ## Notation is <tag_name>:<JMESPath query path>
+    ## To figure out which fields are available, consult the Describe API per project.
+    ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+    #tag_query_path = [
+    #    "address:Address",
+    #    "name:LoadBalancerName",
+    #    "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+    #    ]
+    ## The following tags are added by default: regionId (if discovery is enabled), userId, instanceId.
+
+    ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metrics
+    ## without discovery data are emitted, otherwise they are dropped. This can help when debugging
+    ## dimension filters, or with partial coverage of discovery scope vs monitoring scope.
+    #allow_dps_without_discovery = false
+```
+
+### Requirements and Terminology
+
+Plugin configuration utilizes [preset metric items references][2]:
+
+- `discovery_region` must be a valid Aliyun [Region](https://www.alibabacloud.com/help/doc-detail/40654.htm) value
+- `period` must be a valid duration value
+- `project` must be a preset project value
+- `names` must be preset metric names
+- `dimensions` must be preset dimension values
+
+[2]: https://www.alibabacloud.com/help/doc-detail/28619.htm?spm=a2c63.p38356.a3.2.389f233d0kPJn0
+
+## Metrics
+
+Each monitored Aliyun CMS project records a measurement with fields for each
+available metric statistic. Metric names are represented in
+[snake case](https://en.wikipedia.org/wiki/Snake_case).
+
+- aliyuncms_{project}
+  - {metric}_average (metric Average value)
+  - {metric}_minimum (metric Minimum value)
+  - {metric}_maximum (metric Maximum value)
+  - {metric}_value (metric Value value)
+
+## Example Output
+
+```shell
+$ ./telegraf --config telegraf.conf --input-filter aliyuncms --test
+> aliyuncms_acs_slb_dashboard,instanceId=p-example,regionId=cn-hangzhou,userId=1234567890 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875
+```
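The credential fallback described in the README above is implemented as a provider chain in the plugin's `Init()` (see `aliyuncms.go` below). A minimal, self-contained sketch of the same chain, assuming only the `alibaba-cloud-sdk-go` providers package that the plugin itself imports; the key values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers"
)

func main() {
	// Static configuration is consulted first (RoleArn / STS token /
	// AccessKey / ECS Ram role / RSA keypair, depending on which fields are set).
	configuration := &providers.Configuration{
		AccessKeyID:     "my_access_key_id",     // placeholder
		AccessKeySecret: "my_access_key_secret", // placeholder
	}

	// ...then environment variables, then instance metadata, in that order.
	chain := providers.NewChainProvider([]providers.Provider{
		providers.NewConfigurationCredentialProvider(configuration),
		providers.NewEnvCredentialProvider(),
		providers.NewInstanceMetadataProvider(),
	})

	// Retrieve returns the first credential that a provider can supply.
	credential, err := chain.Retrieve()
	if err != nil {
		fmt.Println("no provider in the chain could supply a credential:", err)
		return
	}
	fmt.Printf("resolved credential type: %T\n", credential)
}
```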
diff --git a/plugins/inputs/aliyuncms/aliyuncms.go b/plugins/inputs/aliyuncms/aliyuncms.go
new file mode 100644
index 0000000000000..ef5ae4a9d852e
--- /dev/null
+++ b/plugins/inputs/aliyuncms/aliyuncms.go
@@ -0,0 +1,506 @@
+//go:generate ../../../tools/readme_config_includer/generator
+package aliyuncms
+
+import (
+	_ "embed"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/cms"
+	"github.com/jmespath/go-jmespath"
+	"github.com/pkg/errors"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/internal/limiter"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
+type (
+	// AliyunCMS is the Aliyun CMS plugin configuration
+	AliyunCMS struct {
+		AccessKeyID       string `toml:"access_key_id"`
+		AccessKeySecret   string `toml:"access_key_secret"`
+		AccessKeyStsToken string `toml:"access_key_sts_token"`
+		RoleArn           string `toml:"role_arn"`
+		RoleSessionName   string `toml:"role_session_name"`
+		PrivateKey        string `toml:"private_key"`
+		PublicKeyID       string `toml:"public_key_id"`
+		RoleName          string `toml:"role_name"`
+
+		Regions           []string        `toml:"regions"`
+		DiscoveryInterval config.Duration `toml:"discovery_interval"`
+		Period            config.Duration `toml:"period"`
+		Delay             config.Duration `toml:"delay"`
+		Project           string          `toml:"project"`
+		Metrics           []*Metric       `toml:"metrics"`
+		RateLimit         int             `toml:"ratelimit"`
+
+		Log telegraf.Logger `toml:"-"`
+
+		client        aliyuncmsClient
+		windowStart   time.Time
+		windowEnd     time.Time
+		dt            *discoveryTool
+		dimensionKey  string
+		discoveryData map[string]interface{}
+		measurement   string
+	}
+
+	// Metric describes what metrics to get
+	Metric struct {
+		ObjectsFilter                 string   `toml:"objects_filter"`
+		MetricNames                   []string `toml:"names"`
+		Dimensions                    string   `toml:"dimensions"` //String representation of JSON dimensions
+		TagsQueryPath                 []string `toml:"tag_query_path"`
+		AllowDataPointWODiscoveryData bool     `toml:"allow_dps_without_discovery"` //Allow data points without discovery data (if no discovery data found)
+
+		dtLock               sync.Mutex                   //Guard for discoveryTags & dimensions
+		discoveryTags        map[string]map[string]string //Internal data structure that can enrich metrics with tags
+		dimensionsUdObj      map[string]string
+		dimensionsUdArr      []map[string]string //Parsed Dimensions JSON string (unmarshalled)
+		requestDimensions    []map[string]string //The actual dimensions list that will be used in the API request
+		requestDimensionsStr string              //String representation of the above
+	}
+
+	// Dimension describes how to get metrics
+	Dimension struct {
+		Value string `toml:"value"`
+	}
+
+	aliyuncmsClient interface {
+		DescribeMetricList(request *cms.DescribeMetricListRequest) (response *cms.DescribeMetricListResponse, err error)
+	}
+)
+
+// https://www.alibabacloud.com/help/doc-detail/40654.htm?gclid=Cj0KCQjw4dr0BRCxARIsAKUNjWTAMfyVUn_Y3OevFBV3CMaazrhq0URHsgE7c0m0SeMQRKlhlsJGgIEaAviyEALw_wcB
+var aliyunRegionList = []string{
+	"cn-qingdao",
+	"cn-beijing",
+	"cn-zhangjiakou",
+	"cn-huhehaote",
+	"cn-hangzhou",
+	"cn-shanghai",
+	"cn-shenzhen",
+	"cn-heyuan",
+	"cn-chengdu",
+	"cn-hongkong",
+	"ap-southeast-1",
+	"ap-southeast-2",
+	"ap-southeast-3",
+	"ap-southeast-5",
+	"ap-south-1",
+	"ap-northeast-1",
+	"us-west-1",
+	"us-east-1",
+	"eu-central-1",
+	"eu-west-1",
+	"me-east-1",
+}
+
+func (*AliyunCMS) SampleConfig() string {
+	return sampleConfig
+}
+
+// Init performs checks of the plugin inputs and initializes internals
+func (s *AliyunCMS) Init() error {
+	if s.Project == "" {
+		return errors.New("project is not set")
+	}
+
+	var (
+		roleSessionExpiration = 600
+		sessionExpiration     = 600
+	)
+	configuration := &providers.Configuration{
+		AccessKeyID:           s.AccessKeyID,
+		AccessKeySecret:       s.AccessKeySecret,
+		AccessKeyStsToken:     s.AccessKeyStsToken,
+		RoleArn:               s.RoleArn,
+		RoleSessionName:       s.RoleSessionName,
+		RoleSessionExpiration: &roleSessionExpiration,
+		PrivateKey:            s.PrivateKey,
+		PublicKeyID:           s.PublicKeyID,
+		SessionExpiration:     &sessionExpiration,
+		RoleName:              s.RoleName,
+	}
+	credentialProviders := []providers.Provider{
+		providers.NewConfigurationCredentialProvider(configuration),
+		providers.NewEnvCredentialProvider(),
+		providers.NewInstanceMetadataProvider(),
+	}
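+	// The chain below is evaluated in order: explicit configuration first,
+	// then environment variables, then ECS instance metadata; the first
+	// provider that yields a credential wins.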
+	credential, err := providers.NewChainProvider(credentialProviders).Retrieve()
+	if err != nil {
+		return errors.Errorf("failed to retrieve credential: %v", err)
+	}
+	s.client, err = cms.NewClientWithOptions("", sdk.NewConfig(), credential)
+	if err != nil {
+		return errors.Errorf("failed to create cms client: %v", err)
+	}
+
+	//Check metrics dimensions consistency
+	for _, metric := range s.Metrics {
+		if metric.Dimensions != "" {
+			metric.dimensionsUdObj = map[string]string{}
+			metric.dimensionsUdArr = []map[string]string{}
+
+			// first try to unmarshal as an object
+			err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdObj)
+			if err != nil {
+				// then try to unmarshal as an array
+				err := json.Unmarshal([]byte(metric.Dimensions), &metric.dimensionsUdArr)
+				if err != nil {
+					return errors.Errorf("cannot parse dimensions (neither obj, nor array) %q :%v", metric.Dimensions, err)
+				}
+			}
+		}
+	}
+
+	s.measurement = formatMeasurement(s.Project)
+
+	//Check regions
+	if len(s.Regions) == 0 {
+		s.Regions = aliyunRegionList
+		s.Log.Infof("'regions' is not set. Metrics will be queried across %d regions:\n%s",
+			len(s.Regions), strings.Join(s.Regions, ","))
+	}
+
+	//Init discovery...
+	if s.dt == nil { //Support for tests
+		s.dt, err = newDiscoveryTool(s.Regions, s.Project, s.Log, credential, int(float32(s.RateLimit)*0.2), time.Duration(s.DiscoveryInterval))
+		if err != nil {
+			s.Log.Errorf("Discovery tool is not activated: %v", err)
+			s.dt = nil
+			return nil
+		}
+	}
+
+	s.discoveryData, err = s.dt.getDiscoveryDataAcrossRegions(nil)
+	if err != nil {
+		s.Log.Errorf("Discovery tool is not activated: %v", err)
+		s.dt = nil
+		return nil
+	}
+
+	s.Log.Infof("%d object(s) discovered...", len(s.discoveryData))
+
+	//Special setting for the acs_oss project, since its API differs
+	if s.Project == "acs_oss" {
+		s.dimensionKey = "BucketName"
+	}
+
+	return nil
+}
+
+// Start starts the plugin discovery loop; metrics are gathered through Gather
+func (s *AliyunCMS) Start(telegraf.Accumulator) error {
+	//Start the periodic discovery process
+	if s.dt != nil {
+		s.dt.start()
+	}
+
+	return nil
+}
+
+// Gather implements the telegraf.Input interface
+func (s *AliyunCMS) Gather(acc telegraf.Accumulator) error {
+	s.updateWindow(time.Now())
+
+	// limit concurrency or we can easily exhaust user connection limit
+	lmtr := limiter.NewRateLimiter(s.RateLimit, time.Second)
+	defer lmtr.Stop()
+
+	var wg sync.WaitGroup
+	for _, metric := range s.Metrics {
+		//Prepare the internal structure with data from discovery
+		s.prepareTagsAndDimensions(metric)
+		wg.Add(len(metric.MetricNames))
+		for _, metricName := range metric.MetricNames {
+			<-lmtr.C
+			go func(metricName string, metric *Metric) {
+				defer wg.Done()
+				acc.AddError(s.gatherMetric(acc, metricName, metric))
+			}(metricName, metric)
+		}
+		wg.Wait()
+	}
+
+	return nil
+}
+
+// Stop stops the plugin discovery loop
+func (s *AliyunCMS) Stop() {
+	if s.dt != nil {
+		s.dt.stop()
+	}
+}
+
+func (s *AliyunCMS) updateWindow(relativeTo time.Time) {
+	//https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.6.701.54025679zh6wiR
+	//The query window is left-open and right-closed: (startTime, endTime];
+	//startTime must be strictly less than endTime.
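+	//Worked example: with delay = 1m and period = 5m, the first call yields
+	//the window (now-6m, now-1m]; subsequent calls slide the window forward,
+	//starting where the previous window ended.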
+
+	windowEnd := relativeTo.Add(-time.Duration(s.Delay))
+
+	if s.windowEnd.IsZero() {
+		// this is the first run, no window info, so just get a single period
+		s.windowStart = windowEnd.Add(-time.Duration(s.Period))
+	} else {
+		// subsequent window, start where the last window left off
+		s.windowStart = s.windowEnd
+	}
+
+	s.windowEnd = windowEnd
+}
+
+// gatherMetric gathers the given metric and emits errors via the accumulator
+func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, metric *Metric) error {
+	for _, region := range s.Regions {
+		req := cms.CreateDescribeMetricListRequest()
+		req.Period = strconv.FormatInt(int64(time.Duration(s.Period).Seconds()), 10)
+		req.MetricName = metricName
+		req.Length = "10000"
+		req.Namespace = s.Project
+		req.EndTime = strconv.FormatInt(s.windowEnd.Unix()*1000, 10)
+		req.StartTime = strconv.FormatInt(s.windowStart.Unix()*1000, 10)
+		req.Dimensions = metric.requestDimensionsStr
+		req.RegionId = region
+
+		for more := true; more; {
+			resp, err := s.client.DescribeMetricList(req)
+			if err != nil {
+				return errors.Errorf("failed to query metricName list: %v", err)
+			}
+			if resp.Code != "200" {
+				s.Log.Errorf("failed to query metricName list: %v", resp.Message)
+				break
+			}
+
+			var datapoints []map[string]interface{}
+			if err := json.Unmarshal([]byte(resp.Datapoints), &datapoints); err != nil {
+				return errors.Errorf("failed to decode response datapoints: %v", err)
+			}
+
+			if len(datapoints) == 0 {
+				s.Log.Debugf("No metrics returned from CMS, response msg: %s", resp.Message)
+				break
+			}
+
+		NextDataPoint:
+			for _, datapoint := range datapoints {
+				fields := map[string]interface{}{}
+				datapointTime := int64(0)
+				tags := map[string]string{}
+				for key, value := range datapoint {
+					switch key {
+					case "instanceId", "BucketName":
+						tags[key] = value.(string)
+						if metric.discoveryTags != nil { //discovery may not be activated
+							//Skip the data point if no discovery data exists for it
+							_, ok := metric.discoveryTags[value.(string)]
+							if !ok &&
+								!metric.AllowDataPointWODiscoveryData {
+								s.Log.Warnf("Instance %q is not found in discovery, skipping monitoring datapoint...", value.(string))
+								continue NextDataPoint
+							}
+
+							for k, v := range metric.discoveryTags[value.(string)] {
+								tags[k] = v
+							}
+						}
+					case "userId":
+						tags[key] = value.(string)
+					case "timestamp":
+						datapointTime = int64(value.(float64)) / 1000
+					default:
+						fields[formatField(metricName, key)] = value
+					}
+				}
+				acc.AddFields(s.measurement, fields, tags, time.Unix(datapointTime, 0))
+			}
+
+			req.NextToken = resp.NextToken
+			more = req.NextToken != ""
+		}
+	}
+	return nil
+}
+
+//parseTag splits a tag spec ("tagKey:queryPath") and resolves the tag value
+//from the discovery data via a JMESPath query
+func parseTag(tagSpec string, data interface{}) (tagKey string, tagValue string, err error) {
+	var (
+		ok        bool
+		queryPath = tagSpec
+	)
+	tagKey = tagSpec
+
+	//Split the spec into tagKey and query path
+	if splitted := strings.Split(tagSpec, ":"); len(splitted) == 2 {
+		tagKey = splitted[0]
+		queryPath = splitted[1]
+	}
+
+	tagRawValue, err := jmespath.Search(queryPath, data)
+	if err != nil {
+		return "", "", errors.Errorf("Can't query data from discovery data using query path %q: %v",
+			queryPath, err)
+	}
+
+	if tagRawValue == nil { //Nothing found
+		return "", "", nil
+	}
+
+	tagValue, ok = tagRawValue.(string)
+	if !ok {
+		return "", "", errors.Errorf("Tag value %v parsed by query %q is not a string value",
+			tagRawValue, queryPath)
+	}
+
+	return tagKey, tagValue, nil
+}
+
+func (s *AliyunCMS) prepareTagsAndDimensions(metric *Metric) {
+	var (
+		newData     bool
+		defaultTags = []string{"RegionId:RegionId"}
+	)
+
+	if s.dt == nil { //Discovery is not activated
+		return
+	}
+
+	//Read all data from the buffered channel
+L:
+	for {
+		select {
+		case s.discoveryData = <-s.dt.dataChan:
+			newData = true
+			continue
+		default:
+			break L
+		}
+	}
+
+	//New data has arrived (so process it), or this is the first call
+	if newData || len(metric.discoveryTags) == 0 {
+		metric.dtLock.Lock()
+		defer metric.dtLock.Unlock()
+
+		if metric.discoveryTags == nil {
+			metric.discoveryTags = make(map[string]map[string]string, len(s.discoveryData))
+		}
+
+		metric.requestDimensions = nil //erasing
+		metric.requestDimensions = make([]map[string]string, 0, len(s.discoveryData))
+
+		//Preparing tags & dims...
+		for instanceID, elem := range s.discoveryData {
+			//Start filling tags
+			//Remove the old value if it exists
+			delete(metric.discoveryTags, instanceID)
+			metric.discoveryTags[instanceID] = make(map[string]string, len(metric.TagsQueryPath)+len(defaultTags))
+
+			for _, tagQueryPath := range metric.TagsQueryPath {
+				tagKey, tagValue, err := parseTag(tagQueryPath, elem)
+				if err != nil {
+					s.Log.Errorf("%v", err)
+					continue
+				}
+				if err == nil && tagValue == "" { //Nothing found
+					s.Log.Debugf("Data by query path %q: is not found, for instance %q", tagQueryPath, instanceID)
+					continue
+				}
+
+				metric.discoveryTags[instanceID][tagKey] = tagValue
+			}
+
+			//Add default tags if not already there
+			for _, defaultTagQP := range defaultTags {
+				tagKey, tagValue, err := parseTag(defaultTagQP, elem)
+				if err != nil {
+					s.Log.Errorf("%v", err)
+					continue
+				}
+
+				if err == nil && tagValue == "" { //Nothing found
+					s.Log.Debugf("Data by query path %q: is not found, for instance %q",
+						defaultTagQP, instanceID)
+					continue
+				}
+
+				metric.discoveryTags[instanceID][tagKey] = tagValue
+			}
+
+			//If no dimension is configured in the config file, use discovery data
+			if len(metric.dimensionsUdArr) == 0 && len(metric.dimensionsUdObj) == 0 {
+				metric.requestDimensions = append(
+					metric.requestDimensions,
+					map[string]string{s.dimensionKey: instanceID})
+			}
+		}
+
+		//Add the dimensions filter from the config file
+		if len(metric.dimensionsUdArr) != 0 {
+			metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdArr...)
+		}
+		if len(metric.dimensionsUdObj) != 0 {
+			metric.requestDimensions = append(metric.requestDimensions, metric.dimensionsUdObj)
+		}
+
+		//Marshal the request dimensions into their string representation
+		reqDim, err := json.Marshal(metric.requestDimensions)
+		if err != nil {
+			s.Log.Errorf("Can't marshal metric request dimensions %v :%v",
+				metric.requestDimensions, err)
+			metric.requestDimensionsStr = ""
+		} else {
+			metric.requestDimensionsStr = string(reqDim)
+		}
+	}
+}
+
+// Formatting helpers
+func formatField(metricName string, statistic string) string {
+	if metricName == statistic {
+		statistic = "value"
+	}
+	return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic))
+}
+
+func formatMeasurement(project string) string {
+	project = strings.ReplaceAll(project, "/", "_")
+	project = snakeCase(project)
+	return fmt.Sprintf("aliyuncms_%s", project)
+}
+
+func snakeCase(s string) string {
+	s = internal.SnakeCase(s)
+	s = strings.ReplaceAll(s, "__", "_")
+	return s
+}
+
+func init() {
+	inputs.Add("aliyuncms", func() telegraf.Input {
+		return &AliyunCMS{
+			RateLimit:         200,
+			DiscoveryInterval: config.Duration(time.Minute),
+			dimensionKey:      "instanceId",
+		}
+	})
+}
diff --git a/plugins/inputs/aliyuncms/aliyuncms_test.go b/plugins/inputs/aliyuncms/aliyuncms_test.go
new file mode 100644
index 0000000000000..ed1c8d7e645cb
--- /dev/null
+++ b/plugins/inputs/aliyuncms/aliyuncms_test.go
@@ -0,0 +1,517 @@
+package aliyuncms
+
+import (
+	"bytes"
+	"io"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/cms"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/require"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/inputs"
+	"github.com/influxdata/telegraf/testutil"
+)
+
+const inputTitle = "inputs.aliyuncms"
+
+type mockGatherAliyunCMSClient struct{}
+
+func (m *mockGatherAliyunCMSClient) DescribeMetricList(request *cms.DescribeMetricListRequest) (*cms.DescribeMetricListResponse, error) {
+	resp := new(cms.DescribeMetricListResponse)
+
+	switch request.MetricName {
+	case "InstanceActiveConnection":
+		resp.Code = "200"
+		resp.Period = "60"
+		resp.Datapoints = `
+		[{
+			"timestamp": 1490152860000,
+			"Maximum": 200,
+			"userId": "1234567898765432",
+			"Minimum": 100,
+			"instanceId": "i-abcdefgh123456",
+			"Average": 150,
+			"Value": 300
+		}]`
+	case "ErrorCode":
+		resp.Code = "404"
+		resp.Message = "ErrorCode"
+	case "ErrorDatapoint":
+		resp.Code = "200"
+		resp.Period = "60"
+		// The trailing comma below is intentional: this case exercises
+		// handling of corrupted JSON in the datapoints payload.
+		resp.Datapoints = `
+		[{
+			"timestamp": 1490152860000,
+			"Maximum": 200,
+			"userId": "1234567898765432",
+			"Minimum": 100,
+			"instanceId": "i-abcdefgh123456",
+			"Average": 150,
+		}]`
+	case "EmptyDatapoint":
+		resp.Code = "200"
+		resp.Period = "60"
+		resp.Datapoints = `[]`
+	case "ErrorResp":
+		return nil, errors.New("error response")
+	}
+	return resp, nil
+}
+
+type mockAliyunSDKCli struct {
+	resp *responses.CommonResponse
+}
+
+func (m *mockAliyunSDKCli) ProcessCommonRequest(_ *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+	return m.resp, nil
+}
+
+func getDiscoveryTool(project string, discoverRegions []string) (*discoveryTool, error) {
+	var (
+		err        error
+		credential auth.Credential
+	)
+
+	configuration := &providers.Configuration{
+		AccessKeyID:     "dummyKey",
+		AccessKeySecret: "dummySecret",
+	}
+	credentialProviders := []providers.Provider{
+		providers.NewConfigurationCredentialProvider(configuration),
+		providers.NewEnvCredentialProvider(),
+		providers.NewInstanceMetadataProvider(),
+	}
+	credential, err = providers.NewChainProvider(credentialProviders).Retrieve()
+	if err != nil {
+		return nil, errors.Errorf("failed to retrieve credential: %v", err)
+	}
+
+	dt, err := newDiscoveryTool(discoverRegions, project, testutil.Logger{Name: inputTitle}, credential, 1, time.Minute*2)
+	if err != nil {
+		return nil, errors.Errorf("Can't create discovery tool object: %v", err)
+	}
+	return dt, nil
+}
+
+func getMockSdkCli(httpResp *http.Response) (mockAliyunSDKCli, error) {
+	resp := responses.NewCommonResponse()
+	if err := responses.Unmarshal(resp, httpResp, "JSON"); err != nil {
+		return mockAliyunSDKCli{}, errors.Errorf("Can't parse response: %v", err)
+	}
+	return mockAliyunSDKCli{resp: resp}, nil
+}
+
+func TestPluginDefaults(t *testing.T) {
+	require.Equal(t, &AliyunCMS{
+		RateLimit:         200,
+		DiscoveryInterval: config.Duration(time.Minute),
+		dimensionKey:      "instanceId",
+	}, inputs.Inputs["aliyuncms"]())
+}
+
+func TestPluginInitialize(t *testing.T) {
+	var err error
+
+	plugin := new(AliyunCMS)
+	plugin.Log = testutil.Logger{Name: inputTitle}
+	plugin.Regions = []string{"cn-shanghai"}
+	plugin.dt, err = getDiscoveryTool("acs_slb_dashboard", plugin.Regions)
+	if err != nil {
+		t.Fatalf("Can't create discovery tool object: %v", err)
+	}
+
+	httpResp := &http.Response{
+		StatusCode: 200,
+		Body: io.NopCloser(bytes.NewBufferString(
+			`{
+				"LoadBalancers":
+				{
+					"LoadBalancer": [
+						{"LoadBalancerId":"bla"}
+					]
+				},
+				"TotalCount": 1,
+				"PageSize": 1,
+				"PageNumber": 1
+			}`)),
+	}
+	mockCli, err := getMockSdkCli(httpResp)
+	if err != nil {
+		t.Fatalf("Can't create mock sdk cli: %v", err)
+	}
+	plugin.dt.cli = map[string]aliyunSdkClient{plugin.Regions[0]: &mockCli}
+
+	tests := []struct {
+		name                string
+		project             string
+		accessKeyID         string
+		accessKeySecret     string
+		expectedErrorString string
+		regions             []string
+		discoveryRegions    []string
+	}{
+		{
+			name:                "Empty project",
+			expectedErrorString: "project is not set",
+			regions:             []string{"cn-shanghai"},
+		},
+		{
+			name:            "Valid project",
+			project:         "acs_slb_dashboard",
+			regions:         []string{"cn-shanghai"},
+			accessKeyID:     "dummy",
+			accessKeySecret: "dummy",
+		},
+		{
+			name:            "'regions' is not set",
+			project:         "acs_slb_dashboard",
+			accessKeyID:     "dummy",
+			accessKeySecret: "dummy",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			plugin.Project = tt.project
+			plugin.AccessKeyID = tt.accessKeyID
+			plugin.AccessKeySecret = tt.accessKeySecret
+			plugin.Regions = tt.regions
+
+			if tt.expectedErrorString != "" {
+				require.EqualError(t, plugin.Init(), tt.expectedErrorString)
+			} else {
+				require.Equal(t, nil, plugin.Init())
+			}
+			if len(tt.regions) == 0 { //Check if set to default
+				require.Equal(t, plugin.Regions, aliyunRegionList)
+			}
+		})
+	}
+}
+
+func TestPluginMetricsInitialize(t *testing.T) {
+	var err error
+
+	plugin := new(AliyunCMS)
+	plugin.Log = testutil.Logger{Name: inputTitle}
+	plugin.Regions = []string{"cn-shanghai"}
+	plugin.dt, err = getDiscoveryTool("acs_slb_dashboard", plugin.Regions)
+	if err != nil {
+		t.Fatalf("Can't create discovery tool object: %v", err)
+	}
+
+	httpResp := &http.Response{
+		StatusCode: 200,
+		Body: io.NopCloser(bytes.NewBufferString(
+			`{
+				"LoadBalancers":
+				{
+					"LoadBalancer": [
+						{"LoadBalancerId":"bla"}
+					]
+				},
+				"TotalCount": 1,
+				"PageSize": 1,
+				"PageNumber": 1
+			}`)),
+	}
+	mockCli, err := getMockSdkCli(httpResp)
+	if err != nil {
+		t.Fatalf("Can't create mock sdk cli: %v", err)
+	}
+	plugin.dt.cli = map[string]aliyunSdkClient{plugin.Regions[0]: &mockCli}
+
+	tests := []struct {
+		name                string
+		project             string
+		accessKeyID         string
+		accessKeySecret     string
+		expectedErrorString string
+		regions             []string
+		discoveryRegions    []string
+		metrics             []*Metric
+	}{
+		{
+			name:            "Valid project, dimensions as an object",
+			project:         "acs_slb_dashboard",
+			regions:         []string{"cn-shanghai"},
+			accessKeyID:     "dummy",
+			accessKeySecret: "dummy",
+			metrics: []*Metric{
+				{
+					MetricNames: []string{},
+					Dimensions:  `{"instanceId": "i-abcdefgh123456"}`,
+				},
+			},
+		},
+		{
+			name:            "Valid project, dimensions as an array",
+			project:         "acs_slb_dashboard",
+			regions:         []string{"cn-shanghai"},
+			accessKeyID:     "dummy",
+			accessKeySecret: "dummy",
+			metrics: []*Metric{
+				{
+					MetricNames: []string{},
+					Dimensions:  `[{"instanceId": "p-example"},{"instanceId": "q-example"}]`,
+				},
+			},
+		},
+		{
+			name:                "Invalid dimensions JSON",
+			project:             "acs_slb_dashboard",
+			regions:             []string{"cn-shanghai"},
+			accessKeyID:         "dummy",
+			accessKeySecret:     "dummy",
+			expectedErrorString: `cannot parse dimensions (neither obj, nor array) "[" :unexpected end of JSON input`,
+			metrics: []*Metric{
+				{
+					MetricNames: []string{},
+					Dimensions:  `[`,
+				},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			plugin.Project = tt.project
+			plugin.AccessKeyID = tt.accessKeyID
+			plugin.AccessKeySecret = tt.accessKeySecret
+			plugin.Regions = tt.regions
+			plugin.Metrics = tt.metrics
+
+			if tt.expectedErrorString != "" {
+				require.EqualError(t, plugin.Init(), tt.expectedErrorString)
+			} else {
+				require.Equal(t, nil, plugin.Init())
+			}
+		})
+	}
+}
+
+func TestUpdateWindow(t *testing.T) {
+	duration, _ := time.ParseDuration("1m")
+	internalDuration := config.Duration(duration)
+
+	plugin := &AliyunCMS{
+		Project: "acs_slb_dashboard",
+		Period:  internalDuration,
+		Delay:   internalDuration,
+		Log:     testutil.Logger{Name: inputTitle},
+	}
+
+	now := time.Now()
+
+	require.True(t, plugin.windowEnd.IsZero())
+	require.True(t, plugin.windowStart.IsZero())
+
+	plugin.updateWindow(now)
+
+	newStartTime := plugin.windowEnd
+
+	// initial window just has a single period
+	require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay)))
+	require.EqualValues(t, plugin.windowStart, now.Add(-time.Duration(plugin.Delay)).Add(-time.Duration(plugin.Period)))
+
+	now = time.Now()
+	plugin.updateWindow(now)
+
+	// subsequent window uses previous end time as start time
+	require.EqualValues(t, plugin.windowEnd, now.Add(-time.Duration(plugin.Delay)))
+	require.EqualValues(t, plugin.windowStart, newStartTime)
+}
+
+func TestGatherMetric(t *testing.T) {
+	plugin := &AliyunCMS{
+		Project:     "acs_slb_dashboard",
+		client:      new(mockGatherAliyunCMSClient),
+		measurement: formatMeasurement("acs_slb_dashboard"),
+		Log:         testutil.Logger{Name: inputTitle},
+		Regions:     []string{"cn-shanghai"},
+	}
+
+	metric := &Metric{
+		MetricNames: []string{},
+		Dimensions:  `"instanceId": "i-abcdefgh123456"`,
+	}
+
+	tests := []struct {
+		name                string
+		metricName          string
+		expectedErrorString string
+	}{
+		{
+			name:                "Datapoint with corrupted JSON",
+			metricName:          "ErrorDatapoint",
+			expectedErrorString: `failed to decode response datapoints: invalid character '}' looking for beginning of object key string`,
+		},
+		{
+			name:                "General CMS response error",
+			metricName:          "ErrorResp",
+			expectedErrorString: "failed to query metricName list: error response",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var acc telegraf.Accumulator
+			require.EqualError(t, plugin.gatherMetric(acc, tt.metricName, metric), tt.expectedErrorString)
+		})
+	}
+}
+
+func TestGather(t *testing.T) {
+	metric := &Metric{
+		MetricNames: []string{},
+		Dimensions:  `{"instanceId": "i-abcdefgh123456"}`,
+	}
+	plugin := &AliyunCMS{
+		AccessKeyID:     "my_access_key_id",
+		AccessKeySecret: "my_access_key_secret",
+		Project:         "acs_slb_dashboard",
+		Metrics:         []*Metric{metric},
+		RateLimit:       200,
+		measurement:     formatMeasurement("acs_slb_dashboard"),
+		Regions:         []string{"cn-shanghai"},
+		client:          new(mockGatherAliyunCMSClient),
+		Log:             testutil.Logger{Name: inputTitle},
+	}
+
+	//test table:
+	tests := []struct {
+		name           string
+		hasMeasurement bool
+		metricNames    []string
+		expected       []telegraf.Metric
+	}{
+		{
+			name:        "Empty data point",
+			metricNames: []string{"EmptyDatapoint"},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"aliyuncms_acs_slb_dashboard",
+					nil,
+					nil,
+					time.Time{}),
+			},
+		},
+		{
+			name:           "Data point with fields & tags",
+			hasMeasurement: true,
+			metricNames:    []string{"InstanceActiveConnection"},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"aliyuncms_acs_slb_dashboard",
+					map[string]string{
+						"instanceId": "i-abcdefgh123456",
+						"userId":     "1234567898765432",
+					},
+					map[string]interface{}{
+						"instance_active_connection_minimum": float64(100),
+						"instance_active_connection_maximum": float64(200),
+						"instance_active_connection_average": float64(150),
+						"instance_active_connection_value":   float64(300),
+					},
+					time.Unix(1490152860000, 0)),
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var acc testutil.Accumulator
+			plugin.Metrics[0].MetricNames = tt.metricNames
+			require.Empty(t, acc.GatherError(plugin.Gather))
+			require.Equal(t, acc.HasMeasurement("aliyuncms_acs_slb_dashboard"), tt.hasMeasurement)
+			if tt.hasMeasurement {
+				acc.AssertContainsTaggedFields(t, "aliyuncms_acs_slb_dashboard", tt.expected[0].Fields(), tt.expected[0].Tags())
+			}
+		})
+	}
+}
+
+func TestGetDiscoveryDataAcrossRegions(t *testing.T) {
+	//test table:
+	tests := []struct {
+		name                string
+		project             string
+		region              string
+		httpResp            *http.Response
+		discData            map[string]interface{}
+		totalCount          int
+		pageSize            int
+		pageNumber          int
+		expectedErrorString string
+	}{
+		{
+			name:    "No root key in discovery response",
+			project: "acs_slb_dashboard",
+			region:  "cn-hongkong",
+			httpResp: &http.Response{
+				StatusCode: 200,
+				Body:       io.NopCloser(bytes.NewBufferString(`{}`)),
+			},
+			totalCount:          0,
+			pageSize:            0,
+			pageNumber:          0,
+			expectedErrorString: `Didn't find root key "LoadBalancers" in discovery response`,
+		},
+		{
+			name:    "1 object discovered",
+			project: "acs_slb_dashboard",
+			region:  "cn-hongkong",
+			httpResp: &http.Response{
+				StatusCode: 200,
+				Body: io.NopCloser(bytes.NewBufferString(
+					`{
+						"LoadBalancers":
+						{
+							"LoadBalancer": [
+								{"LoadBalancerId":"bla"}
+							]
+						},
+						"TotalCount": 1,
+						"PageSize": 1,
+						"PageNumber": 1
+					}`)),
+			},
+			discData:            map[string]interface{}{"bla": map[string]interface{}{"LoadBalancerId": "bla"}},
+			totalCount:          1,
+			pageSize:            1,
+			pageNumber:          1,
+			expectedErrorString: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dt, err := getDiscoveryTool(tt.project, []string{tt.region})
+			if err != nil {
+				t.Fatalf("Can't create discovery tool object: %v", err)
+			}
+
+			mockCli, err := getMockSdkCli(tt.httpResp)
+			if err != nil {
+				t.Fatalf("Can't create mock sdk cli: %v", err)
+			}
+			dt.cli = map[string]aliyunSdkClient{tt.region: &mockCli}
+			data, err := dt.getDiscoveryDataAcrossRegions(nil)
+
+			require.Equal(t, tt.discData, data)
+			if err != nil {
+				require.EqualError(t, err, tt.expectedErrorString)
+			}
+		})
+	}
+}
diff --git a/plugins/inputs/aliyuncms/discovery.go b/plugins/inputs/aliyuncms/discovery.go
new file mode 100644
index 0000000000000..c287d07a388c2
--- /dev/null
+++ b/plugins/inputs/aliyuncms/discovery.go
@@ -0,0 +1,464 @@
+package aliyuncms
+
+import (
+	"encoding/json"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/rds"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/slb"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/vpc"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal/limiter"
+	"github.com/pkg/errors"
+)
+
+type discoveryRequest interface {
+}
+
+type aliyunSdkClient interface {
+	ProcessCommonRequest(req *requests.CommonRequest) (response *responses.CommonResponse, err error)
+}
+
+// discoveryTool is an object that provides the discovery feature
+type discoveryTool struct {
+	req                map[string]discoveryRequest //Discovery request (specific per object type)
+	rateLimit          int                         //Rate limit for API queries, as they are limited by the API backend
+	reqDefaultPageSize int                         //Default page size when querying data from the API (how many objects per request)
+	cli                map[string]aliyunSdkClient  //API client that performs the discovery request
+
+	respRootKey     string //Root key in the JSON response where to look for discovery data
+	respObjectIDKey string //Key in an element of the array under the root key that stores the object ID;
+	//for the majority of cases it would be InstanceId, for OSS it is BucketName.
+	//This key is also used in dimension filtering.
+	wg       sync.WaitGroup              //WG for the primary discovery goroutine
+	interval time.Duration               //Discovery interval
+	done     chan bool                   //Done channel to stop the primary discovery goroutine
+	dataChan chan map[string]interface{} //Discovery data
+	lg       telegraf.Logger             //Telegraf logger (should be provided)
+}
+
+type parsedDResp struct {
+	data       []interface{}
+	totalCount int
+	pageSize   int
+	pageNumber int
+}
+
+//getRPCReqFromDiscoveryRequest - utility function to map between aliyun request primitives;
+//discoveryRequest represents the different types of discovery requests
+func getRPCReqFromDiscoveryRequest(req discoveryRequest) (*requests.RpcRequest, error) {
+	if reflect.ValueOf(req).Type().Kind() != reflect.Ptr ||
+		reflect.ValueOf(req).IsNil() {
+		return nil, errors.Errorf("Unexpected type of the discovery request object: %q, %q", reflect.ValueOf(req).Type(), reflect.ValueOf(req).Kind())
+	}
+
+	ptrV := reflect.Indirect(reflect.ValueOf(req))
+
+	for i := 0; i < ptrV.NumField(); i++ {
+		if ptrV.Field(i).Type().String() == "*requests.RpcRequest" {
+			if !ptrV.Field(i).CanInterface() {
+				return nil, errors.Errorf("Can't get interface of %v", ptrV.Field(i))
+			}
+
+			rpcReq, ok := ptrV.Field(i).Interface().(*requests.RpcRequest)
+
+			if !ok {
+				return nil, errors.Errorf("Can't convert interface of %v to '*requests.RpcRequest' type", ptrV.Field(i).Interface())
+			}
+
+			return rpcReq, nil
+		}
+	}
+	return nil, errors.Errorf("Didn't find *requests.RpcRequest embedded struct in %q", ptrV.Type())
+}
+
+//newDiscoveryTool returns a discovery tool object.
+//The object is used to periodically get data about aliyun objects and send this
+//data into a channel. The intention is to enrich reported metrics with discovery data.
+//Discovery is supported for a limited set of object types (defined by project) and can be extended in the future.
+//Discovery can be limited by region; if not set, all regions are queried.
+//Requests against the API can incur additional costs; consult the aliyun API documentation.
+func newDiscoveryTool(regions []string, project string, lg telegraf.Logger, credential auth.Credential, rateLimit int, discoveryInterval time.Duration) (*discoveryTool, error) {
+	var (
+		dscReq                = map[string]discoveryRequest{}
+		cli                   = map[string]aliyunSdkClient{}
+		responseRootKey       string
+		responseObjectIDKey   string
+		err                   error
+		noDiscoverySupportErr = errors.Errorf("no discovery support for project %q", project)
+	)
+
+	if len(regions) == 0 {
+		regions = aliyunRegionList
+		lg.Infof("'regions' is not provided! Discovery data will be queried across %d regions:\n%s",
+			len(aliyunRegionList), strings.Join(aliyunRegionList, ","))
+	}
+
+	if rateLimit == 0 { //Can be a rounding case
+		rateLimit = 1
+	}
+
+	for _, region := range regions {
+		switch project {
+		case "acs_ecs_dashboard":
+			dscReq[region] = ecs.CreateDescribeInstancesRequest()
+			responseRootKey = "Instances"
+			responseObjectIDKey = "InstanceId"
+		case "acs_rds_dashboard":
+			dscReq[region] = rds.CreateDescribeDBInstancesRequest()
+			responseRootKey = "Items"
+			responseObjectIDKey = "DBInstanceId"
+		case "acs_slb_dashboard":
+			dscReq[region] = slb.CreateDescribeLoadBalancersRequest()
+			responseRootKey = "LoadBalancers"
+			responseObjectIDKey = "LoadBalancerId"
+		case "acs_memcache":
+			return nil, noDiscoverySupportErr
+		case "acs_ocs":
+			return nil, noDiscoverySupportErr
+		case "acs_oss":
+			//oss is really complicated: it has its own response format
+			return nil, noDiscoverySupportErr
+
+			//As a possible solution we can
+			//mimic the request format supported by oss
+
+			//req := DescribeLOSSRequest{
+			//	RpcRequest: &requests.RpcRequest{},
+			//}
+			//req.InitWithApiInfo("oss", "2014-08-15", "DescribeDBInstances", "oss", "openAPI")
+		case "acs_vpc_eip":
+			dscReq[region] = vpc.CreateDescribeEipAddressesRequest()
+			responseRootKey = "EipAddresses"
+			responseObjectIDKey = "AllocationId"
+		case "acs_kvstore":
+			return nil, noDiscoverySupportErr
+		case "acs_mns_new":
+			return nil, noDiscoverySupportErr
+		case "acs_cdn":
+			//API replies are in their own format.
+			return nil, noDiscoverySupportErr
+		case "acs_polardb":
+			return nil, noDiscoverySupportErr
+		case "acs_gdb":
+			return nil, noDiscoverySupportErr
+		case "acs_ads":
+			return nil, noDiscoverySupportErr
+		case "acs_mongodb":
+			return nil, noDiscoverySupportErr
+		case "acs_express_connect":
+			return nil, noDiscoverySupportErr
+		case "acs_fc":
+			return nil, noDiscoverySupportErr
+		case "acs_nat_gateway":
+			return nil, noDiscoverySupportErr
+		case "acs_sls_dashboard":
+			return nil, noDiscoverySupportErr
+		case "acs_containerservice_dashboard":
+			return nil, noDiscoverySupportErr
+		case "acs_vpn":
+			return nil, noDiscoverySupportErr
+		case "acs_bandwidth_package":
+			return nil, noDiscoverySupportErr
+		case "acs_cen":
+			return nil, noDiscoverySupportErr
+		case "acs_ens":
+			return nil, noDiscoverySupportErr
+		case "acs_opensearch":
+			return nil, noDiscoverySupportErr
+		case "acs_scdn":
+			return nil, noDiscoverySupportErr
+		case "acs_drds":
+			return nil, noDiscoverySupportErr
+		case "acs_iot":
+			return nil, noDiscoverySupportErr
+		case "acs_directmail":
+			return nil, noDiscoverySupportErr
+		case "acs_elasticsearch":
+			return nil, noDiscoverySupportErr
+		case "acs_ess_dashboard":
+			return nil, noDiscoverySupportErr
+		case "acs_streamcompute":
+			return nil, noDiscoverySupportErr
+		case "acs_global_acceleration":
+			return nil, noDiscoverySupportErr
+		case "acs_hitsdb":
+			return nil, noDiscoverySupportErr
+		case "acs_kafka":
+			return nil, noDiscoverySupportErr
+		case "acs_openad":
+			return nil, noDiscoverySupportErr
+		case "acs_pcdn":
+			return nil, noDiscoverySupportErr
+		case "acs_dcdn":
+			return nil, noDiscoverySupportErr
+		case "acs_petadata":
+			return nil, noDiscoverySupportErr
+		case "acs_videolive":
+			return nil, noDiscoverySupportErr
+		case "acs_hybriddb":
+			return nil, noDiscoverySupportErr
+		case "acs_adb":
+			return nil, noDiscoverySupportErr
+		case "acs_mps":
+			return nil, noDiscoverySupportErr
+		case "acs_maxcompute_prepay":
+			return nil, noDiscoverySupportErr
+		case "acs_hdfs":
+			return nil, noDiscoverySupportErr
+		case "acs_ddh":
+			return nil, noDiscoverySupportErr
+		case "acs_hbr":
+			return nil, noDiscoverySupportErr
+		case "acs_hdr":
+			return nil, noDiscoverySupportErr
+		case "acs_cds":
+			return nil, noDiscoverySupportErr
+		default:
+			return nil, errors.Errorf("project %q is not recognized by discovery...", project)
+		}
+
+		cli[region], err = sdk.NewClientWithOptions(region, sdk.NewConfig(), credential)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if len(dscReq) == 0 || len(cli) == 0 {
+		return nil, errors.Errorf("Can't build discovery request for project: %q,\nregions: %v", project, regions)
+	}
+
+	return &discoveryTool{
+		req:                dscReq,
+		cli:                cli,
+		respRootKey:        responseRootKey,
+		respObjectIDKey:    responseObjectIDKey,
+		rateLimit:          rateLimit,
+		interval:           discoveryInterval,
+		reqDefaultPageSize: 20,
+		dataChan:           make(chan map[string]interface{}, 1),
+		lg:                 lg,
+	}, nil
+}
+
+func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (*parsedDResp, error) {
+	var (
+		fullOutput    = map[string]interface{}{}
+		data          []byte
+		foundDataItem bool
+		foundRootKey  bool
+		pdResp        = &parsedDResp{}
+	)
+
+	data = resp.GetHttpContentBytes()
+	if data == nil { //No data
+		return nil, errors.Errorf("No data in response to be parsed")
+	}
+
+	if err := json.Unmarshal(data, &fullOutput); err != nil {
+		return nil, errors.Errorf("Can't parse JSON from discovery response: %v", err)
+	}
+
+	for key, val := range fullOutput {
+		switch key {
+		case dt.respRootKey:
+			foundRootKey = true
+			rootKeyVal, ok := val.(map[string]interface{})
+			if !ok {
+				return nil, errors.Errorf("Content of root key %q is not an object: %v", key, val)
+			}
+
+			//It should contain the array with discovered data
+			for _, item := range rootKeyVal {
+				if pdResp.data, foundDataItem = item.([]interface{}); foundDataItem {
+					break
+				}
+			}
+			if !foundDataItem {
+				return nil, errors.Errorf("Didn't find array item in root key %q", key)
+			}
+		case "TotalCount", "TotalRecordCount":
+			pdResp.totalCount = int(val.(float64))
+		case "PageSize", "PageRecordCount":
+			pdResp.pageSize = int(val.(float64))
+		case "PageNumber":
+			pdResp.pageNumber = int(val.(float64))
+		}
+	}
+	if !foundRootKey {
+		return nil, errors.Errorf("Didn't find root key %q in discovery response", dt.respRootKey)
+	}
+
+	return pdResp, nil
+}
+
+func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.CommonRequest, lmtr chan bool) (map[string]interface{}, error) {
+	var (
+		err           error
+		resp          *responses.CommonResponse
+		pDResp        *parsedDResp
+		discoveryData []interface{}
+		totalCount    int
+		pageNumber    int
+	)
+	defer delete(req.QueryParams, "PageNumber")
+
+	for {
+		if lmtr != nil {
+			<-lmtr //Rate limiting
+		}
+
+		resp, err = cli.ProcessCommonRequest(req)
+		if err != nil {
+			return nil, err
+		}
+
+		pDResp, err = dt.parseDiscoveryResponse(resp)
+		if err != nil {
+			return nil, err
+		}
+		discoveryData = append(discoveryData, pDResp.data...)
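+		//Track paging state from the parsed response; the loop keeps requesting
+		//pages until the number of accumulated items reaches the TotalCount
+		//reported by the API.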
+		pageNumber = pDResp.pageNumber
+		totalCount = pDResp.totalCount
+
+		//Pagination
+		pageNumber++
+		req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber)
+
+		if len(discoveryData) == totalCount { //All data received
+			//Map data to the appropriate shape before returning
+			preparedData := map[string]interface{}{}
+
+			for _, raw := range discoveryData {
+				elem, ok := raw.(map[string]interface{})
+				if !ok {
+					return nil, errors.Errorf("can't parse input data element, not a map[string]interface{} type")
+				}
+				if objectID, ok := elem[dt.respObjectIDKey].(string); ok {
+					preparedData[objectID] = elem
+				}
+			}
+			return preparedData, nil
+		}
+	}
+}
+
+func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[string]interface{}, error) {
+	var (
+		data       map[string]interface{}
+		resultData = map[string]interface{}{}
+	)
+
+	for region, cli := range dt.cli {
+		//Building the common request, as the code below is the same no matter
+		//which aliyun object type (project) is used
+		dscReq, ok := dt.req[region]
+		if !ok {
+			return nil, errors.Errorf("Error building common discovery request: not a valid region %q", region)
+		}
+
+		rpcReq, err := getRPCReqFromDiscoveryRequest(dscReq)
+		if err != nil {
+			return nil, err
+		}
+
+		commonRequest := requests.NewCommonRequest()
+		commonRequest.Method = rpcReq.GetMethod()
+		commonRequest.Product = rpcReq.GetProduct()
+		commonRequest.Domain = rpcReq.GetDomain()
+		commonRequest.Version = rpcReq.GetVersion()
+		commonRequest.Scheme = rpcReq.GetScheme()
+		commonRequest.ApiName = rpcReq.GetActionName()
+		commonRequest.QueryParams = rpcReq.QueryParams
+		commonRequest.QueryParams["PageSize"] = strconv.Itoa(dt.reqDefaultPageSize)
+		commonRequest.TransToAcsRequest()
+
+		//Get discovery data using the common request
+		data, err = dt.getDiscoveryData(cli, commonRequest, lmtr)
+		if err != nil {
+			return nil, err
+		}
+
+		for k, v := range data {
+			resultData[k] = v
+		}
+	}
+	return resultData, nil
+}
+
+// start begins the discovery polling; if something new is found,
+// it is reported back through `dataChan`
+func (dt *discoveryTool) start() {
+	var (
+		err      error
+		data     map[string]interface{}
+		lastData map[string]interface{}
+	)
+
+	//Initializing the channel
+	dt.done = make(chan bool)
+
+	dt.wg.Add(1)
+	go func() {
+		defer dt.wg.Done()
+
+		ticker := time.NewTicker(dt.interval)
+		defer ticker.Stop()
+
+		lmtr := limiter.NewRateLimiter(dt.rateLimit, time.Second)
+		defer lmtr.Stop()
+
+		for {
+			select {
+			case <-dt.done:
+				return
+			case <-ticker.C:
+				data, err = dt.getDiscoveryDataAcrossRegions(lmtr.C)
+				if err != nil {
+					dt.lg.Errorf("Can't get discovery data: %v", err)
+					continue
+				}
+
+				if !reflect.DeepEqual(data, lastData) {
+					lastData = nil
+					lastData = map[string]interface{}{}
+					for k, v := range data {
+						lastData[k] = v
+					}
+
+					//send discovery data in blocking mode
+					dt.dataChan <- data
+				}
+			}
+		}
+	}()
+}
+
+// stop stops the discovery loop, making sure
+// all data is read from 'dataChan'
+func (dt *discoveryTool) stop() {
+	close(dt.done)
+
+	//Shutdown timer
+	timer := time.NewTimer(time.Second * 3)
+	defer timer.Stop()
+L:
+	for { //Unblock the goroutine by reading from dt.dataChan
+		select {
+		case <-timer.C:
+			break L
+		case <-dt.dataChan:
+		}
+	}
+
+	dt.wg.Wait()
+}
diff --git a/plugins/inputs/aliyuncms/sample.conf b/plugins/inputs/aliyuncms/sample.conf
new file mode 100644
index 0000000000000..640674ca6dc32
--- /dev/null
+++ b/plugins/inputs/aliyuncms/sample.conf
@@ -0,0 +1,100 @@
+# Pull Metric Statistics from Aliyun CMS
+[[inputs.aliyuncms]]
+  ## Aliyun Credentials
+  ## Credentials are loaded in the following order
+  ## 1) Ram RoleArn credential
+  ## 2) AccessKey STS token credential
+  ## 3) AccessKey credential
+  ## 4) Ecs Ram Role credential
+  ## 5) RSA keypair credential
+  ## 6) Environment variables credential
+  ## 7) Instance metadata credential
+
+  # access_key_id = ""
+  # access_key_secret = ""
+  # access_key_sts_token = ""
+  # role_arn = ""
+  # role_session_name = ""
+  # private_key = ""
+  # public_key_id = ""
+  # role_name = ""
+
+  ## Specify the ali cloud regions to be queried for metrics and objects discovery
+  ## If not set, all supported regions (see below) are covered. This can put a significant
+  ## load on the API, so the recommendation is to limit the list as much as possible.
+  ## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
+  ## Default supported regions are:
+  ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
+  ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
+  ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
+  ##
+  ## From the discovery perspective this sets the scope for object discovery; the discovered info
+  ## can be used to enrich the metrics with object attributes/tags. Discovery is not supported for
+  ## all projects (if not supported, it will be reported on start - for example for the 'acs_cdn'
+  ## project: 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"')
+  ## Currently, discovery is supported for the following projects:
+  ## - acs_ecs_dashboard
+  ## - acs_rds_dashboard
+  ## - acs_slb_dashboard
+  ## - acs_vpc_eip
+  regions = ["cn-hongkong"]
+
+  # The minimum period for AliyunCMS metrics is 1 minute (60s). However not all
+  # metrics are made available at the 1 minute period. Some are collected at
+  # 3 minute, 5 minute, or larger intervals.
diff --git a/plugins/inputs/aliyuncms/sample.conf b/plugins/inputs/aliyuncms/sample.conf
new file mode 100644
index 0000000000000..640674ca6dc32
--- /dev/null
+++ b/plugins/inputs/aliyuncms/sample.conf
@@ -0,0 +1,100 @@
+# Pull Metric Statistics from Aliyun CMS
+[[inputs.aliyuncms]]
+  ## Aliyun Credentials
+  ## Credentials are loaded in the following order
+  ## 1) Ram RoleArn credential
+  ## 2) AccessKey STS token credential
+  ## 3) AccessKey credential
+  ## 4) Ecs Ram Role credential
+  ## 5) RSA keypair credential
+  ## 6) Environment variables credential
+  ## 7) Instance metadata credential
+
+  # access_key_id = ""
+  # access_key_secret = ""
+  # access_key_sts_token = ""
+  # role_arn = ""
+  # role_session_name = ""
+  # private_key = ""
+  # public_key_id = ""
+  # role_name = ""
+
+  ## Specify the ali cloud regions to be queried for metrics and objects discovery.
+  ## If not set, all supported regions (see below) are covered. This can put significant load
+  ## on the API, so the recommendation is to limit the list as much as possible.
+  ## Allowed values: https://www.alibabacloud.com/help/zh/doc-detail/40654.htm
+  ## Default supported regions are:
+  ## 21 items: cn-qingdao,cn-beijing,cn-zhangjiakou,cn-huhehaote,cn-hangzhou,cn-shanghai,cn-shenzhen,
+  ## cn-heyuan,cn-chengdu,cn-hongkong,ap-southeast-1,ap-southeast-2,ap-southeast-3,ap-southeast-5,
+  ## ap-south-1,ap-northeast-1,us-west-1,us-east-1,eu-central-1,eu-west-1,me-east-1
+  ##
+  ## From a discovery perspective this sets the scope for object discovery; the discovered info
+  ## can be used to enrich the metrics with object attributes/tags. Discovery is not supported
+  ## for all projects (if not supported, it will be reported on start - for example for the 'acs_cdn' project:
+  ## 'E! [inputs.aliyuncms] Discovery tool is not activated: no discovery support for project "acs_cdn"' )
+  ## Currently, discovery is supported for the following projects:
+  ## - acs_ecs_dashboard
+  ## - acs_rds_dashboard
+  ## - acs_slb_dashboard
+  ## - acs_vpc_eip
+  regions = ["cn-hongkong"]
+
+  # The minimum period for AliyunCMS metrics is 1 minute (60s). However, not all
+  # metrics are made available at the 1 minute period. Some are collected at
+  # 3 minute, 5 minute, or larger intervals.
+  # See: https://help.aliyun.com/document_detail/51936.html?spm=a2c4g.11186623.2.18.2bc1750eeOw1Pv
+  # Note that if a period is configured that is smaller than the minimum for a
+  # particular metric, that metric will not be returned by the Aliyun OpenAPI
+  # and will not be collected by Telegraf.
+  #
+  ## Requested AliyunCMS aggregation Period (required - must be a multiple of 60s)
+  period = "5m"
+
+  ## Collection Delay (required - must account for metrics availability via AliyunCMS API)
+  delay = "1m"
+
+  ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
+  ## gaps or overlap in pulled data
+  interval = "5m"
+
+  ## Metric Statistic Project (required)
+  project = "acs_slb_dashboard"
+
+  ## Maximum requests per second, default value is 200
+  ratelimit = 200
+
+  ## How often the discovery API call is executed (default 1m)
+  #discovery_interval = "1m"
+
+  ## Metrics to Pull (Required)
+  [[inputs.aliyuncms.metrics]]
+    ## Metric names to be requested,
+    ## described here (per project): https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+    names = ["InstanceActiveConnection", "InstanceNewConnection"]
+
+    ## Dimension filters for the metric (optional).
+    ## This allows getting an additional metric dimension. If a dimension is not specified, it can be
+    ## returned or the data can be aggregated - it depends on the particular metric; you can find details
+    ## here: https://help.aliyun.com/document_detail/28619.html?spm=a2c4g.11186623.6.690.1938ad41wg8QSq
+    ##
+    ## Note that by default the dimension filter includes the list of discovered objects in scope (if discovery is enabled).
+    ## Values specified here will be added to the list of discovered objects.
+    ## You can specify either a single dimension:
+    #dimensions = '{"instanceId": "p-example"}'
+
+    ## Or you can specify several dimensions at once:
+    #dimensions = '[{"instanceId": "p-example"},{"instanceId": "q-example"}]'
+
+    ## Enrichment tags, can be added from discovery (if supported)
+    ## Notation is :
+    ## To figure out which fields are available, consult the Describe API per project.
+    ## For example, for SLB: https://api.aliyun.com/#/?product=Slb&version=2014-05-15&api=DescribeLoadBalancers&params={}&tab=MOCK&lang=GO
+    #tag_query_path = [
+    #    "address:Address",
+    #    "name:LoadBalancerName",
+    #    "cluster_owner:Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]"
+    #    ]
+    ## The following tags are added by default: regionId (if discovery enabled), userId, instanceId.
+
+    ## Allow metrics without discovery data, if discovery is enabled. If set to true, then metrics without
+    ## discovery data will be emitted, otherwise dropped. This can be of help when debugging dimension filters,
+    ## or when the discovery scope only partially covers the monitoring scope.
+    #allow_dps_without_discovery = false
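The `tag_query_path` entries above use JMESPath-style expressions (`[?TagKey=='...']`, `| [0]`). As a standalone illustration, and assuming evaluation via the `github.com/jmespath/go-jmespath` package (an assumption for this sketch, not something the config file pins down), the `cluster_owner` query would pull a tag value out of a discovered object like this:

```go
package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// A trimmed-down stand-in for a discovered SLB object.
	obj := map[string]interface{}{
		"Address":          "192.0.2.10",
		"LoadBalancerName": "lb-demo",
		"Tags": map[string]interface{}{
			"Tag": []interface{}{
				map[string]interface{}{"TagKey": "cs.cluster.name", "TagValue": "prod-cluster"},
			},
		},
	}

	// Filter the tag list by key, project the values, take the first match.
	v, err := jmespath.Search("Tags.Tag[?TagKey=='cs.cluster.name'].TagValue | [0]", obj)
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Println("cluster_owner =", v) // cluster_owner = prod-cluster
}
```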
diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go
index 862db4c681d47..698cd847ea44c 100644
--- a/plugins/inputs/all/all.go
+++ b/plugins/inputs/all/all.go
@@ -1,8 +1,11 @@
 package all
 
 import (
+	//Blank imports for plugins to register themselves
 	_ "github.com/influxdata/telegraf/plugins/inputs/activemq"
 	_ "github.com/influxdata/telegraf/plugins/inputs/aerospike"
+	_ "github.com/influxdata/telegraf/plugins/inputs/aliyuncms"
+	_ "github.com/influxdata/telegraf/plugins/inputs/amd_rocm_smi"
 	_ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/apache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/apcupsd"
@@ -10,6 +13,7 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bcache"
 	_ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd"
+	_ "github.com/influxdata/telegraf/plugins/inputs/beat"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bind"
 	_ "github.com/influxdata/telegraf/plugins/inputs/bond"
 	_ "github.com/influxdata/telegraf/plugins/inputs/burrow"
@@ -22,12 +26,16 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch"
+	_ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch_metric_streams"
 	_ "github.com/influxdata/telegraf/plugins/inputs/conntrack"
 	_ "github.com/influxdata/telegraf/plugins/inputs/consul"
+	_ "github.com/influxdata/telegraf/plugins/inputs/consul_agent"
 	_ "github.com/influxdata/telegraf/plugins/inputs/couchbase"
 	_ "github.com/influxdata/telegraf/plugins/inputs/couchdb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/cpu"
+	_ "github.com/influxdata/telegraf/plugins/inputs/csgo"
 	_ "github.com/influxdata/telegraf/plugins/inputs/dcos"
+	_ "github.com/influxdata/telegraf/plugins/inputs/directory_monitor"
 	_ "github.com/influxdata/telegraf/plugins/inputs/disk"
 	_ "github.com/influxdata/telegraf/plugins/inputs/diskio"
 	_ "github.com/influxdata/telegraf/plugins/inputs/disque"
@@ -36,8 +44,10 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/docker"
 	_ "github.com/influxdata/telegraf/plugins/inputs/docker_log"
 	_ "github.com/influxdata/telegraf/plugins/inputs/dovecot"
+	_ "github.com/influxdata/telegraf/plugins/inputs/dpdk"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ecs"
 	_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch"
+	_ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch_query"
 	_ "github.com/influxdata/telegraf/plugins/inputs/ethtool"
 	_ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer"
 	_ "github.com/influxdata/telegraf/plugins/inputs/exec"
@@ -58,13 +68,17 @@ import (
 	_ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2"
 	_ "github.com/influxdata/telegraf/plugins/inputs/http_response"
 	_ "github.com/influxdata/telegraf/plugins/inputs/httpjson"
+	_ "github.com/influxdata/telegraf/plugins/inputs/hugepages"
 	_ "github.com/influxdata/telegraf/plugins/inputs/icinga2"
 	_ "github.com/influxdata/telegraf/plugins/inputs/infiniband"
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb"
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener"
 	_ "github.com/influxdata/telegraf/plugins/inputs/influxdb_v2_listener"
+	_
"github.com/influxdata/telegraf/plugins/inputs/intel_pmu" + _ "github.com/influxdata/telegraf/plugins/inputs/intel_powerstat" _ "github.com/influxdata/telegraf/plugins/inputs/intel_rdt" _ "github.com/influxdata/telegraf/plugins/inputs/internal" + _ "github.com/influxdata/telegraf/plugins/inputs/internet_speed" _ "github.com/influxdata/telegraf/plugins/inputs/interrupts" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/ipset" @@ -81,6 +95,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" _ "github.com/influxdata/telegraf/plugins/inputs/kibana" _ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/knx_listener" _ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" _ "github.com/influxdata/telegraf/plugins/inputs/lanz" @@ -89,13 +104,16 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/logparser" _ "github.com/influxdata/telegraf/plugins/inputs/logstash" _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" + _ "github.com/influxdata/telegraf/plugins/inputs/lvm" _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" _ "github.com/influxdata/telegraf/plugins/inputs/marklogic" _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" + _ "github.com/influxdata/telegraf/plugins/inputs/mdstat" _ "github.com/influxdata/telegraf/plugins/inputs/mem" _ "github.com/influxdata/telegraf/plugins/inputs/memcached" _ "github.com/influxdata/telegraf/plugins/inputs/mesos" _ "github.com/influxdata/telegraf/plugins/inputs/minecraft" + _ "github.com/influxdata/telegraf/plugins/inputs/mock" _ "github.com/influxdata/telegraf/plugins/inputs/modbus" _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" _ "github.com/influxdata/telegraf/plugins/inputs/monit" @@ -107,12 +125,15 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/neptune_apex" _ "github.com/influxdata/telegraf/plugins/inputs/net" _ "github.com/influxdata/telegraf/plugins/inputs/net_response" + _ "github.com/influxdata/telegraf/plugins/inputs/netstat" + _ "github.com/influxdata/telegraf/plugins/inputs/nfsclient" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_sts" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts" + _ "github.com/influxdata/telegraf/plugins/inputs/nomad" _ "github.com/influxdata/telegraf/plugins/inputs/nsd" _ "github.com/influxdata/telegraf/plugins/inputs/nsq" _ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer" @@ -123,6 +144,8 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/openldap" _ "github.com/influxdata/telegraf/plugins/inputs/openntpd" _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" + _ "github.com/influxdata/telegraf/plugins/inputs/openstack" + _ "github.com/influxdata/telegraf/plugins/inputs/opentelemetry" _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" _ "github.com/influxdata/telegraf/plugins/inputs/pf" @@ -142,20 +165,26 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/rabbitmq" _ "github.com/influxdata/telegraf/plugins/inputs/raindrops" _ 
"github.com/influxdata/telegraf/plugins/inputs/ras" + _ "github.com/influxdata/telegraf/plugins/inputs/ravendb" _ "github.com/influxdata/telegraf/plugins/inputs/redfish" _ "github.com/influxdata/telegraf/plugins/inputs/redis" + _ "github.com/influxdata/telegraf/plugins/inputs/redis_sentinel" _ "github.com/influxdata/telegraf/plugins/inputs/rethinkdb" _ "github.com/influxdata/telegraf/plugins/inputs/riak" + _ "github.com/influxdata/telegraf/plugins/inputs/riemann_listener" _ "github.com/influxdata/telegraf/plugins/inputs/salesforce" _ "github.com/influxdata/telegraf/plugins/inputs/sensors" _ "github.com/influxdata/telegraf/plugins/inputs/sflow" _ "github.com/influxdata/telegraf/plugins/inputs/sflow_a10" + _ "github.com/influxdata/telegraf/plugins/inputs/slab" _ "github.com/influxdata/telegraf/plugins/inputs/smart" _ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/socketstat" _ "github.com/influxdata/telegraf/plugins/inputs/solr" + _ "github.com/influxdata/telegraf/plugins/inputs/sql" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" _ "github.com/influxdata/telegraf/plugins/inputs/stackdriver" _ "github.com/influxdata/telegraf/plugins/inputs/statsd" @@ -178,6 +207,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/unbound" _ "github.com/influxdata/telegraf/plugins/inputs/uwsgi" _ "github.com/influxdata/telegraf/plugins/inputs/varnish" + _ "github.com/influxdata/telegraf/plugins/inputs/vault" _ "github.com/influxdata/telegraf/plugins/inputs/vsphere" _ "github.com/influxdata/telegraf/plugins/inputs/webhooks" _ "github.com/influxdata/telegraf/plugins/inputs/win_eventlog" @@ -186,6 +216,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/wireguard" _ "github.com/influxdata/telegraf/plugins/inputs/wireless" _ "github.com/influxdata/telegraf/plugins/inputs/x509_cert" + _ "github.com/influxdata/telegraf/plugins/inputs/xtremio" _ "github.com/influxdata/telegraf/plugins/inputs/zfs" _ "github.com/influxdata/telegraf/plugins/inputs/zipkin" _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" diff --git a/plugins/inputs/amd_rocm_smi/README.md b/plugins/inputs/amd_rocm_smi/README.md new file mode 100644 index 0000000000000..db800713d9a51 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/README.md @@ -0,0 +1,74 @@ +# AMD ROCm System Management Interface (SMI) Input Plugin + +This plugin uses a query on the [`rocm-smi`][1] binary to pull GPU stats +including memory and GPU usage, temperatures and other. 
+
+[1]: https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/master/python_smi_tools
+
+## Configuration
+
+```toml @sample.conf
+# Query statistics from AMD Graphics cards using rocm-smi binary
+[[inputs.amd_rocm_smi]]
+  ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath
+  # bin_path = "/opt/rocm/bin/rocm-smi"
+
+  ## Optional: timeout for GPU polling
+  # timeout = "5s"
+```
+
+## Metrics
+
+- measurement: `amd_rocm_smi`
+  - tags
+    - `name` (entry name assigned by rocm-smi executable)
+    - `gpu_id` (id of the GPU according to rocm-smi)
+    - `gpu_unique_id` (unique id of the GPU)
+
+  - fields
+    - `driver_version` (integer)
+    - `fan_speed` (integer)
+    - `memory_total` (integer, B)
+    - `memory_used` (integer, B)
+    - `memory_free` (integer, B)
+    - `temperature_sensor_edge` (float, Celsius)
+    - `temperature_sensor_junction` (float, Celsius)
+    - `temperature_sensor_memory` (float, Celsius)
+    - `utilization_gpu` (integer, percentage)
+    - `utilization_memory` (integer, percentage)
+    - `clocks_current_sm` (integer, MHz)
+    - `clocks_current_memory` (integer, MHz)
+    - `power_draw` (float, Watt)
+
+## Troubleshooting
+
+Check the full output by running the `rocm-smi` binary manually.
+
+Linux:
+
+```sh
+rocm-smi -o -l -m -M -g -c -t -u -i -f -p -P -s -S -v --showreplaycount --showpids --showdriverversion --showmemvendor --showfwinfo --showproductname --showserial --showuniqueid --showbus --showpendingpages --showpagesinfo --showretiredpages --showunreservablepages --showmemuse --showvoltage --showtopo --showtopoweight --showtopohops --showtopotype --showtoponuma --showmeminfo all --json
+```
+
+Please include the output of this command if opening a GitHub issue, together
+with the ROCm version.
+
+## Example Output
+
+```shell
+amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=28,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572551000000000
+amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=30,temperature_sensor_memory=91,utilization_gpu=0i 1630572701000000000
+amd_rocm_smi,gpu_id=0x6861,gpu_unique_id=0x2150e7d042a1124,host=ali47xl,name=card0 clocks_current_memory=167i,clocks_current_sm=852i,driver_version=51114i,fan_speed=14i,memory_free=17145282560i,memory_total=17163091968i,memory_used=17809408i,power_draw=7,temperature_sensor_edge=29,temperature_sensor_junction=29,temperature_sensor_memory=92,utilization_gpu=0i 1630572749000000000
+```
+
+## Limitations and notices
+
+Please note that this plugin has been developed and tested on a limited number
+of ROCm versions and a small set of GPUs. Currently the latest ROCm version
+tested is 4.3.0. Note that depending on the device and driver versions, the
+amount of information provided by `rocm-smi` can vary, so some fields may
+start or stop appearing in the metrics after an update. The `rocm-smi` JSON
+output is not perfectly homogeneous and may change in the future, hence
+parsing and unmarshaling can start failing after a ROCm update.
+
+Inspired by the current state of the `nvidia-smi` plugin.
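The plugin shells out to `rocm-smi` and decodes its JSON, as the source file below shows. For experimenting outside Telegraf, here is a minimal standalone sketch of the same idea; the reduced flag set and the flat string-to-string JSON shape are assumptions based on the plugin's test data, not a guaranteed `rocm-smi` contract.

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// A reduced flag set for illustration; the plugin passes many more
	// "--show..." options to collect every field it reports.
	out, err := exec.CommandContext(ctx, "/opt/rocm/bin/rocm-smi",
		"-u", "-t", "--showmeminfo", "vram", "--json").Output()
	if err != nil {
		fmt.Println("rocm-smi failed:", err)
		return
	}

	// In the plugin's test data, rocm-smi emits a map of entries
	// ("card0", "card1", "system", ...), each mapping string keys to
	// string values.
	var entries map[string]map[string]string
	if err := json.Unmarshal(out, &entries); err != nil {
		fmt.Println("cannot parse JSON:", err)
		return
	}
	for name, kv := range entries {
		// "system" entries lack this key and print an empty value.
		fmt.Println(name, "->", kv["GPU use (%)"])
	}
}
```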
diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go new file mode 100644 index 0000000000000..53157d31dbcee --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi.go @@ -0,0 +1,286 @@ +//go:generate ../../../tools/readme_config_includer/generator +package amd_rocm_smi + +import ( + _ "embed" + "encoding/json" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +const measurement = "amd_rocm_smi" + +type ROCmSMI struct { + BinPath string + Timeout config.Duration +} + +func (*ROCmSMI) SampleConfig() string { + return sampleConfig +} + +// Gather implements the telegraf interface +func (rsmi *ROCmSMI) Gather(acc telegraf.Accumulator) error { + if _, err := os.Stat(rsmi.BinPath); os.IsNotExist(err) { + return fmt.Errorf("rocm-smi binary not found in path %s, cannot query GPUs statistics", rsmi.BinPath) + } + + data, err := rsmi.pollROCmSMI() + if err != nil { + return err + } + + err = gatherROCmSMI(data, acc) + if err != nil { + return err + } + + return nil +} + +func init() { + inputs.Add("amd_rocm_smi", func() telegraf.Input { + return &ROCmSMI{ + BinPath: "/opt/rocm/bin/rocm-smi", + Timeout: config.Duration(5 * time.Second), + } + }) +} + +func (rsmi *ROCmSMI) pollROCmSMI() ([]byte, error) { + // Construct and execute metrics query, there currently exist (ROCm v4.3.x) a "-a" option + // that does not provide all the information, so each needed parameter is set manually + cmd := exec.Command(rsmi.BinPath, + "-o", + "-l", + "-m", + "-M", + "-g", + "-c", + "-t", + "-u", + "-i", + "-f", + "-p", + "-P", + "-s", + "-S", + "-v", + "--showreplaycount", + "--showpids", + "--showdriverversion", + "--showmemvendor", + "--showfwinfo", + "--showproductname", + "--showserial", + "--showuniqueid", + "--showbus", + "--showpendingpages", + "--showpagesinfo", + "--showmeminfo", + "all", + "--showretiredpages", + "--showunreservablepages", + "--showmemuse", + "--showvoltage", + "--showtopo", + "--showtopoweight", + "--showtopohops", + "--showtopotype", + "--showtoponuma", + "--json") + + ret, _ := internal.StdOutputTimeout(cmd, + time.Duration(rsmi.Timeout)) + return ret, nil +} + +func gatherROCmSMI(ret []byte, acc telegraf.Accumulator) error { + var gpus map[string]GPU + var sys map[string]sysInfo + + err1 := json.Unmarshal(ret, &gpus) + if err1 != nil { + return err1 + } + + err2 := json.Unmarshal(ret, &sys) + if err2 != nil { + return err2 + } + + metrics := genTagsFields(gpus, sys) + for _, metric := range metrics { + acc.AddFields(measurement, metric.fields, metric.tags) + } + + return nil +} + +type metric struct { + tags map[string]string + fields map[string]interface{} +} + +func genTagsFields(gpus map[string]GPU, system map[string]sysInfo) []metric { + metrics := []metric{} + for cardID, payload := range gpus { + if strings.Contains(cardID, "card") { + tags := map[string]string{ + "name": cardID, + } + fields := map[string]interface{}{} + + totVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalMemory, 10, 64) + usdVRAM, _ := strconv.ParseInt(payload.GpuVRAMTotalUsedMemory, 10, 64) + strFree := strconv.FormatInt(totVRAM-usdVRAM, 10) + + setTagIfUsed(tags, "gpu_id", payload.GpuID) + setTagIfUsed(tags, "gpu_unique_id", 
payload.GpuUniqueID) + + setIfUsed("int", fields, "driver_version", strings.ReplaceAll(system["system"].DriverVersion, ".", "")) + setIfUsed("int", fields, "fan_speed", payload.GpuFanSpeedPercentage) + setIfUsed("int64", fields, "memory_total", payload.GpuVRAMTotalMemory) + setIfUsed("int64", fields, "memory_used", payload.GpuVRAMTotalUsedMemory) + setIfUsed("int64", fields, "memory_free", strFree) + setIfUsed("float", fields, "temperature_sensor_edge", payload.GpuTemperatureSensorEdge) + setIfUsed("float", fields, "temperature_sensor_junction", payload.GpuTemperatureSensorJunction) + setIfUsed("float", fields, "temperature_sensor_memory", payload.GpuTemperatureSensorMemory) + setIfUsed("int", fields, "utilization_gpu", payload.GpuUsePercentage) + setIfUsed("int", fields, "utilization_memory", payload.GpuMemoryUsePercentage) + setIfUsed("int", fields, "clocks_current_sm", strings.Trim(payload.GpuSclkClockSpeed, "(Mhz)")) + setIfUsed("int", fields, "clocks_current_memory", strings.Trim(payload.GpuMclkClockSpeed, "(Mhz)")) + setIfUsed("float", fields, "power_draw", payload.GpuAveragePower) + + metrics = append(metrics, metric{tags, fields}) + } + } + return metrics +} + +func setTagIfUsed(m map[string]string, k, v string) { + if v != "" { + m[k] = v + } +} + +func setIfUsed(t string, m map[string]interface{}, k, v string) { + vals := strings.Fields(v) + if len(vals) < 1 { + return + } + + val := vals[0] + + switch t { + case "float": + if val != "" { + f, err := strconv.ParseFloat(val, 64) + if err == nil { + m[k] = f + } + } + case "int": + if val != "" { + i, err := strconv.Atoi(val) + if err == nil { + m[k] = i + } + } + case "int64": + if val != "" { + i, err := strconv.ParseInt(val, 10, 64) + if err == nil { + m[k] = i + } + } + case "str": + if val != "" { + m[k] = val + } + } +} + +type sysInfo struct { + DriverVersion string `json:"Driver version"` +} + +type GPU struct { + GpuID string `json:"GPU ID"` + GpuUniqueID string `json:"Unique ID"` + GpuVBIOSVersion string `json:"VBIOS version"` + GpuTemperatureSensorEdge string `json:"Temperature (Sensor edge) (C)"` + GpuTemperatureSensorJunction string `json:"Temperature (Sensor junction) (C)"` + GpuTemperatureSensorMemory string `json:"Temperature (Sensor memory) (C)"` + GpuDcefClkClockSpeed string `json:"dcefclk clock speed"` + GpuDcefClkClockLevel string `json:"dcefclk clock level"` + GpuFclkClockSpeed string `json:"fclk clock speed"` + GpuFclkClockLevel string `json:"fclk clock level"` + GpuMclkClockSpeed string `json:"mclk clock speed:"` + GpuMclkClockLevel string `json:"mclk clock level:"` + GpuSclkClockSpeed string `json:"sclk clock speed:"` + GpuSclkClockLevel string `json:"sclk clock level:"` + GpuSocclkClockSpeed string `json:"socclk clock speed"` + GpuSocclkClockLevel string `json:"socclk clock level"` + GpuPcieClock string `json:"pcie clock level"` + GpuFanSpeedLevel string `json:"Fan speed (level)"` + GpuFanSpeedPercentage string `json:"Fan speed (%)"` + GpuFanRPM string `json:"Fan RPM"` + GpuPerformanceLevel string `json:"Performance Level"` + GpuOverdrive string `json:"GPU OverDrive value (%)"` + GpuMaxPower string `json:"Max Graphics Package Power (W)"` + GpuAveragePower string `json:"Average Graphics Package Power (W)"` + GpuUsePercentage string `json:"GPU use (%)"` + GpuMemoryUsePercentage string `json:"GPU memory use (%)"` + GpuMemoryVendor string `json:"GPU memory vendor"` + GpuPCIeReplay string `json:"PCIe Replay Count"` + GpuSerialNumber string `json:"Serial Number"` + GpuVoltagemV string `json:"Voltage (mV)"` + 
GpuPCIBus string `json:"PCI Bus"` + GpuASDDirmware string `json:"ASD firmware version"` + GpuCEFirmware string `json:"CE firmware version"` + GpuDMCUFirmware string `json:"DMCU firmware version"` + GpuMCFirmware string `json:"MC firmware version"` + GpuMEFirmware string `json:"ME firmware version"` + GpuMECFirmware string `json:"MEC firmware version"` + GpuMEC2Firmware string `json:"MEC2 firmware version"` + GpuPFPFirmware string `json:"PFP firmware version"` + GpuRLCFirmware string `json:"RLC firmware version"` + GpuRLCSRLC string `json:"RLC SRLC firmware version"` + GpuRLCSRLG string `json:"RLC SRLG firmware version"` + GpuRLCSRLS string `json:"RLC SRLS firmware version"` + GpuSDMAFirmware string `json:"SDMA firmware version"` + GpuSDMA2Firmware string `json:"SDMA2 firmware version"` + GpuSMCFirmware string `json:"SMC firmware version"` + GpuSOSFirmware string `json:"SOS firmware version"` + GpuTARAS string `json:"TA RAS firmware version"` + GpuTAXGMI string `json:"TA XGMI firmware version"` + GpuUVDFirmware string `json:"UVD firmware version"` + GpuVCEFirmware string `json:"VCE firmware version"` + GpuVCNFirmware string `json:"VCN firmware version"` + GpuCardSeries string `json:"Card series"` + GpuCardModel string `json:"Card model"` + GpuCardVendor string `json:"Card vendor"` + GpuCardSKU string `json:"Card SKU"` + GpuNUMANode string `json:"(Topology) Numa Node"` + GpuNUMAAffinity string `json:"(Topology) Numa Affinity"` + GpuVisVRAMTotalMemory string `json:"VIS_VRAM Total Memory (B)"` + GpuVisVRAMTotalUsedMemory string `json:"VIS_VRAM Total Used Memory (B)"` + GpuVRAMTotalMemory string `json:"VRAM Total Memory (B)"` + GpuVRAMTotalUsedMemory string `json:"VRAM Total Used Memory (B)"` + GpuGTTTotalMemory string `json:"GTT Total Memory (B)"` + GpuGTTTotalUsedMemory string `json:"GTT Total Used Memory (B)"` +} diff --git a/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go new file mode 100644 index 0000000000000..e38e0ff89eae0 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/amd_rocm_smi_test.go @@ -0,0 +1,90 @@ +package amd_rocm_smi + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGatherValidJSON(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "Vega 10 XT", + filename: "vega-10-XT.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "amd_rocm_smi", + map[string]string{ + "gpu_id": "0x6861", + "gpu_unique_id": "0x2150e7d042a1124", + "name": "card0", + }, + map[string]interface{}{ + "driver_version": 5925, + "fan_speed": 13, + "memory_total": int64(17163091968), + "memory_used": int64(17776640), + "memory_free": int64(17145315328), + "temperature_sensor_edge": 39.0, + "temperature_sensor_junction": 40.0, + "temperature_sensor_memory": 92.0, + "utilization_gpu": 0, + "clocks_current_sm": 1269, + "clocks_current_memory": 167, + "power_draw": 15.0, + }, + time.Unix(0, 0)), + }, + }, + { + name: "Vega 20 WKS GL-XE [Radeon Pro VII]", + filename: "vega-20-WKS-GL-XE.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "amd_rocm_smi", + map[string]string{ + "gpu_id": "0x66a1", + "gpu_unique_id": "0x2f048617326b1ea", + "name": "card0", + }, + map[string]interface{}{ + "driver_version": 5917, + "fan_speed": 0, + "memory_total": int64(34342961152), + "memory_used": int64(10850304), + "memory_free": int64(34332110848), + 
"temperature_sensor_edge": 36.0, + "temperature_sensor_junction": 38.0, + "temperature_sensor_memory": 35.0, + "utilization_gpu": 0, + "utilization_memory": 0, + "clocks_current_sm": 1725, + "clocks_current_memory": 1000, + "power_draw": 26.0, + }, + time.Unix(0, 0)), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) + + err = gatherROCmSMI(octets, &acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/amd_rocm_smi/sample.conf b/plugins/inputs/amd_rocm_smi/sample.conf new file mode 100644 index 0000000000000..aed15aae966c3 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/sample.conf @@ -0,0 +1,7 @@ +# Query statistics from AMD Graphics cards using rocm-smi binary +[[inputs.amd_rocm_smi]] + ## Optional: path to rocm-smi binary, defaults to $PATH via exec.LookPath + # bin_path = "/opt/rocm/bin/rocm-smi" + + ## Optional: timeout for GPU polling + # timeout = "5s" diff --git a/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json b/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json new file mode 100644 index 0000000000000..c4d51f5253a51 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/testdata/vega-10-XT.json @@ -0,0 +1,77 @@ +{ + "card0": { + "GPU ID": "0x6861", + "Unique ID": "0x2150e7d042a1124", + "VBIOS version": "113-D0510100-106", + "Temperature (Sensor edge) (C)": "39.0", + "Temperature (Sensor junction) (C)": "40.0", + "Temperature (Sensor memory) (C)": "92.0", + "dcefclk clock speed:": "(600Mhz)", + "dcefclk clock level:": "0", + "mclk clock speed:": "(167Mhz)", + "mclk clock level:": "0", + "sclk clock speed:": "(1269Mhz)", + "sclk clock level:": "3", + "socclk clock speed:": "(960Mhz)", + "socclk clock level:": "3", + "pcie clock level": "1 (8.0GT/s x16)", + "sclk clock level": "3 (1269Mhz)", + "Fan speed (level)": "33", + "Fan speed (%)": "13", + "Fan RPM": "682", + "Performance Level": "auto", + "GPU OverDrive value (%)": "0", + "GPU Memory OverDrive value (%)": "0", + "Max Graphics Package Power (W)": "170.0", + "Average Graphics Package Power (W)": "15.0", + "0": "8.0GT/s x16", + "1": "8.0GT/s x16 *", + "2": "847Mhz", + "3": "960Mhz *", + "4": "1028Mhz", + "5": "1107Mhz", + "6": "1440Mhz", + "7": "1500Mhz", + "GPU use (%)": "0", + "GPU memory vendor": "samsung", + "PCIe Replay Count": "0", + "Serial Number": "N/A", + "Voltage (mV)": "906", + "PCI Bus": "0000:04:00.0", + "VRAM Total Memory (B)": "17163091968", + "VRAM Total Used Memory (B)": "17776640", + "VIS_VRAM Total Memory (B)": "268435456", + "VIS_VRAM Total Used Memory (B)": "13557760", + "GTT Total Memory (B)": "17163091968", + "GTT Total Used Memory (B)": "25608192", + "ASD firmware version": "553648152", + "CE firmware version": "79", + "DMCU firmware version": "0", + "MC firmware version": "0", + "ME firmware version": "163", + "MEC firmware version": "432", + "MEC2 firmware version": "432", + "PFP firmware version": "186", + "RLC firmware version": "93", + "RLC SRLC firmware version": "0", + "RLC SRLG firmware version": "0", + "RLC SRLS firmware version": "0", + "SDMA firmware version": "430", + "SDMA2 firmware version": "430", + "SMC firmware version": "00.28.54.00", + "SOS firmware version": "0x0008015d", + "TA RAS firmware version": "00.00.00.00", + "TA XGMI firmware version": "00.00.00.00", + "UVD firmware version": "0x422b1100", + "VCE firmware 
version": "0x39060400", + "VCN firmware version": "0x00000000", + "Card model": "0xc1e", + "Card vendor": "Advanced Micro Devices, Inc. [AMD/ATI]", + "Card SKU": "D05101", + "(Topology) Numa Node": "0", + "(Topology) Numa Affinity": "0" + }, + "system": { + "Driver version": "5.9.25" + } +} \ No newline at end of file diff --git a/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json b/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json new file mode 100644 index 0000000000000..771565a607bd5 --- /dev/null +++ b/plugins/inputs/amd_rocm_smi/testdata/vega-20-WKS-GL-XE.json @@ -0,0 +1,165 @@ +{ + "card0": { + "GPU ID": "0x66a1", + "Unique ID": "0x2f048617326b1ea", + "VBIOS version": "113-D1631700-111", + "Temperature (Sensor edge) (C)": "36.0", + "Temperature (Sensor junction) (C)": "38.0", + "Temperature (Sensor memory) (C)": "35.0", + "dcefclk clock speed:": "(357Mhz)", + "dcefclk clock level:": "0", + "fclk clock speed:": "(1080Mhz)", + "fclk clock level:": "6", + "mclk clock speed:": "(1000Mhz)", + "mclk clock level:": "2", + "sclk clock speed:": "(1725Mhz)", + "sclk clock level:": "8", + "socclk clock speed:": "(971Mhz)", + "socclk clock level:": "7", + "pcie clock level": "1 (16.0GT/s x16)", + "sclk clock level": "8 (1725Mhz)", + "Fan speed (level)": "0", + "Fan speed (%)": "0", + "Fan RPM": "0", + "Performance Level": "high", + "GPU OverDrive value (%)": "0", + "Max Graphics Package Power (W)": "225.0", + "Average Graphics Package Power (W)": "26.0", + "0": "2.5GT/s x16", + "1": "16.0GT/s x16 *", + "2": "566Mhz", + "3": "618Mhz", + "4": "680Mhz", + "5": "755Mhz", + "6": "850Mhz", + "7": "971Mhz *", + "8": "1725Mhz *", + "GPU use (%)": "0", + "GPU memory use (%)": "0", + "GPU memory vendor": "samsung", + "PCIe Replay Count": "0", + "Serial Number": "692024000810", + "Voltage (mV)": "1000", + "PCI Bus": "0000:63:00.0", + "VRAM Total Memory (B)": "34342961152", + "VRAM Total Used Memory (B)": "10850304", + "VIS_VRAM Total Memory (B)": "34342961152", + "VIS_VRAM Total Used Memory (B)": "10850304", + "GTT Total Memory (B)": "54974742528", + "GTT Total Used Memory (B)": "11591680", + "ASD firmware version": "553648199", + "CE firmware version": "79", + "DMCU firmware version": "0", + "MC firmware version": "0", + "ME firmware version": "164", + "MEC firmware version": "448", + "MEC2 firmware version": "448", + "PFP firmware version": "188", + "RLC firmware version": "50", + "RLC SRLC firmware version": "1", + "RLC SRLG firmware version": "1", + "RLC SRLS firmware version": "1", + "SDMA firmware version": "144", + "SDMA2 firmware version": "144", + "SMC firmware version": "00.40.59.00", + "SOS firmware version": "0x00080b67", + "TA RAS firmware version": "27.00.01.36", + "TA XGMI firmware version": "32.00.00.02", + "UVD firmware version": "0x42002b13", + "VCE firmware version": "0x39060400", + "VCN firmware version": "0x00000000", + "Card series": "Radeon Instinct MI50 32GB", + "Card model": "0x834", + "Card vendor": "Advanced Micro Devices, Inc. 
[AMD/ATI]", + "Card SKU": "D16317", + "(Topology) Numa Node": "0", + "(Topology) Numa Affinity": "0" + }, + "system": { + "Driver version": "5.9.17", + "(Topology) Weight between DRM devices 0 and 1": "40", + "(Topology) Weight between DRM devices 0 and 2": "40", + "(Topology) Weight between DRM devices 0 and 3": "40", + "(Topology) Weight between DRM devices 0 and 4": "72", + "(Topology) Weight between DRM devices 0 and 5": "72", + "(Topology) Weight between DRM devices 0 and 6": "72", + "(Topology) Weight between DRM devices 0 and 7": "72", + "(Topology) Weight between DRM devices 1 and 2": "40", + "(Topology) Weight between DRM devices 1 and 3": "40", + "(Topology) Weight between DRM devices 1 and 4": "72", + "(Topology) Weight between DRM devices 1 and 5": "72", + "(Topology) Weight between DRM devices 1 and 6": "72", + "(Topology) Weight between DRM devices 1 and 7": "72", + "(Topology) Weight between DRM devices 2 and 3": "40", + "(Topology) Weight between DRM devices 2 and 4": "72", + "(Topology) Weight between DRM devices 2 and 5": "72", + "(Topology) Weight between DRM devices 2 and 6": "72", + "(Topology) Weight between DRM devices 2 and 7": "72", + "(Topology) Weight between DRM devices 3 and 4": "72", + "(Topology) Weight between DRM devices 3 and 5": "72", + "(Topology) Weight between DRM devices 3 and 6": "72", + "(Topology) Weight between DRM devices 3 and 7": "72", + "(Topology) Weight between DRM devices 4 and 5": "40", + "(Topology) Weight between DRM devices 4 and 6": "40", + "(Topology) Weight between DRM devices 4 and 7": "40", + "(Topology) Weight between DRM devices 5 and 6": "40", + "(Topology) Weight between DRM devices 5 and 7": "40", + "(Topology) Weight between DRM devices 6 and 7": "40", + "(Topology) Hops between DRM devices 0 and 1": "2", + "(Topology) Hops between DRM devices 0 and 2": "2", + "(Topology) Hops between DRM devices 0 and 3": "2", + "(Topology) Hops between DRM devices 0 and 4": "3", + "(Topology) Hops between DRM devices 0 and 5": "3", + "(Topology) Hops between DRM devices 0 and 6": "3", + "(Topology) Hops between DRM devices 0 and 7": "3", + "(Topology) Hops between DRM devices 1 and 2": "2", + "(Topology) Hops between DRM devices 1 and 3": "2", + "(Topology) Hops between DRM devices 1 and 4": "3", + "(Topology) Hops between DRM devices 1 and 5": "3", + "(Topology) Hops between DRM devices 1 and 6": "3", + "(Topology) Hops between DRM devices 1 and 7": "3", + "(Topology) Hops between DRM devices 2 and 3": "2", + "(Topology) Hops between DRM devices 2 and 4": "3", + "(Topology) Hops between DRM devices 2 and 5": "3", + "(Topology) Hops between DRM devices 2 and 6": "3", + "(Topology) Hops between DRM devices 2 and 7": "3", + "(Topology) Hops between DRM devices 3 and 4": "3", + "(Topology) Hops between DRM devices 3 and 5": "3", + "(Topology) Hops between DRM devices 3 and 6": "3", + "(Topology) Hops between DRM devices 3 and 7": "3", + "(Topology) Hops between DRM devices 4 and 5": "2", + "(Topology) Hops between DRM devices 4 and 6": "2", + "(Topology) Hops between DRM devices 4 and 7": "2", + "(Topology) Hops between DRM devices 5 and 6": "2", + "(Topology) Hops between DRM devices 5 and 7": "2", + "(Topology) Hops between DRM devices 6 and 7": "2", + "(Topology) Link type between DRM devices 0 and 1": "PCIE", + "(Topology) Link type between DRM devices 0 and 2": "PCIE", + "(Topology) Link type between DRM devices 0 and 3": "PCIE", + "(Topology) Link type between DRM devices 0 and 4": "PCIE", + "(Topology) Link type between DRM devices 0 
and 5": "PCIE", + "(Topology) Link type between DRM devices 0 and 6": "PCIE", + "(Topology) Link type between DRM devices 0 and 7": "PCIE", + "(Topology) Link type between DRM devices 1 and 2": "PCIE", + "(Topology) Link type between DRM devices 1 and 3": "PCIE", + "(Topology) Link type between DRM devices 1 and 4": "PCIE", + "(Topology) Link type between DRM devices 1 and 5": "PCIE", + "(Topology) Link type between DRM devices 1 and 6": "PCIE", + "(Topology) Link type between DRM devices 1 and 7": "PCIE", + "(Topology) Link type between DRM devices 2 and 3": "PCIE", + "(Topology) Link type between DRM devices 2 and 4": "PCIE", + "(Topology) Link type between DRM devices 2 and 5": "PCIE", + "(Topology) Link type between DRM devices 2 and 6": "PCIE", + "(Topology) Link type between DRM devices 2 and 7": "PCIE", + "(Topology) Link type between DRM devices 3 and 4": "PCIE", + "(Topology) Link type between DRM devices 3 and 5": "PCIE", + "(Topology) Link type between DRM devices 3 and 6": "PCIE", + "(Topology) Link type between DRM devices 3 and 7": "PCIE", + "(Topology) Link type between DRM devices 4 and 5": "PCIE", + "(Topology) Link type between DRM devices 4 and 6": "PCIE", + "(Topology) Link type between DRM devices 4 and 7": "PCIE", + "(Topology) Link type between DRM devices 5 and 6": "PCIE", + "(Topology) Link type between DRM devices 5 and 7": "PCIE", + "(Topology) Link type between DRM devices 6 and 7": "PCIE" + } +} \ No newline at end of file diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index 8ef6d6fe2a8e9..f64426bcfd2ed 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -1,23 +1,24 @@ # AMQP Consumer Input Plugin -This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). +This plugin provides a consumer for use with AMQP 0-9-1, a prominent +implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). -Metrics are read from a topic exchange using the configured queue and binding_key. +Metrics are read from a topic exchange using the configured queue and +binding_key. -Message payload should be formatted in one of the [Telegraf Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +Message payload should be formatted in one of the [Telegraf Data +Formats](../../../docs/DATA_FORMATS_INPUT.md). For an introduction to AMQP see: -- https://www.rabbitmq.com/tutorials/amqp-concepts.html -- https://www.rabbitmq.com/getstarted.html -The following defaults are known to work with RabbitMQ: +- [amqp - concepts](https://www.rabbitmq.com/tutorials/amqp-concepts.html) +- [rabbitmq: getting started](https://www.rabbitmq.com/getstarted.html) -```toml -[[inputs.amqp_consumer]] - ## Broker to consume from. - ## deprecated in 1.7; use the brokers option - # url = "amqp://localhost:5672/influxdb" +## Configuration +```toml @sample.conf +# AMQP consumer plugin +[[inputs.amqp_consumer]] ## Brokers to consume from. If multiple brokers are specified a random broker ## will be selected anytime a connection is established. This can be ## helpful for load balancing when not using a dedicated load balancer. @@ -43,7 +44,7 @@ The following defaults are known to work with RabbitMQ: # exchange_arguments = { } # exchange_arguments = {"hash_property" = "timestamp"} - ## AMQP queue name + ## AMQP queue name. 
queue = "telegraf" ## AMQP queue durability can be "transient" or "durable". diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index d98b1c19f4ab3..4c6d366121d07 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package amqp_consumer import ( "context" + _ "embed" "errors" "fmt" "math/rand" @@ -9,14 +11,19 @@ import ( "sync" "time" + amqp "github.com/rabbitmq/amqp091-go" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/streadway/amqp" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( defaultMaxUndeliveredMessages = 1000 ) @@ -26,7 +33,7 @@ type semaphore chan empty // AMQPConsumer is the top level struct for this plugin type AMQPConsumer struct { - URL string `toml:"url"` // deprecated in 1.7; use brokers + URL string `toml:"url" deprecated:"1.7.0;use 'brokers' instead"` Brokers []string `toml:"brokers"` Username string `toml:"username"` Password string `toml:"password"` @@ -71,7 +78,7 @@ func (a *externalAuth) Mechanism() string { return "EXTERNAL" } func (a *externalAuth) Response() string { - return fmt.Sprintf("\000") + return "\000" } const ( @@ -87,89 +94,8 @@ const ( DefaultPrefetchCount = 50 ) -func (a *AMQPConsumer) SampleConfig() string { - return ` - ## Broker to consume from. - ## deprecated in 1.7; use the brokers option - # url = "amqp://localhost:5672/influxdb" - - ## Brokers to consume from. If multiple brokers are specified a random broker - ## will be selected anytime a connection is established. This can be - ## helpful for load balancing when not using a dedicated load balancer. - brokers = ["amqp://localhost:5672/influxdb"] - - ## Authentication credentials for the PLAIN auth_method. - # username = "" - # password = "" - - ## Name of the exchange to declare. If unset, no exchange will be declared. - exchange = "telegraf" - - ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". - # exchange_type = "topic" - - ## If true, exchange will be passively declared. - # exchange_passive = false - - ## Exchange durability can be either "transient" or "durable". - # exchange_durability = "durable" - - ## Additional exchange arguments. - # exchange_arguments = { } - # exchange_arguments = {"hash_property" = "timestamp"} - - ## AMQP queue name. - queue = "telegraf" - - ## AMQP queue durability can be "transient" or "durable". - queue_durability = "durable" - - ## If true, queue will be passively declared. - # queue_passive = false - - ## A binding between the exchange and queue using this binding key is - ## created. If unset, no binding is created. - binding_key = "#" - - ## Maximum number of messages server should give to the worker. - # prefetch_count = 50 - - ## Maximum messages to read from the broker that have not been written by an - ## output. For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. 
- ## - ## For example, if each message from the queue contains 10 metrics and the - ## output metric_batch_size is 1000, setting this to 100 will ensure that a - ## full batch is collected and the write is triggered immediately without - ## waiting until the next flush_interval. - # max_undelivered_messages = 1000 - - ## Auth method. PLAIN and EXTERNAL are supported - ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as - ## described here: https://www.rabbitmq.com/plugins.html - # auth_method = "PLAIN" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Content encoding for message payloads, can be set to "gzip" to or - ## "identity" to apply no encoding. - # content_encoding = "identity" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` -} - -func (a *AMQPConsumer) Description() string { - return "AMQP consumer plugin" +func (*AMQPConsumer) SampleConfig() string { + return sampleConfig } func (a *AMQPConsumer) SetParser(parser parsers.Parser) { @@ -183,7 +109,7 @@ func (a *AMQPConsumer) Gather(_ telegraf.Accumulator) error { func (a *AMQPConsumer) createConfig() (*amqp.Config, error) { // make new tls config - tls, err := a.ClientConfig.TLSConfig() + tlsCfg, err := a.ClientConfig.TLSConfig() if err != nil { return nil, err } @@ -201,7 +127,7 @@ func (a *AMQPConsumer) createConfig() (*amqp.Config, error) { } config := amqp.Config{ - TLSClientConfig: tls, + TLSClientConfig: tlsCfg, SASL: auth, // if nil, it will be PLAIN } return &config, nil @@ -288,16 +214,13 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err ch, err := a.conn.Channel() if err != nil { - return nil, fmt.Errorf("Failed to open a channel: %s", err.Error()) + return nil, fmt.Errorf("failed to open a channel: %s", err.Error()) } if a.Exchange != "" { - var exchangeDurable = true - switch a.ExchangeDurability { - case "transient": + exchangeDurable := true + if a.ExchangeDurability == "transient" { exchangeDurable = false - default: - exchangeDurable = true } exchangeArgs := make(amqp.Table, len(a.ExchangeArguments)) @@ -305,11 +228,8 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err exchangeArgs[k] = v } - err = declareExchange( + err = a.declareExchange( ch, - a.Exchange, - a.ExchangeType, - a.ExchangePassive, exchangeDurable, exchangeArgs) if err != nil { @@ -317,11 +237,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err } } - q, err := declareQueue( - ch, - a.Queue, - a.QueueDurability, - a.QueuePassive) + q, err := a.declareQueue(ch) if err != nil { return nil, err } @@ -335,7 +251,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err nil, ) if err != nil { - return nil, fmt.Errorf("Failed to bind a queue: %s", err) + return nil, fmt.Errorf("failed to bind a queue: %s", err) } } @@ -345,7 +261,7 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err false, // global ) if err != nil { - return nil, fmt.Errorf("Failed to set QoS: %s", err) + return nil, fmt.Errorf("failed to set QoS: %s", err) } msgs, err := ch.Consume( @@ -358,25 +274,22 @@ func (a *AMQPConsumer) 
connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err nil, // arguments ) if err != nil { - return nil, fmt.Errorf("Failed establishing connection to queue: %s", err) + return nil, fmt.Errorf("failed establishing connection to queue: %s", err) } return msgs, err } -func declareExchange( +func (a *AMQPConsumer) declareExchange( channel *amqp.Channel, - exchangeName string, - exchangeType string, - exchangePassive bool, exchangeDurable bool, exchangeArguments amqp.Table, ) error { var err error - if exchangePassive { + if a.ExchangePassive { err = channel.ExchangeDeclarePassive( - exchangeName, - exchangeType, + a.Exchange, + a.ExchangeType, exchangeDurable, false, // delete when unused false, // internal @@ -385,8 +298,8 @@ func declareExchange( ) } else { err = channel.ExchangeDeclare( - exchangeName, - exchangeType, + a.Exchange, + a.ExchangeType, exchangeDurable, false, // delete when unused false, // internal @@ -395,31 +308,23 @@ func declareExchange( ) } if err != nil { - return fmt.Errorf("Error declaring exchange: %v", err) + return fmt.Errorf("error declaring exchange: %v", err) } return nil } -func declareQueue( - channel *amqp.Channel, - queueName string, - queueDurability string, - queuePassive bool, -) (*amqp.Queue, error) { +func (a *AMQPConsumer) declareQueue(channel *amqp.Channel) (*amqp.Queue, error) { var queue amqp.Queue var err error - var queueDurable = true - switch queueDurability { - case "transient": + queueDurable := true + if a.QueueDurability == "transient" { queueDurable = false - default: - queueDurable = true } - if queuePassive { + if a.QueuePassive { queue, err = channel.QueueDeclarePassive( - queueName, // queue + a.Queue, // queue queueDurable, // durable false, // delete when unused false, // exclusive @@ -428,7 +333,7 @@ func declareQueue( ) } else { queue, err = channel.QueueDeclare( - queueName, // queue + a.Queue, // queue queueDurable, // durable false, // delete when unused false, // exclusive @@ -437,7 +342,7 @@ func declareQueue( ) } if err != nil { - return nil, fmt.Errorf("Error declaring queue: %v", err) + return nil, fmt.Errorf("error declaring queue: %v", err) } return &queue, nil } diff --git a/plugins/inputs/amqp_consumer/sample.conf b/plugins/inputs/amqp_consumer/sample.conf new file mode 100644 index 0000000000000..87992e35c4443 --- /dev/null +++ b/plugins/inputs/amqp_consumer/sample.conf @@ -0,0 +1,74 @@ +# AMQP consumer plugin +[[inputs.amqp_consumer]] + ## Brokers to consume from. If multiple brokers are specified a random broker + ## will be selected anytime a connection is established. This can be + ## helpful for load balancing when not using a dedicated load balancer. + brokers = ["amqp://localhost:5672/influxdb"] + + ## Authentication credentials for the PLAIN auth_method. + # username = "" + # password = "" + + ## Name of the exchange to declare. If unset, no exchange will be declared. + exchange = "telegraf" + + ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". + # exchange_type = "topic" + + ## If true, exchange will be passively declared. + # exchange_passive = false + + ## Exchange durability can be either "transient" or "durable". + # exchange_durability = "durable" + + ## Additional exchange arguments. + # exchange_arguments = { } + # exchange_arguments = {"hash_property" = "timestamp"} + + ## AMQP queue name. + queue = "telegraf" + + ## AMQP queue durability can be "transient" or "durable". + queue_durability = "durable" + + ## If true, queue will be passively declared. 
+  # queue_passive = false
+
+  ## A binding between the exchange and queue using this binding key is
+  ## created. If unset, no binding is created.
+  binding_key = "#"
+
+  ## Maximum number of messages server should give to the worker.
+  # prefetch_count = 50
+
+  ## Maximum messages to read from the broker that have not been written by an
+  ## output. For best throughput set based on the number of metrics within
+  ## each message and the size of the output's metric_batch_size.
+  ##
+  ## For example, if each message from the queue contains 10 metrics and the
+  ## output metric_batch_size is 1000, setting this to 100 will ensure that a
+  ## full batch is collected and the write is triggered immediately without
+  ## waiting until the next flush_interval.
+  # max_undelivered_messages = 1000
+
+  ## Auth method. PLAIN and EXTERNAL are supported.
+  ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
+  ## described here: https://www.rabbitmq.com/plugins.html
+  # auth_method = "PLAIN"
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Content encoding for message payloads, can be set to "gzip" or
+  ## "identity" to apply no encoding.
+  # content_encoding = "identity"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
diff --git a/plugins/inputs/apache/README.md b/plugins/inputs/apache/README.md
index b8822edebf314..de596bec0c568 100644
--- a/plugins/inputs/apache/README.md
+++ b/plugins/inputs/apache/README.md
@@ -1,12 +1,19 @@
 # Apache Input Plugin
 
-The Apache plugin collects server performance information using the [`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of the [Apache HTTP Server](https://httpd.apache.org/).
+The Apache plugin collects server performance information using the
+[`mod_status`](https://httpd.apache.org/docs/2.4/mod/mod_status.html) module of
+the [Apache HTTP Server](https://httpd.apache.org/).
 
-Typically, the `mod_status` module is configured to expose a page at the `/server-status?auto` location of the Apache server. The [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) option must be enabled in order to collect all available fields. For information about how to configure your server reference the [module documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
+Typically, the `mod_status` module is configured to expose a page at the
+`/server-status?auto` location of the Apache server. The
+[ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus)
+option must be enabled in order to collect all available fields. For
+information about how to configure your server, reference the [module
+documentation](https://httpd.apache.org/docs/2.4/mod/mod_status.html#enable).
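The `?auto` variant of the status page returns plain `Key: Value` lines, which is what the plugin parses. As a rough standalone illustration (not part of this patch; the URL and timeout are assumptions), fetching and splitting that page can be as simple as:

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://localhost/server-status?auto")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		parts := strings.SplitN(sc.Text(), ":", 2)
		if len(parts) != 2 {
			continue
		}
		// e.g. "BusyWorkers" -> "1"; like the plugin, strip spaces from keys.
		key := strings.ReplaceAll(parts[0], " ", "")
		fmt.Printf("%s = %s\n", key, strings.TrimSpace(parts[1]))
	}
}
```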
-### Configuration: +## Configuration -```toml +```toml @sample.conf # Read Apache status information (mod_status) [[inputs.apache]] ## An array of URLs to gather from, must be directed at the machine @@ -29,7 +36,7 @@ Typically, the `mod_status` module is configured to expose a page at the `/serve # insecure_skip_verify = false ``` -### Measurements & Fields: +## Metrics - apache - BusyWorkers (float) @@ -56,7 +63,8 @@ Typically, the `mod_status` module is configured to expose a page at the `/serve - TotalkBytes (float) - Uptime (float) -The following fields are collected from the `Scoreboard`, and represent the number of requests in the given state: +The following fields are collected from the `Scoreboard`, and represent the +number of requests in the given state: - apache - scboard_closing (float) @@ -71,14 +79,14 @@ The following fields are collected from the `Scoreboard`, and represent the numb - scboard_starting (float) - scboard_waiting (float) -### Tags: +## Tags - All measurements have the following tags: - - port - - server + - port + - server -### Example Output: +## Example Output -``` +```shell apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000 ``` diff --git a/plugins/inputs/apache/apache.go b/plugins/inputs/apache/apache.go index ff7341b838f75..761c25d31e620 100644 --- a/plugins/inputs/apache/apache.go +++ b/plugins/inputs/apache/apache.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package apache import ( "bufio" + _ "embed" "fmt" "net" "net/http" @@ -12,62 +14,41 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Apache struct { Urls []string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client *http.Client } -var sampleConfig = ` - ## An array of URLs to gather from, must be directed at the machine - ## readable version of the mod_status page including the auto query string. - ## Default is "http://localhost/server-status?auto". - urls = ["http://localhost/server-status?auto"] - - ## Credentials for basic HTTP authentication. - # username = "myuser" - # password = "mypassword" - - ## Maximum time to receive response. 
- # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (n *Apache) SampleConfig() string { +func (*Apache) SampleConfig() string { return sampleConfig } -func (n *Apache) Description() string { - return "Read Apache status information (mod_status)" -} - func (n *Apache) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup if len(n.Urls) == 0 { n.Urls = []string{"http://localhost/server-status?auto"} } - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -77,14 +58,14 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { for _, u := range n.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -92,7 +73,7 @@ func (n *Apache) Gather(acc telegraf.Accumulator) error { return nil } -func (n *Apache) createHttpClient() (*http.Client, error) { +func (n *Apache) createHTTPClient() (*http.Client, error) { tlsCfg, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -102,16 +83,16 @@ func (n *Apache) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil } -func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *Apache) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { req, err := http.NewRequest("GET", addr.String(), nil) if err != nil { - return fmt.Errorf("error on new request to %s : %s\n", addr.String(), err) + return fmt.Errorf("error on new request to %s : %s", addr.String(), err) } if len(n.Username) != 0 && len(n.Password) != 0 { @@ -120,7 +101,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Do(req) if err != nil { - return fmt.Errorf("error on request to %s : %s\n", addr.String(), err) + return fmt.Errorf("error on request to %s : %s", addr.String(), err) } defer resp.Body.Close() @@ -136,7 +117,7 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { line := sc.Text() if strings.Contains(line, ":") { parts := strings.SplitN(line, ":", 2) - key, part := strings.Replace(parts[0], " ", "", -1), strings.TrimSpace(parts[1]) + key, part := strings.ReplaceAll(parts[0], " ", ""), strings.TrimSpace(parts[1]) switch key { case "Scoreboard": @@ -158,32 +139,31 @@ func (n *Apache) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { } func (n *Apache) gatherScores(data string) map[string]interface{} { - var waiting, open int = 0, 0 - var S, R, W, K, D, C, L, G, I int = 0, 0, 0, 0, 0, 0, 0, 0, 0 - - for _, s := range strings.Split(data, "") { + var waiting, open = 0, 0 + var s, r, w, k, d, c, l, g, i = 0, 0, 0, 0, 0, 0, 0, 0, 0 - switch s { + for _, str := range strings.Split(data, "") { + switch str { case "_": waiting++ case "S": - 
S++ + s++ case "R": - R++ + r++ case "W": - W++ + w++ case "K": - K++ + k++ case "D": - D++ + d++ case "C": - C++ + c++ case "L": - L++ + l++ case "G": - G++ + g++ case "I": - I++ + i++ case ".": open++ } @@ -191,15 +171,15 @@ func (n *Apache) gatherScores(data string) map[string]interface{} { fields := map[string]interface{}{ "scboard_waiting": float64(waiting), - "scboard_starting": float64(S), - "scboard_reading": float64(R), - "scboard_sending": float64(W), - "scboard_keepalive": float64(K), - "scboard_dnslookup": float64(D), - "scboard_closing": float64(C), - "scboard_logging": float64(L), - "scboard_finishing": float64(G), - "scboard_idle_cleanup": float64(I), + "scboard_starting": float64(s), + "scboard_reading": float64(r), + "scboard_sending": float64(w), + "scboard_keepalive": float64(k), + "scboard_dnslookup": float64(d), + "scboard_closing": float64(c), + "scboard_logging": float64(l), + "scboard_finishing": float64(g), + "scboard_idle_cleanup": float64(i), "scboard_open": float64(open), } return fields diff --git a/plugins/inputs/apache/apache_test.go b/plugins/inputs/apache/apache_test.go index ca8f4733c6bc5..534f6f9e1f7e9 100644 --- a/plugins/inputs/apache/apache_test.go +++ b/plugins/inputs/apache/apache_test.go @@ -31,7 +31,8 @@ Scoreboard: WW_____W_RW_R_W__RRR____WR_W___WW________W_WW_W_____R__R_WR__WRWR_RR func TestHTTPApache(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, apacheStatus) + _, err := fmt.Fprintln(w, apacheStatus) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/apache/sample.conf b/plugins/inputs/apache/sample.conf new file mode 100644 index 0000000000000..7a168f6ea6e0d --- /dev/null +++ b/plugins/inputs/apache/sample.conf @@ -0,0 +1,20 @@ +# Read Apache status information (mod_status) +[[inputs.apache]] + ## An array of URLs to gather from, must be directed at the machine + ## readable version of the mod_status page including the auto query string. + ## Default is "http://localhost/server-status?auto". + urls = ["http://localhost/server-status?auto"] + + ## Credentials for basic HTTP authentication. + # username = "myuser" + # password = "mypassword" + + ## Maximum time to receive response. + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/apcupsd/README.md b/plugins/inputs/apcupsd/README.md index 97526d7ec3847..a5ad298385fc7 100644 --- a/plugins/inputs/apcupsd/README.md +++ b/plugins/inputs/apcupsd/README.md @@ -2,13 +2,14 @@ This plugin reads data from an apcupsd daemon over its NIS network protocol. -### Requirements +## Requirements apcupsd should be installed and its daemon should be running. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Monitor APC UPSes connected to apcupsd [[inputs.apcupsd]] # A list of running apcupsd server to connect to. # If not provided will default to tcp://127.0.0.1:3551 @@ -18,7 +19,7 @@ apcupsd should be installed and its daemon should be running. timeout = "5s" ``` -### Metrics +## Metrics - apcupsd - tags:
- nominal_power - firmware +## Example Output - -### Example output - -``` +```shell apcupsd,serial=AS1231515,status=ONLINE,ups_name=name1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,input_voltage=230.4,battery_charge_percent=100,status_flags=8i 1490035922000000000 ``` diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go index a862bbfc881f8..1122df7e4bc94 100644 --- a/plugins/inputs/apcupsd/apcupsd.go +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -1,40 +1,34 @@ +//go:generate ../../../tools/readme_config_includer/generator package apcupsd import ( "context" + _ "embed" "net/url" "strconv" "strings" "time" + apcupsdClient "github.com/mdlayher/apcupsd" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/mdlayher/apcupsd" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const defaultAddress = "tcp://127.0.0.1:3551" -var defaultTimeout = internal.Duration{Duration: time.Duration(time.Second * 5)} +var defaultTimeout = config.Duration(5 * time.Second) type ApcUpsd struct { Servers []string - Timeout internal.Duration + Timeout config.Duration } -func (*ApcUpsd) Description() string { - return "Monitor APC UPSes connected to apcupsd" -} - -var sampleConfig = ` - # A list of running apcupsd server to connect to. - # If not provided will default to tcp://127.0.0.1:3551 - servers = ["tcp://127.0.0.1:3551"] - - ## Timeout for dialing server. - timeout = "5s" -` - func (*ApcUpsd) SampleConfig() string { return sampleConfig } @@ -42,60 +36,67 @@ func (*ApcUpsd) SampleConfig() string { func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { ctx := context.Background() - for _, addr := range h.Servers { - addrBits, err := url.Parse(addr) - if err != nil { - return err - } - if addrBits.Scheme == "" { - addrBits.Scheme = "tcp" - } - - ctx, cancel := context.WithTimeout(ctx, h.Timeout.Duration) - defer cancel() - - status, err := fetchStatus(ctx, addrBits) - if err != nil { - return err - } - - tags := map[string]string{ - "serial": status.SerialNumber, - "ups_name": status.UPSName, - "status": status.Status, - "model": status.Model, - } + for _, server := range h.Servers { + err := func(address string) error { + addrBits, err := url.Parse(address) + if err != nil { + return err + } + if addrBits.Scheme == "" { + addrBits.Scheme = "tcp" + } + + ctx, cancel := context.WithTimeout(ctx, time.Duration(h.Timeout)) + defer cancel() + + status, err := fetchStatus(ctx, addrBits) + if err != nil { + return err + } + + tags := map[string]string{ + "serial": status.SerialNumber, + "ups_name": status.UPSName, + "status": status.Status, + "model": status.Model, + } + + flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) + if err != nil { + return err + } + + fields := map[string]interface{}{ + "status_flags": flags, + "input_voltage": status.LineVoltage, + "load_percent": status.LoadPercent, + "battery_charge_percent": status.BatteryChargePercent, + "time_left_ns": status.TimeLeft.Nanoseconds(), + "output_voltage": status.OutputVoltage, + "internal_temp": status.InternalTemp, + "battery_voltage": status.BatteryVoltage, + "input_frequency": status.LineFrequency, + "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), + "nominal_input_voltage": 
status.NominalInputVoltage, + "nominal_battery_voltage": status.NominalBatteryVoltage, + "nominal_power": status.NominalPower, + "firmware": status.Firmware, + "battery_date": status.BatteryDate, + } + + acc.AddFields("apcupsd", fields, tags) + return nil + }(server) - flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) if err != nil { return err } - - fields := map[string]interface{}{ - "status_flags": flags, - "input_voltage": status.LineVoltage, - "load_percent": status.LoadPercent, - "battery_charge_percent": status.BatteryChargePercent, - "time_left_ns": status.TimeLeft.Nanoseconds(), - "output_voltage": status.OutputVoltage, - "internal_temp": status.InternalTemp, - "battery_voltage": status.BatteryVoltage, - "input_frequency": status.LineFrequency, - "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), - "nominal_input_voltage": status.NominalInputVoltage, - "nominal_battery_voltage": status.NominalBatteryVoltage, - "nominal_power": status.NominalPower, - "firmware": status.Firmware, - "battery_date": status.BatteryDate, - } - - acc.AddFields("apcupsd", fields, tags) } return nil } -func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsd.Status, error) { - client, err := apcupsd.DialContext(ctx, addr.Scheme, addr.Host) +func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsdClient.Status, error) { + client, err := apcupsdClient.DialContext(ctx, addr.Scheme, addr.Host) if err != nil { return nil, err } diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go index e749d5137daba..ee9474adc422f 100644 --- a/plugins/inputs/apcupsd/apcupsd_test.go +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -7,14 +7,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) -func TestApcupsdDocs(t *testing.T) { +func TestApcupsdDocs(_ *testing.T) { apc := &ApcUpsd{} - apc.Description() apc.SampleConfig() } @@ -35,31 +35,33 @@ func listen(ctx context.Context, t *testing.T, out [][]byte) (string, error) { } go func() { - for ctx.Err() == nil { - defer ln.Close() - - conn, err := ln.Accept() - if err != nil { - continue - } - defer conn.Close() - conn.SetReadDeadline(time.Now().Add(time.Second)) + defer ln.Close() - in := make([]byte, 128) - n, err := conn.Read(in) - require.NoError(t, err, "failed to read from connection") - - status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'} - want, got := status, in[:n] - require.Equal(t, want, got) - - // Run against test function and append EOF to end of output bytes - out = append(out, []byte{0, 0}) - - for _, o := range out { - _, err := conn.Write(o) - require.NoError(t, err, "failed to write to connection") - } + for ctx.Err() == nil { + func() { + conn, err := ln.Accept() + if err != nil { + return + } + defer conn.Close() + require.NoError(t, conn.SetReadDeadline(time.Now().Add(time.Second))) + + in := make([]byte, 128) + n, err := conn.Read(in) + require.NoError(t, err, "failed to read from connection") + + status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'} + want, got := status, in[:n] + require.Equal(t, want, got) + + // Run against test function and append EOF to end of output bytes + out = append(out, []byte{0, 0}) + + for _, o := range out { + _, err := conn.Write(o) + require.NoError(t, err, "failed to write to connection") + } + }() } }() @@ -102,7 +104,6 @@ func TestConfig(t *testing.T) { } }) } - } func 
TestApcupsdGather(t *testing.T) { @@ -138,9 +139,9 @@ func TestApcupsdGather(t *testing.T) { "time_on_battery_ns": int64(0), "nominal_input_voltage": float64(230), "nominal_battery_voltage": float64(12), - "nominal_power": int(865), - "firmware": string("857.L3 .I USB FW:L3"), - "battery_date": time.Date(2016, time.September, 06, 0, 0, 0, 0, time.UTC), + "nominal_power": 865, + "firmware": "857.L3 .I USB FW:L3", + "battery_date": "2016-09-06", }, out: genOutput, }, @@ -155,7 +156,6 @@ func TestApcupsdGather(t *testing.T) { ) for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) @@ -207,6 +207,7 @@ func genOutput() [][]byte { "NOMBATTV : 12.0 Volts", "NOMPOWER : 865 Watts", "FIRMWARE : 857.L3 .I USB FW:L3", + "ALARMDEL : Low Battery", } var out [][]byte diff --git a/plugins/inputs/apcupsd/sample.conf b/plugins/inputs/apcupsd/sample.conf new file mode 100644 index 0000000000000..488e567c64c91 --- /dev/null +++ b/plugins/inputs/apcupsd/sample.conf @@ -0,0 +1,8 @@ +# Monitor APC UPSes connected to apcupsd +[[inputs.apcupsd]] + # A list of running apcupsd server to connect to. + # If not provided will default to tcp://127.0.0.1:3551 + servers = ["tcp://127.0.0.1:3551"] + + ## Timeout for dialing server. + timeout = "5s" diff --git a/plugins/inputs/aurora/README.md b/plugins/inputs/aurora/README.md index cef7ac6c7e045..2ff7f7680b50d 100644 --- a/plugins/inputs/aurora/README.md +++ b/plugins/inputs/aurora/README.md @@ -1,12 +1,15 @@ # Aurora Input Plugin -The Aurora Input Plugin gathers metrics from [Apache Aurora](https://aurora.apache.org/) schedulers. +The Aurora Input Plugin gathers metrics from [Apache +Aurora](https://aurora.apache.org/) schedulers. -For monitoring recommendations reference [Monitoring your Aurora cluster](https://aurora.apache.org/documentation/latest/operations/monitoring/) +For monitoring recommendations reference [Monitoring your Aurora +cluster](https://aurora.apache.org/documentation/latest/operations/monitoring/) -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Gather metrics from Apache Aurora schedulers [[inputs.aurora]] ## Schedulers are the base addresses of your Aurora Schedulers schedulers = ["http://127.0.0.1:8081"] @@ -32,7 +35,7 @@ For monitoring recommendations reference [Monitoring your Aurora cluster](https: # insecure_skip_verify = false ``` -### Metrics: +## Metrics - aurora - tags: @@ -42,22 +45,24 @@ For monitoring recommendations reference [Monitoring your Aurora cluster](https: - Numeric metrics are collected from the `/vars` endpoint; string fields are not gathered. - -### Troubleshooting: +## Troubleshooting Check the Scheduler role, the leader will return a 200 status: -``` + +```shell curl -v http://127.0.0.1:8081/leaderhealth ``` Get available metrics: -``` + +```shell curl http://127.0.0.1:8081/vars ``` -### Example Output: +## Example Output The example output below has been trimmed. 
-``` + +```shell > aurora,role=leader,scheduler=http://debian-stretch-aurora-coordinator-3.virt:8081 CronBatchWorker_batch_locked_events=0i,CronBatchWorker_batch_locked_events_per_sec=0,CronBatchWorker_batch_locked_nanos_per_event=0,CronBatchWorker_batch_locked_nanos_total=0i,CronBatchWorker_batch_locked_nanos_total_per_sec=0,CronBatchWorker_batch_unlocked_events=0i,CronBatchWorker_batch_unlocked_events_per_sec=0,CronBatchWorker_batch_unlocked_nanos_per_event=0,CronBatchWorker_batch_unlocked_nanos_total=0i,CronBatchWorker_batch_unlocked_nanos_total_per_sec=0,CronBatchWorker_batches_processed=0i,CronBatchWorker_items_processed=0i,CronBatchWorker_last_processed_batch_size=0i,CronBatchWorker_queue_size=0i,TaskEventBatchWorker_batch_locked_events=0i,TaskEventBatchWorker_batch_locked_events_per_sec=0,TaskEventBatchWorker_batch_locked_nanos_per_event=0,TaskEventBatchWorker_batch_locked_nanos_total=0i,TaskEventBatchWorker_batch_locked_nanos_total_per_sec=0,TaskEventBatchWorker_batch_unlocked_events=0i,TaskEventBatchWorker_batch_unlocked_events_per_sec=0,TaskEventBatchWorker_batch_unlocked_nanos_per_event=0,TaskEventBatchWorker_batch_unlocked_nanos_total=0i,TaskEventBatchWorker_batch_unlocked_nanos_total_per_sec=0,TaskEventBatchWorker_batches_processed=0i,TaskEventBatchWorker_items_processed=0i,TaskEventBatchWorker_last_processed_batch_size=0i,TaskEventBatchWorker_queue_size=0i,TaskGroupBatchWorker_batch_locked_events=0i,TaskGroupBatchWorker_batch_locked_events_per_sec=0,TaskGroupBatchWorker_batch_locked_nanos_per_event=0,TaskGroupBatchWorker_batch_locked_nanos_total=0i,TaskGroupBatchWorker_batch_locked_nanos_total_per_sec=0,TaskGroupBatchWorker_batch_unlocked_events=0i,TaskGroupBatchWorker_batch_unlocked_events_per_sec=0,TaskGroupBatchWorker_batch_unlocked_nanos_per_event=0,TaskGroupBatchWorker_batch_unlocked_nanos_total=0i,TaskGroupBatchWorker_batch_unlocked_nanos_total_per_sec=0,TaskGroupBatchWorker_batches_processed=0i,TaskGroupBatchWorker_items_processed=0i,TaskGroupBatchWorker_last_processed_batch_size=0i,TaskGroupBatchWorker_queue_size=0i,assigner_launch_failures=0i,async_executor_uncaught_exceptions=0i,async_tasks_completed=1i,cron_job_collisions=0i,cron_job_concurrent_runs=0i,cron_job_launch_failures=0i,cron_job_misfires=0i,cron_job_parse_failures=0i,cron_job_triggers=0i,cron_jobs_loaded=1i,empty_slots_dedicated_large=0i,empty_slots_dedicated_medium=0i,empty_slots_dedicated_revocable_large=0i,empty_slots_dedicated_revocable_medium=0i,empty_slots_dedicated_revocable_small=0i,empty_slots_dedicated_revocable_xlarge=0i,empty_slots_dedicated_small=0i,empty_slots_dedicated_xlarge=0i,empty_slots_large=0i,empty_slots_medium=0i,empty_slots_revocable_large=0i,empty_slots_revocable_medium=0i,empty_slots_revocable_small=0i,empty_slots_revocable_xlarge=0i,empty_slots_small=0i,empty_slots_xlarge=0i,event_bus_dead_events=0i,event_bus_exceptions=1i,framework_registered=1i,globally_banned_offers_size=0i,http_200_responses_events=55i,http_200_responses_events_per_sec=0,http_200_responses_nanos_per_event=0,http_200_responses_nanos_total=310416694i,http_200_responses_nanos_total_per_sec=0,job_update_delete_errors=0i,job_update_recovery_errors=0i,job_update_state_change_errors=0i,job_update_store_delete_all_events=1i,job_update_store_delete_all_events_per_sec=0,job_update_store_delete_all_nanos_per_event=0,job_update_store_delete_all_nanos_total=1227254i,job_update_store_delete_all_nanos_total_per_sec=0,job_update_store_fetch_details_query_events=74i,job_update_store_fetch_details_query_events_per_sec=0,job_up
date_store_fetch_details_query_nanos_per_event=0,job_update_store_fetch_details_query_nanos_total=24643149i,job_update_store_fetch_details_query_nanos_total_per_sec=0,job_update_store_prune_history_events=59i,job_update_store_prune_history_events_per_sec=0,job_update_store_prune_history_nanos_per_event=0,job_update_store_prune_history_nanos_total=262868218i,job_update_store_prune_history_nanos_total_per_sec=0,job_updates_pruned=0i,jvm_available_processors=2i,jvm_class_loaded_count=6707i,jvm_class_total_loaded_count=6732i,jvm_class_unloaded_count=25i,jvm_gc_PS_MarkSweep_collection_count=2i,jvm_gc_PS_MarkSweep_collection_time_ms=223i,jvm_gc_PS_Scavenge_collection_count=27i,jvm_gc_PS_Scavenge_collection_time_ms=1691i,jvm_gc_collection_count=29i,jvm_gc_collection_time_ms=1914i,jvm_memory_free_mb=65i,jvm_memory_heap_mb_committed=157i,jvm_memory_heap_mb_max=446i,jvm_memory_heap_mb_used=91i,jvm_memory_max_mb=446i,jvm_memory_mb_total=157i,jvm_memory_non_heap_mb_committed=50i,jvm_memory_non_heap_mb_max=0i,jvm_memory_non_heap_mb_used=49i,jvm_threads_active=47i,jvm_threads_daemon=28i,jvm_threads_peak=48i,jvm_threads_started=62i,jvm_time_ms=1526530686927i,jvm_uptime_secs=79947i,log_entry_serialize_events=16i,log_entry_serialize_events_per_sec=0,log_entry_serialize_nanos_per_event=0,log_entry_serialize_nanos_total=4815321i,log_entry_serialize_nanos_total_per_sec=0,log_manager_append_events=16i,log_manager_append_events_per_sec=0,log_manager_append_nanos_per_event=0,log_manager_append_nanos_total=506453428i,log_manager_append_nanos_total_per_sec=0,log_manager_deflate_events=14i,log_manager_deflate_events_per_sec=0,log_manager_deflate_nanos_per_event=0,log_manager_deflate_nanos_total=21010565i,log_manager_deflate_nanos_total_per_sec=0 1526530687000000000 ``` diff --git a/plugins/inputs/aurora/aurora.go b/plugins/inputs/aurora/aurora.go index fc6f82aadda17..131fd8882669b 100644 --- a/plugins/inputs/aurora/aurora.go +++ b/plugins/inputs/aurora/aurora.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package aurora import ( "context" + _ "embed" "encoding/json" "fmt" "net/http" @@ -11,11 +13,15 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type RoleType int const ( @@ -43,50 +49,21 @@ var ( type Vars map[string]interface{} type Aurora struct { - Schedulers []string `toml:"schedulers"` - Roles []string `toml:"roles"` - Timeout internal.Duration `toml:"timeout"` - Username string `toml:"username"` - Password string `toml:"password"` + Schedulers []string `toml:"schedulers"` + Roles []string `toml:"roles"` + Timeout config.Duration `toml:"timeout"` + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig client *http.Client urls []*url.URL } -var sampleConfig = ` - ## Schedulers are the base addresses of your Aurora Schedulers - schedulers = ["http://127.0.0.1:8081"] - - ## Set of role types to collect metrics from. - ## - ## The scheduler roles are checked each interval by contacting the - ## scheduler nodes; zookeeper is not contacted. - # roles = ["leader", "follower"] - - ## Timeout is the max time for total network operations. - # timeout = "5s" - - ## Username and password are sent using HTTP Basic Auth. 
- # username = "username" - # password = "pa$$word" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (a *Aurora) SampleConfig() string { +func (*Aurora) SampleConfig() string { return sampleConfig } -func (a *Aurora) Description() string { - return "Gather metrics from Apache Aurora schedulers" -} - func (a *Aurora) Gather(acc telegraf.Accumulator) error { if a.client == nil { err := a.initialize() @@ -95,7 +72,7 @@ func (a *Aurora) Gather(acc telegraf.Accumulator) error { } } - ctx, cancel := context.WithTimeout(context.Background(), a.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(a.Timeout)) defer cancel() var wg sync.WaitGroup @@ -147,8 +124,8 @@ func (a *Aurora) initialize() error { urls = append(urls, loc) } - if a.Timeout.Duration < time.Second { - a.Timeout.Duration = defaultTimeout + if a.Timeout < config.Duration(time.Second) { + a.Timeout = config.Duration(defaultTimeout) } if len(a.Roles) == 0 { @@ -190,7 +167,9 @@ func (a *Aurora) gatherRole(ctx context.Context, origin *url.URL) (RoleType, err if err != nil { return Unknown, err } - resp.Body.Close() + if err := resp.Body.Close(); err != nil { + return Unknown, fmt.Errorf("closing body failed: %v", err) + } switch resp.StatusCode { case http.StatusOK: diff --git a/plugins/inputs/aurora/aurora_test.go b/plugins/inputs/aurora/aurora_test.go index 6e2c004f2e7b3..e22488929e545 100644 --- a/plugins/inputs/aurora/aurora_test.go +++ b/plugins/inputs/aurora/aurora_test.go @@ -46,7 +46,8 @@ func TestAurora(t *testing.T) { "variable_scrape_micros_total_per_sec": 1485.0 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -86,7 +87,8 @@ func TestAurora(t *testing.T) { }, varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte("{}")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -104,7 +106,8 @@ func TestAurora(t *testing.T) { "foo": "bar" }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -123,7 +126,8 @@ func TestAurora(t *testing.T) { "foo": 1e309 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -142,7 +146,8 @@ func TestAurora(t *testing.T) { "foo": 9223372036854775808 }` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -158,7 +163,8 @@ func TestAurora(t *testing.T) { varsjson: func(t *testing.T, w http.ResponseWriter, r *http.Request) { body := `{]` w.WriteHeader(http.StatusOK) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -176,7 +182,8 @@ func TestAurora(t *testing.T) { "value": 
42 }` w.WriteHeader(http.StatusServiceUnavailable) - w.Write([]byte(body)) + _, err := w.Write([]byte(body)) + require.NoError(t, err) }, check: func(t *testing.T, err error, acc *testutil.Accumulator) { require.NoError(t, err) @@ -244,7 +251,8 @@ func TestBasicAuth(t *testing.T) { require.Equal(t, tt.username, username) require.Equal(t, tt.password, password) w.WriteHeader(http.StatusOK) - w.Write([]byte("{}")) + _, err := w.Write([]byte("{}")) + require.NoError(t, err) }) var acc testutil.Accumulator diff --git a/plugins/inputs/aurora/sample.conf b/plugins/inputs/aurora/sample.conf new file mode 100644 index 0000000000000..dd87656339c5d --- /dev/null +++ b/plugins/inputs/aurora/sample.conf @@ -0,0 +1,24 @@ +# Gather metrics from Apache Aurora schedulers +[[inputs.aurora]] + ## Schedulers are the base addresses of your Aurora Schedulers + schedulers = ["http://127.0.0.1:8081"] + + ## Set of role types to collect metrics from. + ## + ## The scheduler roles are checked each interval by contacting the + ## scheduler nodes; zookeeper is not contacted. + # roles = ["leader", "follower"] + + ## Timeout is the max time for total network operations. + # timeout = "5s" + + ## Username and password are sent using HTTP Basic Auth. + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/azure_storage_queue/README.md b/plugins/inputs/azure_storage_queue/README.md index 905e85e4cdea6..8427e71b4c464 100644 --- a/plugins/inputs/azure_storage_queue/README.md +++ b/plugins/inputs/azure_storage_queue/README.md @@ -2,22 +2,23 @@ This plugin gathers sizes of Azure Storage Queues. -### Configuration: +## Configuration -```toml -# Description +```toml @sample.conf +# Gather Azure Storage Queue metrics [[inputs.azure_storage_queue]] ## Required Azure Storage Account name account_name = "mystorageaccount" ## Required Azure Storage Account access key account_key = "storageaccountaccesskey" - + ## Set to false to disable peeking age of oldest message (executes faster) # peek_oldest_message_age = true ``` -### Metrics +## Metrics + - azure_storage_queues - tags: - queue @@ -26,10 +27,10 @@ This plugin gathers sizes of Azure Storage Queues. - size (integer, count) - oldest_message_age_ns (integer, nanoseconds) Age of message at the head of the queue. Requires `peek_oldest_message_age` to be configured to `true`. - -### Example Output -``` +## Example Output + +```shell azure_storage_queues,queue=myqueue,account=mystorageaccount oldest_message_age=799714900i,size=7i 1565970503000000000 azure_storage_queues,queue=myemptyqueue,account=mystorageaccount size=0i 1565970502000000000 -``` \ No newline at end of file +``` diff --git a/plugins/inputs/azure_storage_queue/azure_storage_queue.go b/plugins/inputs/azure_storage_queue/azure_storage_queue.go index 6d132a5ef0171..ca80a16b7cd9b 100644 --- a/plugins/inputs/azure_storage_queue/azure_storage_queue.go +++ b/plugins/inputs/azure_storage_queue/azure_storage_queue.go @@ -1,17 +1,24 @@ +//go:generate ../../../tools/readme_config_includer/generator package azure_storage_queue import ( "context" + _ "embed" "errors" "net/url" "strings" "time" "github.com/Azure/azure-storage-queue-go/azqueue" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! 
This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type AzureStorageQueue struct { StorageAccountName string `toml:"account_name"` StorageAccountKey string `toml:"account_key"` @@ -21,22 +28,7 @@ type AzureStorageQueue struct { serviceURL *azqueue.ServiceURL } -var sampleConfig = ` - ## Required Azure Storage Account name - account_name = "mystorageaccount" - - ## Required Azure Storage Account access key - account_key = "storageaccountaccesskey" - - ## Set to false to disable peeking age of oldest message (executes faster) - # peek_oldest_message_age = true - ` - -func (a *AzureStorageQueue) Description() string { - return "Gather Azure Storage Queue metrics" -} - -func (a *AzureStorageQueue) SampleConfig() string { +func (*AzureStorageQueue) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/azure_storage_queue/sample.conf b/plugins/inputs/azure_storage_queue/sample.conf new file mode 100644 index 0000000000000..54799105f6a07 --- /dev/null +++ b/plugins/inputs/azure_storage_queue/sample.conf @@ -0,0 +1,10 @@ +# Gather Azure Storage Queue metrics +[[inputs.azure_storage_queue]] + ## Required Azure Storage Account name + account_name = "mystorageaccount" + + ## Required Azure Storage Account access key + account_key = "storageaccountaccesskey" + + ## Set to false to disable peeking age of oldest message (executes faster) + # peek_oldest_message_age = true diff --git a/plugins/inputs/bcache/README.md b/plugins/inputs/bcache/README.md index 11d567ec5616b..d1261c81cf5e5 100644 --- a/plugins/inputs/bcache/README.md +++ b/plugins/inputs/bcache/README.md @@ -2,7 +2,7 @@ Get bcache stats from the stats_total directory and dirty_data file. -# Measurements +## Metrics Meta: @@ -20,9 +20,9 @@ Measurement names: - cache_misses - cache_readaheads -### Description +## Description -``` +```text dirty_data Amount of dirty data for this backing device in the cache. Continuously updated unlike the cache set's version, but may be slightly off. @@ -51,31 +51,28 @@ cache_readaheads Count of times readahead occurred. ``` -# Example output +## Configuration -Using this configuration: +```toml @sample.conf +# Read metrics of bcache from stats_total and dirty_data +[[inputs.bcache]] + ## Bcache sets path + ## If not specified, then default is: + bcachePath = "/sys/fs/bcache" -```toml -[bcache] - # Bcache sets path - # If not specified, then default is: - # bcachePath = "/sys/fs/bcache" - # - # By default, telegraf gather stats for all bcache devices - # Setting devices will restrict the stats to the specified - # bcache devices. - # bcacheDevs = ["bcache0", ...] + ## By default, Telegraf gathers stats for all bcache devices + ## Setting devices will restrict the stats to the specified + ## bcache devices.
+ bcacheDevs = ["bcache0"] ``` -When run with: +## Example Output -``` +```shell ./telegraf --config telegraf.conf --input-filter bcache --test ``` -It produces: - -``` +```shell * Plugin: bcache, Collection 1 > [backing_dev="md10" bcache_dev="bcache0"] bcache_dirty_data value=11639194 > [backing_dev="md10" bcache_dev="bcache0"] bcache_bypassed value=5167704440832 diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 849e6dd37de0d..67cb714193cb7 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -1,8 +1,15 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build !windows +// +build !windows + +// bcache doesn't aim for Windows + package bcache import ( + _ "embed" "errors" - "io/ioutil" + "fmt" "os" "path/filepath" "strconv" @@ -12,30 +19,15 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Bcache struct { BcachePath string BcacheDevs []string } -var sampleConfig = ` - ## Bcache sets path - ## If not specified, then default is: - bcachePath = "/sys/fs/bcache" - - ## By default, telegraf gather stats for all bcache devices - ## Setting devices will restrict the stats to the specified - ## bcache devices. - bcacheDevs = ["bcache0"] -` - -func (b *Bcache) SampleConfig() string { - return sampleConfig -} - -func (b *Bcache) Description() string { - return "Read metrics of bcache from stats_total and dirty_data" -} - func getTags(bdev string) map[string]string { backingDevFile, _ := os.Readlink(bdev) backingDevPath := strings.Split(backingDevFile, "/") @@ -79,7 +71,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { if len(metrics) == 0 { return errors.New("can't read any stats file") } - file, err := ioutil.ReadFile(bdev + "/dirty_data") + file, err := os.ReadFile(bdev + "/dirty_data") if err != nil { return err } @@ -91,7 +83,7 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { for _, path := range metrics { key := filepath.Base(path) - file, err := ioutil.ReadFile(path) + file, err := os.ReadFile(path) rawValue := strings.TrimSpace(string(file)) if err != nil { return err @@ -108,6 +100,10 @@ func (b *Bcache) gatherBcache(bdev string, acc telegraf.Accumulator) error { return nil } +func (*Bcache) SampleConfig() string { + return sampleConfig +} + func (b *Bcache) Gather(acc telegraf.Accumulator) error { bcacheDevsChecked := make(map[string]bool) var restrictDevs bool @@ -124,7 +120,7 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error { } bdevs, _ := filepath.Glob(bcachePath + "/*/bdev*") if len(bdevs) < 1 { - return errors.New("Can't find any bcache device") + return errors.New("can't find any bcache device") } for _, bdev := range bdevs { if restrictDevs { @@ -133,7 +129,9 @@ func (b *Bcache) Gather(acc telegraf.Accumulator) error { continue } } - b.gatherBcache(bdev, acc) + if err := b.gatherBcache(bdev, acc); err != nil { + return fmt.Errorf("gathering bcache failed: %v", err) + } } return nil } diff --git a/plugins/inputs/bcache/bcache_test.go b/plugins/inputs/bcache/bcache_test.go index bd191528fd014..4c62e0f014f14 100644 --- a/plugins/inputs/bcache/bcache_test.go +++ b/plugins/inputs/bcache/bcache_test.go @@ -1,7 +1,9 @@ +//go:build !windows +// +build !windows + package bcache import ( - "io/ioutil" "os" "testing" @@ -10,26 +12,26 @@ import ( ) const ( - dirty_data = "1.5G" - 
bypassed = "4.7T" - cache_bypass_hits = "146155333" - cache_bypass_misses = "0" - cache_hit_ratio = "90" - cache_hits = "511469583" - cache_miss_collisions = "157567" - cache_misses = "50616331" - cache_readaheads = "2" + dirtyData = "1.5G" + bypassed = "4.7T" + cacheBypassHits = "146155333" + cacheBypassMisses = "0" + cacheHitRatio = "90" + cacheHits = "511469583" + cacheMissCollisions = "157567" + cacheMisses = "50616331" + cacheReadaheads = "2" ) var ( testBcachePath = os.TempDir() + "/telegraf/sys/fs/bcache" - testBcacheUuidPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411" + testBcacheUUIDPath = testBcachePath + "/663955a3-765a-4737-a9fd-8250a7a78411" testBcacheDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/bcache0" testBcacheBackingDevPath = os.TempDir() + "/telegraf/sys/devices/virtual/block/md10" ) func TestBcacheGeneratesMetrics(t *testing.T) { - err := os.MkdirAll(testBcacheUuidPath, 0755) + err := os.MkdirAll(testBcacheUUIDPath, 0755) require.NoError(t, err) err = os.MkdirAll(testBcacheDevPath, 0755) @@ -38,49 +40,49 @@ func TestBcacheGeneratesMetrics(t *testing.T) { err = os.MkdirAll(testBcacheBackingDevPath+"/bcache", 0755) require.NoError(t, err) - err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUuidPath+"/bdev0") + err = os.Symlink(testBcacheBackingDevPath+"/bcache", testBcacheUUIDPath+"/bdev0") require.NoError(t, err) - err = os.Symlink(testBcacheDevPath, testBcacheUuidPath+"/bdev0/dev") + err = os.Symlink(testBcacheDevPath, testBcacheUUIDPath+"/bdev0/dev") require.NoError(t, err) - err = os.MkdirAll(testBcacheUuidPath+"/bdev0/stats_total", 0755) + err = os.MkdirAll(testBcacheUUIDPath+"/bdev0/stats_total", 0755) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/dirty_data", - []byte(dirty_data), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/dirty_data", + []byte(dirtyData), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/bypassed", + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/bypassed", []byte(bypassed), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_hits", - []byte(cache_bypass_hits), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_hits", + []byte(cacheBypassHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_bypass_misses", - []byte(cache_bypass_misses), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_bypass_misses", + []byte(cacheBypassMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hit_ratio", - []byte(cache_hit_ratio), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hit_ratio", + []byte(cacheHitRatio), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_hits", - []byte(cache_hits), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_hits", + []byte(cacheHits), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_miss_collisions", - []byte(cache_miss_collisions), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_miss_collisions", + []byte(cacheMissCollisions), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_misses", - []byte(cache_misses), 0644) + err = 
os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_misses", + []byte(cacheMisses), 0644) require.NoError(t, err) - err = ioutil.WriteFile(testBcacheUuidPath+"/bdev0/stats_total/cache_readaheads", - []byte(cache_readaheads), 0644) + err = os.WriteFile(testBcacheUUIDPath+"/bdev0/stats_total/cache_readaheads", + []byte(cacheReadaheads), 0644) require.NoError(t, err) fields := map[string]interface{}{ diff --git a/plugins/inputs/bcache/bcache_windows.go b/plugins/inputs/bcache/bcache_windows.go new file mode 100644 index 0000000000000..faeba8888bb3b --- /dev/null +++ b/plugins/inputs/bcache/bcache_windows.go @@ -0,0 +1,4 @@ +//go:build windows +// +build windows + +package bcache diff --git a/plugins/inputs/bcache/sample.conf b/plugins/inputs/bcache/sample.conf new file mode 100644 index 0000000000000..d2639cdbc8189 --- /dev/null +++ b/plugins/inputs/bcache/sample.conf @@ -0,0 +1,10 @@ +# Read metrics of bcache from stats_total and dirty_data +[[inputs.bcache]] + ## Bcache sets path + ## If not specified, then default is: + bcachePath = "/sys/fs/bcache" + + ## By default, Telegraf gathers stats for all bcache devices + ## Setting devices will restrict the stats to the specified + ## bcache devices. + bcacheDevs = ["bcache0"] diff --git a/plugins/inputs/beanstalkd/README.md b/plugins/inputs/beanstalkd/README.md index e4fe2203d8d9b..7052444dddf10 100644 --- a/plugins/inputs/beanstalkd/README.md +++ b/plugins/inputs/beanstalkd/README.md @@ -1,10 +1,12 @@ # Beanstalkd Input Plugin -The `beanstalkd` plugin collects server stats as well as tube stats (reported by `stats` and `stats-tube` commands respectively). +The `beanstalkd` plugin collects server stats as well as tube stats (reported by +`stats` and `stats-tube` commands respectively). -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Collects Beanstalkd server and tubes stats [[inputs.beanstalkd]] ## Server to collect data from server = "localhost:11300" @@ -14,11 +16,14 @@ The `beanstalkd` plugin collects server stats as well as tube stats (reported by tubes = ["notifications"] ``` -### Metrics: +## Metrics -Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/beanstalkd/master/doc/protocol.txt) for detailed explanation of `stats` and `stats-tube` commands output. +Please see the [Beanstalk Protocol +doc](https://raw.githubusercontent.com/kr/beanstalkd/master/doc/protocol.txt) +for a detailed explanation of `stats` and `stats-tube` commands output.
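Reviewer note: for anyone checking the `runQuery` changes below, this is roughly the `stats` exchange the plugin drives over `net/textproto`. A minimal sketch assuming a beanstalkd instance on the default `localhost:11300`; the real plugin decodes the YAML body into typed structs with `gopkg.in/yaml.v2` rather than printing it:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/textproto"
)

func main() {
	// beanstalkd speaks a line-oriented plain-text protocol.
	conn, err := textproto.Dial("tcp", "localhost:11300")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Send "stats" and pair the request with its response, as runQuery does.
	id, err := conn.Cmd("stats")
	if err != nil {
		log.Fatal(err)
	}
	conn.StartResponse(id)
	defer conn.EndResponse(id)

	// The reply is "OK <bytes>" followed by a YAML body of that length.
	status, err := conn.ReadLine()
	if err != nil {
		log.Fatal(err)
	}
	var size int
	if _, err := fmt.Sscanf(status, "OK %d", &size); err != nil {
		log.Fatalf("unexpected status line %q: %v", status, err)
	}
	body := make([]byte, size)
	if _, err := io.ReadFull(conn.R, body); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", body)
}
```

The `StartResponse`/`EndResponse` pairing is what lets the plugin's `Gather` issue per-tube queries from separate goroutines over a single shared connection.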
`beanstalkd_overview` – statistical information about the system as a whole + - fields - cmd_delete - cmd_pause_tube @@ -38,6 +43,7 @@ Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/bea - server (address taken from config) `beanstalkd_tube` – statistical information about the specified tube + - fields - binlog_current_index - binlog_max_size @@ -90,8 +96,9 @@ Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/bea - server (address taken from config) - version -### Example Output: -``` +## Example Output + +```shell beanstalkd_overview,host=server.local,hostname=a2ab22ed12e0,id=232485800aa11b24,server=localhost:11300,version=1.10 cmd_stats_tube=29482i,current_jobs_delayed=0i,current_jobs_urgent=6i,cmd_kick=0i,cmd_stats=7378i,cmd_stats_job=0i,current_waiting=0i,max_job_size=65535i,pid=6i,cmd_bury=0i,cmd_reserve_with_timeout=0i,cmd_touch=0i,current_connections=1i,current_jobs_ready=6i,current_producers=0i,cmd_delete=0i,cmd_list_tubes=7369i,cmd_peek_ready=0i,cmd_put=6i,cmd_use=3i,cmd_watch=0i,current_jobs_reserved=0i,rusage_stime=6.07,cmd_list_tubes_watched=0i,cmd_pause_tube=0i,total_jobs=6i,binlog_records_migrated=0i,cmd_list_tube_used=0i,cmd_peek_delayed=0i,cmd_release=0i,current_jobs_buried=0i,job_timeouts=0i,binlog_current_index=0i,binlog_max_size=10485760i,total_connections=7378i,cmd_peek_buried=0i,cmd_reserve=0i,current_tubes=4i,binlog_records_written=0i,cmd_peek=0i,rusage_utime=1.13,uptime=7099i,binlog_oldest_index=0i,current_workers=0i,cmd_ignore=0i 1528801650000000000 beanstalkd_tube,host=server.local,name=notifications,server=localhost:11300 pause_time_left=0i,current_jobs_buried=0i,current_jobs_delayed=0i,current_jobs_reserved=0i,current_using=0i,current_waiting=0i,pause=0i,total_jobs=3i,cmd_delete=0i,cmd_pause_tube=0i,current_jobs_ready=3i,current_jobs_urgent=3i,current_watching=0i 1528801650000000000 diff --git a/plugins/inputs/beanstalkd/beanstalkd.go b/plugins/inputs/beanstalkd/beanstalkd.go index 932edd301f910..68c189f345cfb 100644 --- a/plugins/inputs/beanstalkd/beanstalkd.go +++ b/plugins/inputs/beanstalkd/beanstalkd.go @@ -1,35 +1,29 @@ +//go:generate ../../../tools/readme_config_includer/generator package beanstalkd import ( + _ "embed" "fmt" "io" "net/textproto" "sync" + "gopkg.in/yaml.v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/yaml.v2" ) -const sampleConfig = ` - ## Server to collect data from - server = "localhost:11300" - - ## List of tubes to gather stats about. - ## If no tubes specified then data gathered for each tube on server reported by list-tubes command - tubes = ["notifications"] -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string type Beanstalkd struct { Server string `toml:"server"` Tubes []string `toml:"tubes"` } -func (b *Beanstalkd) Description() string { - return "Collects Beanstalkd server and tubes stats" -} - -func (b *Beanstalkd) SampleConfig() string { +func (*Beanstalkd) SampleConfig() string { return sampleConfig } @@ -62,7 +56,10 @@ func (b *Beanstalkd) Gather(acc telegraf.Accumulator) error { for _, tube := range tubes { wg.Add(1) go func(tube string) { - b.gatherTubeStats(connection, tube, acc) + err := b.gatherTubeStats(connection, tube, acc) + if err != nil { + acc.AddError(err) + } wg.Done() }(tube) } @@ -128,7 +125,7 @@ func (b *Beanstalkd) gatherServerStats(connection *textproto.Conn, acc telegraf. 
}, map[string]string{ "hostname": stats.Hostname, - "id": stats.Id, + "id": stats.ID, "server": b.Server, "version": stats.Version, }, @@ -169,13 +166,13 @@ func (b *Beanstalkd) gatherTubeStats(connection *textproto.Conn, tube string, ac } func runQuery(connection *textproto.Conn, cmd string, result interface{}) error { - requestId, err := connection.Cmd(cmd) + requestID, err := connection.Cmd(cmd) if err != nil { return err } - connection.StartResponse(requestId) - defer connection.EndResponse(requestId) + connection.StartResponse(requestID) + defer connection.EndResponse(requestID) status, err := connection.ReadLine() if err != nil { @@ -240,7 +237,7 @@ type statsResponse struct { CurrentWaiting int `yaml:"current-waiting"` CurrentWorkers int `yaml:"current-workers"` Hostname string `yaml:"hostname"` - Id string `yaml:"id"` + ID string `yaml:"id"` JobTimeouts int `yaml:"job-timeouts"` MaxJobSize int `yaml:"max-job-size"` Pid int `yaml:"pid"` diff --git a/plugins/inputs/beanstalkd/beanstalkd_test.go b/plugins/inputs/beanstalkd/beanstalkd_test.go index 92c108e06aa91..9d97a682c4873 100644 --- a/plugins/inputs/beanstalkd/beanstalkd_test.go +++ b/plugins/inputs/beanstalkd/beanstalkd_test.go @@ -22,6 +22,7 @@ func TestBeanstalkd(t *testing.T) { tubesConfig []string expectedTubes []tubeStats notExpectedTubes []tubeStats + expectedError string }{ { name: "All tubes stats", @@ -50,15 +51,14 @@ func TestBeanstalkd(t *testing.T) { {name: "default", fields: defaultTubeFields}, {name: "test", fields: testTubeFields}, }, + expectedError: "input does not match format", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { server, err := startTestServer(t) - if err != nil { - t.Fatalf("Unable to create test server") - } + require.NoError(t, err, "Unable to create test server") defer server.Close() serverAddress := server.Addr().String() @@ -68,8 +68,13 @@ func TestBeanstalkd(t *testing.T) { } var acc testutil.Accumulator - require.NoError(t, acc.GatherError(plugin.Gather)) - + err = acc.GatherError(plugin.Gather) + if test.expectedError == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Equal(t, test.expectedError, err.Error()) + } acc.AssertContainsTaggedFields(t, "beanstalkd_overview", overviewFields, getOverviewTags(serverAddress), @@ -110,8 +115,8 @@ func startTestServer(t *testing.T) (net.Listener, error) { tp := textproto.NewConn(connection) defer tp.Close() - sendSuccessResponse := func(body string) { - tp.PrintfLine("OK %d\r\n%s", len(body), body) + sendSuccessResponse := func(body string) error { + return tp.PrintfLine("OK %d\r\n%s", len(body), body) } for { @@ -125,15 +130,30 @@ func startTestServer(t *testing.T) (net.Listener, error) { switch cmd { case "list-tubes": - sendSuccessResponse(listTubesResponse) + if err := sendSuccessResponse(listTubesResponse); err != nil { + t.Logf("sending response %q failed: %v", listTubesResponse, err) + return + } case "stats": - sendSuccessResponse(statsResponse) + if err := sendSuccessResponse(statsResponse); err != nil { + t.Logf("sending response %q failed: %v", statsResponse, err) + return + } case "stats-tube default": - sendSuccessResponse(statsTubeDefaultResponse) + if err := sendSuccessResponse(statsTubeDefaultResponse); err != nil { + t.Logf("sending response %q failed: %v", statsTubeDefaultResponse, err) + return + } case "stats-tube test": - sendSuccessResponse(statsTubeTestResponse) + if err := sendSuccessResponse(statsTubeTestResponse); err != nil { + t.Logf("sending response %q failed: %v", 
statsTubeTestResponse, err) + return + } case "stats-tube unknown": - tp.PrintfLine("NOT_FOUND") + if err := tp.PrintfLine("NOT_FOUND"); err != nil { + t.Logf("sending response %q failed: %v", "NOT_FOUND", err) + return + } default: t.Log("Test server: unknown command") } diff --git a/plugins/inputs/beanstalkd/sample.conf b/plugins/inputs/beanstalkd/sample.conf new file mode 100644 index 0000000000000..eca26780f00ba --- /dev/null +++ b/plugins/inputs/beanstalkd/sample.conf @@ -0,0 +1,8 @@ +# Collects Beanstalkd server and tubes stats +[[inputs.beanstalkd]] + ## Server to collect data from + server = "localhost:11300" + + ## List of tubes to gather stats about. + ## If no tubes specified then data gathered for each tube on server reported by list-tubes command + tubes = ["notifications"] diff --git a/plugins/inputs/beat/README.md b/plugins/inputs/beat/README.md new file mode 100644 index 0000000000000..3312b93ced717 --- /dev/null +++ b/plugins/inputs/beat/README.md @@ -0,0 +1,151 @@ +# Beat Input Plugin + +The Beat plugin will collect metrics from the given Beat instances. It is +known to work with Filebeat and Kafkabeat. + +## Configuration + +```toml @sample.conf +# Read metrics exposed by Beat +[[inputs.beat]] + ## An URL from which to read Beat-formatted JSON + ## Default is "http://127.0.0.1:5066". + url = "http://127.0.0.1:5066" + + ## Enable collection of the listed stats + ## An empty list means collect all. Available options are currently + ## "beat", "libbeat", "system" and "filebeat". + # include = ["beat", "libbeat", "filebeat"] + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "logstash.example.com" + + ## Timeout for HTTP requests + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +## Metrics + +- **beat** + - Fields: + - cpu_system_ticks + - cpu_system_time_ms + - cpu_total_ticks + - cpu_total_time_ms + - cpu_total_value + - cpu_user_ticks + - cpu_user_time_ms + - info_uptime_ms + - memstats_gc_next + - memstats_memory_alloc + - memstats_memory_total + - memstats_rss + - Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_filebeat** + - Fields: + - events_active + - events_added + - events_done + - harvester_closed + - harvester_open_files + - harvester_running + - harvester_skipped + - harvester_started + - input_log_files_renamed + - input_log_files_truncated + - Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_libbeat** + - Fields: + - config_module_running + - config_module_starts + - config_module_stops + - config_reloads + - output_events_acked + - output_events_active + - output_events_batches + - output_events_dropped + - output_events_duplicates + - output_events_failed + - output_events_total + - output_type + - output_read_bytes + - output_read_errors + - output_write_bytes + - output_write_errors + - outputs_kafka_bytes_read + - outputs_kafka_bytes_write + - pipeline_clients + - pipeline_events_active + - pipeline_events_dropped + - pipeline_events_failed + - pipeline_events_filtered + - pipeline_events_published + - pipeline_events_retry + - pipeline_events_total + - 
pipeline_queue_acked + - Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +- **beat_system** + - Field: + - cpu_cores + - load_1 + - load_15 + - load_5 + - load_norm_1 + - load_norm_15 + - load_norm_5 + - Tags: + - beat_beat + - beat_host + - beat_id + - beat_name + - beat_version + +## Example Output + +```shell +$ telegraf --input-filter beat --test + +> beat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + cpu_system_ticks=656750,cpu_system_time_ms=656750,cpu_total_ticks=5461190,cpu_total_time_ms=5461198,cpu_total_value=5461190,cpu_user_ticks=4804440,cpu_user_time_ms=4804448,info_uptime_ms=342634196,memstats_gc_next=20199584,memstats_memory_alloc=12547424,memstats_memory_total=486296424792,memstats_rss=72552448 1540316047000000000 +> beat_libbeat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + config_module_running=0,config_module_starts=0,config_module_stops=0,config_reloads=0,output_events_acked=192404,output_events_active=0,output_events_batches=1607,output_events_dropped=0,output_events_duplicates=0,output_events_failed=0,output_events_total=192404,output_read_bytes=0,output_read_errors=0,output_write_bytes=0,output_write_errors=0,outputs_kafka_bytes_read=1118528,outputs_kafka_bytes_write=48002014,pipeline_clients=1,pipeline_events_active=0,pipeline_events_dropped=0,pipeline_events_failed=0,pipeline_events_filtered=11496,pipeline_events_published=192404,pipeline_events_retry=14,pipeline_events_total=203900,pipeline_queue_acked=192404 1540316047000000000 +> beat_system,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + cpu_cores=32,load_1=46.08,load_15=49.82,load_5=47.88,load_norm_1=1.44,load_norm_15=1.5569,load_norm_5=1.4963 1540316047000000000 +> beat_filebeat,beat_beat=filebeat,beat_host=node-6,beat_id=9c1c8697-acb4-4df0-987d-28197814f788,beat_name=node-6-test,beat_version=6.4.2,host=node-6 + events_active=0,events_added=3223,events_done=3223,harvester_closed=0,harvester_open_files=0,harvester_running=0,harvester_skipped=0,harvester_started=0,input_log_files_renamed=0,input_log_files_truncated=0 1540320286000000000 +``` diff --git a/plugins/inputs/beat/beat.go b/plugins/inputs/beat/beat.go new file mode 100644 index 0000000000000..7558d8e58c8c8 --- /dev/null +++ b/plugins/inputs/beat/beat.go @@ -0,0 +1,199 @@ +//go:generate ../../../tools/readme_config_includer/generator +package beat + +import ( + _ "embed" + "encoding/json" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf
+var sampleConfig string
+
+const suffixInfo = "/"
+const suffixStats = "/stats"
+
+type Info struct {
+	Beat     string `json:"beat"`
+	Hostname string `json:"hostname"`
+	Name     string `json:"name"`
+	UUID     string `json:"uuid"`
+	Version  string `json:"version"`
+}
+
+type Stats struct {
+	Beat     map[string]interface{} `json:"beat"`
+	FileBeat interface{}            `json:"filebeat"`
+	Libbeat  interface{}            `json:"libbeat"`
+	System   interface{}            `json:"system"`
+}
+
+type Beat struct {
+	URL string `toml:"url"`
+
+	Includes []string `toml:"include"`
+
+	Username   string            `toml:"username"`
+	Password   string            `toml:"password"`
+	Method     string            `toml:"method"`
+	Headers    map[string]string `toml:"headers"`
+	HostHeader string            `toml:"host_header"`
+	Timeout    config.Duration   `toml:"timeout"`
+
+	tls.ClientConfig
+	client *http.Client
+}
+
+func NewBeat() *Beat {
+	return &Beat{
+		URL:      "http://127.0.0.1:5066",
+		Includes: []string{"beat", "libbeat", "filebeat"},
+		Method:   "GET",
+		Headers:  make(map[string]string),
+		Timeout:  config.Duration(time.Second * 5),
+	}
+}
+
+func (*Beat) SampleConfig() string {
+	return sampleConfig
+}
+
+func (beat *Beat) Init() error {
+	availableStats := []string{"beat", "libbeat", "system", "filebeat"}
+
+	var err error
+	beat.client, err = beat.createHTTPClient()
+
+	if err != nil {
+		return err
+	}
+
+	err = choice.CheckSlice(beat.Includes, availableStats)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// createHTTPClient creates an HTTP client to access the API
+func (beat *Beat) createHTTPClient() (*http.Client, error) {
+	tlsConfig, err := beat.ClientConfig.TLSConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: tlsConfig,
+		},
+		Timeout: time.Duration(beat.Timeout),
+	}
+
+	return client, nil
+}
+
+// gatherJSONData queries the data source and parses the response JSON
+func (beat *Beat) gatherJSONData(address string, value interface{}) error {
+	request, err := http.NewRequest(beat.Method, address, nil)
+	if err != nil {
+		return err
+	}
+
+	if beat.Username != "" {
+		request.SetBasicAuth(beat.Username, beat.Password)
+	}
+	for k, v := range beat.Headers {
+		request.Header.Add(k, v)
+	}
+	if beat.HostHeader != "" {
+		request.Host = beat.HostHeader
+	}
+
+	response, err := beat.client.Do(request)
+	if err != nil {
+		return err
+	}
+
+	defer response.Body.Close()
+
+	return json.NewDecoder(response.Body).Decode(value)
+}
+
+func (beat *Beat) Gather(accumulator telegraf.Accumulator) error {
+	beatStats := &Stats{}
+	beatInfo := &Info{}
+
+	infoURL, err := url.Parse(beat.URL + suffixInfo)
+	if err != nil {
+		return err
+	}
+	statsURL, err := url.Parse(beat.URL + suffixStats)
+	if err != nil {
+		return err
+	}
+
+	err = beat.gatherJSONData(infoURL.String(), beatInfo)
+	if err != nil {
+		return err
+	}
+	tags := map[string]string{
+		"beat_beat":    beatInfo.Beat,
+		"beat_id":      beatInfo.UUID,
+		"beat_name":    beatInfo.Name,
+		"beat_host":    beatInfo.Hostname,
+		"beat_version": beatInfo.Version,
+	}
+
+	err = beat.gatherJSONData(statsURL.String(), beatStats)
+	if err != nil {
+		return err
+	}
+
+	for _, name := range beat.Includes {
+		var stats interface{}
+		var metric string
+
+		switch name {
+		case "beat":
+			stats = beatStats.Beat
+			metric = "beat"
+		case "filebeat":
+			stats = beatStats.FileBeat
+			metric = "beat_filebeat"
+		case "system":
+			stats = beatStats.System
+			metric = "beat_system"
+		case "libbeat":
+			stats = beatStats.Libbeat
+			metric = "beat_libbeat"
+		default:
+			return fmt.Errorf("unknown stats-type
%q", name) + } + flattener := jsonparser.JSONFlattener{} + err := flattener.FullFlattenJSON("", stats, true, true) + if err != nil { + return err + } + accumulator.AddFields(metric, flattener.Fields, tags) + } + + return nil +} + +func init() { + inputs.Add("beat", func() telegraf.Input { + return NewBeat() + }) +} diff --git a/plugins/inputs/beat/beat6_info.json b/plugins/inputs/beat/beat6_info.json new file mode 100644 index 0000000000000..3cc318c330447 --- /dev/null +++ b/plugins/inputs/beat/beat6_info.json @@ -0,0 +1,7 @@ +{ + "beat": "filebeat", + "hostname": "node-6", + "name": "node-6-test", + "uuid": "9c1c8697-acb4-4df0-987d-28197814f785", + "version": "6.4.2" +} diff --git a/plugins/inputs/beat/beat6_stats.json b/plugins/inputs/beat/beat6_stats.json new file mode 100644 index 0000000000000..f34b9d1f06d1e --- /dev/null +++ b/plugins/inputs/beat/beat6_stats.json @@ -0,0 +1,137 @@ +{ + "beat": { + "cpu": { + "system": { + "ticks": 626970, + "time": { + "ms": 626972 + } + }, + "total": { + "ticks": 5215010, + "time": { + "ms": 5215018 + }, + "value": 5215010 + }, + "user": { + "ticks": 4588040, + "time": { + "ms": 4588046 + } + } + }, + "info": { + "ephemeral_id": "809e3b63-4fa0-4f74-822a-8e3c08298336", + "uptime": { + "ms": 327248661 + } + }, + "memstats": { + "gc_next": 20611808, + "memory_alloc": 12692544, + "memory_total": 462910102088, + "rss": 80273408 + } + }, + "filebeat": { + "events": { + "active": 0, + "added": 182990, + "done": 182990 + }, + "harvester": { + "closed": 2222, + "open_files": 4, + "running": 4, + "skipped": 0, + "started": 2226 + }, + "input": { + "log": { + "files": { + "renamed": 0, + "truncated": 0 + } + } + } + }, + "libbeat": { + "config": { + "module": { + "running": 0, + "starts": 0, + "stops": 0 + }, + "reloads": 0 + }, + "output": { + "events": { + "acked": 172067, + "active": 0, + "batches": 1490, + "dropped": 0, + "duplicates": 0, + "failed": 0, + "total": 172067 + }, + "read": { + "bytes": 0, + "errors": 0 + }, + "type": "kafka", + "write": { + "bytes": 0, + "errors": 0 + } + }, + "outputs": { + "kafka": { + "bytes_read": 1048670, + "bytes_write": 43136887 + } + }, + "pipeline": { + "clients": 1, + "events": { + "active": 0, + "dropped": 0, + "failed": 0, + "filtered": 10923, + "published": 172067, + "retry": 14, + "total": 182990 + }, + "queue": { + "acked": 172067 + } + } + }, + "registrar": { + "states": { + "cleanup": 3446, + "current": 16409, + "update": 182990 + }, + "writes": { + "fail": 0, + "success": 11718, + "total": 11718 + } + }, + "system": { + "cpu": { + "cores": 32 + }, + "load": { + "1": 32.49, + "15": 41.9, + "5": 40.16, + "norm": { + "1": 1.0153, + "15": 1.3094, + "5": 1.255 + } + } + } +} diff --git a/plugins/inputs/beat/beat_test.go b/plugins/inputs/beat/beat_test.go new file mode 100644 index 0000000000000..433e8fcd61337 --- /dev/null +++ b/plugins/inputs/beat/beat_test.go @@ -0,0 +1,203 @@ +package beat + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func Test_BeatStats(t *testing.T) { + var beat6StatsAccumulator testutil.Accumulator + var beatTest = NewBeat() + // System stats are disabled by default + beatTest.Includes = []string{"beat", "libbeat", "system", "filebeat"} + require.NoError(t, beatTest.Init()) + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) { + var jsonFilePath string + + switch request.URL.Path { + case 
suffixInfo: + jsonFilePath = "beat6_info.json" + case suffixStats: + jsonFilePath = "beat6_stats.json" + default: + require.FailNow(t, "cannot handle request") + } + + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + _, err = w.Write(data) + require.NoError(t, err, "could not write data") + })) + requestURL, err := url.Parse(beatTest.URL) + require.NoErrorf(t, err, "can't parse URL %s", beatTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(t, err, "can't listen for %s: %v", requestURL, err) + + fakeServer.Start() + defer fakeServer.Close() + + require.NoError(t, err, beatTest.Gather(&beat6StatsAccumulator)) + + beat6StatsAccumulator.AssertContainsTaggedFields( + t, + "beat", + map[string]interface{}{ + "cpu_system_ticks": float64(626970), + "cpu_system_time_ms": float64(626972), + "cpu_total_ticks": float64(5215010), + "cpu_total_time_ms": float64(5215018), + "cpu_total_value": float64(5215010), + "cpu_user_ticks": float64(4588040), + "cpu_user_time_ms": float64(4588046), + "info_uptime_ms": float64(327248661), + "info_ephemeral_id": "809e3b63-4fa0-4f74-822a-8e3c08298336", + "memstats_gc_next": float64(20611808), + "memstats_memory_alloc": float64(12692544), + "memstats_memory_total": float64(462910102088), + "memstats_rss": float64(80273408), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) + beat6StatsAccumulator.AssertContainsTaggedFields( + t, + "beat_filebeat", + map[string]interface{}{ + "events_active": float64(0), + "events_added": float64(182990), + "events_done": float64(182990), + "harvester_closed": float64(2222), + "harvester_open_files": float64(4), + "harvester_running": float64(4), + "harvester_skipped": float64(0), + "harvester_started": float64(2226), + "input_log_files_renamed": float64(0), + "input_log_files_truncated": float64(0), + }, + map[string]string{ + "beat_beat": string("filebeat"), + "beat_host": string("node-6"), + "beat_id": string("9c1c8697-acb4-4df0-987d-28197814f785"), + "beat_name": string("node-6-test"), + "beat_version": string("6.4.2"), + }, + ) + beat6StatsAccumulator.AssertContainsTaggedFields( + t, + "beat_libbeat", + map[string]interface{}{ + "config_module_running": float64(0), + "config_module_starts": float64(0), + "config_module_stops": float64(0), + "config_reloads": float64(0), + "output_type": "kafka", + "output_events_acked": float64(172067), + "output_events_active": float64(0), + "output_events_batches": float64(1490), + "output_events_dropped": float64(0), + "output_events_duplicates": float64(0), + "output_events_failed": float64(0), + "output_events_total": float64(172067), + "output_read_bytes": float64(0), + "output_read_errors": float64(0), + "output_write_bytes": float64(0), + "output_write_errors": float64(0), + "outputs_kafka_bytes_read": float64(1048670), + "outputs_kafka_bytes_write": float64(43136887), + "pipeline_clients": float64(1), + "pipeline_events_active": float64(0), + "pipeline_events_dropped": float64(0), + "pipeline_events_failed": float64(0), + "pipeline_events_filtered": float64(10923), + "pipeline_events_published": float64(172067), + "pipeline_events_retry": float64(14), + "pipeline_events_total": float64(182990), + "pipeline_queue_acked": float64(172067), + }, + 
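+		// The tag values asserted below come straight from the
+		// beat6_info.json fixture served by the fake HTTP server above.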
map[string]string{
+			"beat_beat":    string("filebeat"),
+			"beat_host":    string("node-6"),
+			"beat_id":      string("9c1c8697-acb4-4df0-987d-28197814f785"),
+			"beat_name":    string("node-6-test"),
+			"beat_version": string("6.4.2"),
+		},
+	)
+	beat6StatsAccumulator.AssertContainsTaggedFields(
+		t,
+		"beat_system",
+		map[string]interface{}{
+			"cpu_cores":    float64(32),
+			"load_1":       float64(32.49),
+			"load_15":      float64(41.9),
+			"load_5":       float64(40.16),
+			"load_norm_1":  float64(1.0153),
+			"load_norm_15": float64(1.3094),
+			"load_norm_5":  float64(1.255),
+		},
+		map[string]string{
+			"beat_beat":    string("filebeat"),
+			"beat_host":    string("node-6"),
+			"beat_id":      string("9c1c8697-acb4-4df0-987d-28197814f785"),
+			"beat_name":    string("node-6-test"),
+			"beat_version": string("6.4.2"),
+		},
+	)
+}
+
+func Test_BeatRequest(t *testing.T) {
+	var beat6StatsAccumulator testutil.Accumulator
+	beatTest := NewBeat()
+	// System stats are disabled by default
+	beatTest.Includes = []string{"beat", "libbeat", "system", "filebeat"}
+	require.NoError(t, beatTest.Init())
+	fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) {
+		var jsonFilePath string
+
+		switch request.URL.Path {
+		case suffixInfo:
+			jsonFilePath = "beat6_info.json"
+		case suffixStats:
+			jsonFilePath = "beat6_stats.json"
+		default:
+			require.FailNow(t, "cannot handle request")
+		}
+
+		data, err := os.ReadFile(jsonFilePath)
+		require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath)
+		require.Equal(t, "beat.test.local", request.Host)
+		require.Equal(t, "POST", request.Method)
+		require.Equal(t, "Basic YWRtaW46UFdE", request.Header.Get("Authorization"))
+		require.Equal(t, "test-value", request.Header.Get("X-Test"))
+
+		_, err = w.Write(data)
+		require.NoError(t, err, "could not write data")
+	}))
+
+	requestURL, err := url.Parse(beatTest.URL)
+	require.NoErrorf(t, err, "can't parse URL %s", beatTest.URL)
+	fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port()))
+	require.NoErrorf(t, err, "can't listen for %s: %v", requestURL, err)
+	fakeServer.Start()
+	defer fakeServer.Close()
+
+	beatTest.Headers["X-Test"] = "test-value"
+	beatTest.HostHeader = "beat.test.local"
+	beatTest.Method = "POST"
+	beatTest.Username = "admin"
+	beatTest.Password = "PWD"
+
+	require.NoError(t, beatTest.Gather(&beat6StatsAccumulator))
+}
diff --git a/plugins/inputs/beat/sample.conf b/plugins/inputs/beat/sample.conf
new file mode 100644
index 0000000000000..ffa186b3c0f19
--- /dev/null
+++ b/plugins/inputs/beat/sample.conf
@@ -0,0 +1,33 @@
+# Read metrics exposed by Beat
+[[inputs.beat]]
+  ## A URL from which to read Beat-formatted JSON
+  ## Default is "http://127.0.0.1:5066".
+  url = "http://127.0.0.1:5066"
+
+  ## Enable collection of the listed stats
+  ## An empty list means collect all. Available options are currently
+  ## "beat", "libbeat", "system" and "filebeat".
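+  ## For example, a hypothetical instance that only needs the core Beat and
+  ## system sections could set (illustrative value, not the default):
+  # include = ["beat", "system"]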
+ # include = ["beat", "libbeat", "filebeat"] + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "logstash.example.com" + + ## Timeout for HTTP requests + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/bind/README.md b/plugins/inputs/bind/README.md index e3bcf6a75b252..965d54141cba8 100644 --- a/plugins/inputs/bind/README.md +++ b/plugins/inputs/bind/README.md @@ -2,50 +2,67 @@ This plugin decodes the JSON or XML statistics provided by BIND 9 nameservers. -### XML Statistics Channel +## XML Statistics Channel -Version 2 statistics (BIND 9.6 - 9.9) and version 3 statistics (BIND 9.9+) are supported. Note that -for BIND 9.9 to support version 3 statistics, it must be built with the `--enable-newstats` compile -flag, and it must be specifically requested via the correct URL. Version 3 statistics are the -default (and only) XML format in BIND 9.10+. +Version 2 statistics (BIND 9.6 - 9.9) and version 3 statistics (BIND 9.9+) are +supported. Note that for BIND 9.9 to support version 3 statistics, it must be +built with the `--enable-newstats` compile flag, and it must be specifically +requested via the correct URL. Version 3 statistics are the default (and only) +XML format in BIND 9.10+. -### JSON Statistics Channel +## JSON Statistics Channel -JSON statistics schema version 1 (BIND 9.10+) is supported. As of writing, some distros still do -not enable support for JSON statistics in their BIND packages. +JSON statistics schema version 1 (BIND 9.10+) is supported. As of writing, some +distros still do not enable support for JSON statistics in their BIND packages. -### Configuration: +## Configuration + +```toml @sample.conf +# Read BIND nameserver XML statistics +[[inputs.bind]] + ## An array of BIND XML statistics URI to gather stats. + ## Default is "http://localhost:8053/xml/v3". + # urls = ["http://localhost:8053/xml/v3"] + # gather_memory_contexts = false + # gather_views = false + + ## Timeout for http requests made by bind nameserver + # timeout = "4s" +``` - **urls** []string: List of BIND statistics channel URLs to collect from. Do not include a trailing slash in the URL. Default is "http://localhost:8053/xml/v3". - **gather_memory_contexts** bool: Report per-context memory statistics. - **gather_views** bool: Report per-view query statistics. +- **timeout** Timeout for http requests made by bind nameserver (example: "4s"). -The following table summarizes the URL formats which should be used, depending on your BIND -version and configured statistics channel. +The following table summarizes the URL formats which should be used, depending +on your BIND version and configured statistics channel. 
| BIND Version | Statistics Format | Example URL | | ------------ | ----------------- | ----------------------------- | -| 9.6 - 9.8 | XML v2 | http://localhost:8053 | -| 9.9 | XML v2 | http://localhost:8053/xml/v2 | -| 9.9+ | XML v3 | http://localhost:8053/xml/v3 | -| 9.10+ | JSON v1 | http://localhost:8053/json/v1 | +| 9.6 - 9.8 | XML v2 | `http://localhost:8053` | +| 9.9 | XML v2 | `http://localhost:8053/xml/v2` | +| 9.9+ | XML v3 | `http://localhost:8053/xml/v3` | +| 9.10+ | JSON v1 | `http://localhost:8053/json/v1` | -#### Configuration of BIND Daemon +### Configuration of BIND Daemon -Add the following to your named.conf if running Telegraf on the same host as the BIND daemon: -``` +Add the following to your named.conf if running Telegraf on the same host as the +BIND daemon: + +```json statistics-channels { inet 127.0.0.1 port 8053; }; ``` -Alternatively, specify a wildcard address (e.g., 0.0.0.0) or specific IP address of an interface to -configure the BIND daemon to listen on that address. Note that you should secure the statistics -channel with an ACL if it is publicly reachable. Consult the BIND Administrator Reference Manual -for more information. +Alternatively, specify a wildcard address (e.g., 0.0.0.0) or specific IP address +of an interface to configure the BIND daemon to listen on that address. Note +that you should secure the statistics channel with an ACL if it is publicly +reachable. Consult the BIND Administrator Reference Manual for more information. -### Measurements & Fields: +## Metrics - bind_counter - name=value (multiple) @@ -59,7 +76,7 @@ for more information. - total - in_use -### Tags: +## Tags - All measurements - url @@ -72,10 +89,10 @@ for more information. - id - name -### Sample Queries: +## Sample Queries -These are some useful queries (to generate dashboards or other) to run against data from this -plugin: +These are some useful queries (to generate dashboards or other) to run against +data from this plugin: ```sql SELECT non_negative_derivative(mean(/^A$|^PTR$/), 5m) FROM bind_counter \ @@ -83,7 +100,7 @@ WHERE "url" = 'localhost:8053' AND "type" = 'qtype' AND time > now() - 1h \ GROUP BY time(5m), "type" ``` -``` +```text name: bind_counter tags: type=qtype time non_negative_derivative_A non_negative_derivative_PTR @@ -103,11 +120,11 @@ time non_negative_derivative_A non_negative_derivative_PTR 1553865600000000000 280.6666666667443 1807.9071428570896 ``` -### Example Output +## Example Output Here is example output of this plugin: -``` +```shell bind_memory,host=LAP,port=8053,source=localhost,url=localhost:8053 block_size=12058624i,context_size=4575056i,in_use=4113717i,lost=0i,total_use=16663252i 1554276619000000000 bind_counter,host=LAP,port=8053,source=localhost,type=opcode,url=localhost:8053 IQUERY=0i,NOTIFY=0i,QUERY=9i,STATUS=0i,UPDATE=0i 1554276619000000000 bind_counter,host=LAP,port=8053,source=localhost,type=rcode,url=localhost:8053 17=0i,18=0i,19=0i,20=0i,21=0i,22=0i,BADCOOKIE=0i,BADVERS=0i,FORMERR=0i,NOERROR=7i,NOTAUTH=0i,NOTIMP=0i,NOTZONE=0i,NXDOMAIN=0i,NXRRSET=0i,REFUSED=0i,RESERVED11=0i,RESERVED12=0i,RESERVED13=0i,RESERVED14=0i,RESERVED15=0i,SERVFAIL=2i,YXDOMAIN=0i,YXRRSET=0i 1554276619000000000 diff --git a/plugins/inputs/bind/bind.go b/plugins/inputs/bind/bind.go index 967c9031a2634..321822d63f4bf 100644 --- a/plugins/inputs/bind/bind.go +++ b/plugins/inputs/bind/bind.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package bind import ( + _ "embed" "fmt" "net/http" "net/url" @@ -8,33 +10,33 @@ import ( 
"time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Bind struct { Urls []string GatherMemoryContexts bool GatherViews bool -} + Timeout config.Duration `toml:"timeout"` -var sampleConfig = ` - ## An array of BIND XML statistics URI to gather stats. - ## Default is "http://localhost:8053/xml/v3". - # urls = ["http://localhost:8053/xml/v3"] - # gather_memory_contexts = false - # gather_views = false -` - -var client = &http.Client{ - Timeout: time.Duration(4 * time.Second), + client http.Client } -func (b *Bind) Description() string { - return "Read BIND nameserver XML statistics" +func (*Bind) SampleConfig() string { + return sampleConfig } -func (b *Bind) SampleConfig() string { - return sampleConfig +func (b *Bind) Init() error { + b.client = http.Client{ + Timeout: time.Duration(b.Timeout), + } + + return nil } func (b *Bind) Gather(acc telegraf.Accumulator) error { @@ -47,14 +49,14 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error { for _, u := range b.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(b.gatherUrl(addr, acc)) + acc.AddError(b.gatherURL(addr, acc)) }(addr) } @@ -62,7 +64,7 @@ func (b *Bind) Gather(acc telegraf.Accumulator) error { return nil } -func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (b *Bind) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { switch addr.Path { case "": // BIND 9.6 - 9.8 @@ -77,7 +79,7 @@ func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { // BIND 9.9+ return b.readStatsXMLv3(addr, acc) default: - return fmt.Errorf("URL %s is ambiguous. 
Please check plugin documentation for supported URL formats.", + return fmt.Errorf("provided URL %s is ambiguous, please check plugin documentation for supported URL formats", addr) } } diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go index 6ed953b691dd3..db2358239cc17 100644 --- a/plugins/inputs/bind/bind_test.go +++ b/plugins/inputs/bind/bind_test.go @@ -5,9 +5,11 @@ import ( "net/http" "net/http/httptest" "testing" + "time" + + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestBindJsonStats(t *testing.T) { @@ -20,12 +22,15 @@ func TestBindJsonStats(t *testing.T) { Urls: []string{ts.URL + "/json/v1"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -175,8 +180,8 @@ func TestBindJsonStats(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -190,12 +195,15 @@ func TestBindXmlStatsV2(t *testing.T) { Urls: []string{ts.URL + "/xml/v2"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -377,8 +385,8 @@ func TestBindXmlStatsV2(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -392,12 +400,15 @@ func TestBindXmlStatsV3(t *testing.T) { Urls: []string{ts.URL + "/xml/v3"}, GatherMemoryContexts: true, GatherViews: true, + client: http.Client{ + Timeout: 4 * time.Second, + }, } var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.NoError(t, err) + require.NoError(t, err) // Use subtests for counters, since they are similar structure type fieldSet struct { @@ -601,8 +612,8 @@ func TestBindXmlStatsV3(t *testing.T) { // Subtest for per-context memory stats t.Run("memory_context", func(t *testing.T) { - assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) - assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + require.True(t, acc.HasInt64Field("bind_memory_context", "total")) + require.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) }) } @@ -613,5 +624,5 @@ func TestBindUnparseableURL(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(b.Gather) - assert.Contains(t, err.Error(), "Unable to parse address") + require.Contains(t, err.Error(), "unable to parse address") } diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go index 87b6065e2eb1c..61307683aac35 100644 --- a/plugins/inputs/bind/json_stats.go +++ 
b/plugins/inputs/bind/json_stats.go @@ -31,7 +31,7 @@ type jsonMemory struct { ContextSize int64 Lost int64 Contexts []struct { - Id string + ID string Name string Total int64 InUse int64 @@ -58,12 +58,14 @@ func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stat tags[k] = v } - grouper.Add("bind_counter", tags, ts, name, value) + if err := grouper.Add("bind_counter", tags, ts, name, value); err != nil { + acc.AddError(fmt.Errorf("adding field %q to group failed: %v", name, err)) + } } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -113,7 +115,7 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Memory.Contexts { - tags := map[string]string{"url": urlTag, "id": c.Id, "name": c.Name, "source": host, "port": port} + tags := map[string]string{"url": urlTag, "id": c.ID, "name": c.Name, "source": host, "port": port} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) @@ -133,15 +135,17 @@ func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag st "type": cntrType, } - grouper.Add("bind_counter", tags, ts, cntrName, value) + if err := grouper.Add("bind_counter", tags, ts, cntrName, value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } } } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -153,21 +157,29 @@ func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error { // Progressively build up full jsonStats struct by parsing the individual HTTP responses for _, suffix := range [...]string{"/server", "/net", "/mem"} { - scrapeUrl := addr.String() + suffix + err := func() error { + scrapeURL := addr.String() + suffix - resp, err := client.Get(scrapeUrl) - if err != nil { - return err - } + resp, err := b.client.Get(scrapeURL) + if err != nil { + return err + } - defer resp.Body.Close() + defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) - } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) + } + + if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("unable to decode JSON blob: %s", err) + } - if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode JSON blob: %s", err) + return nil + }() + + if err != nil { + return err } } diff --git a/plugins/inputs/bind/sample.conf b/plugins/inputs/bind/sample.conf new file mode 100644 index 0000000000000..9550953706b0e --- /dev/null +++ b/plugins/inputs/bind/sample.conf @@ -0,0 +1,10 @@ +# Read BIND nameserver XML statistics +[[inputs.bind]] + ## An array of BIND XML statistics URI to gather stats. + ## Default is "http://localhost:8053/xml/v3". 
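+  ## For example, gathering v3 XML stats from two hypothetical local
+  ## instances on different ports would look like:
+  # urls = ["http://localhost:8053/xml/v3", "http://localhost:8153/xml/v3"]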
+ # urls = ["http://localhost:8053/xml/v3"] + # gather_memory_contexts = false + # gather_views = false + + ## Timeout for http requests made by bind nameserver + # timeout = "4s" diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go index 5e17851fb671c..5a0092c5af7cc 100644 --- a/plugins/inputs/bind/xml_stats_v2.go +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -42,7 +42,7 @@ type v2Statistics struct { Memory struct { Contexts []struct { // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater - Id string `xml:"id"` + ID string `xml:"id"` Name string `xml:"name"` Total int64 `xml:"total"` InUse int64 `xml:"inuse"` @@ -75,12 +75,14 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta tags[k] = v } - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding field %q to group failed: %v", c.Name, err)) + } } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -89,7 +91,7 @@ func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, sta func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { var stats v2Root - resp, err := client.Get(addr.String()) + resp, err := b.client.Get(addr.String()) if err != nil { return err } @@ -101,7 +103,7 @@ func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { } if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode XML document: %s", err) + return fmt.Errorf("unable to decode XML document: %s", err) } tags := map[string]string{"url": addr.Host} @@ -142,7 +144,7 @@ func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Statistics.Memory.Contexts { - tags := map[string]string{"url": addr.Host, "id": c.Id, "name": c.Name, "source": host, "port": port} + tags := map[string]string{"url": addr.Host, "id": c.ID, "name": c.Name, "source": host, "port": port} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go index 89e4ea0b8fcb6..ef303f4bf052c 100644 --- a/plugins/inputs/bind/xml_stats_v3.go +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -25,7 +25,7 @@ type v3Stats struct { type v3Memory struct { Contexts []struct { // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater - Id string `xml:"id"` + ID string `xml:"id"` Name string `xml:"name"` Total int64 `xml:"total"` InUse int64 `xml:"inuse"` @@ -81,7 +81,9 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s tags := map[string]string{"url": hostPort, "source": host, "port": port, "type": cg.Type} - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } @@ -98,7 +100,7 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s // Detailed, per-context memory stats if b.GatherMemoryContexts { for _, c := range stats.Memory.Contexts { - tags := map[string]string{"url": hostPort, "source": host, 
"port": port, "id": c.Id, "name": c.Name} + tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.ID, "name": c.Name} fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} acc.AddGauge("bind_memory_context", fields, tags) @@ -118,15 +120,17 @@ func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort s "type": cg.Type, } - grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + if err := grouper.Add("bind_counter", tags, ts, c.Name, c.Value); err != nil { + acc.AddError(fmt.Errorf("adding tags %q to group failed: %v", tags, err)) + } } } } } //Add grouped metrics - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } } @@ -138,21 +142,29 @@ func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error { // Progressively build up full v3Stats struct by parsing the individual HTTP responses for _, suffix := range [...]string{"/server", "/net", "/mem"} { - scrapeUrl := addr.String() + suffix + err := func() error { + scrapeURL := addr.String() + suffix - resp, err := client.Get(scrapeUrl) - if err != nil { - return err - } + resp, err := b.client.Get(scrapeURL) + if err != nil { + return err + } - defer resp.Body.Close() + defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) - } + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeURL, resp.Status) + } + + if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("unable to decode XML document: %s", err) + } - if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { - return fmt.Errorf("Unable to decode XML document: %s", err) + return nil + }() + + if err != nil { + return err } } diff --git a/plugins/inputs/bond/README.md b/plugins/inputs/bond/README.md index abcf72c9193ca..e59664ae629f9 100644 --- a/plugins/inputs/bond/README.md +++ b/plugins/inputs/bond/README.md @@ -4,21 +4,30 @@ The Bond input plugin collects network bond interface status for both the network bond interface as well as slave interfaces. The plugin collects these metrics from `/proc/net/bonding/*` files. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Collect bond interface status, slaves statuses and failures count [[inputs.bond]] ## Sets 'proc' directory path ## If not specified, then default is /proc # host_proc = "/proc" + ## Sets 'sys' directory path + ## If not specified, then default is /sys + # host_sys = "/sys" + ## By default, telegraf gather stats for all bond interfaces ## Setting interfaces will restrict the stats to the specified ## bond interfaces. # bond_interfaces = ["bond0"] + + ## Tries to collect additional bond details from /sys/class/net/{bond} + ## currently only useful for LACP (mode 4) bonds + # collect_sys_details = false ``` -### Measurements & Fields: +## Metrics - bond - active_slave (for active-backup mode) @@ -27,21 +36,33 @@ The plugin collects these metrics from `/proc/net/bonding/*` files. - bond_slave - failures - status - -### Description: - -``` -active_slave - Currently active slave interface for active-backup mode. - -status - Status of bond interface or bonds's slave interface (down = 0, up = 1). - -failures - Amount of failures for bond's slave interface. 
-```
-
-### Tags:
+  - count
+  - actor_churned (for LACP bonds)
+  - partner_churned (for LACP bonds)
+  - total_churned (for LACP bonds)
+
+- bond_sys
+  - slave_count
+  - ad_port_count
+
+## Description
+
+- active_slave
+  - Currently active slave interface for active-backup mode.
+- status
+  - Status of bond interface or bond's slave interface (down = 0, up = 1).
+- failures
+  - Number of failures for bond's slave interface.
+- count
+  - Number of slaves attached to the bond
+- actor_churned
+  - Number of times the local end of the LACP bond has flapped
+- partner_churned
+  - Number of times the remote end of the LACP bond has flapped
+- total_churned
+  - Total count of all churn events
+
+## Tags

 - bond
   - bond

@@ -50,11 +71,15 @@ failures
 - bond
   - interface

-### Example output:
+- bond_sys
+  - bond
+  - mode
+
+## Example Output

 Configuration:

-```
+```toml
 [[inputs.bond]]
   ## Sets 'proc' directory path
   ## If not specified, then default is /proc
@@ -68,18 +93,22 @@ Configuration:

 Run:

-```
+```bash
 telegraf --config telegraf.conf --input-filter bond --test
 ```

 Output:

-```
+```shell
 * Plugin: inputs.bond, Collection 1
 > bond,bond=bond1,host=local active_slave="eth0",status=1i 1509704525000000000
 > bond_slave,bond=bond1,interface=eth0,host=local status=1i,failures=0i 1509704525000000000
 > bond_slave,host=local,bond=bond1,interface=eth1 status=1i,failures=0i 1509704525000000000
+> bond_slave,host=local,bond=bond1 count=2i 1509704525000000000
 > bond,bond=bond0,host=isvetlov-mac.local status=1i 1509704525000000000
 > bond_slave,bond=bond0,interface=eth1,host=local status=1i,failures=0i 1509704525000000000
 > bond_slave,bond=bond0,interface=eth2,host=local status=1i,failures=0i 1509704525000000000
+> bond_slave,bond=bond0,host=local count=2i 1509704525000000000
 ```
diff --git a/plugins/inputs/bond/bond.go b/plugins/inputs/bond/bond.go
index 01f6f251be776..c8a6b7c848703 100644
--- a/plugins/inputs/bond/bond.go
+++ b/plugins/inputs/bond/bond.go
@@ -1,9 +1,10 @@
+//go:generate ../../../tools/readme_config_includer/generator
 package bond
 
 import (
 	"bufio"
+	_ "embed"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -13,39 +14,39 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
 // default host proc path
 const defaultHostProc = "/proc"
+const defaultHostSys = "/sys"
 
 // env host proc variable name
 const envProc = "HOST_PROC"
+const envSys = "HOST_SYS"
 
 type Bond struct {
 	HostProc       string   `toml:"host_proc"`
+	HostSys        string   `toml:"host_sys"`
+	SysDetails     bool     `toml:"collect_sys_details"`
 	BondInterfaces []string `toml:"bond_interfaces"`
+	BondType       string
 }
 
-var sampleConfig = `
-  ## Sets 'proc' directory path
-  ## If not specified, then default is /proc
-  # host_proc = "/proc"
-
-  ## By default, telegraf gather stats for all bond interfaces
-  ## Setting interfaces will restrict the stats to the specified
-  ## bond interfaces.
- # bond_interfaces = ["bond0"] -` - -func (bond *Bond) Description() string { - return "Collect bond interface status, slaves statuses and failures count" +type sysFiles struct { + ModeFile string + SlaveFile string + ADPortsFile string } -func (bond *Bond) SampleConfig() string { +func (*Bond) SampleConfig() string { return sampleConfig } func (bond *Bond) Gather(acc telegraf.Accumulator) error { // load proc path, get default value if config value and env variable are empty - bond.loadPath() + bond.loadPaths() // list bond interfaces from bonding directory or gather all interfaces. bondNames, err := bond.listInterfaces() if err != nil { @@ -53,15 +54,27 @@ func (bond *Bond) Gather(acc telegraf.Accumulator) error { } for _, bondName := range bondNames { bondAbsPath := bond.HostProc + "/net/bonding/" + bondName - file, err := ioutil.ReadFile(bondAbsPath) + file, err := os.ReadFile(bondAbsPath) if err != nil { - acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondAbsPath, err)) + acc.AddError(fmt.Errorf("error inspecting %q interface: %v", bondAbsPath, err)) continue } - rawFile := strings.TrimSpace(string(file)) - err = bond.gatherBondInterface(bondName, rawFile, acc) + rawProcFile := strings.TrimSpace(string(file)) + err = bond.gatherBondInterface(bondName, rawProcFile, acc) if err != nil { - acc.AddError(fmt.Errorf("error inspecting '%s' interface: %v", bondName, err)) + acc.AddError(fmt.Errorf("error inspecting %q interface: %v", bondName, err)) + } + + /* + Some details about bonds only exist in /sys/class/net/ + In particular, LACP bonds track upstream port state here + */ + if bond.SysDetails { + files, err := bond.readSysFiles(bond.HostSys + "/class/net/" + bondName) + if err != nil { + acc.AddError(err) + } + bond.gatherSysDetails(bondName, files, acc) } } return nil @@ -91,8 +104,14 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A tags := map[string]string{ "bond": bondName, } - scanner := bufio.NewScanner(strings.NewReader(rawFile)) + /* + /proc/bond/... files are formatted in a way that is difficult + to use regexes to parse. Because of that, we scan through + the file one line at a time and rely on specific lines to + mark "ends" of blocks. It's a hack that should be resolved, + but for now, it works. 
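+
+	   For example, the per-slave blocks this loop walks look roughly like
+	   the following (compare the fixtures in bond_test.go):
+
+	     Slave Interface: eth0
+	     MII Status: up
+	     Link Failure Count: 2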
+ */ for scanner.Scan() { line := scanner.Text() stats := strings.Split(line, ":") @@ -101,6 +120,9 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A } name := strings.TrimSpace(stats[0]) value := strings.TrimSpace(stats[1]) + if name == "Bonding Mode" { + bond.BondType = value + } if strings.Contains(name, "Currently Active Slave") { fields["active_slave"] = value } @@ -119,9 +141,86 @@ func (bond *Bond) gatherBondPart(bondName string, rawFile string, acc telegraf.A return fmt.Errorf("Couldn't find status info for '%s' ", bondName) } +func (bond *Bond) readSysFiles(bondDir string) (sysFiles, error) { + /* + Files we may need + bonding/mode + bonding/slaves + bonding/ad_num_ports + + We load files here first to allow for easier testing + */ + var output sysFiles + + file, err := os.ReadFile(bondDir + "/bonding/mode") + if err != nil { + return sysFiles{}, fmt.Errorf("error inspecting %q interface: %v", bondDir+"/bonding/mode", err) + } + output.ModeFile = strings.TrimSpace(string(file)) + file, err = os.ReadFile(bondDir + "/bonding/slaves") + if err != nil { + return sysFiles{}, fmt.Errorf("error inspecting %q interface: %v", bondDir+"/bonding/slaves", err) + } + output.SlaveFile = strings.TrimSpace(string(file)) + if bond.BondType == "IEEE 802.3ad Dynamic link aggregation" { + file, err = os.ReadFile(bondDir + "/bonding/ad_num_ports") + if err != nil { + return sysFiles{}, fmt.Errorf("error inspecting %q interface: %v", bondDir+"/bonding/ad_num_ports", err) + } + output.ADPortsFile = strings.TrimSpace(string(file)) + } + return output, nil +} + +func (bond *Bond) gatherSysDetails(bondName string, files sysFiles, acc telegraf.Accumulator) { + var slaves []string + var adPortCount int + + // To start with, we get the bond operating mode + mode := strings.TrimSpace(strings.Split(files.ModeFile, " ")[0]) + + tags := map[string]string{ + "bond": bondName, + "mode": mode, + } + + // Next we collect the number of bond slaves the system expects + slavesTmp := strings.Split(files.SlaveFile, " ") + for _, slave := range slavesTmp { + if slave != "" { + slaves = append(slaves, slave) + } + } + if mode == "802.3ad" { + /* + If we're in LACP mode, we should check on how the bond ports are + interacting with the upstream switch ports + a failed conversion can be treated as 0 ports + */ + adPortCount, _ = strconv.Atoi(strings.TrimSpace(files.ADPortsFile)) + } else { + adPortCount = len(slaves) + } + + fields := map[string]interface{}{ + "slave_count": len(slaves), + "ad_port_count": adPortCount, + } + acc.AddFields("bond_sys", fields, tags) +} + func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf.Accumulator) error { - var slave string - var status int + var slaveCount int + tags := map[string]string{ + "bond": bondName, + } + fields := map[string]interface{}{ + "status": 0, + } + var scanPast bool + if bond.BondType == "IEEE 802.3ad Dynamic link aggregation" { + scanPast = true + } scanner := bufio.NewScanner(strings.NewReader(rawFile)) for scanner.Scan() { @@ -133,42 +232,59 @@ func (bond *Bond) gatherSlavePart(bondName string, rawFile string, acc telegraf. 
name := strings.TrimSpace(stats[0]) value := strings.TrimSpace(stats[1]) if strings.Contains(name, "Slave Interface") { - slave = value + tags["interface"] = value + slaveCount++ } - if strings.Contains(name, "MII Status") { - status = 0 - if value == "up" { - status = 1 - } + if strings.Contains(name, "MII Status") && value == "up" { + fields["status"] = 1 } if strings.Contains(name, "Link Failure Count") { count, err := strconv.Atoi(value) if err != nil { return err } - fields := map[string]interface{}{ - "status": status, - "failures": count, + fields["failures"] = count + if !scanPast { + acc.AddFields("bond_slave", fields, tags) } - tags := map[string]string{ - "bond": bondName, - "interface": slave, + } + if strings.Contains(name, "Actor Churned Count") { + count, err := strconv.Atoi(value) + if err != nil { + return err + } + fields["actor_churned"] = count + } + if strings.Contains(name, "Partner Churned Count") { + count, err := strconv.Atoi(value) + if err != nil { + return err } + fields["partner_churned"] = count + fields["total_churned"] = fields["actor_churned"].(int) + fields["partner_churned"].(int) acc.AddFields("bond_slave", fields, tags) } } - if err := scanner.Err(); err != nil { - return err + tags = map[string]string{ + "bond": bondName, } - return nil + fields = map[string]interface{}{ + "count": slaveCount, + } + acc.AddFields("bond_slave", fields, tags) + + return scanner.Err() } -// loadPath can be used to read path firstly from config +// loadPaths can be used to read path firstly from config // if it is empty then try read from env variable -func (bond *Bond) loadPath() { +func (bond *Bond) loadPaths() { if bond.HostProc == "" { bond.HostProc = proc(envProc, defaultHostProc) } + if bond.HostSys == "" { + bond.HostSys = proc(envSys, defaultHostSys) + } } // proc can be used to read file paths from env diff --git a/plugins/inputs/bond/bond_test.go b/plugins/inputs/bond/bond_test.go index c07224350352c..838f4c4651c72 100644 --- a/plugins/inputs/bond/bond_test.go +++ b/plugins/inputs/bond/bond_test.go @@ -4,37 +4,10 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) -var sampleTest802 = ` -Ethernet Channel Bonding Driver: v3.5.0 (November 4, 2008) - -Bonding Mode: IEEE 802.3ad Dynamic link aggregation -Transmit Hash Policy: layer2 (0) -MII Status: up -MII Polling Interval (ms): 100 -Up Delay (ms): 0 -Down Delay (ms): 0 - -802.3ad info -LACP rate: fast -Aggregator selection policy (ad_select): stable -bond bond0 has no active aggregator - -Slave Interface: eth1 -MII Status: up -Link Failure Count: 0 -Permanent HW addr: 00:0c:29:f5:b7:11 -Aggregator ID: N/A - -Slave Interface: eth2 -MII Status: up -Link Failure Count: 3 -Permanent HW addr: 00:0c:29:f5:b7:1b -Aggregator ID: N/A -` - -var sampleTestAB = ` +const sampleTestAB = ` Ethernet Channel Bonding Driver: v3.6.0 (September 26, 2009) Bonding Mode: fault-tolerance (active-backup) @@ -61,17 +34,68 @@ Link Failure Count: 0 Permanent HW addr: ` +const sampleTestLACP = ` +Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011) + +Bonding Mode: IEEE 802.3ad Dynamic link aggregation +Transmit Hash Policy: layer2 (0) +MII Status: up +MII Polling Interval (ms): 100 +Up Delay (ms): 0 +Down Delay (ms): 0 + +802.3ad info +LACP rate: fast +Min links: 0 +Aggregator selection policy (ad_select): stable + +Slave Interface: eth0 +MII Status: up +Speed: 10000 Mbps +Duplex: full +Link Failure Count: 2 +Permanent HW addr: 3c:ec:ef:5e:71:58 +Slave queue ID: 0 +Aggregator ID: 2 
+Actor Churn State: none +Partner Churn State: none +Actor Churned Count: 2 +Partner Churned Count: 0 + +Slave Interface: eth1 +MII Status: up +Speed: 10000 Mbps +Duplex: full +Link Failure Count: 1 +Permanent HW addr: 3c:ec:ef:5e:71:59 +Slave queue ID: 0 +Aggregator ID: 2 +Actor Churn State: none +Partner Churn State: none +Actor Churned Count: 0 +Partner Churned Count: 0 +` + +const sampleSysMode = "802.3ad 5" +const sampleSysSlaves = "eth0 eth1 " +const sampleSysAdPorts = " 2 " + func TestGatherBondInterface(t *testing.T) { var acc testutil.Accumulator bond := &Bond{} - bond.gatherBondInterface("bond802", sampleTest802, &acc) - acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bond802"}) - acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth1"}) - acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 3, "status": 1}, map[string]string{"bond": "bond802", "interface": "eth2"}) - - bond.gatherBondInterface("bondAB", sampleTestAB, &acc) + require.NoError(t, bond.gatherBondInterface("bondAB", sampleTestAB, &acc)) acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"active_slave": "eth2", "status": 1}, map[string]string{"bond": "bondAB"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 0}, map[string]string{"bond": "bondAB", "interface": "eth3"}) acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 0, "status": 1}, map[string]string{"bond": "bondAB", "interface": "eth2"}) + acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"count": 2}, map[string]string{"bond": "bondAB"}) + + acc = testutil.Accumulator{} + require.NoError(t, bond.gatherBondInterface("bondLACP", sampleTestLACP, &acc)) + bond.gatherSysDetails("bondLACP", sysFiles{ModeFile: sampleSysMode, SlaveFile: sampleSysSlaves, ADPortsFile: sampleSysAdPorts}, &acc) + acc.AssertContainsTaggedFields(t, "bond", map[string]interface{}{"status": 1}, map[string]string{"bond": "bondLACP"}) + acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 2, "status": 1, "actor_churned": 2, "partner_churned": 0, "total_churned": 2}, map[string]string{"bond": "bondLACP", "interface": "eth0"}) + acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"failures": 1, "status": 1, "actor_churned": 0, "partner_churned": 0, "total_churned": 0}, map[string]string{"bond": "bondLACP", "interface": "eth1"}) + acc.AssertContainsTaggedFields(t, "bond_slave", map[string]interface{}{"count": 2}, map[string]string{"bond": "bondLACP"}) + acc.AssertContainsTaggedFields(t, "bond_sys", map[string]interface{}{"slave_count": 2, "ad_port_count": 2}, map[string]string{"bond": "bondLACP", "mode": "802.3ad"}) } diff --git a/plugins/inputs/bond/sample.conf b/plugins/inputs/bond/sample.conf new file mode 100644 index 0000000000000..ca8e04ccdfabf --- /dev/null +++ b/plugins/inputs/bond/sample.conf @@ -0,0 +1,18 @@ +# Collect bond interface status, slaves statuses and failures count +[[inputs.bond]] + ## Sets 'proc' directory path + ## If not specified, then default is /proc + # host_proc = "/proc" + + ## Sets 'sys' directory path + ## If not specified, then default is /sys + # host_sys = "/sys" + + ## By default, telegraf gather stats for all bond interfaces + ## Setting interfaces will restrict the stats to the specified + ## bond interfaces. 
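+  ## For example, to restrict collection to two hypothetical bonds:
+  # bond_interfaces = ["bond0", "bond1"]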
+ # bond_interfaces = ["bond0"] + + ## Tries to collect additional bond details from /sys/class/net/{bond} + ## currently only useful for LACP (mode 4) bonds + # collect_sys_details = false diff --git a/plugins/inputs/burrow/README.md b/plugins/inputs/burrow/README.md index 1d763a430455f..c6e8a8b4e8ba5 100644 --- a/plugins/inputs/burrow/README.md +++ b/plugins/inputs/burrow/README.md @@ -1,13 +1,15 @@ # Burrow Kafka Consumer Lag Checking Input Plugin -Collect Kafka topic, consumer and partition status -via [Burrow](https://github.com/linkedin/Burrow) HTTP [API](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint). +Collect Kafka topic, consumer and partition status via +[Burrow](https://github.com/linkedin/Burrow) HTTP +[API](https://github.com/linkedin/Burrow/wiki/HTTP-Endpoint). Supported Burrow version: `1.x` -### Configuration +## Configuration -```toml +```toml @sample.conf +# Collect Kafka topics and consumers status from Burrow HTTP API. [[inputs.burrow]] ## Burrow API endpoints in format "schema://host:port". ## Default is "http://localhost:8000". @@ -50,7 +52,7 @@ Supported Burrow version: `1.x` # insecure_skip_verify = false ``` -### Group/Partition Status mappings +## Group/Partition Status mappings * `OK` = 1 * `NOT_FOUND` = 2 @@ -61,42 +63,43 @@ Supported Burrow version: `1.x` > unknown value will be mapped to 0 +## Metrics + ### Fields * `burrow_group` (one event per each consumer group) - - status (string, see Partition Status mappings) - - status_code (int, `1..6`, see Partition status mappings) - - partition_count (int, `number of partitions`) - - offset (int64, `total offset of all partitions`) - - total_lag (int64, `totallag`) - - lag (int64, `maxlag.current_lag || 0`) - - timestamp (int64, `end.timestamp`) + * status (string, see Partition Status mappings) + * status_code (int, `1..6`, see Partition status mappings) + * partition_count (int, `number of partitions`) + * offset (int64, `total offset of all partitions`) + * total_lag (int64, `totallag`) + * lag (int64, `maxlag.current_lag || 0`) + * timestamp (int64, `end.timestamp`) * `burrow_partition` (one event per each topic partition) - - status (string, see Partition Status mappings) - - status_code (int, `1..6`, see Partition status mappings) - - lag (int64, `current_lag || 0`) - - offset (int64, `end.timestamp`) - - timestamp (int64, `end.timestamp`) + * status (string, see Partition Status mappings) + * status_code (int, `1..6`, see Partition status mappings) + * lag (int64, `current_lag || 0`) + * offset (int64, `end.timestamp`) + * timestamp (int64, `end.timestamp`) * `burrow_topic` (one event per topic offset) - - offset (int64) - + * offset (int64) ### Tags * `burrow_group` - - cluster (string) - - group (string) + * cluster (string) + * group (string) * `burrow_partition` - - cluster (string) - - group (string) - - topic (string) - - partition (int) - - owner (string) + * cluster (string) + * group (string) + * topic (string) + * partition (int) + * owner (string) * `burrow_topic` - - cluster (string) - - topic (string) - - partition (int) + * cluster (string) + * topic (string) + * partition (int) diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go index 501fddf16ad77..b5415ae2f22f3 100644 --- a/plugins/inputs/burrow/burrow.go +++ b/plugins/inputs/burrow/burrow.go @@ -1,8 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package burrow import ( + _ "embed" "encoding/json" "fmt" + "net" "net/http" "net/url" "strconv" @@ -11,12 +14,16 @@ import ( "time" 
"github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( defaultBurrowPrefix = "/v3/kafka" defaultConcurrentConnections = 20 @@ -24,48 +31,6 @@ const ( defaultServer = "http://localhost:8000" ) -const configSample = ` - ## Burrow API endpoints in format "schema://host:port". - ## Default is "http://localhost:8000". - servers = ["http://localhost:8000"] - - ## Override Burrow API prefix. - ## Useful when Burrow is behind reverse-proxy. - # api_prefix = "/v3/kafka" - - ## Maximum time to receive response. - # response_timeout = "5s" - - ## Limit per-server concurrent connections. - ## Useful in case of large number of topics or consumer groups. - # concurrent_connections = 20 - - ## Filter clusters, default is no filtering. - ## Values can be specified as glob patterns. - # clusters_include = [] - # clusters_exclude = [] - - ## Filter consumer groups, default is no filtering. - ## Values can be specified as glob patterns. - # groups_include = [] - # groups_exclude = [] - - ## Filter topics, default is no filtering. - ## Values can be specified as glob patterns. - # topics_include = [] - # topics_exclude = [] - - ## Credentials for basic HTTP authentication. - # username = "" - # password = "" - - ## Optional SSL config - # ssl_ca = "/etc/telegraf/ca.pem" - # ssl_cert = "/etc/telegraf/cert.pem" - # ssl_key = "/etc/telegraf/key.pem" - # insecure_skip_verify = false -` - type ( burrow struct { tls.ClientConfig @@ -73,7 +38,7 @@ type ( Servers []string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration ConcurrentConnections int APIPrefix string `toml:"api_prefix"` @@ -133,12 +98,8 @@ func init() { }) } -func (b *burrow) SampleConfig() string { - return configSample -} - -func (b *burrow) Description() string { - return "Collect Kafka topics and consumers status from Burrow HTTP API." 
+func (*burrow) SampleConfig() string { + return sampleConfig } func (b *burrow) Gather(acc telegraf.Accumulator) error { @@ -188,10 +149,8 @@ func (b *burrow) setDefaults() { if b.ConcurrentConnections < 1 { b.ConcurrentConnections = defaultConcurrentConnections } - if b.ResponseTimeout.Duration < time.Second { - b.ResponseTimeout = internal.Duration{ - Duration: defaultResponseTimeout, - } + if time.Duration(b.ResponseTimeout) < time.Second { + b.ResponseTimeout = config.Duration(defaultResponseTimeout) } } @@ -220,11 +179,22 @@ func (b *burrow) createClient() (*http.Client, error) { return nil, err } + timeout := time.Duration(b.ResponseTimeout) + dialContext := net.Dialer{Timeout: timeout, DualStack: true} + transport := http.Transport{ + DialContext: dialContext.DialContext, + TLSClientConfig: tlsCfg, + // If b.ConcurrentConnections <= 1, then DefaultMaxIdleConnsPerHost is used (=2) + MaxIdleConnsPerHost: b.ConcurrentConnections / 2, + // If b.ConcurrentConnections == 0, then it is treated as "no limits" + MaxConnsPerHost: b.ConcurrentConnections, + ResponseHeaderTimeout: timeout, + IdleConnTimeout: 90 * time.Second, + } + client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - }, - Timeout: b.ResponseTimeout.Duration, + Transport: &transport, + Timeout: timeout, } return client, nil diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index cafbcb9408775..2bf9c75fb0a1e 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -2,21 +2,21 @@ package burrow import ( "fmt" - "io/ioutil" "net/http" "net/http/httptest" "os" "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) // remap uri to json file, eg: /v3/kafka -> ./testdata/v3_kafka.json func getResponseJSON(requestURI string) ([]byte, int) { uri := strings.TrimLeft(requestURI, "/") - mappedFile := strings.Replace(uri, "/", "_", -1) + mappedFile := strings.ReplaceAll(uri, "/", "_") jsonFile := fmt.Sprintf("./testdata/%s.json", mappedFile) code := 200 @@ -27,7 +27,7 @@ func getResponseJSON(requestURI string) ([]byte, int) { } // respond with file - b, _ := ioutil.ReadFile(jsonFile) + b, _ := os.ReadFile(jsonFile) return b, code } @@ -37,6 +37,8 @@ func getHTTPServer() *httptest.Server { body, code := getResponseJSON(r.RequestURI) w.WriteHeader(code) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive w.Write(body) })) } @@ -47,7 +49,7 @@ func getHTTPServerBasicAuth() *httptest.Server { w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) username, password, authOK := r.BasicAuth() - if authOK == false { + if !authOK { http.Error(w, "Not authorized", 401) return } @@ -61,6 +63,8 @@ func getHTTPServerBasicAuth() *httptest.Server { body, code := getResponseJSON(r.RequestURI) w.WriteHeader(code) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive w.Write(body) })) } @@ -72,7 +76,7 @@ func TestBurrowTopic(t *testing.T) { plugin := &burrow{Servers: []string{s.URL}} acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) fields := []map[string]interface{}{ // topicA @@ -103,7 +107,7 @@ func TestBurrowPartition(t *testing.T) { Servers: []string{s.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, 
plugin.Gather(acc)) fields := []map[string]interface{}{ { @@ -151,7 +155,7 @@ func TestBurrowGroup(t *testing.T) { Servers: []string{s.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) fields := []map[string]interface{}{ { @@ -189,7 +193,7 @@ func TestMultipleServers(t *testing.T) { Servers: []string{s1.URL, s2.URL}, } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 14, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -205,7 +209,7 @@ func TestMultipleRuns(t *testing.T) { } for i := 0; i < 4; i++ { acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 7, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -224,7 +228,7 @@ func TestBasicAuthConfig(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 7, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -241,7 +245,7 @@ func TestFilterClusters(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) // no match by cluster require.Exactly(t, 0, len(acc.Metrics)) @@ -260,7 +264,7 @@ func TestFilterGroups(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 1, len(acc.Metrics)) require.Empty(t, acc.Errors) @@ -278,7 +282,7 @@ func TestFilterTopics(t *testing.T) { } acc := &testutil.Accumulator{} - plugin.Gather(acc) + require.NoError(t, plugin.Gather(acc)) require.Exactly(t, 3, len(acc.Metrics)) require.Empty(t, acc.Errors) diff --git a/plugins/inputs/burrow/sample.conf b/plugins/inputs/burrow/sample.conf new file mode 100644 index 0000000000000..e9cbe73ab97ce --- /dev/null +++ b/plugins/inputs/burrow/sample.conf @@ -0,0 +1,41 @@ +# Collect Kafka topics and consumers status from Burrow HTTP API. +[[inputs.burrow]] + ## Burrow API endpoints in format "schema://host:port". + ## Default is "http://localhost:8000". + servers = ["http://localhost:8000"] + + ## Override Burrow API prefix. + ## Useful when Burrow is behind reverse-proxy. + # api_prefix = "/v3/kafka" + + ## Maximum time to receive response. + # response_timeout = "5s" + + ## Limit per-server concurrent connections. + ## Useful in case of large number of topics or consumer groups. + # concurrent_connections = 20 + + ## Filter clusters, default is no filtering. + ## Values can be specified as glob patterns. + # clusters_include = [] + # clusters_exclude = [] + + ## Filter consumer groups, default is no filtering. + ## Values can be specified as glob patterns. + # groups_include = [] + # groups_exclude = [] + + ## Filter topics, default is no filtering. + ## Values can be specified as glob patterns. + # topics_include = [] + # topics_exclude = [] + + ## Credentials for basic HTTP authentication. 
+ # username = "" + # password = "" + + ## Optional SSL config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + # insecure_skip_verify = false diff --git a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md index d89459533f55e..b3df5a0980ecc 100644 --- a/plugins/inputs/cassandra/README.md +++ b/plugins/inputs/cassandra/README.md @@ -1,56 +1,79 @@ # Cassandra Input Plugin -### **Deprecated in version 1.7**: Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin with the [cassandra.conf](/plugins/inputs/jolokia2/examples/cassandra.conf) example configuration. +**Deprecated in version 1.7**: Please use the [jolokia2](../jolokia2/README.md) +plugin with the [cassandra.conf](../jolokia2/examples/cassandra.conf) example +configuration. + +## Plugin arguments -#### Plugin arguments: - **context** string: Context root used for jolokia url -- **servers** []string: List of servers with the format ":port" +- **servers** []string: List of servers with the format `:port` - **metrics** []string: List of Jmx paths that identify mbeans attributes -#### Description +## Description + +The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean's +attributes through jolokia REST endpoint. All metrics are collected for each +server configured. -The Cassandra plugin collects Cassandra 3 / JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. +See: [https://jolokia.org/](https://jolokia.org/) and [Cassandra +Documentation][1] -See: https://jolokia.org/ and [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) +[1]: http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html -# Measurements: -Cassandra plugin produces one or more measurements for each metric configured, adding Server's name as `host` tag. More than one measurement is generated when querying table metrics with a wildcard for the keyspace or table name. +## Metrics -Given a configuration like: +Cassandra plugin produces one or more measurements for each metric configured, +adding Server's name as `host` tag. More than one measurement is generated when +querying table metrics with a wildcard for the keyspace or table name. -```toml +## Configuration + +```toml @sample.conf +# Read Cassandra metrics through Jolokia [[inputs.cassandra]] context = "/jolokia/read" - servers = [":8778"] - metrics = ["/java.lang:type=Memory/HeapMemoryUsage"] + ## List of cassandra servers exposing jolokia read service + servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] + ## List of metrics collected on above servers + ## Each metric consists of a jmx path. + ## This will collect all heap memory usage metrics from the jvm and + ## ReadLatency metrics for all keyspaces and tables. + ## "type=Table" in the query works with Cassandra3.0. 
Older versions might + ## need to use "type=ColumnFamily" + metrics = [ + "/java.lang:type=Memory/HeapMemoryUsage", + "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" + ] ``` -The collected metrics will be: +## Example Output -``` +```shell javaMemory,host=myHost,mname=HeapMemoryUsage HeapMemoryUsage_committed=1040187392,HeapMemoryUsage_init=1050673152,HeapMemoryUsage_max=1040187392,HeapMemoryUsage_used=368155000 1459551767230567084 ``` -# Useful Metrics: +## Useful Metrics -Here is a list of metrics that might be useful to monitor your cassandra cluster. This was put together from multiple sources on the web. +Here is a list of metrics that might be useful to monitor your cassandra +cluster. This was put together from multiple sources on the web. - [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics) - [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) -#### measurement = javaGarbageCollector +### measurement = javaGarbageCollector - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount - /java.lang:type=GarbageCollector,name=ParNew/CollectionTime - /java.lang:type=GarbageCollector,name=ParNew/CollectionCount -#### measurement = javaMemory +### measurement = javaMemory - /java.lang:type=Memory/HeapMemoryUsage - /java.lang:type=Memory/NonHeapMemoryUsage -#### measurement = cassandraCache +### measurement = cassandraCache - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests @@ -63,11 +86,11 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity -#### measurement = cassandraClient +### measurement = cassandraClient - /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients -#### measurement = cassandraClientRequest +### measurement = cassandraClientRequest - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency @@ -80,25 +103,28 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures -#### measurement = cassandraCommitLog +### measurement = cassandraCommitLog - /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks - /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize -#### measurement = cassandraCompaction +### measurement = cassandraCompaction - /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks - /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks - /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted - /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted -#### measurement = cassandraStorage +### measurement = cassandraStorage - /org.apache.cassandra.metrics:type=Storage,name=Load - /org.apache.cassandra.metrics:type=Storage,name=Exceptions -#### measurement = cassandraTable -Using wildcards for "keyspace" and "scope" can create a lot of series as metrics will be reported for every table and 
keyspace including internal system tables. Specify a keyspace name and/or a table name to limit them. +### measurement = cassandraTable + +Using wildcards for "keyspace" and "scope" can create a lot of series as metrics +will be reported for every table and keyspace including internal system +tables. Specify a keyspace name and/or a table name to limit them. - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=TotalDiskSpaceUsed @@ -108,20 +134,17 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=ReadTotalLatency - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency - -#### measurement = cassandraThreadPools +### measurement = cassandraThreadPools - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=PendingTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=CounterMutationStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=MutationStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadRepairStage,name=CurrentlyBlockedTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=ReadStage,name=CurrentlyBlockedTasks -- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks +- /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=PendingTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=request,scope=RequestResponseStage,name=CurrentlyBlockedTasks - - diff --git a/plugins/inputs/cassandra/cassandra.go b/plugins/inputs/cassandra/cassandra.go index 6f6f86e32f592..13f76c92b2d54 100644 --- a/plugins/inputs/cassandra/cassandra.go +++ b/plugins/inputs/cassandra/cassandra.go @@ -1,11 +1,12 @@ +//go:generate ../../../tools/readme_config_includer/generator package cassandra import ( + _ "embed" "encoding/json" "errors" "fmt" - "io/ioutil" - "log" + "io" "net/http" "net/url" "strings" @@ -14,6 +15,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type JolokiaClient interface { MakeRequest(req *http.Request) (*http.Response, error) } @@ -28,9 +33,10 @@ func (c JolokiaClientImpl) MakeRequest(req *http.Request) (*http.Response, error type Cassandra struct { jClient JolokiaClient - Context string - Servers []string - Metrics []string + Context string `toml:"context"` + Servers []string `toml:"servers"` + Metrics []string `toml:"metrics"` + Log telegraf.Logger `toml:"-"` } type javaMetric struct { @@ -49,13 +55,11 @@ type jmxMetric interface { addTagsFields(out map[string]interface{}) } -func newJavaMetric(host string, metric string, - acc telegraf.Accumulator) *javaMetric { +func newJavaMetric(acc telegraf.Accumulator, host string, metric string) *javaMetric { return &javaMetric{host: host, metric: metric, acc: acc} } -func newCassandraMetric(host string, metric string, - acc telegraf.Accumulator) *cassandraMetric { +func newCassandraMetric(acc telegraf.Accumulator, host string, metric string) *cassandraMetric { return &cassandraMetric{host: host, metric: metric, acc: acc} } @@ -72,13 +76,15 @@ func addValuesAsFields(values map[string]interface{}, fields map[string]interfac func parseJmxMetricRequest(mbean string) map[string]string { tokens := make(map[string]string) classAndPairs := strings.Split(mbean, ":") - if classAndPairs[0] == "org.apache.cassandra.metrics" { + switch classAndPairs[0] { + case "org.apache.cassandra.metrics": tokens["class"] = "cassandra" - } else if classAndPairs[0] == "java.lang" { + case "java.lang": tokens["class"] = "java" - } else { + default: return tokens } + pairs := strings.Split(classAndPairs[1], ",") for _, pair := range pairs { p := strings.Split(pair, "=") @@ -125,14 +131,11 @@ func (j javaMetric) addTagsFields(out map[string]interface{}) { } j.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) } else { - j.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - j.metric, out)) + j.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", j.metric, out)) } } -func addCassandraMetric(mbean string, c cassandraMetric, - values map[string]interface{}) { - +func addCassandraMetric(mbean string, c cassandraMetric, values map[string]interface{}) { tags := make(map[string]string) fields := make(map[string]interface{}) tokens := parseJmxMetricRequest(mbean) @@ -140,11 +143,9 @@ func addCassandraMetric(mbean string, c cassandraMetric, tags["cassandra_host"] = c.host addValuesAsFields(values, fields, tags["mname"]) c.acc.AddFields(tokens["class"]+tokens["type"], fields, tags) - } func (c cassandraMetric) addTagsFields(out map[string]interface{}) { - r := out["request"] tokens := parseJmxMetricRequest(r.(map[string]interface{})["mbean"].(string)) @@ -152,62 +153,32 @@ func (c cassandraMetric) addTagsFields(out map[string]interface{}) { // maps in the json response if (tokens["type"] == "Table" || tokens["type"] == "ColumnFamily") && (tokens["keyspace"] == "*" || tokens["scope"] == "*") { - if valuesMap, ok := out["value"]; ok { - for k, v := range valuesMap.(map[string]interface{}) { - addCassandraMetric(k, c, v.(map[string]interface{})) - } - } else { - c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - c.metric, out)) + valuesMap, ok := out["value"] + if !ok { + c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) return } + for k, v := range valuesMap.(map[string]interface{}) { + addCassandraMetric(k, c, v.(map[string]interface{})) + } } 
else { - if values, ok := out["value"]; ok { - addCassandraMetric(r.(map[string]interface{})["mbean"].(string), - c, values.(map[string]interface{})) - } else { - c.acc.AddError(fmt.Errorf("Missing key 'value' in '%s' output response\n%v\n", - c.metric, out)) + values, ok := out["value"] + if !ok { + c.acc.AddError(fmt.Errorf("missing key 'value' in '%s' output response: %v", c.metric, out)) return } + addCassandraMetric(r.(map[string]interface{})["mbean"].(string), c, values.(map[string]interface{})) } } -func (j *Cassandra) SampleConfig() string { - return ` - ## DEPRECATED: The cassandra plugin has been deprecated. Please use the - ## jolokia2 plugin instead. - ## - ## see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 - - context = "/jolokia/read" - ## List of cassandra servers exposing jolokia read service - servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] - ## List of metrics collected on above servers - ## Each metric consists of a jmx path. - ## This will collect all heap memory usage metrics from the jvm and - ## ReadLatency metrics for all keyspaces and tables. - ## "type=Table" in the query works with Cassandra3.0. Older versions might - ## need to use "type=ColumnFamily" - metrics = [ - "/java.lang:type=Memory/HeapMemoryUsage", - "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" - ] -` -} - -func (j *Cassandra) Description() string { - return "Read Cassandra metrics through Jolokia" -} - -func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) { +func (c *Cassandra) getAttr(requestURL *url.URL) (map[string]interface{}, error) { // Create + send request - req, err := http.NewRequest("GET", requestUrl.String(), nil) + req, err := http.NewRequest("GET", requestURL.String(), nil) if err != nil { return nil, err } - resp, err := j.jClient.MakeRequest(req) + resp, err := c.jClient.MakeRequest(req) if err != nil { return nil, err } @@ -215,8 +186,8 @@ func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", - requestUrl, + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", + requestURL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, @@ -225,15 +196,15 @@ func (j *Cassandra) getAttr(requestUrl *url.URL) (map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } // Unmarshal json var jsonOut map[string]interface{} - if err = json.Unmarshal([]byte(body), &jsonOut); err != nil { - return nil, errors.New("Error decoding JSON response") + if err = json.Unmarshal(body, &jsonOut); err != nil { + return nil, errors.New("error decoding JSON response") } return jsonOut, nil @@ -263,8 +234,12 @@ func parseServerTokens(server string) map[string]string { return serverTokens } -func (c *Cassandra) Start(acc telegraf.Accumulator) error { - log.Println("W! DEPRECATED: The cassandra plugin has been deprecated. " + +func (*Cassandra) SampleConfig() string { + return sampleConfig +} + +func (c *Cassandra) Start(_ telegraf.Accumulator) error { + c.Log.Warn("DEPRECATED: The cassandra plugin has been deprecated. " + "Please use the jolokia2 plugin instead. 
" + "https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2") return nil @@ -284,36 +259,35 @@ func (c *Cassandra) Gather(acc telegraf.Accumulator) error { var m jmxMetric if strings.HasPrefix(metric, "/java.lang:") { - m = newJavaMetric(serverTokens["host"], metric, acc) + m = newJavaMetric(acc, serverTokens["host"], metric) } else if strings.HasPrefix(metric, "/org.apache.cassandra.metrics:") { - m = newCassandraMetric(serverTokens["host"], metric, acc) + m = newCassandraMetric(acc, serverTokens["host"], metric) } else { // unsupported metric type - acc.AddError(fmt.Errorf("E! Unsupported Cassandra metric [%s], skipping", - metric)) + acc.AddError(fmt.Errorf("unsupported Cassandra metric [%s], skipping", metric)) continue } // Prepare URL - requestUrl, err := url.Parse("http://" + serverTokens["host"] + ":" + + requestURL, err := url.Parse("http://" + serverTokens["host"] + ":" + serverTokens["port"] + context + metric) if err != nil { acc.AddError(err) continue } if serverTokens["user"] != "" && serverTokens["passwd"] != "" { - requestUrl.User = url.UserPassword(serverTokens["user"], + requestURL.User = url.UserPassword(serverTokens["user"], serverTokens["passwd"]) } - out, err := c.getAttr(requestUrl) + out, err := c.getAttr(requestURL) if err != nil { acc.AddError(err) continue } if out["status"] != 200.0 { - acc.AddError(fmt.Errorf("URL returned with status %v - %s\n", out["status"], requestUrl)) + acc.AddError(fmt.Errorf("provided URL returned with status %v - %s", out["status"], requestURL)) continue } m.addTagsFields(out) diff --git a/plugins/inputs/cassandra/cassandra_test.go b/plugins/inputs/cassandra/cassandra_test.go index 43a9a0c1eb105..35551cf847970 100644 --- a/plugins/inputs/cassandra/cassandra_test.go +++ b/plugins/inputs/cassandra/cassandra_test.go @@ -1,15 +1,14 @@ package cassandra import ( - _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - _ "github.com/stretchr/testify/require" ) const validJavaMultiValueJSON = ` @@ -77,19 +76,6 @@ const validCassandraNestedMultiValueJSON = ` } }` -const validSingleValueJSON = ` -{ - "request":{ - "path":"used", - "mbean":"java.lang:type=Memory", - "attribute":"HeapMemoryUsage", - "type":"read" - }, - "value":209274376, - "timestamp":1446129256, - "status":200 -}` - const validJavaMultiTypeJSON = ` { "request":{ @@ -104,8 +90,6 @@ const validJavaMultiTypeJSON = ` const invalidJSON = "I don't think this is JSON" -const empty = "" - var Servers = []string{"10.10.10.10:8778"} var AuthServers = []string{"user:passwd@10.10.10.10:8778"} var MultipleServers = []string{"10.10.10.10:8778", "10.10.10.11:8778"} @@ -121,10 +105,10 @@ type jolokiaClientStub struct { statusCode int } -func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) { +func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -153,8 +137,8 @@ func TestHttpJsonJavaMultiValue(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields := map[string]interface{}{ "HeapMemoryUsage_init": 67108864.0, @@ -182,8 +166,8 @@ func 
TestHttpJsonJavaMultiType(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields := map[string]interface{}{ "CollectionCount": 1.0, @@ -198,16 +182,14 @@ func TestHttpJsonJavaMultiType(t *testing.T) { // Test that the proper values are ignored or collected func TestHttp404(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, - []string{HeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []string{HeapMetric}) var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "has status code 404") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) + require.Contains(t, err.Error(), "has status code 404") } // Test that the proper values are ignored or collected for class=Cassandra @@ -217,8 +199,8 @@ func TestHttpJsonCassandraMultiValue(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "ReadLatency_999thPercentile": 20.0, @@ -249,8 +231,8 @@ func TestHttpJsonCassandraNestedMultiValue(t *testing.T) { acc.SetDebug(true) err := acc.GatherError(cassandra.Gather) - assert.NoError(t, err) - assert.Equal(t, 2, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 2, len(acc.Metrics)) fields1 := map[string]interface{}{ "ReadLatency_999thPercentile": 1.0, diff --git a/plugins/inputs/cassandra/sample.conf b/plugins/inputs/cassandra/sample.conf new file mode 100644 index 0000000000000..d69175df1c723 --- /dev/null +++ b/plugins/inputs/cassandra/sample.conf @@ -0,0 +1,15 @@ +# Read Cassandra metrics through Jolokia +[[inputs.cassandra]] + context = "/jolokia/read" + ## List of cassandra servers exposing jolokia read service + servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] + ## List of metrics collected on above servers + ## Each metric consists of a jmx path. + ## This will collect all heap memory usage metrics from the jvm and + ## ReadLatency metrics for all keyspaces and tables. + ## "type=Table" in the query works with Cassandra3.0. Older versions might + ## need to use "type=ColumnFamily" + metrics = [ + "/java.lang:type=Memory/HeapMemoryUsage", + "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" + ] diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 171b64760654f..41b4e7d3709f9 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -1,16 +1,22 @@ # Ceph Storage Input Plugin -Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +Collects performance metrics from the MON and OSD nodes in a Ceph storage +cluster. -Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The Telegraf module sends to a Telegraf configured with a socket_listener. [Learn more in their docs](http://docs.ceph.com/docs/mimic/mgr/telegraf/) +Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The +Telegraf module sends to a Telegraf configured with a socket_listener. 
[Learn +more in their docs](https://docs.ceph.com/en/latest/mgr/telegraf/) -*Admin Socket Stats* +## Admin Socket Stats -This gatherer works by scanning the configured SocketDir for OSD, MON, MDS and RGW socket files. When it finds -a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump** +This gatherer works by scanning the configured SocketDir for OSD, MON, MDS and +RGW socket files. When it finds a MON socket, it runs **ceph --admin-daemon +$file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf +dump**. -The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are -used as collection tags, and all sub-keys are flattened. For example: +The resulting JSON is parsed and grouped into collections, based on top-level +key. Top-level keys are used as collection tags, and all sub-keys are +flattened. For example: ```json { @@ -24,28 +30,29 @@ used as collection tags, and all sub-keys are flattened. For example: } ``` -Would be parsed into the following metrics, all of which would be tagged with collection=paxos: +Would be parsed into the following metrics, all of which would be tagged with +collection=paxos: - - refresh = 9363435 - - refresh_latency.avgcount: 9363435 - - refresh_latency.sum: 5378.794002000 +- refresh = 9363435 +- refresh_latency.avgcount: 9363435 +- refresh_latency.sum: 5378.794002000 +## Cluster Stats -*Cluster Stats* +This gatherer works by invoking ceph commands against the cluster and thus only +requires the ceph client, valid ceph configuration and an access key to function +(the ceph_config and ceph_user configuration variables work in conjunction to +specify these prerequisites). It may be run on any server you wish which has +access to the cluster. The currently supported commands are: -This gatherer works by invoking ceph commands against the cluster thus only requires the ceph client, valid -ceph configuration and an access key to function (the ceph_config and ceph_user configuration variables work -in conjunction to specify these prerequisites). It may be run on any server you wish which has access to -the cluster. The currently supported commands are: +- ceph status +- ceph df +- ceph osd pool stats -* ceph status -* ceph df -* ceph osd pool stats +## Configuration -### Configuration: - -```toml -# Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +```toml @sample.conf +# Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. [[inputs.ceph]] ## This is the recommended interval to poll. Too frequent and you will lose ## data points due to timeouts during rebalancing and recovery @@ -89,11 +96,12 @@ the cluster. The currently supported commands are: gather_cluster_stats = false ``` -### Metrics: +## Metrics -*Admin Socket Stats* +### Admin Socket -All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go. +All fields are collected under the **ceph** measurement and stored as +float64s. For a full list of fields, see the sample perf dumps in ceph_test.go. 
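For illustration, here is a minimal, self-contained Go sketch of the flattening scheme described above. It is not the plugin's code (the patch's own `flatten` function in ceph.go appears further below, and builds its key path bottom-up rather than top-down); the `main` wrapper and the hard-coded dump are illustrative only, reusing the `paxos` example from this README.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks an unmarshalled JSON value, joining nested keys with dots,
// which is how sub-keys such as refresh_latency.sum are produced above.
func flatten(prefix string, v interface{}, out map[string]float64) {
	switch val := v.(type) {
	case float64: // encoding/json decodes all JSON numbers as float64
		out[prefix] = val
	case map[string]interface{}:
		for k, child := range val {
			key := k
			if prefix != "" {
				key = prefix + "." + k
			}
			flatten(key, child, out)
		}
	}
}

func main() {
	// Hypothetical perf dump excerpt, matching the README's paxos example.
	dump := `{"paxos": {"refresh": 9363435,
	           "refresh_latency": {"avgcount": 9363435, "sum": 5378.794002}}}`

	var data map[string]interface{}
	if err := json.Unmarshal([]byte(dump), &data); err != nil {
		panic(err)
	}
	// Each top-level key becomes the "collection" tag; sub-keys become fields.
	for collection, sub := range data {
		fields := make(map[string]float64)
		flatten("", sub, fields)
		fmt.Printf("collection=%s fields=%v\n", collection, fields)
	}
}
```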
All admin measurements will have the following tags: @@ -167,9 +175,9 @@ All admin measurements will have the following tags: - throttle-objecter_ops - throttle-rgw_async_rados_ops -*Cluster Stats* +### Cluster -+ ceph_health +- ceph_health - fields: - status - overall_status @@ -184,7 +192,7 @@ All admin measurements will have the following tags: - nearfull (bool) - num_remapped_pgs (float) -+ ceph_pgmap +- ceph_pgmap - fields: - version (float) - num_pgs (float) @@ -204,7 +212,7 @@ All admin measurements will have the following tags: - fields: - count (float) -+ ceph_usage +- ceph_usage - fields: - total_bytes (float) - total_used_bytes (float) @@ -223,7 +231,7 @@ All admin measurements will have the following tags: - percent_used (float) - max_avail (float) -+ ceph_pool_stats +- ceph_pool_stats - tags: - name - fields: @@ -236,12 +244,11 @@ All admin measurements will have the following tags: - recovering_bytes_per_sec (float) - recovering_keys_per_sec (float) +## Example Output -### Example Output: - -*Cluster Stats* +Below is an example of cluster stats: -``` +```shell ceph_health,host=stefanmon1 overall_status="",status="HEALTH_WARN" 1587118504000000000 ceph_osdmap,host=stefanmon1 epoch=203,full=false,nearfull=false,num_in_osds=8,num_osds=9,num_remapped_pgs=0,num_up_osds=8 1587118504000000000 ceph_pgmap,host=stefanmon1 bytes_avail=849879302144,bytes_total=858959904768,bytes_used=9080602624,data_bytes=5055,num_pgs=504,read_bytes_sec=0,read_op_per_sec=0,version=0,write_bytes_sec=0,write_op_per_sec=0 1587118504000000000 @@ -251,9 +258,9 @@ ceph_pool_usage,host=stefanmon1,name=cephfs_data bytes_used=0,kb_used=0,max_avai ceph_pool_stats,host=stefanmon1,name=cephfs_data read_bytes_sec=0,read_op_per_sec=0,recovering_bytes_per_sec=0,recovering_keys_per_sec=0,recovering_objects_per_sec=0,write_bytes_sec=0,write_op_per_sec=0 1587118506000000000 ``` -*Admin Socket Stats* +Below is an example of admin socket stats: -``` +```shell > ceph,collection=cct,host=stefanmon1,id=stefanmon1,type=monitor total_workers=0,unhealthy_workers=0 1587117563000000000 > ceph,collection=mempool,host=stefanmon1,id=stefanmon1,type=monitor bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=719152,buffer_anon_items=192,buffer_meta_bytes=352,buffer_meta_items=4,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=15872,osdmap_items=138,osdmap_mapping_bytes=63112,osdmap_mapping_items=7626,pgmap_bytes=38680,pgmap_items=477,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117563000000000 > ceph,collection=throttle-mon_client_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=1041157,get_or_fail_fail=0,get_or_fail_success=1041157,get_started=0,get_sum=64928901,max=104857600,put=1041157,put_sum=64928901,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000 diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index c875de8dfaeba..8b1ac3b38c32a 100644 --- a/plugins/inputs/ceph/ceph.go +++ 
b/plugins/inputs/ceph/ceph.go @@ -1,11 +1,12 @@ +//go:generate ../../../tools/readme_config_includer/generator package ceph import ( "bytes" + _ "embed" "encoding/json" "fmt" - "io/ioutil" - "log" + "os" "os/exec" "path/filepath" "strings" @@ -14,6 +15,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( measurement = "ceph" typeMon = "monitor" @@ -28,59 +33,22 @@ const ( ) type Ceph struct { - CephBinary string - OsdPrefix string - MonPrefix string - MdsPrefix string - RgwPrefix string - SocketDir string - SocketSuffix string - CephUser string - CephConfig string - GatherAdminSocketStats bool - GatherClusterStats bool -} - -func (c *Ceph) Description() string { - return "Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster." + CephBinary string `toml:"ceph_binary"` + OsdPrefix string `toml:"osd_prefix"` + MonPrefix string `toml:"mon_prefix"` + MdsPrefix string `toml:"mds_prefix"` + RgwPrefix string `toml:"rgw_prefix"` + SocketDir string `toml:"socket_dir"` + SocketSuffix string `toml:"socket_suffix"` + CephUser string `toml:"ceph_user"` + CephConfig string `toml:"ceph_config"` + GatherAdminSocketStats bool `toml:"gather_admin_socket_stats"` + GatherClusterStats bool `toml:"gather_cluster_stats"` + + Log telegraf.Logger `toml:"-"` } -var sampleConfig = ` - ## This is the recommended interval to poll. Too frequent and you will lose - ## data points due to timeouts during rebalancing and recovery - interval = '1m' - - ## All configuration values are optional, defaults are shown below - - ## location of ceph binary - ceph_binary = "/usr/bin/ceph" - - ## directory in which to look for socket files - socket_dir = "/var/run/ceph" - - ## prefix of MON and OSD socket files, used to determine socket type - mon_prefix = "ceph-mon" - osd_prefix = "ceph-osd" - mds_prefix = "ceph-mds" - rgw_prefix = "ceph-client" - - ## suffix used to identify socket files - socket_suffix = "asok" - - ## Ceph user to authenticate as - ceph_user = "client.admin" - - ## Ceph configuration to use to locate the cluster - ceph_config = "/etc/ceph/ceph.conf" - - ## Whether to gather statistics via the admin socket - gather_admin_socket_stats = true - - ## Whether to gather statistics via ceph commands - gather_cluster_stats = false -` - -func (c *Ceph) SampleConfig() string { +func (*Ceph) SampleConfig() string { return sampleConfig } @@ -112,15 +80,15 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error { acc.AddError(fmt.Errorf("error reading from socket '%s': %v", s.socket, err)) continue } - data, err := parseDump(dump) + data, err := c.parseDump(dump) if err != nil { acc.AddError(fmt.Errorf("error parsing dump from socket '%s': %v", s.socket, err)) continue } for tag, metrics := range data { acc.AddFields(measurement, - map[string]interface{}(metrics), - map[string]string{"type": s.sockType, "id": s.sockId, "collection": tag}) + metrics, + map[string]string{"type": s.sockType, "id": s.sockID, "collection": tag}) } } return nil @@ -138,7 +106,7 @@ func (c *Ceph) gatherClusterStats(acc telegraf.Accumulator) error { // For each job, execute against the cluster, parse and accumulate the data points for _, job := range jobs { - output, err := c.exec(job.command) + output, err := c.execute(job.command) if err != nil { return fmt.Errorf("error executing command: %v", err) } @@ -152,34 +120,36 @@ func (c *Ceph) 
gatherClusterStats(acc telegraf.Accumulator) error { } func init() { - c := Ceph{ - CephBinary: "/usr/bin/ceph", - OsdPrefix: osdPrefix, - MonPrefix: monPrefix, - MdsPrefix: mdsPrefix, - RgwPrefix: rgwPrefix, - SocketDir: "/var/run/ceph", - SocketSuffix: sockSuffix, - CephUser: "client.admin", - CephConfig: "/etc/ceph/ceph.conf", - GatherAdminSocketStats: true, - GatherClusterStats: false, - } - - inputs.Add(measurement, func() telegraf.Input { return &c }) + inputs.Add(measurement, func() telegraf.Input { + return &Ceph{ + CephBinary: "/usr/bin/ceph", + OsdPrefix: osdPrefix, + MonPrefix: monPrefix, + MdsPrefix: mdsPrefix, + RgwPrefix: rgwPrefix, + SocketDir: "/var/run/ceph", + SocketSuffix: sockSuffix, + CephUser: "client.admin", + CephConfig: "/etc/ceph/ceph.conf", + GatherAdminSocketStats: true, + GatherClusterStats: false, + } + }) } var perfDump = func(binary string, socket *socket) (string, error) { cmdArgs := []string{"--admin-daemon", socket.socket} - if socket.sockType == typeOsd { + + switch socket.sockType { + case typeOsd: cmdArgs = append(cmdArgs, "perf", "dump") - } else if socket.sockType == typeMon { + case typeMon: cmdArgs = append(cmdArgs, "perfcounters_dump") - } else if socket.sockType == typeMds { + case typeMds: cmdArgs = append(cmdArgs, "perf", "dump") - } else if socket.sockType == typeRgw { + case typeRgw: cmdArgs = append(cmdArgs, "perf", "dump") - } else { + default: return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType) } @@ -195,7 +165,7 @@ var perfDump = func(binary string, socket *socket) (string, error) { } var findSockets = func(c *Ceph) ([]*socket, error) { - listing, err := ioutil.ReadDir(c.SocketDir) + listing, err := os.ReadDir(c.SocketDir) if err != nil { return []*socket{}, fmt.Errorf("Failed to read socket directory '%s': %v", c.SocketDir, err) } @@ -211,28 +181,25 @@ var findSockets = func(c *Ceph) ([]*socket, error) { if strings.HasPrefix(f, c.OsdPrefix) { sockType = typeOsd sockPrefix = osdPrefix - } if strings.HasPrefix(f, c.MdsPrefix) { sockType = typeMds sockPrefix = mdsPrefix - } if strings.HasPrefix(f, c.RgwPrefix) { sockType = typeRgw sockPrefix = rgwPrefix - } if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw { path := filepath.Join(c.SocketDir, f) - sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path}) + sockets = append(sockets, &socket{parseSockID(f, sockPrefix, c.SocketSuffix), sockType, path}) } } return sockets, nil } -func parseSockId(fname, prefix, suffix string) string { +func parseSockID(fname, prefix, suffix string) string { s := fname s = strings.TrimPrefix(s, prefix) s = strings.TrimSuffix(s, suffix) @@ -241,7 +208,7 @@ func parseSockId(fname, prefix, suffix string) string { } type socket struct { - sockId string + sockID string sockType string socket string } @@ -256,8 +223,10 @@ func (m *metric) name() string { buf := bytes.Buffer{} for i := len(m.pathStack) - 1; i >= 0; i-- { if buf.Len() > 0 { + //nolint:errcheck,revive // should never return an error buf.WriteString(".") } + //nolint:errcheck,revive // should never return an error buf.WriteString(m.pathStack[i]) } return buf.String() @@ -269,23 +238,23 @@ type taggedMetricMap map[string]metricMap // Parses a raw JSON string into a taggedMetricMap // Delegates the actual parsing to newTaggedMetricMap(..) 
-func parseDump(dump string) (taggedMetricMap, error) { +func (c *Ceph) parseDump(dump string) (taggedMetricMap, error) { data := make(map[string]interface{}) err := json.Unmarshal([]byte(dump), &data) if err != nil { return nil, fmt.Errorf("failed to parse json: '%s': %v", dump, err) } - return newTaggedMetricMap(data), nil + return c.newTaggedMetricMap(data), nil } // Builds a TaggedMetricMap out of a generic string map. // The top-level key is used as a tag and all sub-keys are flattened into metrics -func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { +func (c *Ceph) newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { tmm := make(taggedMetricMap) for tag, datapoints := range data { mm := make(metricMap) - for _, m := range flatten(datapoints) { + for _, m := range c.flatten(datapoints) { mm[m.name()] = m.value } tmm[tag] = mm @@ -297,29 +266,33 @@ func newTaggedMetricMap(data map[string]interface{}) taggedMetricMap { // Nested keys are flattened into ordered slices associated with a metric value. // The key slices are treated as stacks, and are expected to be reversed and concatenated // when passed as metrics to the accumulator. (see (*metric).name()) -func flatten(data interface{}) []*metric { +func (c *Ceph) flatten(data interface{}) []*metric { var metrics []*metric switch val := data.(type) { case float64: - metrics = []*metric{{make([]string, 0, 1), val}} + metrics = []*metric{ + { + make([]string, 0, 1), val, + }, + } case map[string]interface{}: metrics = make([]*metric, 0, len(val)) for k, v := range val { - for _, m := range flatten(v) { + for _, m := range c.flatten(v) { m.pathStack = append(m.pathStack, k) metrics = append(metrics, m) } } default: - log.Printf("I! [inputs.ceph] ignoring unexpected type '%T' for value %v", val, val) + c.Log.Infof("ignoring unexpected type '%T' for value %v", val, val) } return metrics } -// exec executes the 'ceph' command with the supplied arguments, returning JSON formatted output -func (c *Ceph) exec(command string) (string, error) { +// execute executes the 'ceph' command with the supplied arguments, returning JSON formatted output +func (c *Ceph) execute(command string) (string, error) { cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"} cmdArgs = append(cmdArgs, strings.Split(command, " ")...) @@ -336,8 +309,8 @@ func (c *Ceph) exec(command string) (string, error) { // Ceph doesn't sanitize its output, and may return invalid JSON. Patch this // up for them, as having some inaccurate data is better than none. 
- output = strings.Replace(output, "-inf", "0", -1) - output = strings.Replace(output, "inf", "0", -1) + output = strings.ReplaceAll(output, "-inf", "0") + output = strings.ReplaceAll(output, "inf", "0") return output, nil } diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index 78da3438de691..1f0b58ad30df0 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -2,15 +2,15 @@ package ceph import ( "fmt" - "io/ioutil" "os" - "path" + "path/filepath" "strconv" "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) const ( @@ -24,42 +24,46 @@ type expectedResult struct { } func TestParseSockId(t *testing.T) { - s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) - assert.Equal(t, s, "1") + s := parseSockID(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) + require.Equal(t, s, "1") } func TestParseMonDump(t *testing.T) { - dump, err := parseDump(monPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon) - assert.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(monPerfDump) + require.NoError(t, err) + require.InEpsilon(t, int64(5678670180), dump["cluster"]["osd_kb_used"], epsilon) + require.InEpsilon(t, 6866.540527000, dump["paxos"]["store_state_latency.sum"], epsilon) } func TestParseOsdDump(t *testing.T) { - dump, err := parseDump(osdPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon) - assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(osdPerfDump) + require.NoError(t, err) + require.InEpsilon(t, 552132.109360000, dump["filestore"]["commitcycle_interval.sum"], epsilon) + require.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) } func TestParseMdsDump(t *testing.T) { - dump, err := parseDump(mdsPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) - assert.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(mdsPerfDump) + require.NoError(t, err) + require.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) + require.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) } func TestParseRgwDump(t *testing.T) { - dump, err := parseDump(rgwPerfDump) - assert.NoError(t, err) - assert.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) - assert.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) + c := &Ceph{Log: testutil.Logger{}} + dump, err := c.parseDump(rgwPerfDump) + require.NoError(t, err) + require.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) + require.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) } func TestDecodeStatus(t *testing.T) { acc := &testutil.Accumulator{} err := decodeStatus(acc, clusterStatusDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephStatusResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) @@ -69,7 +73,7 @@ func TestDecodeStatus(t *testing.T) { func TestDecodeDf(t *testing.T) { acc := &testutil.Accumulator{} err := decodeDf(acc, 
cephDFDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephDfResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) @@ -79,7 +83,7 @@ func TestDecodeDf(t *testing.T) { func TestDecodeOSDPoolStats(t *testing.T) { acc := &testutil.Accumulator{} err := decodeOsdPoolStats(acc, cephODSPoolStatsDump) - assert.NoError(t, err) + require.NoError(t, err) for _, r := range cephOSDPoolStatsResults { acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) @@ -104,17 +108,11 @@ func TestGather(t *testing.T) { acc := &testutil.Accumulator{} c := &Ceph{} - c.Gather(acc) - + require.NoError(t, c.Gather(acc)) } func TestFindSockets(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "socktest") - assert.NoError(t, err) - defer func() { - err := os.Remove(tmpdir) - assert.NoError(t, err) - }() + tmpdir := t.TempDir() c := &Ceph{ CephBinary: "foo", OsdPrefix: "ceph-osd", @@ -130,10 +128,10 @@ func TestFindSockets(t *testing.T) { } for _, st := range sockTestParams { - createTestFiles(tmpdir, st) + require.NoError(t, createTestFiles(tmpdir, st)) sockets, err := findSockets(c) - assert.NoError(t, err) + require.NoError(t, err) for i := 1; i <= st.osds; i++ { assertFoundSocket(t, tmpdir, typeOsd, i, sockets) @@ -148,7 +146,7 @@ func TestFindSockets(t *testing.T) { for i := 1; i <= st.rgws; i++ { assertFoundSocket(t, tmpdir, typeRgw, i, sockets) } - cleanupTestFiles(tmpdir, st) + require.NoError(t, cleanupTestFiles(tmpdir, st)) } } @@ -163,57 +161,64 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc } else { prefix = monPrefix } - expected := path.Join(dir, sockFile(prefix, i)) + expected := filepath.Join(dir, sockFile(prefix, i)) found := false for _, s := range sockets { - fmt.Printf("Checking %s\n", s.socket) + _, err := fmt.Printf("Checking %s\n", s.socket) + require.NoError(t, err) if s.socket == expected { found = true - assert.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) - assert.Equal(t, s.sockId, strconv.Itoa(i)) + require.Equal(t, s.sockType, sockType, "Unexpected socket type for '%s'", s) + require.Equal(t, s.sockID, strconv.Itoa(i)) } } - assert.True(t, found, "Did not find socket: %s", expected) + require.True(t, found, "Did not find socket: %s", expected) } func sockFile(prefix string, i int) string { return strings.Join([]string{prefix, strconv.Itoa(i), sockSuffix}, ".") } -func createTestFiles(dir string, st *SockTest) { - writeFile := func(prefix string, i int) { +func createTestFiles(dir string, st *SockTest) error { + writeFile := func(prefix string, i int) error { f := sockFile(prefix, i) - fpath := path.Join(dir, f) - ioutil.WriteFile(fpath, []byte(""), 0777) + fpath := filepath.Join(dir, f) + return os.WriteFile(fpath, []byte(""), 0644) } - tstFileApply(st, writeFile) + return tstFileApply(st, writeFile) } -func cleanupTestFiles(dir string, st *SockTest) { - rmFile := func(prefix string, i int) { +func cleanupTestFiles(dir string, st *SockTest) error { + rmFile := func(prefix string, i int) error { f := sockFile(prefix, i) - fpath := path.Join(dir, f) - err := os.Remove(fpath) - if err != nil { - fmt.Printf("Error removing test file %s: %v\n", fpath, err) - } + fpath := filepath.Join(dir, f) + return os.Remove(fpath) } - tstFileApply(st, rmFile) + return tstFileApply(st, rmFile) } -func tstFileApply(st *SockTest, fn func(prefix string, i int)) { +func tstFileApply(st *SockTest, fn func(string, int) error) error { for i := 1; i <= st.osds; i++ { - fn(osdPrefix, i) + if err := 
fn(osdPrefix, i); err != nil { + return err + } } for i := 1; i <= st.mons; i++ { - fn(monPrefix, i) + if err := fn(monPrefix, i); err != nil { + return err + } } for i := 1; i <= st.mdss; i++ { - fn(mdsPrefix, i) + if err := fn(mdsPrefix, i); err != nil { + return err + } } for i := 1; i <= st.rgws; i++ { - fn(rgwPrefix, i) + if err := fn(rgwPrefix, i); err != nil { + return err + } } + return nil } type SockTest struct { diff --git a/plugins/inputs/ceph/sample.conf b/plugins/inputs/ceph/sample.conf new file mode 100644 index 0000000000000..5cc4b2537e02f --- /dev/null +++ b/plugins/inputs/ceph/sample.conf @@ -0,0 +1,42 @@ +# Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster. +[[inputs.ceph]] + ## This is the recommended interval to poll. Too frequent and you will lose + ## data points due to timeouts during rebalancing and recovery + interval = '1m' + + ## All configuration values are optional, defaults are shown below + + ## location of ceph binary + ceph_binary = "/usr/bin/ceph" + + ## directory in which to look for socket files + socket_dir = "/var/run/ceph" + + ## prefix of MON and OSD socket files, used to determine socket type + mon_prefix = "ceph-mon" + osd_prefix = "ceph-osd" + mds_prefix = "ceph-mds" + rgw_prefix = "ceph-client" + + ## suffix used to identify socket files + socket_suffix = "asok" + + ## Ceph user to authenticate as, ceph will search for the corresponding keyring + ## e.g. client.admin.keyring in /etc/ceph, or the explicit path defined in the + ## client section of ceph.conf for example: + ## + ## [client.telegraf] + ## keyring = /etc/ceph/client.telegraf.keyring + ## + ## Consult the ceph documentation for more detail on keyring generation. + ceph_user = "client.admin" + + ## Ceph configuration to use to locate the cluster + ceph_config = "/etc/ceph/ceph.conf" + + ## Whether to gather statistics via the admin socket + gather_admin_socket_stats = true + + ## Whether to gather statistics via ceph commands, requires ceph_user and ceph_config + ## to be specified + gather_cluster_stats = false diff --git a/plugins/inputs/cgroup/README.md b/plugins/inputs/cgroup/README.md index 6982517bc5879..cf67cc32fad97 100644 --- a/plugins/inputs/cgroup/README.md +++ b/plugins/inputs/cgroup/README.md @@ -10,50 +10,54 @@ Following file formats are supported: * Single value -``` +```text VAL\n ``` * New line separated values -``` +```text VAL0\n VAL1\n ``` * Space separated values -``` +```text VAL0 VAL1 ...\n ``` -* New line separated key-space-value's +* Space separated keys and value, separated by new line -``` -KEY0 VAL0\n -KEY1 VAL1\n +```text +KEY0 ... VAL0\n +KEY1 ... VAL1\n ``` +## Metrics -### Tags: +All measurements have the `path` tag. -All measurements have the following tags: - - path +## Configuration - -### Configuration: - -```toml -# [[inputs.cgroup]] +```toml @sample.conf +# Read specific statistics per cgroup +[[inputs.cgroup]] + ## Directories in which to look for files, globs are supported. + ## Consider restricting paths to the set of cgroups you really + ## want to monitor if you have a large number of cgroups, to avoid + ## any cardinality issues. # paths = [ - # "/sys/fs/cgroup/memory", # root cgroup - # "/sys/fs/cgroup/memory/child1", # container cgroup - # "/sys/fs/cgroup/memory/child2/*", # all children cgroups under child2, but not child2 itself + # "/sys/fs/cgroup/memory", + # "/sys/fs/cgroup/memory/child1", + # "/sys/fs/cgroup/memory/child2/*", # ] + ## cgroup stat fields, as file names, globs are supported. 
+ ## these file names are appended to each path from above. # files = ["memory.*usage*", "memory.limit_in_bytes"] ``` -### usage examples: +## Example Configurations ```toml # [[inputs.cgroup]] diff --git a/plugins/inputs/cgroup/cgroup.go b/plugins/inputs/cgroup/cgroup.go index f3853a9da9a20..f5ded195ef324 100644 --- a/plugins/inputs/cgroup/cgroup.go +++ b/plugins/inputs/cgroup/cgroup.go @@ -1,38 +1,26 @@ +//go:generate ../../../tools/readme_config_includer/generator package cgroup import ( + _ "embed" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type CGroup struct { Paths []string `toml:"paths"` Files []string `toml:"files"` } -var sampleConfig = ` - ## Directories in which to look for files, globs are supported. - ## Consider restricting paths to the set of cgroups you really - ## want to monitor if you have a large number of cgroups, to avoid - ## any cardinality issues. - # paths = [ - # "/sys/fs/cgroup/memory", - # "/sys/fs/cgroup/memory/child1", - # "/sys/fs/cgroup/memory/child2/*", - # ] - ## cgroup stat fields, as file names, globs are supported. - ## these file names are appended to each path from above. - # files = ["memory.*usage*", "memory.limit_in_bytes"] -` - -func (g *CGroup) SampleConfig() string { +func (*CGroup) SampleConfig() string { return sampleConfig } -func (g *CGroup) Description() string { - return "Read specific statistics per cgroup" -} - func init() { inputs.Add("cgroup", func() telegraf.Input { return &CGroup{} }) } diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index bb38525b7a8f5..b892f528c234f 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -1,15 +1,16 @@ +//go:build linux // +build linux package cgroup import ( "fmt" - "io/ioutil" "os" "path" "path/filepath" "regexp" "strconv" + "strings" "github.com/influxdata/telegraf" ) @@ -25,7 +26,7 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error { acc.AddError(dir.err) continue } - if err := g.gatherDir(dir.path, acc); err != nil { + if err := g.gatherDir(acc, dir.path); err != nil { acc.AddError(err) } } @@ -33,7 +34,7 @@ func (g *CGroup) Gather(acc telegraf.Accumulator) error { return nil } -func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error { +func (g *CGroup) gatherDir(acc telegraf.Accumulator, dir string) error { fields := make(map[string]interface{}) list := make(chan pathInfo) @@ -44,7 +45,7 @@ func (g *CGroup) gatherDir(dir string, acc telegraf.Accumulator) error { return file.err } - raw, err := ioutil.ReadFile(file.path) + raw, err := os.ReadFile(file.path) if err != nil { return err } @@ -72,8 +73,8 @@ type pathInfo struct { err error } -func isDir(path string) (bool, error) { - result, err := os.Stat(path) +func isDir(pathToCheck string) (bool, error) { + result, err := os.Stat(pathToCheck) if err != nil { return false, err } @@ -168,7 +169,7 @@ type fileFormat struct { parser func(measurement string, fields map[string]interface{}, b []byte) } -const keyPattern = "[[:alpha:]_]+" +const keyPattern = "[[:alnum:]:_]+" const valuePattern = "[\\d-]+" var fileFormats = [...]fileFormat{ @@ -208,17 +209,18 @@ var fileFormats = [...]fileFormat{ } }, }, - // KEY0 VAL0\n - // KEY1 VAL1\n + // KEY0 ... VAL0\n + // KEY1 ... VAL1\n // ... 
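The cgroup plugin above now embeds `sample.conf` at compile time instead of duplicating the configuration in a Go string constant. A standalone sketch of that embed pattern, assuming a `sample.conf` file sits next to the source file (the `plugin` type is illustrative):

```go
package main

import (
	_ "embed"
	"fmt"
)

// The //go:embed directive must sit directly above the variable it fills;
// the file is read at build time and compiled into the binary.
//
//go:embed sample.conf
var sampleConfig string

type plugin struct{}

// SampleConfig returns the embedded file, so the shipped binary and the
// on-disk sample.conf can never drift apart.
func (*plugin) SampleConfig() string { return sampleConfig }

func main() {
	var p plugin
	fmt.Print(p.SampleConfig())
}
```

Keeping one file as the single source of truth is what lets the `readme_config_includer` generator and `SampleConfig()` stay in sync automatically.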
{ - name: "New line separated key-space-value's", - pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$", + name: "Space separated keys and value, separated by new line", + pattern: "^((" + keyPattern + " )+" + valuePattern + "\n)+$", parser: func(measurement string, fields map[string]interface{}, b []byte) { - re := regexp.MustCompile("(" + keyPattern + ") (" + valuePattern + ")\n") + re := regexp.MustCompile("((?:" + keyPattern + " ?)+) (" + valuePattern + ")\n") matches := re.FindAllStringSubmatch(string(b), -1) for _, v := range matches { - fields[measurement+"."+v[1]] = numberOrString(v[2]) + k := strings.ReplaceAll(v[1], " ", ".") + fields[measurement+"."+k] = numberOrString(v[2]) } }, }, diff --git a/plugins/inputs/cgroup/cgroup_notlinux.go b/plugins/inputs/cgroup/cgroup_notlinux.go index 2bc227410a6e2..1c9c08ec41ac5 100644 --- a/plugins/inputs/cgroup/cgroup_notlinux.go +++ b/plugins/inputs/cgroup/cgroup_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package cgroup diff --git a/plugins/inputs/cgroup/cgroup_test.go b/plugins/inputs/cgroup/cgroup_test.go index b3094baef31ae..ba74247eeb1f3 100644 --- a/plugins/inputs/cgroup/cgroup_test.go +++ b/plugins/inputs/cgroup/cgroup_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package cgroup @@ -180,3 +181,155 @@ func TestCgroupStatistics_6(t *testing.T) { } acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) } + +// ====================================================================== + +var cg7 = &CGroup{ + Paths: []string{"testdata/blkio"}, + Files: []string{"blkio.throttle.io_serviced"}, +} + +func TestCgroupStatistics_7(t *testing.T) { + var acc testutil.Accumulator + + err := acc.GatherError(cg7.Gather) + require.NoError(t, err) + + tags := map[string]string{ + "path": "testdata/blkio", + } + fields := map[string]interface{}{ + "blkio.throttle.io_serviced.11:0.Read": int64(0), + "blkio.throttle.io_serviced.11:0.Write": int64(0), + "blkio.throttle.io_serviced.11:0.Sync": int64(0), + "blkio.throttle.io_serviced.11:0.Async": int64(0), + "blkio.throttle.io_serviced.11:0.Total": int64(0), + "blkio.throttle.io_serviced.8:0.Read": int64(49134), + "blkio.throttle.io_serviced.8:0.Write": int64(216703), + "blkio.throttle.io_serviced.8:0.Sync": int64(177906), + "blkio.throttle.io_serviced.8:0.Async": int64(87931), + "blkio.throttle.io_serviced.8:0.Total": int64(265837), + "blkio.throttle.io_serviced.7:7.Read": int64(0), + "blkio.throttle.io_serviced.7:7.Write": int64(0), + "blkio.throttle.io_serviced.7:7.Sync": int64(0), + "blkio.throttle.io_serviced.7:7.Async": int64(0), + "blkio.throttle.io_serviced.7:7.Total": int64(0), + "blkio.throttle.io_serviced.7:6.Read": int64(0), + "blkio.throttle.io_serviced.7:6.Write": int64(0), + "blkio.throttle.io_serviced.7:6.Sync": int64(0), + "blkio.throttle.io_serviced.7:6.Async": int64(0), + "blkio.throttle.io_serviced.7:6.Total": int64(0), + "blkio.throttle.io_serviced.7:5.Read": int64(0), + "blkio.throttle.io_serviced.7:5.Write": int64(0), + "blkio.throttle.io_serviced.7:5.Sync": int64(0), + "blkio.throttle.io_serviced.7:5.Async": int64(0), + "blkio.throttle.io_serviced.7:5.Total": int64(0), + "blkio.throttle.io_serviced.7:4.Read": int64(0), + "blkio.throttle.io_serviced.7:4.Write": int64(0), + "blkio.throttle.io_serviced.7:4.Sync": int64(0), + "blkio.throttle.io_serviced.7:4.Async": int64(0), + "blkio.throttle.io_serviced.7:4.Total": int64(0), + "blkio.throttle.io_serviced.7:3.Read": int64(0), + "blkio.throttle.io_serviced.7:3.Write": int64(0), + 
"blkio.throttle.io_serviced.7:3.Sync": int64(0), + "blkio.throttle.io_serviced.7:3.Async": int64(0), + "blkio.throttle.io_serviced.7:3.Total": int64(0), + "blkio.throttle.io_serviced.7:2.Read": int64(0), + "blkio.throttle.io_serviced.7:2.Write": int64(0), + "blkio.throttle.io_serviced.7:2.Sync": int64(0), + "blkio.throttle.io_serviced.7:2.Async": int64(0), + "blkio.throttle.io_serviced.7:2.Total": int64(0), + "blkio.throttle.io_serviced.7:1.Read": int64(0), + "blkio.throttle.io_serviced.7:1.Write": int64(0), + "blkio.throttle.io_serviced.7:1.Sync": int64(0), + "blkio.throttle.io_serviced.7:1.Async": int64(0), + "blkio.throttle.io_serviced.7:1.Total": int64(0), + "blkio.throttle.io_serviced.7:0.Read": int64(0), + "blkio.throttle.io_serviced.7:0.Write": int64(0), + "blkio.throttle.io_serviced.7:0.Sync": int64(0), + "blkio.throttle.io_serviced.7:0.Async": int64(0), + "blkio.throttle.io_serviced.7:0.Total": int64(0), + "blkio.throttle.io_serviced.1:15.Read": int64(3), + "blkio.throttle.io_serviced.1:15.Write": int64(0), + "blkio.throttle.io_serviced.1:15.Sync": int64(0), + "blkio.throttle.io_serviced.1:15.Async": int64(3), + "blkio.throttle.io_serviced.1:15.Total": int64(3), + "blkio.throttle.io_serviced.1:14.Read": int64(3), + "blkio.throttle.io_serviced.1:14.Write": int64(0), + "blkio.throttle.io_serviced.1:14.Sync": int64(0), + "blkio.throttle.io_serviced.1:14.Async": int64(3), + "blkio.throttle.io_serviced.1:14.Total": int64(3), + "blkio.throttle.io_serviced.1:13.Read": int64(3), + "blkio.throttle.io_serviced.1:13.Write": int64(0), + "blkio.throttle.io_serviced.1:13.Sync": int64(0), + "blkio.throttle.io_serviced.1:13.Async": int64(3), + "blkio.throttle.io_serviced.1:13.Total": int64(3), + "blkio.throttle.io_serviced.1:12.Read": int64(3), + "blkio.throttle.io_serviced.1:12.Write": int64(0), + "blkio.throttle.io_serviced.1:12.Sync": int64(0), + "blkio.throttle.io_serviced.1:12.Async": int64(3), + "blkio.throttle.io_serviced.1:12.Total": int64(3), + "blkio.throttle.io_serviced.1:11.Read": int64(3), + "blkio.throttle.io_serviced.1:11.Write": int64(0), + "blkio.throttle.io_serviced.1:11.Sync": int64(0), + "blkio.throttle.io_serviced.1:11.Async": int64(3), + "blkio.throttle.io_serviced.1:11.Total": int64(3), + "blkio.throttle.io_serviced.1:10.Read": int64(3), + "blkio.throttle.io_serviced.1:10.Write": int64(0), + "blkio.throttle.io_serviced.1:10.Sync": int64(0), + "blkio.throttle.io_serviced.1:10.Async": int64(3), + "blkio.throttle.io_serviced.1:10.Total": int64(3), + "blkio.throttle.io_serviced.1:9.Read": int64(3), + "blkio.throttle.io_serviced.1:9.Write": int64(0), + "blkio.throttle.io_serviced.1:9.Sync": int64(0), + "blkio.throttle.io_serviced.1:9.Async": int64(3), + "blkio.throttle.io_serviced.1:9.Total": int64(3), + "blkio.throttle.io_serviced.1:8.Read": int64(3), + "blkio.throttle.io_serviced.1:8.Write": int64(0), + "blkio.throttle.io_serviced.1:8.Sync": int64(0), + "blkio.throttle.io_serviced.1:8.Async": int64(3), + "blkio.throttle.io_serviced.1:8.Total": int64(3), + "blkio.throttle.io_serviced.1:7.Read": int64(3), + "blkio.throttle.io_serviced.1:7.Write": int64(0), + "blkio.throttle.io_serviced.1:7.Sync": int64(0), + "blkio.throttle.io_serviced.1:7.Async": int64(3), + "blkio.throttle.io_serviced.1:7.Total": int64(3), + "blkio.throttle.io_serviced.1:6.Read": int64(3), + "blkio.throttle.io_serviced.1:6.Write": int64(0), + "blkio.throttle.io_serviced.1:6.Sync": int64(0), + "blkio.throttle.io_serviced.1:6.Async": int64(3), + "blkio.throttle.io_serviced.1:6.Total": int64(3), + 
"blkio.throttle.io_serviced.1:5.Read": int64(3), + "blkio.throttle.io_serviced.1:5.Write": int64(0), + "blkio.throttle.io_serviced.1:5.Sync": int64(0), + "blkio.throttle.io_serviced.1:5.Async": int64(3), + "blkio.throttle.io_serviced.1:5.Total": int64(3), + "blkio.throttle.io_serviced.1:4.Read": int64(3), + "blkio.throttle.io_serviced.1:4.Write": int64(0), + "blkio.throttle.io_serviced.1:4.Sync": int64(0), + "blkio.throttle.io_serviced.1:4.Async": int64(3), + "blkio.throttle.io_serviced.1:4.Total": int64(3), + "blkio.throttle.io_serviced.1:3.Read": int64(3), + "blkio.throttle.io_serviced.1:3.Write": int64(0), + "blkio.throttle.io_serviced.1:3.Sync": int64(0), + "blkio.throttle.io_serviced.1:3.Async": int64(3), + "blkio.throttle.io_serviced.1:3.Total": int64(3), + "blkio.throttle.io_serviced.1:2.Read": int64(3), + "blkio.throttle.io_serviced.1:2.Write": int64(0), + "blkio.throttle.io_serviced.1:2.Sync": int64(0), + "blkio.throttle.io_serviced.1:2.Async": int64(3), + "blkio.throttle.io_serviced.1:2.Total": int64(3), + "blkio.throttle.io_serviced.1:1.Read": int64(3), + "blkio.throttle.io_serviced.1:1.Write": int64(0), + "blkio.throttle.io_serviced.1:1.Sync": int64(0), + "blkio.throttle.io_serviced.1:1.Async": int64(3), + "blkio.throttle.io_serviced.1:1.Total": int64(3), + "blkio.throttle.io_serviced.1:0.Read": int64(3), + "blkio.throttle.io_serviced.1:0.Write": int64(0), + "blkio.throttle.io_serviced.1:0.Sync": int64(0), + "blkio.throttle.io_serviced.1:0.Async": int64(3), + "blkio.throttle.io_serviced.1:0.Total": int64(3), + "blkio.throttle.io_serviced.Total": int64(265885), + } + acc.AssertContainsTaggedFields(t, "cgroup", fields, tags) +} diff --git a/plugins/inputs/cgroup/sample.conf b/plugins/inputs/cgroup/sample.conf new file mode 100644 index 0000000000000..f931a11a74a56 --- /dev/null +++ b/plugins/inputs/cgroup/sample.conf @@ -0,0 +1,14 @@ +# Read specific statistics per cgroup +[[inputs.cgroup]] + ## Directories in which to look for files, globs are supported. + ## Consider restricting paths to the set of cgroups you really + ## want to monitor if you have a large number of cgroups, to avoid + ## any cardinality issues. + # paths = [ + # "/sys/fs/cgroup/memory", + # "/sys/fs/cgroup/memory/child1", + # "/sys/fs/cgroup/memory/child2/*", + # ] + ## cgroup stat fields, as file names, globs are supported. + ## these file names are appended to each path from above. + # files = ["memory.*usage*", "memory.limit_in_bytes"] diff --git a/plugins/inputs/chrony/README.md b/plugins/inputs/chrony/README.md index aa4f848065297..9761a94b8b90e 100644 --- a/plugins/inputs/chrony/README.md +++ b/plugins/inputs/chrony/README.md @@ -2,92 +2,96 @@ Get standard chrony metrics, requires chronyc executable. -Below is the documentation of the various headers returned by `chronyc tracking`. +Below is the documentation of the various headers returned by `chronyc +tracking`. - Reference ID - This is the refid and name (or IP address) if available, of the -server to which the computer is currently synchronised. If this is 127.127.1.1 -it means the computer is not synchronised to any external source and that you -have the ‘local’ mode operating (via the local command in chronyc (see section local), -or the local directive in the ‘/etc/chrony.conf’ file (see section local)). -- Stratum - The stratum indicates how many hops away from a computer with an attached -reference clock we are. Such a computer is a stratum-1 computer, so the computer in the -example is two hops away (i.e. 
a.b.c is a stratum-2 and is synchronised from a stratum-1). -- Ref time - This is the time (UTC) at which the last measurement from the reference -source was processed. -- System time - In normal operation, chronyd never steps the system clock, because any -jump in the timescale can have adverse consequences for certain application programs. -Instead, any error in the system clock is corrected by slightly speeding up or slowing -down the system clock until the error has been removed, and then returning to the system -clock’s normal speed. A consequence of this is that there will be a period when the -system clock (as read by other programs using the gettimeofday() system call, or by the -date command in the shell) will be different from chronyd's estimate of the current true -time (which it reports to NTP clients when it is operating in server mode). The value -reported on this line is the difference due to this effect. + server to which the computer is currently synchronised. If this is 127.127.1.1 + it means the computer is not synchronised to any external source and that you + have the ‘local’ mode operating (via the local command in chronyc (see section + local), or the local directive in the ‘/etc/chrony.conf’ file (see section + local)). +- Stratum - The stratum indicates how many hops away from a computer with an + attached reference clock we are. Such a computer is a stratum-1 computer, so + the computer in the example is two hops away (i.e. a.b.c is a stratum-2 and is + synchronised from a stratum-1). +- Ref time - This is the time (UTC) at which the last measurement from the + reference source was processed. +- System time - In normal operation, chronyd never steps the system clock, + because any jump in the timescale can have adverse consequences for certain + application programs. Instead, any error in the system clock is corrected by + slightly speeding up or slowing down the system clock until the error has been + removed, and then returning to the system clock’s normal speed. A consequence + of this is that there will be a period when the system clock (as read by other + programs using the gettimeofday() system call, or by the date command in the + shell) will be different from chronyd's estimate of the current true time + (which it reports to NTP clients when it is operating in server mode). The + value reported on this line is the difference due to this effect. - Last offset - This is the estimated local offset on the last clock update. - RMS offset - This is a long-term average of the offset value. - Frequency - The ‘frequency’ is the rate by which the system’s clock would be -wrong if chronyd was not correcting it. It is expressed in ppm (parts per million). -For example, a value of 1ppm would mean that when the system’s clock thinks it has -advanced 1 second, it has actually advanced by 1.000001 seconds relative to true time. + wrong if chronyd was not correcting it. It is expressed in ppm (parts per + million). For example, a value of 1ppm would mean that when the system’s + clock thinks it has advanced 1 second, it has actually advanced by 1.000001 + seconds relative to true time. - Residual freq - This shows the ‘residual frequency’ for the currently selected -reference source. This reflects any difference between what the measurements from the -reference source indicate the frequency should be and the frequency currently being used. -The reason this is not always zero is that a smoothing procedure is applied to the -frequency. 
Each time a measurement from the reference source is obtained and a new -residual frequency computed, the estimated accuracy of this residual is compared with the -estimated accuracy (see ‘skew’ next) of the existing frequency value. A weighted average -is computed for the new frequency, with weights depending on these accuracies. If the -measurements from the reference source follow a consistent trend, the residual will be -driven to zero over time. + reference source. This reflects any difference between what the measurements + from the reference source indicate the frequency should be and the frequency + currently being used. The reason this is not always zero is that a smoothing + procedure is applied to the frequency. Each time a measurement from the + reference source is obtained and a new residual frequency computed, the + estimated accuracy of this residual is compared with the estimated accuracy + (see ‘skew’ next) of the existing frequency value. A weighted average is + computed for the new frequency, with weights depending on these accuracies. If + the measurements from the reference source follow a consistent trend, the + residual will be driven to zero over time. - Skew - This is the estimated error bound on the frequency. -- Root delay - This is the total of the network path delays to the stratum-1 computer -from which the computer is ultimately synchronised. In certain extreme situations, this -value can be negative. (This can arise in a symmetric peer arrangement where the computers’ -frequencies are not tracking each other and the network delay is very short relative to the -turn-around time at each computer.) -- Root dispersion - This is the total dispersion accumulated through all the computers -back to the stratum-1 computer from which the computer is ultimately synchronised. -Dispersion is due to system clock resolution, statistical measurement variations etc. +- Root delay - This is the total of the network path delays to the stratum-1 + computer from which the computer is ultimately synchronised. In certain + extreme situations, this value can be negative. (This can arise in a symmetric + peer arrangement where the computers’ frequencies are not tracking each other + and the network delay is very short relative to the turn-around time at each + computer.) +- Root dispersion - This is the total dispersion accumulated through all the + computers back to the stratum-1 computer from which the computer is ultimately + synchronised. Dispersion is due to system clock resolution, statistical + measurement variations etc. - Leap status - This is the leap status, which can be Normal, Insert second, -Delete second or Not synchronised. + Delete second or Not synchronised. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Get standard chrony metrics, requires chronyc executable. [[inputs.chrony]] ## If true, chronyc tries to perform a DNS lookup for the time server. 
# dns_lookup = false ``` -### Measurements & Fields: +## Metrics - chrony - - system_time (float, seconds) - - last_offset (float, seconds) - - rms_offset (float, seconds) - - frequency (float, ppm) - - residual_freq (float, ppm) - - skew (float, ppm) - - root_delay (float, seconds) - - root_dispersion (float, seconds) - - update_interval (float, seconds) + - system_time (float, seconds) + - last_offset (float, seconds) + - rms_offset (float, seconds) + - frequency (float, ppm) + - residual_freq (float, ppm) + - skew (float, ppm) + - root_delay (float, seconds) + - root_dispersion (float, seconds) + - update_interval (float, seconds) -### Tags: +### Tags - All measurements have the following tags: - - reference_id - - stratum - - leap_status + - reference_id + - stratum + - leap_status -### Example Output: +## Example Output -``` +```shell $ telegraf --config telegraf.conf --input-filter chrony --test * Plugin: chrony, Collection 1 > chrony,leap_status=normal,reference_id=192.168.1.1,stratum=3 frequency=-35.657,system_time=0.000027073,last_offset=-0.000013616,residual_freq=-0,rms_offset=0.000027073,root_delay=0.000644,root_dispersion=0.003444,skew=0.001,update_interval=1031.2 1463750789687639161 ``` - - - - diff --git a/plugins/inputs/chrony/chrony.go b/plugins/inputs/chrony/chrony.go index 3fe18e89c91cb..7052ae538baba 100644 --- a/plugins/inputs/chrony/chrony.go +++ b/plugins/inputs/chrony/chrony.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package chrony import ( + _ "embed" "errors" "fmt" "os/exec" @@ -13,6 +15,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( execCommand = exec.Command // execCommand is used to mock commands in tests. ) @@ -22,15 +28,8 @@ type Chrony struct { path string } -func (*Chrony) Description() string { - return "Get standard chrony metrics, requires chronyc executable." -} - func (*Chrony) SampleConfig() string { - return ` - ## If true, chronyc tries to perform a DNS lookup for the time server. - # dns_lookup = false - ` + return sampleConfig } func (c *Chrony) Init() error { @@ -93,7 +92,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string if len(stats) < 2 { return nil, nil, fmt.Errorf("unexpected output from chronyc, expected ':' in %s", out) } - name := strings.ToLower(strings.Replace(strings.TrimSpace(stats[0]), " ", "_", -1)) + name := strings.ToLower(strings.ReplaceAll(strings.TrimSpace(stats[0]), " ", "_")) // ignore reference time if strings.Contains(name, "ref_time") { continue diff --git a/plugins/inputs/chrony/chrony_test.go b/plugins/inputs/chrony/chrony_test.go index a5fd9dd028e57..01f5f458dd738 100644 --- a/plugins/inputs/chrony/chrony_test.go +++ b/plugins/inputs/chrony/chrony_test.go @@ -49,10 +49,9 @@ func TestGather(t *testing.T) { t.Fatal(err) } acc.AssertContainsTaggedFields(t, "chrony", fields, tags) - } -// fackeExecCommand is a helper function that mock +// fakeExecCommand is a helper function that mock // the exec.Command call (and call the test binary) func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} @@ -66,7 +65,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. 
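The `fakeExecCommand`/`TestHelperProcess` pair above is the standard Go technique for mocking `exec.Command`: the test re-invokes its own binary, restricted to the helper "test", which prints canned chronyc output instead of running the real executable. A condensed sketch of the mechanism (the mock output line is illustrative):

```go
package chrony

import (
	"fmt"
	"os"
	"os/exec"
	"testing"
)

// fakeExecCommand re-runs the current test binary, telling it to execute
// only TestHelperProcess, which stands in for the real chronyc command.
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	cs := append([]string{"-test.run=TestHelperProcess", "--", command}, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}

// TestHelperProcess is not a real test: it only acts when the marker
// environment variable is set, prints mock output, and exits.
func TestHelperProcess(_ *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}
	fmt.Fprint(os.Stdout, "Reference ID    : C0A80101 (192.168.1.1)\n")
	os.Exit(0)
}
```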
-func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -95,14 +94,18 @@ Leap status : Not synchronized if cmd == "chronyc" { if args[0] == "tracking" { + //nolint:errcheck,revive // test will fail anyway fmt.Fprint(os.Stdout, lookup+mockData) } else { + //nolint:errcheck,revive // test will fail anyway fmt.Fprint(os.Stdout, noLookup+mockData) } } else { + //nolint:errcheck,revive // test will fail anyway fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) - } + //nolint:revive // error code is important for this "test" os.Exit(0) } diff --git a/plugins/inputs/chrony/sample.conf b/plugins/inputs/chrony/sample.conf new file mode 100644 index 0000000000000..2a4e2923ce5fe --- /dev/null +++ b/plugins/inputs/chrony/sample.conf @@ -0,0 +1,4 @@ +# Get standard chrony metrics, requires chronyc executable. +[[inputs.chrony]] + ## If true, chronyc tries to perform a DNS lookup for the time server. + # dns_lookup = false diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md index 9c4eb3645d491..af4d861513a1e 100644 --- a/plugins/inputs/cisco_telemetry_mdt/README.md +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -1,18 +1,21 @@ # Cisco Model-Driven Telemetry (MDT) Input Plugin -Cisco model-driven telemetry (MDT) is an input plugin that consumes -telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC dialout transports. -GRPC-based transport can utilize TLS for authentication and encryption. -Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded. +Cisco model-driven telemetry (MDT) is an input plugin that consumes telemetry +data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC +dialout transports. GRPC-based transport can utilize TLS for authentication and +encryption. Telemetry data is expected to be GPB-KV (self-describing-gpb) +encoded. -The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and later, IOS XE 16.10 and later, as well as NX-OS 7.x and later platforms. +The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and +later, IOS XE 16.10 and later, as well as NX-OS 7.x and later platforms. -The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and later. +The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and +later. +## Configuration -### Configuration: - -```toml +```toml @sample.conf +# Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms [[inputs.cisco_telemetry_mdt]] ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when ## using the grpc transport. @@ -21,6 +24,9 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l ## Address and port to host telemetry listener service_address = ":57000" + ## GRPC maximum message size; the default is 4MB, increase it if needed. + max_msg_size = 4000000 + ## Enable TLS; grpc transport only.
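The new `max_msg_size` option raises the GRPC receive limit above the 4MB default; later in this patch the same value also bounds reassembled TCP dialout chunks (`packet.TotalSize <= c.MaxMsgSize`). The exact server wiring is not shown in this hunk, so the following is only a sketch of how such an option is typically applied to a `grpc.Server`:

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	maxMsgSize := 4000000 // mirrors the sample configuration above

	var opts []grpc.ServerOption
	if maxMsgSize > 0 {
		// Raise the per-message receive limit; gRPC defaults to 4MB.
		opts = append(opts, grpc.MaxRecvMsgSize(maxMsgSize))
	}
	srv := grpc.NewServer(opts...)

	lis, err := net.Listen("tcp", ":57000")
	if err != nil {
		log.Fatal(err)
	}
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serving GRPC server failed: %v", err)
	}
}
```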
# tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -35,10 +41,68 @@ The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and l ## Define aliases to map telemetry encoding paths to simple measurement names [inputs.cisco_telemetry_mdt.aliases] ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + ## Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. + [inputs.cisco_telemetry_mdt.dmes] +# Global Property Xformation. +# prop1 = "uint64 to int" +# prop2 = "uint64 to string" +# prop3 = "string to uint64" +# prop4 = "string to int64" +# prop5 = "string to float64" +# auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 +# Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" +# Per Path configuration is better as it avoid property collision issue of types. +# dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' ``` -### Example Output: -``` +## Example Output + +```shell ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000 ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000 ``` + +### NX-OS Configuration Example + +```text +Requirement DATA-SOURCE Configuration +----------------------------------------- +Environment DME path sys/ch query-condition query-target=subtree&target-subtree-class=eqptPsuSlot,eqptFtSlot,eqptSupCSlot,eqptPsu,eqptFt,eqptSensor,eqptLCSlot + DME path sys/ch depth 5 (Another configuration option) +Environment NXAPI show environment power + NXAPI show environment fan + NXAPI show environment temperature +Interface Stats DME path sys/intf query-condition query-target=subtree&target-subtree-class=rmonIfIn,rmonIfOut,rmonIfHCIn,rmonIfHCOut,rmonEtherStats +Interface State DME path sys/intf depth unbounded query-condition query-target=subtree&target-subtree-class=l1PhysIf,pcAggrIf,l3EncRtdIf,l3LbRtdIf,ethpmPhysIf +VPC DME path sys/vpc query-condition query-target=subtree&target-subtree-class=vpcDom,vpcIf +Resources cpu DME path sys/procsys query-condition 
query-target=subtree&target-subtree-class=procSystem,procSysCore,procSysCpuSummary,procSysCpu,procIdle,procIrq,procKernel,procNice,procSoftirq,procTotal,procUser,procWait,procSysCpuHistory,procSysLoad +Resources Mem DME path sys/procsys/sysmem/sysmemused + path sys/procsys/sysmem/sysmemusage + path sys/procsys/sysmem/sysmemfree +Per Process cpu DME path sys/proc depth unbounded query-condition rsp-foreign-subtree=ephemeral +vxlan(svi stats) DME path sys/bd query-condition query-target=subtree&target-subtree-class=l2VlanStats +BGP DME path sys/bgp query-condition query-target=subtree&target-subtree-class=bgpDom,bgpPeer,bgpPeerAf,bgpDomAf,bgpPeerAfEntry,bgpOperRtctrlL3,bgpOperRttP,bgpOperRttEntry,bgpOperAfCtrl +mac dynamic DME path sys/mac query-condition query-target=subtree&target-subtree-class=l2MacAddressTable +bfd DME path sys/bfd/inst depth unbounded +lldp DME path sys/lldp depth unbounded +urib DME path sys/urib depth unbounded query-condition rsp-foreign-subtree=ephemeral +u6rib DME path sys/u6rib depth unbounded query-condition rsp-foreign-subtree=ephemeral +multicast flow DME path sys/mca/show/flows depth unbounded +multicast stats DME path sys/mca/show/stats depth unbounded +multicast igmp NXAPI show ip igmp groups vrf all +multicast igmp NXAPI show ip igmp interface vrf all +multicast igmp NXAPI show ip igmp snooping +multicast igmp NXAPI show ip igmp snooping groups +multicast igmp NXAPI show ip igmp snooping groups detail +multicast igmp NXAPI show ip igmp snooping groups summary +multicast igmp NXAPI show ip igmp snooping mrouter +multicast igmp NXAPI show ip igmp snooping statistics +multicast pim NXAPI show ip pim interface vrf all +multicast pim NXAPI show ip pim neighbor vrf all +multicast pim NXAPI show ip pim route vrf all +multicast pim NXAPI show ip pim rp vrf all +multicast pim NXAPI show ip pim statistics vrf all +multicast pim NXAPI show ip pim vrf all +``` diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go index 1a669e96f878e..6608fb645cca2 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -1,8 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package cisco_telemetry_mdt import ( "bytes" + _ "embed" "encoding/binary" + "encoding/json" "fmt" "io" "net" @@ -14,17 +17,22 @@ import ( dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + _ "google.golang.org/grpc/encoding/gzip" // Required to allow gzip encoding + "google.golang.org/grpc/peer" + "google.golang.org/protobuf/proto" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" // Register GRPC gzip decoder to support compressed telemetry - _ "google.golang.org/grpc/encoding/gzip" - "google.golang.org/grpc/peer" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + const ( // Maximum telemetry payload size (in bytes) to accept for GRPC dialout transport tcpMaxMsgLen uint32 = 1024 * 1024 @@ -37,6 +45,7 @@ type CiscoTelemetryMDT struct { ServiceAddress string `toml:"service_address"` MaxMsgSize int `toml:"max_msg_size"` Aliases map[string]string `toml:"aliases"` + Dmes map[string]string `toml:"dmes"` EmbeddedTags []string `toml:"embedded_tags"` Log telegraf.Logger @@ -49,12 +58,30 @@ type CiscoTelemetryMDT struct { listener net.Listener // Internal state - aliases map[string]string - warned map[string]struct{} - extraTags map[string]map[string]struct{} - mutex sync.Mutex - acc telegraf.Accumulator - wg sync.WaitGroup + internalAliases map[string]string + dmesFuncs map[string]string + warned map[string]struct{} + extraTags map[string]map[string]struct{} + nxpathMap map[string]map[string]string //per path map + propMap map[string]func(field *telemetry.TelemetryField, value interface{}) interface{} + mutex sync.Mutex + acc telegraf.Accumulator + wg sync.WaitGroup + + // Though unused in the code, required by protoc-gen-go-grpc to maintain compatibility + dialout.UnimplementedGRPCMdtDialoutServer +} + +type NxPayloadXfromStructure struct { + Name string `json:"Name"` + Prop []struct { + Key string `json:"Key"` + Value string `json:"Value"` + } `json:"prop"` +} + +func (*CiscoTelemetryMDT) SampleConfig() string { + return sampleConfig } // Start the Cisco MDT service @@ -66,17 +93,60 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { return err } + c.propMap = make(map[string]func(field *telemetry.TelemetryField, value interface{}) interface{}, 100) + c.propMap["test"] = nxosValueXformUint64Toint64 + c.propMap["asn"] = nxosValueXformUint64ToString //uint64 to string. + c.propMap["subscriptionId"] = nxosValueXformUint64ToString //uint64 to string. + c.propMap["operState"] = nxosValueXformUint64ToString //uint64 to string. + // Invert aliases list c.warned = make(map[string]struct{}) - c.aliases = make(map[string]string, len(c.Aliases)) - for alias, path := range c.Aliases { - c.aliases[path] = alias + c.internalAliases = make(map[string]string, len(c.Aliases)) + for alias, encodingPath := range c.Aliases { + c.internalAliases[encodingPath] = alias + } + c.initDb() + + c.dmesFuncs = make(map[string]string, len(c.Dmes)) + for dme, dmeKey := range c.Dmes { + c.dmesFuncs[dmeKey] = dme + switch dmeKey { + case "uint64 to int": + c.propMap[dme] = nxosValueXformUint64Toint64 + case "uint64 to string": + c.propMap[dme] = nxosValueXformUint64ToString + case "string to float64": + c.propMap[dme] = nxosValueXformStringTofloat + case "string to uint64": + c.propMap[dme] = nxosValueXformStringToUint64 + case "string to int64": + c.propMap[dme] = nxosValueXformStringToInt64 + case "auto-float-xfrom": + c.propMap[dme] = nxosValueAutoXformFloatProp + default: + if !strings.HasPrefix(dme, "dnpath") { // not path based property map + continue + } + + var jsStruct NxPayloadXfromStructure + err := json.Unmarshal([]byte(dmeKey), &jsStruct) + if err != nil { + continue + } + + // Build 2 level Hash nxpathMap Key = jsStruct.Name, Value = map of jsStruct.Prop + // It will override the default of code if same path is provided in configuration. 
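The configuration-driven type overrides above parse each `dnpath` value from the TOML into a two-level map (path name, then property key, then target type). A self-contained sketch of that decode step, using the `dnpath` example from the README (the `payloadXform` struct mirrors `NxPayloadXfromStructure`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// payloadXform mirrors NxPayloadXfromStructure above.
type payloadXform struct {
	Name string `json:"Name"`
	Prop []struct {
		Key   string `json:"Key"`
		Value string `json:"Value"`
	} `json:"prop"`
}

func main() {
	dnpath := `{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}`

	var js payloadXform
	if err := json.Unmarshal([]byte(dnpath), &js); err != nil {
		fmt.Println("skipping malformed dnpath:", err)
		return
	}

	// Two-level map: path name -> property key -> target type.
	nxpathMap := map[string]map[string]string{}
	nxpathMap[js.Name] = make(map[string]string, len(js.Prop))
	for _, p := range js.Prop {
		nxpathMap[js.Name][p.Key] = p.Value
	}
	fmt.Println(nxpathMap["show ip route summary"]["routes"]) // "string"
}
```

Per-path overrides like this take precedence over the global property map, which is why the plugin recommends them when the same property name carries different types under different paths.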
+ c.nxpathMap[jsStruct.Name] = make(map[string]string, len(jsStruct.Prop)) + for _, prop := range jsStruct.Prop { + c.nxpathMap[jsStruct.Name][prop.Key] = prop.Value + } + } } // Fill extra tags c.extraTags = make(map[string]map[string]struct{}) for _, tag := range c.EmbeddedTags { - dir := strings.Replace(path.Dir(tag), "-", "_", -1) + dir := strings.ReplaceAll(path.Dir(tag), "-", "_") if _, hasKey := c.extraTags[dir]; !hasKey { c.extraTags[dir] = make(map[string]struct{}) } @@ -96,6 +166,7 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { var opts []grpc.ServerOption tlsConfig, err := c.ServerConfig.TLSConfig() if err != nil { + //nolint:errcheck,revive // we cannot do anything if the closing fails c.listener.Close() return err } else if tlsConfig != nil { @@ -111,11 +182,14 @@ func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { c.wg.Add(1) go func() { - c.grpcServer.Serve(c.listener) + if err := c.grpcServer.Serve(c.listener); err != nil { + c.Log.Errorf("serving GRPC server failed: %v", err) + } c.wg.Done() }() default: + //nolint:errcheck,revive // we cannot do anything if the closing fails c.listener.Close() return fmt.Errorf("invalid Cisco MDT transport: %s", c.Transport) } @@ -154,7 +228,9 @@ func (c *CiscoTelemetryMDT) acceptTCPClients() { delete(clients, conn) mutex.Unlock() - conn.Close() + if err := conn.Close(); err != nil { + c.Log.Warnf("closing connection failed: %v", err) + } c.wg.Done() }() } @@ -214,9 +290,9 @@ func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error { // MdtDialout RPC server method for grpc-dialout transport func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error { - peer, peerOK := peer.FromContext(stream.Context()) + peerInCtx, peerOK := peer.FromContext(stream.Context()) if peerOK { - c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr) + c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peerInCtx.Addr) } var chunkBuffer bytes.Buffer @@ -239,7 +315,9 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS if packet.TotalSize == 0 { c.handleTelemetry(packet.Data) } else if int(packet.TotalSize) <= c.MaxMsgSize { - chunkBuffer.Write(packet.Data) + if _, err := chunkBuffer.Write(packet.Data); err != nil { + c.acc.AddError(fmt.Errorf("writing packet %q failed: %v", packet.Data, err)) + } if chunkBuffer.Len() >= int(packet.TotalSize) { c.handleTelemetry(chunkBuffer.Bytes()) chunkBuffer.Reset() @@ -250,7 +328,7 @@ func (c *CiscoTelemetryMDT) MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutS } if peerOK { - c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peer.Addr) + c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peerInCtx.Addr) } return nil @@ -261,7 +339,7 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { msg := &telemetry.Telemetry{} err := proto.Unmarshal(data, msg) if err != nil { - c.acc.AddError(fmt.Errorf("Cisco MDT failed to decode: %v", err)) + c.acc.AddError(fmt.Errorf("failed to decode: %v", err)) return } @@ -288,15 +366,18 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { } } + // if the keys and content fields are missing, skip the message as it + // does not have parsable data used by Telegraf if keys == nil || content == nil { - c.Log.Infof("Message from %s missing keys or content", msg.GetNodeIdStr()) continue } // Parse keys tags = make(map[string]string, len(keys.Fields)+3) tags["source"] = msg.GetNodeIdStr() - 
tags["subscription"] = msg.GetSubscriptionIdStr() + if msgID := msg.GetSubscriptionIdStr(); msgID != "" { + tags["subscription"] = msgID + } tags["path"] = msg.GetEncodingPath() for _, subfield := range keys.Fields { @@ -309,8 +390,8 @@ func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { } } - for _, metric := range grouper.Metrics() { - c.acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + c.acc.AddMetric(groupedMetric) } } @@ -370,7 +451,7 @@ func decodeTag(field *telemetry.TelemetryField) string { // Recursively parse tag fields func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemetry.TelemetryField, prefix string) { - localname := strings.Replace(field.Name, "-", "_", -1) + localname := strings.ReplaceAll(field.Name, "-", "_") name := localname if len(localname) == 0 { name = prefix @@ -391,54 +472,148 @@ func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemet } } +func (c *CiscoTelemetryMDT) parseRib(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, + encodingPath string, tags map[string]string, timestamp time.Time) { + // RIB + measurement := encodingPath + for _, subfield := range field.Fields { + //For Every table fill the keys which are vrfName, address and masklen + switch subfield.Name { + case "vrfName", "address", "maskLen": + tags[subfield.Name] = decodeTag(subfield) + } + if value := decodeValue(subfield); value != nil { + if err := grouper.Add(measurement, tags, timestamp, subfield.Name, value); err != nil { + c.Log.Errorf("adding field %q to group failed: %v", subfield.Name, err) + } + } + if subfield.Name != "nextHop" { + continue + } + //For next hop table fill the keys in the tag - which is address and vrfname + for _, subf := range subfield.Fields { + for _, ff := range subf.Fields { + switch ff.Name { + case "address", "vrfName": + key := "nextHop/" + ff.Name + tags[key] = decodeTag(ff) + } + if value := decodeValue(ff); value != nil { + name := "nextHop/" + ff.Name + if err := grouper.Add(measurement, tags, timestamp, name, value); err != nil { + c.Log.Errorf("adding field %q to group failed: %v", name, err) + } + } + } + } + } +} + +func (c *CiscoTelemetryMDT) parseClassAttributeField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, + encodingPath string, tags map[string]string, timestamp time.Time) { + // DME structure: https://developer.cisco.com/site/nxapi-dme-model-reference-api/ + var nxAttributes *telemetry.TelemetryField + isDme := strings.Contains(encodingPath, "sys/") + if encodingPath == "rib" { + //handle native data path rib + c.parseRib(grouper, field, encodingPath, tags, timestamp) + return + } + if field == nil || !isDme || len(field.Fields) == 0 || len(field.Fields[0].Fields) == 0 || len(field.Fields[0].Fields[0].Fields) == 0 { + return + } + + if field.Fields[0] != nil && field.Fields[0].Fields != nil && field.Fields[0].Fields[0] != nil && field.Fields[0].Fields[0].Fields[0].Name != "attributes" { + return + } + nxAttributes = field.Fields[0].Fields[0].Fields[0].Fields[0] + + for _, subfield := range nxAttributes.Fields { + if subfield.Name == "dn" { + tags["dn"] = decodeTag(subfield) + } else { + c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp) + } + } +} + func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, - path string, tags map[string]string, timestamp time.Time) { - name := strings.Replace(field.Name, "-", "_", -1) + encodingPath string, 
tags map[string]string, timestamp time.Time) { + name := strings.ReplaceAll(field.Name, "-", "_") + + if (name == "modTs" || name == "createTs") && decodeValue(field) == "never" { + return + } if len(name) == 0 { name = prefix } else if len(prefix) > 0 { name = prefix + "/" + name } - extraTags := c.extraTags[strings.Replace(path, "-", "_", -1)+"/"+name] + extraTags := c.extraTags[strings.ReplaceAll(encodingPath, "-", "_")+"/"+name] if value := decodeValue(field); value != nil { // Do alias lookup, to shorten measurement names - measurement := path - if alias, ok := c.aliases[path]; ok { + measurement := encodingPath + if alias, ok := c.internalAliases[encodingPath]; ok { measurement = alias } else { c.mutex.Lock() - if _, haveWarned := c.warned[path]; !haveWarned { - c.Log.Debugf("No measurement alias for encoding path: %s", path) - c.warned[path] = struct{}{} + if _, haveWarned := c.warned[encodingPath]; !haveWarned { + c.Log.Debugf("No measurement alias for encoding path: %s", encodingPath) + c.warned[encodingPath] = struct{}{} } c.mutex.Unlock() } - grouper.Add(measurement, tags, timestamp, name, value) + if val := c.nxosValueXform(field, value, encodingPath); val != nil { + if err := grouper.Add(measurement, tags, timestamp, name, val); err != nil { + c.Log.Errorf("adding field %q to group failed: %v", name, err) + } + } else { + if err := grouper.Add(measurement, tags, timestamp, name, value); err != nil { + c.Log.Errorf("adding field %q to group failed: %v", name, err) + } + } return } if len(extraTags) > 0 { for _, subfield := range field.Fields { if _, isExtraTag := extraTags[subfield.Name]; isExtraTag { - tags[name+"/"+strings.Replace(subfield.Name, "-", "_", -1)] = decodeTag(subfield) + tags[name+"/"+strings.ReplaceAll(subfield.Name, "-", "_")] = decodeTag(subfield) } } } var nxAttributes, nxChildren, nxRows *telemetry.TelemetryField - isNXOS := !strings.ContainsRune(path, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not + isNXOS := !strings.ContainsRune(encodingPath, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not + isEVENT := isNXOS && strings.Contains(encodingPath, "EVENT-LIST") + nxChildren = nil + nxAttributes = nil for _, subfield := range field.Fields { if isNXOS && subfield.Name == "attributes" && len(subfield.Fields) > 0 { nxAttributes = subfield.Fields[0] } else if isNXOS && subfield.Name == "children" && len(subfield.Fields) > 0 { + if !isEVENT { + nxChildren = subfield + } else { + sub := subfield.Fields + if len(sub) > 0 && sub[0] != nil && sub[0].Fields[0].Name == "subscriptionId" && len(sub[0].Fields) >= 2 { + nxAttributes = sub[0].Fields[1].Fields[0].Fields[0].Fields[0].Fields[0].Fields[0] + } + } + // if nxAttributes is nil, this is a class-based query. + if nxAttributes == nil { + // walk the children list and handle each class-based entry.
+ for _, sub := range subfield.Fields { + c.parseClassAttributeField(grouper, sub, encodingPath, tags, timestamp) + } + } } else if isNXOS && strings.HasPrefix(subfield.Name, "ROW_") { nxRows = subfield } else if _, isExtraTag := extraTags[subfield.Name]; !isExtraTag { // Regular telemetry decoding - c.parseContentField(grouper, subfield, name, path, tags, timestamp) + c.parseContentField(grouper, subfield, name, encodingPath, tags, timestamp) } } @@ -450,9 +625,16 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie for i, subfield := range row.Fields { if i == 0 { // First subfield contains the index, promote it from value to tag tags[prefix] = decodeTag(subfield) + //We can have subfield so recursively handle it. + if len(row.Fields) == 1 { + tags["row_number"] = strconv.FormatInt(int64(i), 10) + c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp) + } } else { - c.parseContentField(grouper, subfield, "", path, tags, timestamp) + c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp) } + // Nxapi we can't identify keys always from prefix + tags["row_number"] = strconv.FormatInt(int64(i), 10) } delete(tags, prefix) } @@ -480,14 +662,14 @@ func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, fie for _, subfield := range nxAttributes.Fields { if subfield.Name != "rn" { - c.parseContentField(grouper, subfield, "", path, tags, timestamp) + c.parseContentField(grouper, subfield, "", encodingPath, tags, timestamp) } } if nxChildren != nil { // This is a nested structure, children will inherit relative name keys of parent for _, subfield := range nxChildren.Fields { - c.parseContentField(grouper, subfield, prefix, path, tags, timestamp) + c.parseContentField(grouper, subfield, prefix, encodingPath, tags, timestamp) } } delete(tags, prefix) @@ -501,48 +683,16 @@ func (c *CiscoTelemetryMDT) Address() net.Addr { func (c *CiscoTelemetryMDT) Stop() { if c.grpcServer != nil { // Stop server and terminate all running dialout routines + //nolint:errcheck,revive // we cannot do anything if the stopping fails c.grpcServer.Stop() } if c.listener != nil { + //nolint:errcheck,revive // we cannot do anything if the closing fails c.listener.Close() } c.wg.Wait() } -const sampleConfig = ` - ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when - ## using the grpc transport. - transport = "grpc" - - ## Address and port to host telemetry listener - service_address = ":57000" - - ## Enable TLS; grpc transport only. - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Enable TLS client authentication and define allowed CA certificates; grpc - ## transport only. 
- # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags - # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] - - ## Define aliases to map telemetry encoding paths to simple measurement names - [inputs.cisco_telemetry_mdt.aliases] - ifstats = "ietf-interfaces:interfaces-state/interface/statistics" -` - -// SampleConfig of plugin -func (c *CiscoTelemetryMDT) SampleConfig() string { - return sampleConfig -} - -// Description of plugin -func (c *CiscoTelemetryMDT) Description() string { - return "Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms" -} - // Gather plugin measurements (unused) func (c *CiscoTelemetryMDT) Gather(_ telegraf.Accumulator) error { return nil diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go index ea200bc744a7d..90fc949276948 100644 --- a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -4,15 +4,17 @@ import ( "context" "encoding/binary" "errors" + "io" "net" "testing" dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" - telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" - "github.com/golang/protobuf/proto" - "github.com/influxdata/telegraf/testutil" + telemetryBis "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" "github.com/stretchr/testify/require" "google.golang.org/grpc" + "google.golang.org/protobuf/proto" + + "github.com/influxdata/telegraf/testutil" ) func TestHandleTelemetryTwoSimple(t *testing.T) { @@ -22,55 +24,55 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, }, { Name: "uint64", - ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 1234}, + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 1234}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "bool", - ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: true}, + ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: true}, }, }, }, }, }, { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: 
&telemetry.TelemetryField_StringValue{StringValue: "str2"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str2"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "bool", - ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false}, + ValueByType: &telemetryBis.TelemetryField_BoolValue{BoolValue: false}, }, }, }, @@ -78,7 +80,8 @@ func TestHandleTelemetryTwoSimple(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -99,26 +102,26 @@ func TestHandleTelemetrySingleNested(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/nested/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "nested", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "key", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "level", - ValueByType: &telemetry.TelemetryField_DoubleValue{DoubleValue: 3}, + ValueByType: &telemetryBis.TelemetryField_DoubleValue{DoubleValue: 3}, }, }, }, @@ -128,16 +131,16 @@ func TestHandleTelemetrySingleNested(t *testing.T) { }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "nested", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "value", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -149,7 +152,8 @@ func TestHandleTelemetrySingleNested(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -166,49 +170,49 @@ func TestHandleEmbeddedTags(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/extra", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: 
[]*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "list", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry1"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry1"}, }, { Name: "test", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, { Name: "list", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry2"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "entry2"}, }, { Name: "test", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -218,7 +222,8 @@ func TestHandleEmbeddedTags(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -238,57 +243,57 @@ func TestHandleNXAPI(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "show nxapi", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "TABLE_nxapi", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "ROW_nxapi", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "index", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "index", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i2"}, + 
ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i2"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, @@ -306,19 +311,179 @@ func TestHandleNXAPI(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) - tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "source": "hostname", "subscription": "subscription"} + tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "row_number": "0", "source": "hostname", "subscription": "subscription"} fields1 := map[string]interface{}{"value": "foo"} - tags2 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i2", "source": "hostname", "subscription": "subscription"} + tags2 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i2", "row_number": "0", "source": "hostname", "subscription": "subscription"} fields2 := map[string]interface{}{"value": "bar"} acc.AssertContainsTaggedFields(t, "nxapi", fields1, tags1) acc.AssertContainsTaggedFields(t, "nxapi", fields2, tags2) } +func TestHandleNXAPIXformNXAPI(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"nxapi": "show nxapi"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetryBis.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "show processes cpu", + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "keys", + Fields: []*telemetryBis.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "TABLE_process_cpu", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "ROW_process_cpu", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "index", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "i1"}, + }, + { + Name: "value", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, err := proto.Marshal(telemetry) + require.NoError(t, err) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "show processes cpu", "foo": "bar", "TABLE_process_cpu": "i1", "row_number": "0", "source": "hostname", "subscription": "subscription"} + fields1 := map[string]interface{}{"value": "foo"} + acc.AssertContainsTaggedFields(t, "show processes cpu", fields1, tags1) +} + +func TestHandleNXXformMulti(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/lldp"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetryBis.Telemetry{ + MsgTimestamp: 
1543236572000, + EncodingPath: "sys/lldp", + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "keys", + Fields: []*telemetryBis.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "fooEntity", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "attributes", + Fields: []*telemetryBis.TelemetryField{ + { + Fields: []*telemetryBis.TelemetryField{ + { + Name: "rn", + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"}, + }, + { + Name: "portIdV", + ValueByType: &telemetryBis.TelemetryField_Uint32Value{Uint32Value: 12}, + }, + { + Name: "portDesc", + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 100}, + }, + { + Name: "test", + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 281474976710655}, + }, + { + Name: "subscriptionId", + ValueByType: &telemetryBis.TelemetryField_Uint64Value{Uint64Value: 2814749767106551}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, err := proto.Marshal(telemetry) + require.NoError(t, err) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + //validate various transformation scenaarios newly added in the code. + fields := map[string]interface{}{"portIdV": "12", "portDesc": "100", "test": int64(281474976710655), "subscriptionId": "2814749767106551"} + acc.AssertContainsFields(t, "dme", fields) +} + func TestHandleNXDME(t *testing.T) { c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/dme"}} acc := &testutil.Accumulator{} @@ -326,45 +491,45 @@ func TestHandleNXDME(t *testing.T) { // error is expected since we are passing in dummy transport require.Error(t, err) - telemetry := &telemetry.Telemetry{ + telemetry := &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "sys/dme", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "foo", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "bar"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "fooEntity", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "attributes", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: 
[]*telemetryBis.TelemetryField{ { Name: "rn", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "some-rn"}, }, { Name: "value", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "foo"}, }, }, }, @@ -382,7 +547,8 @@ func TestHandleNXDME(t *testing.T) { }, }, } - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) c.handleTelemetry(data) require.Empty(t, acc.Errors) @@ -409,39 +575,40 @@ func TestTCPDialoutOverflow(t *testing.T) { addr := c.Address() conn, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) - binary.Write(conn, binary.BigEndian, hdr) - conn.Read([]byte{0}) - conn.Close() + require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Read([]byte{0}) + require.True(t, err == nil || err == io.EOF) + require.NoError(t, conn.Close()) c.Stop() require.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000")) } -func mockTelemetryMessage() *telemetry.Telemetry { - return &telemetry.Telemetry{ +func mockTelemetryMessage() *telemetryBis.Telemetry { + return &telemetryBis.Telemetry{ MsgTimestamp: 1543236572000, EncodingPath: "type:model/some/path", - NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, - Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, - DataGpbkv: []*telemetry.TelemetryField{ + NodeId: &telemetryBis.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetryBis.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetryBis.TelemetryField{ { - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "keys", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "name", - ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + ValueByType: &telemetryBis.TelemetryField_StringValue{StringValue: "str"}, }, }, }, { Name: "content", - Fields: []*telemetry.TelemetryField{ + Fields: []*telemetryBis.TelemetryField{ { Name: "value", - ValueByType: &telemetry.TelemetryField_Sint64Value{Sint64Value: -1}, + ValueByType: &telemetryBis.TelemetryField_Sint64Value{Sint64Value: -1}, }, }, }, @@ -472,32 +639,42 @@ func TestTCPDialoutMultiple(t *testing.T) { conn, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn, binary.BigEndian, hdr) - conn.Write(data) + require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Write(data) + require.NoError(t, err) conn2, err := net.Dial(addr.Network(), addr.String()) require.NoError(t, err) telemetry.EncodingPath = "type:model/parallel/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn2, binary.BigEndian, hdr) - conn2.Write(data) - conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) - conn2.Read([]byte{0}) - conn2.Close() + require.NoError(t, binary.Write(conn2, binary.BigEndian, hdr)) + _, err = conn2.Write(data) + require.NoError(t, err) + _, err = conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + require.NoError(t, err) + _, err = conn2.Read([]byte{0}) + require.True(t, err == nil 
|| err == io.EOF) + require.NoError(t, conn2.Close()) telemetry.EncodingPath = "type:model/other/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) hdr.MsgLen = uint32(len(data)) - binary.Write(conn, binary.BigEndian, hdr) - conn.Write(data) - conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) - conn.Read([]byte{0}) + require.NoError(t, binary.Write(conn, binary.BigEndian, hdr)) + _, err = conn.Write(data) + require.NoError(t, err) + _, err = conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + require.NoError(t, err) + _, err = conn.Read([]byte{0}) + require.True(t, err == nil || err == io.EOF) c.Stop() - conn.Close() + require.NoError(t, conn.Close()) // We use the invalid dialout flags to let the server close the connection require.Equal(t, acc.Errors, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}) @@ -522,15 +699,18 @@ func TestGRPCDialoutError(t *testing.T) { require.NoError(t, err) addr := c.Address() - conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure()) + conn, err := grpc.Dial(addr.String(), grpc.WithInsecure()) + require.NoError(t, err) client := dialout.NewGRPCMdtDialoutClient(conn) - stream, _ := client.MdtDialout(context.Background()) + stream, err := client.MdtDialout(context.Background()) + require.NoError(t, err) args := &dialout.MdtDialoutArgs{Errors: "foobar"} - stream.Send(args) + require.NoError(t, stream.Send(args)) // Wait for the server to close - stream.Recv() + _, err = stream.Recv() + require.True(t, err == nil || err == io.EOF) c.Stop() require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")}) @@ -545,35 +725,44 @@ func TestGRPCDialoutMultiple(t *testing.T) { telemetry := mockTelemetryMessage() addr := c.Address() - conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + require.NoError(t, err) client := dialout.NewGRPCMdtDialoutClient(conn) - stream, _ := client.MdtDialout(context.TODO()) + stream, err := client.MdtDialout(context.TODO()) + require.NoError(t, err) - data, _ := proto.Marshal(telemetry) + data, err := proto.Marshal(telemetry) + require.NoError(t, err) args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456} - stream.Send(args) + require.NoError(t, stream.Send(args)) - conn2, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + conn2, err := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + require.NoError(t, err) client2 := dialout.NewGRPCMdtDialoutClient(conn2) - stream2, _ := client2.MdtDialout(context.TODO()) + stream2, err := client2.MdtDialout(context.TODO()) + require.NoError(t, err) telemetry.EncodingPath = "type:model/parallel/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) args = &dialout.MdtDialoutArgs{Data: data} - stream2.Send(args) - stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) - stream2.Recv() - conn2.Close() + require.NoError(t, stream2.Send(args)) + require.NoError(t, stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})) + _, err = stream2.Recv() + require.True(t, err == nil || err == io.EOF) + require.NoError(t, conn2.Close()) telemetry.EncodingPath = "type:model/other/path" - data, _ = proto.Marshal(telemetry) + data, err = proto.Marshal(telemetry) + require.NoError(t, err) args = &dialout.MdtDialoutArgs{Data: data} - stream.Send(args) - stream.Send(&dialout.MdtDialoutArgs{Errors: 
"testclose"}) - stream.Recv() + require.NoError(t, stream.Send(args)) + require.NoError(t, stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"})) + _, err = stream.Recv() + require.True(t, err == nil || err == io.EOF) c.Stop() - conn.Close() + require.NoError(t, conn.Close()) require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}) @@ -588,5 +777,4 @@ func TestGRPCDialoutMultiple(t *testing.T) { tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"} fields = map[string]interface{}{"value": int64(-1)} acc.AssertContainsTaggedFields(t, "other", fields, tags) - } diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go new file mode 100644 index 0000000000000..1d7d95a95a757 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_util.go @@ -0,0 +1,876 @@ +package cisco_telemetry_mdt + +import ( + "strconv" + "strings" + + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" +) + +//xform Field to string +func xformValueString(field *telemetry.TelemetryField) string { + var str string + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if len(val.StringValue) > 0 { + return val.StringValue + } + case *telemetry.TelemetryField_Uint32Value: + str = strconv.FormatUint(uint64(val.Uint32Value), 10) + return str + case *telemetry.TelemetryField_Uint64Value: + str = strconv.FormatUint(val.Uint64Value, 10) + return str + case *telemetry.TelemetryField_Sint32Value: + str = strconv.FormatInt(int64(val.Sint32Value), 10) + return str + case *telemetry.TelemetryField_Sint64Value: + str = strconv.FormatInt(val.Sint64Value, 10) + return str + } + return "" +} + +//xform Uint64 to int64 +func nxosValueXformUint64Toint64(field *telemetry.TelemetryField, value interface{}) interface{} { + if field.GetUint64Value() != 0 { + return int64(value.(uint64)) + } + return nil +} + +//xform string to float +func nxosValueXformStringTofloat(field *telemetry.TelemetryField, _ interface{}) interface{} { + //convert property to float from string. 
+	vals := field.GetStringValue()
+	if vals != "" {
+		if valf, err := strconv.ParseFloat(vals, 64); err == nil {
+			return valf
+		}
+	}
+	return nil
+}
+
+//xform string to uint64
+func nxosValueXformStringToUint64(field *telemetry.TelemetryField, _ interface{}) interface{} {
+	//string to uint64
+	vals := field.GetStringValue()
+	if vals != "" {
+		if val64, err := strconv.ParseUint(vals, 10, 64); err == nil {
+			return val64
+		}
+	}
+	return nil
+}
+
+//xform string to int64
+func nxosValueXformStringToInt64(field *telemetry.TelemetryField, _ interface{}) interface{} {
+	//string to int64
+	vals := field.GetStringValue()
+	if vals != "" {
+		if val64, err := strconv.ParseInt(vals, 10, 64); err == nil {
+			return val64
+		}
+	}
+	return nil
+}
+
+//auto-xform float properties
+func nxosValueAutoXformFloatProp(field *telemetry.TelemetryField, _ interface{}) interface{} {
+	//check if we want auto xformation
+	vals := field.GetStringValue()
+	if vals != "" {
+		if valf, err := strconv.ParseFloat(vals, 64); err == nil {
+			return valf
+		}
+	}
+	return nil
+}
+
+//xform uint64 to string
+func nxosValueXformUint64ToString(field *telemetry.TelemetryField, _ interface{}) interface{} {
+	switch val := field.ValueByType.(type) {
+	case *telemetry.TelemetryField_StringValue:
+		if len(val.StringValue) > 0 {
+			return val.StringValue
+		}
+	case *telemetry.TelemetryField_Uint64Value:
+		return strconv.FormatUint(val.Uint64Value, 10)
+	}
+	return nil
+}
+
+//Xform value field.
+func (c *CiscoTelemetryMDT) nxosValueXform(field *telemetry.TelemetryField, value interface{}, path string) interface{} {
+	if strings.ContainsRune(path, ':') {
+		// not NXOS
+		return nil
+	}
+	if _, ok := c.propMap[field.Name]; ok {
+		return c.propMap[field.Name](field, value)
+	}
+	//check if we want auto xformation; key must match the one registered for
+	//the "auto-prop-xfrom" dmes option
+	if _, ok := c.propMap["auto-prop-xfrom"]; ok {
+		return c.propMap["auto-prop-xfrom"](field, value)
+	}
+	//Now check path based conversion.
+	//If mapping is found then do the required transformation.
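+	// Illustrative example: for path "show processes cpu", initCPU (below)
+	// registers "kernel_percent" as "float", so a string value such as "2.5"
+	// is parsed here and returned as float64(2.5).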
+ if c.nxpathMap[path] == nil { + return nil + } + switch c.nxpathMap[path][field.Name] { + //Xformation supported is only from String, Uint32 and Uint64 + case "integer": + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if vali, err := strconv.ParseInt(val.StringValue, 10, 32); err == nil { + return vali + } + case *telemetry.TelemetryField_Uint32Value: + vali, ok := value.(uint32) + if ok { + return vali + } + case *telemetry.TelemetryField_Uint64Value: + vali, ok := value.(uint64) + if ok { + return vali + } + } //switch + return nil + //Xformation supported is only from String + case "float": + //nolint:revive // switch needed for `.(type)` + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if valf, err := strconv.ParseFloat(val.StringValue, 64); err == nil { + return valf + } + } //switch + return nil + case "string": + return xformValueString(field) + case "int64": + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_StringValue: + if vali, err := strconv.ParseInt(val.StringValue, 10, 64); err == nil { + return vali + } + case *telemetry.TelemetryField_Uint64Value: + return int64(value.(uint64)) + } //switch + } //switch + return nil +} + +func (c *CiscoTelemetryMDT) initMemPhys() { + c.nxpathMap["show processes memory physical"] = map[string]string{"processname": "string"} +} + +func (c *CiscoTelemetryMDT) initBgpV4() { + key := "show bgp ipv4 unicast" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["aspath"] = "string" +} + +func (c *CiscoTelemetryMDT) initCPU() { + key := "show processes cpu" + c.nxpathMap[key] = make(map[string]string, 5) + c.nxpathMap[key]["kernel_percent"] = "float" + c.nxpathMap[key]["idle_percent"] = "float" + c.nxpathMap[key]["process"] = "string" + c.nxpathMap[key]["user_percent"] = "float" + c.nxpathMap[key]["onesec"] = "float" +} + +func (c *CiscoTelemetryMDT) initResources() { + key := "show system resources" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["cpu_state_user"] = "float" + c.nxpathMap[key]["kernel"] = "float" + c.nxpathMap[key]["current_memory_status"] = "string" + c.nxpathMap[key]["load_avg_15min"] = "float" + c.nxpathMap[key]["idle"] = "float" + c.nxpathMap[key]["load_avg_1min"] = "float" + c.nxpathMap[key]["user"] = "float" + c.nxpathMap[key]["cpu_state_idle"] = "float" + c.nxpathMap[key]["load_avg_5min"] = "float" + c.nxpathMap[key]["cpu_state_kernel"] = "float" +} + +func (c *CiscoTelemetryMDT) initPower() { + key := "show environment power" + c.nxpathMap[key] = make(map[string]string, 100) + c.nxpathMap[key]["reserve_sup"] = "string" + c.nxpathMap[key]["det_volt"] = "string" + c.nxpathMap[key]["heatsink_temp"] = "string" + c.nxpathMap[key]["det_pintot"] = "string" + c.nxpathMap[key]["det_iinb"] = "string" + c.nxpathMap[key]["ps_input_current"] = "string" + c.nxpathMap[key]["modnum"] = "string" + c.nxpathMap[key]["trayfannum"] = "string" + c.nxpathMap[key]["modstatus_3k"] = "string" + c.nxpathMap[key]["fan2rpm"] = "string" + c.nxpathMap[key]["amps_alloced"] = "string" + c.nxpathMap[key]["all_inlets_connected"] = "string" + c.nxpathMap[key]["tot_pow_out_actual_draw"] = "string" + c.nxpathMap[key]["ps_redun_op_mode"] = "string" + c.nxpathMap[key]["curtemp"] = "string" + c.nxpathMap[key]["mod_model"] = "string" + c.nxpathMap[key]["fanmodel"] = "string" + c.nxpathMap[key]["ps_output_current"] = "string" + c.nxpathMap[key]["majthres"] = "string" + c.nxpathMap[key]["input_type"] = "string" + 
c.nxpathMap[key]["allocated"] = "string" + c.nxpathMap[key]["fanhwver"] = "string" + c.nxpathMap[key]["clkhwver"] = "string" + c.nxpathMap[key]["fannum"] = "string" + c.nxpathMap[key]["watts_requested"] = "string" + c.nxpathMap[key]["cumulative_power"] = "string" + c.nxpathMap[key]["tot_gridB_capacity"] = "string" + c.nxpathMap[key]["pow_used_by_mods"] = "string" + c.nxpathMap[key]["tot_pow_alloc_budgeted"] = "string" + c.nxpathMap[key]["psumod"] = "string" + c.nxpathMap[key]["ps_status_3k"] = "string" + c.nxpathMap[key]["temptype"] = "string" + c.nxpathMap[key]["regval"] = "string" + c.nxpathMap[key]["inlet_temp"] = "string" + c.nxpathMap[key]["det_cord"] = "string" + c.nxpathMap[key]["reserve_fan"] = "string" + c.nxpathMap[key]["det_pina"] = "string" + c.nxpathMap[key]["minthres"] = "string" + c.nxpathMap[key]["actual_draw"] = "string" + c.nxpathMap[key]["sensor"] = "string" + c.nxpathMap[key]["zone"] = "string" + c.nxpathMap[key]["det_iin"] = "string" + c.nxpathMap[key]["det_iout"] = "string" + c.nxpathMap[key]["det_vin"] = "string" + c.nxpathMap[key]["fan1rpm"] = "string" + c.nxpathMap[key]["tot_gridA_capacity"] = "string" + c.nxpathMap[key]["fanperc"] = "string" + c.nxpathMap[key]["det_pout"] = "string" + c.nxpathMap[key]["alarm_str"] = "string" + c.nxpathMap[key]["zonespeed"] = "string" + c.nxpathMap[key]["det_total_cap"] = "string" + c.nxpathMap[key]["reserve_xbar"] = "string" + c.nxpathMap[key]["det_vout"] = "string" + c.nxpathMap[key]["watts_alloced"] = "string" + c.nxpathMap[key]["ps_in_power"] = "string" + c.nxpathMap[key]["tot_pow_input_actual_draw"] = "string" + c.nxpathMap[key]["ps_output_voltage"] = "string" + c.nxpathMap[key]["det_name"] = "string" + c.nxpathMap[key]["tempmod"] = "string" + c.nxpathMap[key]["clockname"] = "string" + c.nxpathMap[key]["fanname"] = "string" + c.nxpathMap[key]["regnumstr"] = "string" + c.nxpathMap[key]["bitnumstr"] = "string" + c.nxpathMap[key]["ps_slot"] = "string" + c.nxpathMap[key]["actual_out"] = "string" + c.nxpathMap[key]["ps_input_voltage"] = "string" + c.nxpathMap[key]["psmodel"] = "string" + c.nxpathMap[key]["speed"] = "string" + c.nxpathMap[key]["clkmodel"] = "string" + c.nxpathMap[key]["ps_redun_mode_3k"] = "string" + c.nxpathMap[key]["tot_pow_capacity"] = "string" + c.nxpathMap[key]["amps"] = "string" + c.nxpathMap[key]["available_pow"] = "string" + c.nxpathMap[key]["reserve_supxbarfan"] = "string" + c.nxpathMap[key]["watts"] = "string" + c.nxpathMap[key]["det_pinb"] = "string" + c.nxpathMap[key]["det_vinb"] = "string" + c.nxpathMap[key]["ps_state"] = "string" + c.nxpathMap[key]["det_sw_alarm"] = "string" + c.nxpathMap[key]["regnum"] = "string" + c.nxpathMap[key]["amps_requested"] = "string" + c.nxpathMap[key]["fanrpm"] = "string" + c.nxpathMap[key]["actual_input"] = "string" + c.nxpathMap[key]["outlet_temp"] = "string" + c.nxpathMap[key]["tot_capa"] = "string" +} + +func (c *CiscoTelemetryMDT) initPtpCorrection() { + key := "show ptp corrections" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["sup-time"] = "string" + c.nxpathMap[key]["correction-val"] = "int64" + c.nxpathMap[key]["ptp-header"] = "string" + c.nxpathMap[key]["intf-name"] = "string" + c.nxpathMap[key]["ptp-end"] = "string" +} + +func (c *CiscoTelemetryMDT) initTrans() { + key := "show interface transceiver details" + c.nxpathMap[key] = make(map[string]string, 100) + c.nxpathMap[key]["uncorrect_ber_alrm_hi"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_warn_lo"] = "string" + c.nxpathMap[key]["current_warn_lo"] = "float" + 
c.nxpathMap[key]["pre_fec_ber_max_alrm_hi"] = "string" + c.nxpathMap[key]["serialnum"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc_warn_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_max_warn_lo"] = "string" + c.nxpathMap[key]["laser_temp_warn_hi"] = "float" + c.nxpathMap[key]["type"] = "string" + c.nxpathMap[key]["rx_pwr_0"] = "float" + c.nxpathMap[key]["rx_pwr_warn_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_warn_hi"] = "string" + c.nxpathMap[key]["qsfp_or_cfp"] = "string" + c.nxpathMap[key]["protocol_type"] = "string" + c.nxpathMap[key]["uncorrect_ber"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_alrm_hi"] = "string" + c.nxpathMap[key]["tec_current"] = "float" + c.nxpathMap[key]["pre_fec_ber"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_warn_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_min"] = "string" + c.nxpathMap[key]["current_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_warn_lo"] = "string" + c.nxpathMap[key]["snr_warn_lo"] = "float" + c.nxpathMap[key]["rev"] = "string" + c.nxpathMap[key]["laser_temp_alrm_lo"] = "float" + c.nxpathMap[key]["current"] = "float" + c.nxpathMap[key]["rx_pwr_1"] = "float" + c.nxpathMap[key]["tec_current_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_warn_lo"] = "string" + c.nxpathMap[key]["cisco_part_number"] = "string" + c.nxpathMap[key]["uncorrect_ber_acc_warn_hi"] = "string" + c.nxpathMap[key]["temp_warn_hi"] = "float" + c.nxpathMap[key]["laser_freq_warn_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_max_alrm_lo"] = "string" + c.nxpathMap[key]["snr_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_alrm_lo"] = "string" + c.nxpathMap[key]["tx_pwr_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_min_warn_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_warn_hi"] = "string" + c.nxpathMap[key]["rx_pwr_alrm_hi"] = "float" + c.nxpathMap[key]["tec_current_warn_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_alrm_hi"] = "string" + c.nxpathMap[key]["rx_pwr_4"] = "float" + c.nxpathMap[key]["uncorrect_ber_cur"] = "string" + c.nxpathMap[key]["pre_fec_ber_alrm_hi"] = "string" + c.nxpathMap[key]["rx_pwr_warn_lo"] = "float" + c.nxpathMap[key]["bit_encoding"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc"] = "string" + c.nxpathMap[key]["sfp"] = "string" + c.nxpathMap[key]["pre_fec_ber_acc_alrm_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_min"] = "string" + c.nxpathMap[key]["current_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_max_alrm_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_warn_hi"] = "string" + c.nxpathMap[key]["current_alrm_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_acc_alrm_lo"] = "string" + c.nxpathMap[key]["snr_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc"] = "string" + c.nxpathMap[key]["tx_len"] = "string" + c.nxpathMap[key]["uncorrect_ber_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_alrm_lo"] = "string" + c.nxpathMap[key]["txcvr_type"] = "string" + c.nxpathMap[key]["tec_current_alrm_lo"] = "float" + c.nxpathMap[key]["volt_alrm_lo"] = "float" + c.nxpathMap[key]["temp_alrm_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_min_warn_lo"] = "string" + c.nxpathMap[key]["laser_freq"] = "float" + c.nxpathMap[key]["uncorrect_ber_min_warn_hi"] = "string" + c.nxpathMap[key]["uncorrect_ber_cur_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_max_warn_hi"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["fiber_type_byte0"] = "string" + c.nxpathMap[key]["laser_freq_alrm_lo"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_warn_hi"] = 
"string" + c.nxpathMap[key]["partnum"] = "string" + c.nxpathMap[key]["snr"] = "float" + c.nxpathMap[key]["volt_alrm_hi"] = "float" + c.nxpathMap[key]["connector_type"] = "string" + c.nxpathMap[key]["tx_medium"] = "string" + c.nxpathMap[key]["tx_pwr_warn_hi"] = "float" + c.nxpathMap[key]["cisco_vendor_id"] = "string" + c.nxpathMap[key]["cisco_ext_id"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_warn_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_max"] = "string" + c.nxpathMap[key]["uncorrect_ber_min_alrm_hi"] = "string" + c.nxpathMap[key]["pre_fec_ber_warn_hi"] = "string" + c.nxpathMap[key]["tx_pwr_alrm_lo"] = "float" + c.nxpathMap[key]["uncorrect_ber_warn_lo"] = "string" + c.nxpathMap[key]["10gbe_code"] = "string" + c.nxpathMap[key]["cable_type"] = "string" + c.nxpathMap[key]["laser_freq_alrm_hi"] = "float" + c.nxpathMap[key]["rx_pwr_3"] = "float" + c.nxpathMap[key]["rx_pwr"] = "float" + c.nxpathMap[key]["volt_warn_hi"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur_alrm_hi"] = "string" + c.nxpathMap[key]["temperature"] = "float" + c.nxpathMap[key]["voltage"] = "float" + c.nxpathMap[key]["tx_pwr"] = "float" + c.nxpathMap[key]["laser_temp_alrm_hi"] = "float" + c.nxpathMap[key]["tx_speeds"] = "string" + c.nxpathMap[key]["uncorrect_ber_min_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_alrm_hi"] = "string" + c.nxpathMap[key]["ciscoid"] = "string" + c.nxpathMap[key]["tx_pwr_warn_lo"] = "float" + c.nxpathMap[key]["cisco_product_id"] = "string" + c.nxpathMap[key]["info_not_available"] = "string" + c.nxpathMap[key]["laser_temp"] = "float" + c.nxpathMap[key]["pre_fec_ber_cur"] = "string" + c.nxpathMap[key]["fiber_type_byte1"] = "string" + c.nxpathMap[key]["tx_type"] = "string" + c.nxpathMap[key]["pre_fec_ber_min_alrm_lo"] = "string" + c.nxpathMap[key]["pre_fec_ber_warn_lo"] = "string" + c.nxpathMap[key]["temp_alrm_lo"] = "float" + c.nxpathMap[key]["volt_warn_lo"] = "float" + c.nxpathMap[key]["rx_pwr_alrm_lo"] = "float" + c.nxpathMap[key]["rx_pwr_2"] = "float" + c.nxpathMap[key]["tec_current_alrm_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_acc_alrm_lo"] = "string" + c.nxpathMap[key]["uncorrect_ber_max_alrm_hi"] = "string" + c.nxpathMap[key]["temp_warn_lo"] = "float" + c.nxpathMap[key]["snr_warn_hi"] = "float" + c.nxpathMap[key]["laser_temp_warn_lo"] = "float" + c.nxpathMap[key]["pre_fec_ber_acc_warn_hi"] = "string" + c.nxpathMap[key]["laser_freq_warn_hi"] = "float" + c.nxpathMap[key]["uncorrect_ber_max"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmp() { + key := "show ip igmp groups vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["group-type"] = "string" + c.nxpathMap[key]["translate"] = "string" + c.nxpathMap[key]["sourceaddress"] = "string" + c.nxpathMap[key]["vrf-cntxt"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["group-addr"] = "string" + c.nxpathMap[key]["uptime"] = "string" +} + +func (c *CiscoTelemetryMDT) initVrfAll() { + key := "show ip igmp interface vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["static-group-map"] = "string" + c.nxpathMap[key]["rll"] = "string" + c.nxpathMap[key]["host-proxy"] = "string" + c.nxpathMap[key]["il"] = "string" + c.nxpathMap[key]["join-group-map"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["host-proxy-group-map"] = "string" + c.nxpathMap[key]["next-query"] = "string" + c.nxpathMap[key]["q-ver"] = "string" + c.nxpathMap[key]["if-status"] = "string" + 
c.nxpathMap[key]["un-solicited"] = "string" + c.nxpathMap[key]["ip-sum"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoop() { + key := "show ip igmp snooping" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["repsup"] = "string" + c.nxpathMap[key]["omf_enabled"] = "string" + c.nxpathMap[key]["v3repsup"] = "string" + c.nxpathMap[key]["grepsup"] = "string" + c.nxpathMap[key]["lkupmode"] = "string" + c.nxpathMap[key]["description"] = "string" + c.nxpathMap[key]["vlinklocalgrpsup"] = "string" + c.nxpathMap[key]["gv3repsup"] = "string" + c.nxpathMap[key]["reportfloodall"] = "string" + c.nxpathMap[key]["leavegroupaddress"] = "string" + c.nxpathMap[key]["enabled"] = "string" + c.nxpathMap[key]["omf"] = "string" + c.nxpathMap[key]["sq"] = "string" + c.nxpathMap[key]["sqr"] = "string" + c.nxpathMap[key]["eht"] = "string" + c.nxpathMap[key]["fl"] = "string" + c.nxpathMap[key]["reportfloodenable"] = "string" + c.nxpathMap[key]["snoop-on"] = "string" + c.nxpathMap[key]["glinklocalgrpsup"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroups() { + key := "show ip igmp snooping groups" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroupDetails() { + key := "show ip igmp snooping groups detail" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initIgmpSnoopGroupsSumm() { + key := "show ip igmp snooping groups summary" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["src-uptime"] = "string" + c.nxpathMap[key]["source"] = "string" + c.nxpathMap[key]["dyn-if-name"] = "string" + c.nxpathMap[key]["raddr"] = "string" + c.nxpathMap[key]["old-host"] = "string" + c.nxpathMap[key]["snoop-enabled"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["omf-enabled"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["src-expires"] = "string" + c.nxpathMap[key]["addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initMrouter() { + key := "show ip igmp snooping mrouter" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["expires"] = "string" +} + +func (c *CiscoTelemetryMDT) initSnoopStats() { + key := "show ip igmp snooping statistics" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["ut"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimInterface() { + key := "show ip pim interface vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["if-is-border"] = "string" + c.nxpathMap[key]["cached_if_status"] = "string" + 
c.nxpathMap[key]["genid"] = "string" + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["last-cleared"] = "string" + c.nxpathMap[key]["is-pim-vpc-svi"] = "string" + c.nxpathMap[key]["if-addr"] = "string" + c.nxpathMap[key]["is-pim-enabled"] = "string" + c.nxpathMap[key]["pim-dr-address"] = "string" + c.nxpathMap[key]["hello-timer"] = "string" + c.nxpathMap[key]["pim-bfd-enabled"] = "string" + c.nxpathMap[key]["vpc-peer-nbr"] = "string" + c.nxpathMap[key]["nbr-policy-name"] = "string" + c.nxpathMap[key]["is-auto-enabled"] = "string" + c.nxpathMap[key]["if-status"] = "string" + c.nxpathMap[key]["jp-out-policy-name"] = "string" + c.nxpathMap[key]["if-addr-summary"] = "string" + c.nxpathMap[key]["if-dr"] = "string" + c.nxpathMap[key]["jp-in-policy-name"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimNeigh() { + key := "show ip pim neighbor vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["longest-hello-intvl"] = "string" + c.nxpathMap[key]["if-name"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["expires"] = "string" + c.nxpathMap[key]["bfd-state"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimRoute() { + key := "show ip pim route vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["rpf-nbr-1"] = "string" + c.nxpathMap[key]["rpf-nbr-addr"] = "string" + c.nxpathMap[key]["register"] = "string" + c.nxpathMap[key]["sgexpire"] = "string" + c.nxpathMap[key]["oif-bf-str"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["rp-addr"] = "string" + c.nxpathMap[key]["immediate-bf-str"] = "string" + c.nxpathMap[key]["sgr-prune-list-bf-str"] = "string" + c.nxpathMap[key]["context-name"] = "string" + c.nxpathMap[key]["intf-name"] = "string" + c.nxpathMap[key]["immediate-timeout-bf-str"] = "string" + c.nxpathMap[key]["rp-local"] = "string" + c.nxpathMap[key]["sgrexpire"] = "string" + c.nxpathMap[key]["timeout-bf-str"] = "string" + c.nxpathMap[key]["timeleft"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimRp() { + key := "show ip pim rp vrf all" + c.nxpathMap[key] = make(map[string]string, 20) + c.nxpathMap[key]["is-bsr-forward-only"] = "string" + c.nxpathMap[key]["is-rpaddr-local"] = "string" + c.nxpathMap[key]["bsr-expires"] = "string" + c.nxpathMap[key]["autorp-expire-time"] = "string" + c.nxpathMap[key]["rp-announce-policy-name"] = "string" + c.nxpathMap[key]["rp-cand-policy-name"] = "string" + c.nxpathMap[key]["is-autorp-forward-only"] = "string" + c.nxpathMap[key]["rp-uptime"] = "string" + c.nxpathMap[key]["rp-owner-flags"] = "string" + c.nxpathMap[key]["df-bits-recovered"] = "string" + c.nxpathMap[key]["bs-timer"] = "string" + c.nxpathMap[key]["rp-discovery-policy-name"] = "string" + c.nxpathMap[key]["arp-rp-addr"] = "string" + c.nxpathMap[key]["auto-rp-addr"] = "string" + c.nxpathMap[key]["autorp-expires"] = "string" + c.nxpathMap[key]["is-autorp-enabled"] = "string" + c.nxpathMap[key]["is-bsr-local"] = "string" + c.nxpathMap[key]["is-autorp-listen-only"] = "string" + c.nxpathMap[key]["autorp-dis-timer"] = "string" + c.nxpathMap[key]["bsr-rp-expires"] = "string" + c.nxpathMap[key]["static-rp-group-map"] = "string" + c.nxpathMap[key]["rp-source"] = "string" + c.nxpathMap[key]["autorp-cand-address"] = "string" + c.nxpathMap[key]["autorp-up-time"] = "string" + c.nxpathMap[key]["is-bsr-enabled"] = "string" + c.nxpathMap[key]["bsr-uptime"] = "string" + c.nxpathMap[key]["is-bsr-listen-only"] = "string" + c.nxpathMap[key]["rpf-nbr-address"] = "string" + 
c.nxpathMap[key]["is-rp-local"] = "string" + c.nxpathMap[key]["is-autorp-local"] = "string" + c.nxpathMap[key]["bsr-policy-name"] = "string" + c.nxpathMap[key]["grange-grp"] = "string" + c.nxpathMap[key]["rp-addr"] = "string" + c.nxpathMap[key]["anycast-rp-addr"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimStats() { + key := "show ip pim statistics vrf all" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["vrf-name"] = "string" +} + +func (c *CiscoTelemetryMDT) initIntfBrief() { + key := "show interface brief" + c.nxpathMap[key] = make(map[string]string, 2) + c.nxpathMap[key]["speed"] = "string" + c.nxpathMap[key]["vlan"] = "string" +} + +func (c *CiscoTelemetryMDT) initPimVrf() { + key := "show ip pim vrf all" + c.nxpathMap[key] = make(map[string]string, 1) + c.nxpathMap[key]["table-id"] = "string" +} + +func (c *CiscoTelemetryMDT) initIPMroute() { + key := "show ip mroute summary vrf all" + c.nxpathMap[key] = make(map[string]string, 40) + c.nxpathMap[key]["nat-mode"] = "string" + c.nxpathMap[key]["oif-name"] = "string" + c.nxpathMap[key]["nat-route-type"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["mofrr-nbr"] = "string" + c.nxpathMap[key]["extranet_addr"] = "string" + c.nxpathMap[key]["stale-route"] = "string" + c.nxpathMap[key]["pending"] = "string" + c.nxpathMap[key]["bidir"] = "string" + c.nxpathMap[key]["expry_timer"] = "string" + c.nxpathMap[key]["mofrr-iif"] = "string" + c.nxpathMap[key]["group_addrs"] = "string" + c.nxpathMap[key]["mpib-name"] = "string" + c.nxpathMap[key]["rpf"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["route-mdt-iod"] = "string" + c.nxpathMap[key]["sr-oif"] = "string" + c.nxpathMap[key]["stats-rate-buf"] = "string" + c.nxpathMap[key]["source_addr"] = "string" + c.nxpathMap[key]["route-iif"] = "string" + c.nxpathMap[key]["rpf-nbr"] = "string" + c.nxpathMap[key]["translated-route-src"] = "string" + c.nxpathMap[key]["group_addr"] = "string" + c.nxpathMap[key]["lisp-src-rloc"] = "string" + c.nxpathMap[key]["stats-pndg"] = "string" + c.nxpathMap[key]["rate_buf"] = "string" + c.nxpathMap[key]["extranet_vrf_name"] = "string" + c.nxpathMap[key]["fabric-interest"] = "string" + c.nxpathMap[key]["translated-route-grp"] = "string" + c.nxpathMap[key]["internal"] = "string" + c.nxpathMap[key]["oif-mpib-name"] = "string" + c.nxpathMap[key]["oif-uptime"] = "string" + c.nxpathMap[key]["omd-vpc-svi"] = "string" + c.nxpathMap[key]["source_addrs"] = "string" + c.nxpathMap[key]["stale-oif"] = "string" + c.nxpathMap[key]["core-interest"] = "string" + c.nxpathMap[key]["oif-list-bitfield"] = "string" +} + +func (c *CiscoTelemetryMDT) initIpv6Mroute() { + key := "show ipv6 mroute summary vrf all" + c.nxpathMap[key] = make(map[string]string, 40) + c.nxpathMap[key]["nat-mode"] = "string" + c.nxpathMap[key]["oif-name"] = "string" + c.nxpathMap[key]["nat-route-type"] = "string" + c.nxpathMap[key]["uptime"] = "string" + c.nxpathMap[key]["mofrr-nbr"] = "string" + c.nxpathMap[key]["extranet_addr"] = "string" + c.nxpathMap[key]["stale-route"] = "string" + c.nxpathMap[key]["pending"] = "string" + c.nxpathMap[key]["bidir"] = "string" + c.nxpathMap[key]["expry_timer"] = "string" + c.nxpathMap[key]["mofrr-iif"] = "string" + c.nxpathMap[key]["group_addrs"] = "string" + c.nxpathMap[key]["mpib-name"] = "string" + c.nxpathMap[key]["rpf"] = "string" + c.nxpathMap[key]["mcast-addrs"] = "string" + c.nxpathMap[key]["route-mdt-iod"] = "string" + c.nxpathMap[key]["sr-oif"] = "string" + c.nxpathMap[key]["stats-rate-buf"] = 
"string" + c.nxpathMap[key]["source_addr"] = "string" + c.nxpathMap[key]["route-iif"] = "string" + c.nxpathMap[key]["rpf-nbr"] = "string" + c.nxpathMap[key]["translated-route-src"] = "string" + c.nxpathMap[key]["group_addr"] = "string" + c.nxpathMap[key]["lisp-src-rloc"] = "string" + c.nxpathMap[key]["stats-pndg"] = "string" + c.nxpathMap[key]["rate_buf"] = "string" + c.nxpathMap[key]["extranet_vrf_name"] = "string" + c.nxpathMap[key]["fabric-interest"] = "string" + c.nxpathMap[key]["translated-route-grp"] = "string" + c.nxpathMap[key]["internal"] = "string" + c.nxpathMap[key]["oif-mpib-name"] = "string" + c.nxpathMap[key]["oif-uptime"] = "string" + c.nxpathMap[key]["omd-vpc-svi"] = "string" + c.nxpathMap[key]["source_addrs"] = "string" + c.nxpathMap[key]["stale-oif"] = "string" + c.nxpathMap[key]["core-interest"] = "string" + c.nxpathMap[key]["oif-list-bitfield"] = "string" +} + +func (c *CiscoTelemetryMDT) initVpc() { + key := "sys/vpc" + c.nxpathMap[key] = make(map[string]string, 5) + c.nxpathMap[key]["type2CompatQualStr"] = "string" + c.nxpathMap[key]["compatQualStr"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["issuFromVer"] = "string" + c.nxpathMap[key]["issuToVer"] = "string" +} + +func (c *CiscoTelemetryMDT) initBgp() { + key := "sys/bgp" + c.nxpathMap[key] = make(map[string]string, 18) + c.nxpathMap[key]["dynRtMap"] = "string" + c.nxpathMap[key]["nhRtMap"] = "string" + c.nxpathMap[key]["epePeerSet"] = "string" + c.nxpathMap[key]["asn"] = "string" + c.nxpathMap[key]["peerImp"] = "string" + c.nxpathMap[key]["wght"] = "string" + c.nxpathMap[key]["assocDom"] = "string" + c.nxpathMap[key]["tblMap"] = "string" + c.nxpathMap[key]["unSupprMap"] = "string" + c.nxpathMap[key]["sessionContImp"] = "string" + c.nxpathMap[key]["allocLblRtMap"] = "string" + c.nxpathMap[key]["defMetric"] = "string" + c.nxpathMap[key]["password"] = "string" + c.nxpathMap[key]["retainRttRtMap"] = "string" + c.nxpathMap[key]["clusterId"] = "string" + c.nxpathMap[key]["localAsn"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["defOrgRtMap"] = "string" +} + +func (c *CiscoTelemetryMDT) initCh() { + key := "sys/ch" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["fanName"] = "string" + c.nxpathMap[key]["typeCordConnected"] = "string" + c.nxpathMap[key]["vendor"] = "string" + c.nxpathMap[key]["model"] = "string" + c.nxpathMap[key]["rev"] = "string" + c.nxpathMap[key]["vdrId"] = "string" + c.nxpathMap[key]["hardwareAlarm"] = "string" + c.nxpathMap[key]["unit"] = "string" + c.nxpathMap[key]["hwVer"] = "string" +} + +func (c *CiscoTelemetryMDT) initIntf() { + key := "sys/intf" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["descr"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["lastStCause"] = "string" + c.nxpathMap[key]["description"] = "string" + c.nxpathMap[key]["unit"] = "string" + c.nxpathMap[key]["operFECMode"] = "string" + c.nxpathMap[key]["operBitset"] = "string" + c.nxpathMap[key]["mdix"] = "string" +} + +func (c *CiscoTelemetryMDT) initProcsys() { + key := "sys/procsys" + c.nxpathMap[key] = make(map[string]string, 10) + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["id"] = "string" + c.nxpathMap[key]["upTs"] = "string" + c.nxpathMap[key]["interval"] = "string" + c.nxpathMap[key]["memstatus"] = "string" +} + +func (c *CiscoTelemetryMDT) initProc() { + key := "sys/proc" + c.nxpathMap[key] = make(map[string]string, 2) + c.nxpathMap[key]["processName"] = "string" + c.nxpathMap[key]["procArg"] = 
"string" +} + +func (c *CiscoTelemetryMDT) initBfd() { + key := "sys/bfd/inst" + c.nxpathMap[key] = make(map[string]string, 4) + c.nxpathMap[key]["descr"] = "string" + c.nxpathMap[key]["vrfName"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["name"] = "string" +} + +func (c *CiscoTelemetryMDT) initLldp() { + key := "sys/lldp" + c.nxpathMap[key] = make(map[string]string, 7) + c.nxpathMap[key]["sysDesc"] = "string" + c.nxpathMap[key]["portDesc"] = "string" + c.nxpathMap[key]["portIdV"] = "string" + c.nxpathMap[key]["chassisIdV"] = "string" + c.nxpathMap[key]["sysName"] = "string" + c.nxpathMap[key]["name"] = "string" + c.nxpathMap[key]["id"] = "string" +} + +func (c *CiscoTelemetryMDT) initDb() { + c.nxpathMap = make(map[string]map[string]string, 200) + + c.initPower() + c.initMemPhys() + c.initBgpV4() + c.initCPU() + c.initResources() + c.initPtpCorrection() + c.initTrans() + c.initIgmp() + c.initVrfAll() + c.initIgmpSnoop() + c.initIgmpSnoopGroups() + c.initIgmpSnoopGroupDetails() + c.initIgmpSnoopGroupsSumm() + c.initMrouter() + c.initSnoopStats() + c.initPimInterface() + c.initPimNeigh() + c.initPimRoute() + c.initPimRp() + c.initPimStats() + c.initIntfBrief() + c.initPimVrf() + c.initIPMroute() + c.initIpv6Mroute() + c.initVpc() + c.initBgp() + c.initCh() + c.initIntf() + c.initProcsys() + c.initProc() + c.initBfd() + c.initLldp() +} diff --git a/plugins/inputs/cisco_telemetry_mdt/sample.conf b/plugins/inputs/cisco_telemetry_mdt/sample.conf new file mode 100644 index 0000000000000..601fd9e9a2516 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_mdt/sample.conf @@ -0,0 +1,40 @@ +# Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +[[inputs.cisco_telemetry_mdt]] + ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when + ## using the grpc transport. + transport = "grpc" + + ## Address and port to host telemetry listener + service_address = ":57000" + + ## Grpc Maximum Message Size, default is 4MB, increase the size. + max_msg_size = 4000000 + + ## Enable TLS; grpc transport only. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Enable TLS client authentication and define allowed CA certificates; grpc + ## transport only. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags + # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] + + ## Define aliases to map telemetry encoding paths to simple measurement names + [inputs.cisco_telemetry_mdt.aliases] + ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + ## Define Property Xformation, please refer README and https://pubhub.devnetcloud.com/media/dme-docs-9-3-3/docs/appendix/ for Model details. + [inputs.cisco_telemetry_mdt.dmes] +# Global Property Xformation. +# prop1 = "uint64 to int" +# prop2 = "uint64 to string" +# prop3 = "string to uint64" +# prop4 = "string to int64" +# prop5 = "string to float64" +# auto-prop-xfrom = "auto-float-xfrom" #Xform any property which is string, and has float number to type float64 +# Per Path property xformation, Name is telemetry configuration under sensor-group, path configuration "WORD Distinguished Name" +# Per Path configuration is better as it avoid property collision issue of types. 
+# dnpath = '{"Name": "show ip route summary","prop": [{"Key": "routes","Value": "string"}, {"Key": "best-paths","Value": "string"}]}' +# dnpath2 = '{"Name": "show processes cpu","prop": [{"Key": "kernel_percent","Value": "float"}, {"Key": "idle_percent","Value": "float"}, {"Key": "process","Value": "string"}, {"Key": "user_percent","Value": "float"}, {"Key": "onesec","Value": "float"}]}' +# dnpath3 = '{"Name": "show processes memory physical","prop": [{"Key": "processname","Value": "string"}]}' diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md index 9b9e6caa904f7..ada10d83c192e 100644 --- a/plugins/inputs/clickhouse/README.md +++ b/plugins/inputs/clickhouse/README.md @@ -1,9 +1,11 @@ # ClickHouse Input Plugin -This plugin gathers the statistic data from [ClickHouse](https://github.com/ClickHouse/ClickHouse) server. +This plugin gathers the statistic data from +[ClickHouse](https://github.com/ClickHouse/ClickHouse) server. -### Configuration -```toml +## Configuration + +```toml @sample.conf # Read metrics from one or many ClickHouse servers [[inputs.clickhouse]] ## Username for authorization on ClickHouse server @@ -71,7 +73,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic # insecure_skip_verify = false ``` -### Metrics +## Metrics - clickhouse_events - tags: @@ -81,7 +83,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - fields: - all rows from [system.events][] -+ clickhouse_metrics +- clickhouse_metrics - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -97,7 +99,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - fields: - all rows from [system.asynchronous_metrics][] -+ clickhouse_tables +- clickhouse_tables - tags: - source (ClickHouse server hostname) - table @@ -115,9 +117,9 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - root_nodes (count of node from [system.zookeeper][] where path=/) + - root_nodes (count of node from [system.zookeeper][] where path=/) -+ clickhouse_replication_queue +- clickhouse_replication_queue - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -132,8 +134,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - shard_num (Shard number in the cluster [optional]) - fields: - detached_parts (total detached parts for all tables and databases from [system.detached_parts][]) - -+ clickhouse_dictionaries + +- clickhouse_dictionaries - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -153,7 +155,7 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - failed - counter which show total failed mutations from first clickhouse-server run - completed - counter which show total successful finished mutations from first clickhouse-server run -+ clickhouse_disks +- clickhouse_disks - tags: - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) @@ -161,8 +163,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - name (disk name in storage configuration) - path (path to disk) - fields: - - free_space_percent - 0-100, gauge which show current percent of free disk space bytes relative to total disk space bytes - - keep_free_space_percent - 0-100, 
gauge which show current percent of required keep free disk bytes relative to total disk space bytes + - free_space_percent - 0-100, gauge which shows current percent of free disk space bytes relative to total disk space bytes + - keep_free_space_percent - 0-100, gauge which shows current percent of required keep free disk bytes relative to total disk space bytes - clickhouse_processes - tags: @@ -170,8 +172,8 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - fields: - - percentile_50 - float gauge which show 50% percentile (quantile 0.5) for `elapsed` field of running processes, see [system.processes][] for details - - percentile_90 - float gauge which show 90% percentile (quantile 0.9) for `elapsed` field of running processes, see [system.processes][] for details + - percentile_50 - float gauge which shows the 50% percentile (quantile 0.5) for the `elapsed` field of running processes, see [system.processes][] for details + - percentile_90 - float gauge which shows the 90% percentile (quantile 0.9) for the `elapsed` field of running processes, see [system.processes][] for details - longest_running - float gauge which show maximum value for `elapsed` field of running processes, see [system.processes][] for details - clickhouse_text_log - tags: @@ -179,13 +181,13 @@ This plugin gathers the statistic data from [ClickHouse](https://github.com/Clic - source (ClickHouse server hostname) - cluster (Name of the cluster [optional]) - shard_num (Shard number in the cluster [optional]) - - level (message level, only message with level less or equal Notice is collects), see details on [system.text_log][] + - level (message level; only messages with level less than or equal to Notice are collected), see details on [system.text_log][] - fields: - messages_last_10_min - gauge which show how many messages collected - -### Example Output -``` +## Example Output + +```text clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000 clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1
jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000 clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000 @@ -196,10 +198,10 @@ clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,hos [system.events]: https://clickhouse.tech/docs/en/operations/system-tables/events/ [system.metrics]: https://clickhouse.tech/docs/en/operations/system-tables/metrics/ [system.asynchronous_metrics]: https://clickhouse.tech/docs/en/operations/system-tables/asynchronous_metrics/ -[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/ +[system.zookeeper]: https://clickhouse.tech/docs/en/operations/system-tables/zookeeper/ [system.detached_parts]: https://clickhouse.tech/docs/en/operations/system-tables/detached_parts/ -[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/ -[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/ -[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/ -[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/ -[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/ +[system.dictionaries]: https://clickhouse.tech/docs/en/operations/system-tables/dictionaries/ +[system.mutations]: https://clickhouse.tech/docs/en/operations/system-tables/mutations/ +[system.disks]: https://clickhouse.tech/docs/en/operations/system-tables/disks/ +[system.processes]: https://clickhouse.tech/docs/en/operations/system-tables/processes/ +[system.text_log]: https://clickhouse.tech/docs/en/operations/system-tables/text_log/ diff --git 
a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go index 187ead5cf6790..d444d6035f0ba 100644 --- a/plugins/inputs/clickhouse/clickhouse.go +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -1,11 +1,12 @@ +//go:generate ../../../tools/readme_config_includer/generator package clickhouse import ( "bytes" + _ "embed" "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -14,78 +15,17 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) -var defaultTimeout = 5 * time.Second +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string -var sampleConfig = ` - ## Username for authorization on ClickHouse server - ## example: username = "default"" - username = "default" - - ## Password for authorization on ClickHouse server - ## example: password = "super_secret" - - ## HTTP(s) timeout while getting metrics values - ## The timeout includes connection time, any redirects, and reading the response body. - ## example: timeout = 1s - # timeout = 5s - - ## List of servers for metrics scraping - ## metrics scrape via HTTP(s) clickhouse interface - ## https://clickhouse.tech/docs/en/interfaces/http/ - ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] - servers = ["http://127.0.0.1:8123"] - - ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster - ## with using same "user:password" described in "user" and "password" parameters - ## and get this server hostname list from "system.clusters" table - ## see - ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters - ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers - ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ - ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables - ## example: auto_discovery = false - # auto_discovery = true - - ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" - ## when this filter present then "WHERE cluster IN (...)" filter will apply - ## please use only full cluster names here, regexp and glob filters is not allowed - ## for "/etc/clickhouse-server/config.d/remote.xml" - ## - ## - ## - ## - ## clickhouse-ru-1.local9000 - ## clickhouse-ru-2.local9000 - ## - ## - ## clickhouse-eu-1.local9000 - ## clickhouse-eu-2.local9000 - ## - ## - ## - ## - ## - ## - ## example: cluster_include = ["my-own-cluster"] - # cluster_include = [] - - ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" - ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply - ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] - # cluster_exclude = [] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` +var defaultTimeout = 5 * time.Second type connect struct { Cluster string `json:"cluster"` @@ -101,39 +41,33 @@ func init() { ClientConfig: tls.ClientConfig{ InsecureSkipVerify: false, }, - Timeout: internal.Duration{Duration: defaultTimeout}, + Timeout: 
config.Duration(defaultTimeout), } }) } // ClickHouse Telegraf Input Plugin type ClickHouse struct { - Username string `toml:"username"` - Password string `toml:"password"` - Servers []string `toml:"servers"` - AutoDiscovery bool `toml:"auto_discovery"` - ClusterInclude []string `toml:"cluster_include"` - ClusterExclude []string `toml:"cluster_exclude"` - Timeout internal.Duration `toml:"timeout"` + Username string `toml:"username"` + Password string `toml:"password"` + Servers []string `toml:"servers"` + AutoDiscovery bool `toml:"auto_discovery"` + ClusterInclude []string `toml:"cluster_include"` + ClusterExclude []string `toml:"cluster_exclude"` + Timeout config.Duration `toml:"timeout"` HTTPClient http.Client tls.ClientConfig } -// SampleConfig returns the sample config func (*ClickHouse) SampleConfig() string { return sampleConfig } -// Description return plugin description -func (*ClickHouse) Description() string { - return "Read metrics from one or many ClickHouse servers" -} - // Start ClickHouse input service func (ch *ClickHouse) Start(telegraf.Accumulator) error { timeout := defaultTimeout - if ch.Timeout.Duration != 0 { - timeout = ch.Timeout.Duration + if time.Duration(ch.Timeout) != 0 { + timeout = time.Duration(ch.Timeout) } tlsCfg, err := ch.ClientConfig.TLSConfig() if err != nil { @@ -195,7 +129,6 @@ func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) { } for _, conn := range connects { - metricsFuncs := []func(acc telegraf.Accumulator, conn *connect) error{ ch.tables, ch.zookeeper, @@ -212,7 +145,6 @@ func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) { if err := metricFunc(acc, &conn); err != nil { acc.AddError(err) } - } for metric := range commonMetrics { @@ -262,21 +194,34 @@ func (ch *ClickHouse) clusterIncludeExcludeFilter() string { } func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, metric string) error { - var result []struct { + var intResult []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` } - if err := ch.execQuery(conn.url, commonMetrics[metric], &result); err != nil { - return err + + var floatResult []struct { + Metric string `json:"metric"` + Value float64 `json:"value"` } tags := ch.makeDefaultTags(conn) - fields := make(map[string]interface{}) - for _, r := range result { - fields[internal.SnakeCase(r.Metric)] = uint64(r.Value) - } + if commonMetricsIsFloat[metric] { + if err := ch.execQuery(conn.url, commonMetrics[metric], &floatResult); err != nil { + return err + } + for _, r := range floatResult { + fields[internal.SnakeCase(r.Metric)] = r.Value + } + } else { + if err := ch.execQuery(conn.url, commonMetrics[metric], &intResult); err != nil { + return err + } + for _, r := range intResult { + fields[internal.SnakeCase(r.Metric)] = uint64(r.Value) + } + } acc.AddFields("clickhouse_"+metric, fields, tags) return nil @@ -342,7 +287,6 @@ func (ch *ClickHouse) replicationQueue(acc telegraf.Accumulator, conn *connect) } func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) error { - var detachedParts []struct { DetachedParts chUInt64 `json:"detached_parts"` } @@ -363,7 +307,6 @@ func (ch *ClickHouse) detachedParts(acc telegraf.Accumulator, conn *connect) err } func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) error { - var brokenDictionaries []struct { Origin string `json:"origin"` BytesAllocated chUInt64 `json:"bytes_allocated"` @@ -397,7 +340,6 @@ func (ch *ClickHouse) dictionaries(acc telegraf.Accumulator, conn *connect) erro } 
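The reworked commonMetrics above decodes the same query result into either unsigned-integer or float fields, keyed off the commonMetricsIsFloat map added later in this file. A standalone sketch of that branching pattern, with names abbreviated and simplified from the actual plugin code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// isFloat mirrors the role of commonMetricsIsFloat: a per-metric choice of field type.
var isFloat = map[string]bool{
	"events":               false,
	"asynchronous_metrics": true,
}

// decode unmarshals result rows as float64 or uint64 depending on the metric kind.
func decode(metric string, payload []byte) (map[string]interface{}, error) {
	fields := make(map[string]interface{})
	if isFloat[metric] {
		var rows []struct {
			Metric string  `json:"metric"`
			Value  float64 `json:"value"`
		}
		if err := json.Unmarshal(payload, &rows); err != nil {
			return nil, err
		}
		for _, r := range rows {
			fields[r.Metric] = r.Value
		}
		return fields, nil
	}
	var rows []struct {
		Metric string `json:"metric"`
		Value  uint64 `json:"value"`
	}
	if err := json.Unmarshal(payload, &rows); err != nil {
		return nil, err
	}
	for _, r := range rows {
		fields[r.Metric] = r.Value
	}
	return fields, nil
}

func main() {
	fields, err := decode("asynchronous_metrics", []byte(`[{"metric":"Uptime","value":158}]`))
	if err != nil {
		panic(err)
	}
	fmt.Println(fields) // map[Uptime:158]
}
```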
func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error { - var mutationsStatus []struct { Failed chUInt64 `json:"failed"` Running chUInt64 `json:"running"` @@ -424,7 +366,6 @@ func (ch *ClickHouse) mutations(acc telegraf.Accumulator, conn *connect) error { } func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error { - var disksStatus []struct { Name string `json:"name"` Path string `json:"path"` @@ -448,14 +389,12 @@ func (ch *ClickHouse) disks(acc telegraf.Accumulator, conn *connect) error { }, tags, ) - } return nil } func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error { - var processesStats []struct { QueryType string `json:"query_type"` Percentile50 float64 `json:"p50"` @@ -479,7 +418,6 @@ func (ch *ClickHouse) processes(acc telegraf.Accumulator, conn *connect) error { }, tags, ) - } return nil @@ -568,11 +506,11 @@ func (e *clickhouseError) Error() string { return fmt.Sprintf("received error code %d: %s", e.StatusCode, e.body) } -func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error { - q := url.Query() +func (ch *ClickHouse) execQuery(address *url.URL, query string, i interface{}) error { + q := address.Query() q.Set("query", query+" FORMAT JSON") - url.RawQuery = q.Encode() - req, _ := http.NewRequest("GET", url.String(), nil) + address.RawQuery = q.Encode() + req, _ := http.NewRequest("GET", address.String(), nil) if ch.Username != "" { req.Header.Add("X-ClickHouse-User", ch.Username) } @@ -583,9 +521,9 @@ func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error if err != nil { return err } - defer resp.Body.Close() + defer func() { _ = resp.Body.Close() }() if resp.StatusCode >= 300 { - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return &clickhouseError{ StatusCode: resp.StatusCode, body: body, @@ -601,7 +539,7 @@ func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error return err } - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + if _, err := io.Copy(io.Discard, resp.Body); err != nil { return err } return nil @@ -622,9 +560,9 @@ func (i *chUInt64) UnmarshalJSON(b []byte) error { } const ( - systemEventsSQL = "SELECT event AS metric, CAST(value AS UInt64) AS value FROM system.events" - systemMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.metrics" - systemAsyncMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.asynchronous_metrics" + systemEventsSQL = "SELECT event AS metric, toUInt64(value) AS value FROM system.events" + systemMetricsSQL = "SELECT metric, toUInt64(value) AS value FROM system.metrics" + systemAsyncMetricsSQL = "SELECT metric, toFloat64(value) AS value FROM system.asynchronous_metrics" systemPartsSQL = ` SELECT database, @@ -643,18 +581,18 @@ const ( systemZookeeperRootNodesSQL = "SELECT count() AS zk_root_nodes FROM system.zookeeper WHERE path='/'" systemReplicationExistsSQL = "SELECT count() AS replication_queue_exists FROM system.tables WHERE database='system' AND name='replication_queue'" - systemReplicationNumTriesSQL = "SELECT countIf(num_tries>1) AS replication_num_tries_replicas, countIf(num_tries>100) AS replication_too_many_tries_replicas FROM system.replication_queue" + systemReplicationNumTriesSQL = "SELECT countIf(num_tries>1) AS replication_num_tries_replicas, countIf(num_tries>100) AS replication_too_many_tries_replicas FROM system.replication_queue SETTINGS 
empty_result_for_aggregation_by_empty_set=0" - systemDetachedPartsSQL = "SELECT count() AS detached_parts FROM system.detached_parts" + systemDetachedPartsSQL = "SELECT count() AS detached_parts FROM system.detached_parts SETTINGS empty_result_for_aggregation_by_empty_set=0" systemDictionariesSQL = "SELECT origin, status, bytes_allocated FROM system.dictionaries" - systemMutationSQL = "SELECT countIf(latest_fail_time>toDateTime('0000-00-00 00:00:00') AND is_done=0) AS failed, countIf(latest_fail_time=toDateTime('0000-00-00 00:00:00') AND is_done=0) AS running, countIf(is_done=1) AS completed FROM system.mutations" + systemMutationSQL = "SELECT countIf(latest_fail_time>toDateTime('0000-00-00 00:00:00') AND is_done=0) AS failed, countIf(latest_fail_time=toDateTime('0000-00-00 00:00:00') AND is_done=0) AS running, countIf(is_done=1) AS completed FROM system.mutations SETTINGS empty_result_for_aggregation_by_empty_set=0" systemDisksSQL = "SELECT name, path, toUInt64(100*free_space / total_space) AS free_space_percent, toUInt64( 100 * keep_free_space / total_space) AS keep_free_space_percent FROM system.disks" - systemProcessesSQL = "SELECT multiIf(positionCaseInsensitive(query,'select')=1,'select',positionCaseInsensitive(query,'insert')=1,'insert','other') AS query_type, quantile\n(0.5)(elapsed) AS p50, quantile(0.9)(elapsed) AS p90, max(elapsed) AS longest_running FROM system.processes GROUP BY query_type" + systemProcessesSQL = "SELECT multiIf(positionCaseInsensitive(query,'select')=1,'select',positionCaseInsensitive(query,'insert')=1,'insert','other') AS query_type, quantile\n(0.5)(elapsed) AS p50, quantile(0.9)(elapsed) AS p90, max(elapsed) AS longest_running FROM system.processes GROUP BY query_type SETTINGS empty_result_for_aggregation_by_empty_set=0" systemTextLogExistsSQL = "SELECT count() AS text_log_exists FROM system.tables WHERE database='system' AND name='text_log'" - systemTextLogSQL = "SELECT count() AS messages_last_10_min, level FROM system.text_log WHERE level <= 'Notice' AND event_time >= now() - INTERVAL 600 SECOND GROUP BY level" + systemTextLogSQL = "SELECT count() AS messages_last_10_min, level FROM system.text_log WHERE level <= 'Notice' AND event_time >= now() - INTERVAL 600 SECOND GROUP BY level SETTINGS empty_result_for_aggregation_by_empty_set=0" ) var commonMetrics = map[string]string{ @@ -663,4 +601,10 @@ var commonMetrics = map[string]string{ "asynchronous_metrics": systemAsyncMetricsSQL, } +var commonMetricsIsFloat = map[string]bool{ + "events": false, + "metrics": false, + "asynchronous_metrics": true, +} + var _ telegraf.ServiceInput = &ClickHouse{} diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go index 68a4438442d12..b342e6872c37c 100644 --- a/plugins/inputs/clickhouse/clickhouse_test.go +++ b/plugins/inputs/clickhouse/clickhouse_test.go @@ -8,28 +8,28 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) func TestClusterIncludeExcludeFilter(t *testing.T) { ch := ClickHouse{} - if assert.Equal(t, "", ch.clusterIncludeExcludeFilter()) { - ch.ClusterExclude = []string{"test_cluster"} - assert.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + require.Equal(t, "", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{"test_cluster"} + require.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = 
[]string{"test_cluster"} - ch.ClusterInclude = []string{"cluster"} - assert.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{"test_cluster"} + ch.ClusterInclude = []string{"cluster"} + require.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = []string{} - ch.ClusterInclude = []string{"cluster1", "cluster2"} - assert.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) + ch.ClusterExclude = []string{} + ch.ClusterInclude = []string{"cluster1", "cluster2"} + require.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) - ch.ClusterExclude = []string{"cluster1", "cluster2"} - ch.ClusterInclude = []string{} - assert.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) - } + ch.ClusterExclude = []string{"cluster1", "cluster2"} + ch.ClusterInclude = []string{} + require.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) } func TestChInt64(t *testing.T) { @@ -42,9 +42,9 @@ func TestChInt64(t *testing.T) { } for src, expected := range assets { var v chUInt64 - if err := v.UnmarshalJSON([]byte(src)); assert.NoError(t, err) { - assert.Equal(t, expected, uint64(v)) - } + err := v.UnmarshalJSON([]byte(src)) + require.NoError(t, err) + require.Equal(t, expected, uint64(v)) } } @@ -57,7 +57,7 @@ func TestGather(t *testing.T) { enc := json.NewEncoder(w) switch query := r.URL.Query().Get("query"); { case strings.Contains(query, "system.parts"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Database string `json:"database"` Table string `json:"table"` @@ -74,8 +74,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.events"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -90,8 +91,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.metrics"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -106,8 +108,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.asynchronous_metrics"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Metric string `json:"metric"` Value chUInt64 `json:"value"` @@ -122,8 +125,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "zk_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkExists chUInt64 `json:"zk_exists"` }{ @@ -132,8 +136,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "zk_root_nodes"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkRootNodes chUInt64 `json:"zk_root_nodes"` }{ @@ -142,8 +147,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "replication_queue_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ReplicationQueueExists chUInt64 `json:"replication_queue_exists"` }{ @@ -152,8 +158,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "replication_too_many_tries_replicas"): - enc.Encode(result{ + err := 
enc.Encode(result{ Data: []struct { TooManyTriesReplicas chUInt64 `json:"replication_too_many_tries_replicas"` NumTriesReplicas chUInt64 `json:"replication_num_tries_replicas"` @@ -164,8 +171,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.detached_parts"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { DetachedParts chUInt64 `json:"detached_parts"` }{ @@ -174,8 +182,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.dictionaries"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Origin string `json:"origin"` Status string `json:"status"` @@ -188,8 +197,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.mutations"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Failed chUInt64 `json:"failed"` Completed chUInt64 `json:"completed"` @@ -202,8 +212,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.disks"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Name string `json:"name"` Path string `json:"path"` @@ -218,8 +229,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.processes"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { QueryType string `json:"query_type"` Percentile50 float64 `json:"p50"` @@ -246,8 +258,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "text_log_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { TextLogExists chUInt64 `json:"text_log_exists"` }{ @@ -256,8 +269,9 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "system.text_log"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { Level string `json:"level"` LastMessagesLast10Min chUInt64 `json:"messages_last_10_min"` @@ -284,6 +298,7 @@ func TestGather(t *testing.T) { }, }, }) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -294,7 +309,7 @@ func TestGather(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + require.NoError(t, ch.Gather(acc)) acc.AssertContainsTaggedFields(t, "clickhouse_tables", map[string]interface{}{ @@ -322,8 +337,8 @@ func TestGather(t *testing.T) { ) acc.AssertContainsFields(t, "clickhouse_asynchronous_metrics", map[string]interface{}{ - "test_system_asynchronous_metric": uint64(1000), - "test_system_asynchronous_metric2": uint64(2000), + "test_system_asynchronous_metric": float64(1000), + "test_system_asynchronous_metric2": float64(2000), }, ) acc.AssertContainsFields(t, "clickhouse_zookeeper", @@ -427,7 +442,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { enc := json.NewEncoder(w) switch query := r.URL.Query().Get("query"); { case strings.Contains(query, "zk_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ZkExists chUInt64 `json:"zk_exists"` }{ @@ -436,8 +451,9 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "replication_queue_exists"): - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { ReplicationQueueExists chUInt64 `json:"replication_queue_exists"` }{ @@ -446,8 +462,9 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + require.NoError(t, err) case strings.Contains(query, "text_log_exists"): - 
enc.Encode(result{ + err := enc.Encode(result{ Data: []struct { TextLogExists chUInt64 `json:"text_log_exists"` }{ @@ -456,6 +473,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { }, }, }) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -467,7 +485,7 @@ func TestGatherWithSomeTablesNotExists(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + require.NoError(t, ch.Gather(acc)) acc.AssertDoesNotContainMeasurement(t, "clickhouse_zookeeper") acc.AssertDoesNotContainMeasurement(t, "clickhouse_replication_queue") @@ -482,9 +500,10 @@ func TestWrongJSONMarshalling(t *testing.T) { } enc := json.NewEncoder(w) //wrong data section json - enc.Encode(result{ + err := enc.Encode(result{ Data: []struct{}{}, }) + require.NoError(t, err) })) ch = &ClickHouse{ Servers: []string{ @@ -495,9 +514,9 @@ func TestWrongJSONMarshalling(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) + require.NoError(t, ch.Gather(acc)) - assert.Equal(t, 0, len(acc.Metrics)) + require.Equal(t, 0, len(acc.Metrics)) allMeasurements := []string{ "clickhouse_events", "clickhouse_metrics", @@ -512,7 +531,7 @@ func TestWrongJSONMarshalling(t *testing.T) { "clickhouse_processes", "clickhouse_text_log", } - assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) + require.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) } func TestOfflineServer(t *testing.T) { @@ -528,9 +547,9 @@ func TestOfflineServer(t *testing.T) { }, } ) - ch.Gather(acc) + require.NoError(t, ch.Gather(acc)) - assert.Equal(t, 0, len(acc.Metrics)) + require.Equal(t, 0, len(acc.Metrics)) allMeasurements := []string{ "clickhouse_events", "clickhouse_metrics", @@ -545,7 +564,7 @@ func TestOfflineServer(t *testing.T) { "clickhouse_processes", "clickhouse_text_log", } - assert.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) + require.GreaterOrEqual(t, len(allMeasurements), len(acc.Errors)) } func TestAutoDiscovery(t *testing.T) { @@ -555,9 +574,9 @@ func TestAutoDiscovery(t *testing.T) { Data interface{} `json:"data"` } enc := json.NewEncoder(w) - switch query := r.URL.Query().Get("query"); { - case strings.Contains(query, "system.clusters"): - enc.Encode(result{ + query := r.URL.Query().Get("query") + if strings.Contains(query, "system.clusters") { + err := enc.Encode(result{ Data: []struct { Cluster string `json:"test"` Hostname string `json:"localhost"` @@ -570,6 +589,7 @@ func TestAutoDiscovery(t *testing.T) { }, }, }) + require.NoError(t, err) } })) ch = &ClickHouse{ @@ -582,6 +602,5 @@ func TestAutoDiscovery(t *testing.T) { acc = &testutil.Accumulator{} ) defer ts.Close() - ch.Gather(acc) - + require.NoError(t, ch.Gather(acc)) } diff --git a/plugins/inputs/clickhouse/dev/docker-compose.yml b/plugins/inputs/clickhouse/dev/docker-compose.yml index c34ee9320d931..22fb2b2d94295 100644 --- a/plugins/inputs/clickhouse/dev/docker-compose.yml +++ b/plugins/inputs/clickhouse/dev/docker-compose.yml @@ -5,16 +5,19 @@ services: # choose `:latest` after resolve https://github.com/ClickHouse/ClickHouse/issues/13057 image: docker.io/yandex/clickhouse-server:${CLICKHOUSE_VERSION:-latest} volumes: + - ./init_schema.sql:/docker-entrypoint-initdb.d/init_schema.sql - ./test_dictionary.xml:/etc/clickhouse-server/01-test_dictionary.xml - ./zookeeper.xml:/etc/clickhouse-server/config.d/00-zookeeper.xml - ./tls_settings.xml:/etc/clickhouse-server/config.d/01-tls_settings.xml # please comment text_log.xml when CLICKHOUSE_VERSION = 19.16 - 
./text_log.xml:/etc/clickhouse-server/config.d/02-text_log.xml - ./part_log.xml:/etc/clickhouse-server/config.d/03-part_log.xml + - ./mysql_port.xml:/etc/clickhouse-server/config.d/04-mysql_port.xml - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem - ../../../../testutil/pki/serverkey.pem:/etc/clickhouse-server/server.key - ../../../../testutil/pki/servercert.pem:/etc/clickhouse-server/server.crt ports: + - 3306:3306 - 8123:8123 - 8443:8443 - 9000:9000 diff --git a/plugins/inputs/clickhouse/dev/init_schema.sql b/plugins/inputs/clickhouse/dev/init_schema.sql new file mode 100644 index 0000000000000..85cd2e3a0d552 --- /dev/null +++ b/plugins/inputs/clickhouse/dev/init_schema.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS default.test; +CREATE TABLE default.test( + Nom String, + Code Nullable(String) DEFAULT Null, + Cur Nullable(String) DEFAULT Null +) ENGINE=MergeTree() ORDER BY tuple(); diff --git a/plugins/inputs/clickhouse/dev/mysql_port.xml b/plugins/inputs/clickhouse/dev/mysql_port.xml new file mode 100644 index 0000000000000..275ec42bba2ae --- /dev/null +++ b/plugins/inputs/clickhouse/dev/mysql_port.xml @@ -0,0 +1,3 @@ +<yandex> + <mysql_port>3306</mysql_port> +</yandex> diff --git a/plugins/inputs/clickhouse/dev/test_dictionary.xml b/plugins/inputs/clickhouse/dev/test_dictionary.xml index 2f8f1ae5e26c5..b7472001452cb 100644 --- a/plugins/inputs/clickhouse/dev/test_dictionary.xml +++ b/plugins/inputs/clickhouse/dev/test_dictionary.xml @@ -1,11 +1,11 @@ - - Nom - - - + Nom String + - - Code - String - - - Cur - String - + + + + Code + String + + + + Cur + String + @@ -40,8 +40,8 @@ LIFETIME(MIN 300 MAX 600); 3306 - wrong - wrong + default + 127.0.0.1 1 @@ -56,8 +56,7 @@ LIFETIME(MIN 300 MAX 600); - - - + + 300 diff --git a/plugins/inputs/clickhouse/sample.conf b/plugins/inputs/clickhouse/sample.conf new file mode 100644 index 0000000000000..e5b5f08f1097f --- /dev/null +++ b/plugins/inputs/clickhouse/sample.conf @@ -0,0 +1,65 @@ +# Read metrics from one or many ClickHouse servers +[[inputs.clickhouse]] + ## Username for authorization on ClickHouse server + ## example: username = "default" + username = "default" + + ## Password for authorization on ClickHouse server + ## example: password = "super_secret" + + ## HTTP(s) timeout while getting metrics values + ## The timeout includes connection time, any redirects, and reading the response body.
+ ## example: timeout = 1s + # timeout = 5s + + ## List of servers for metrics scraping + ## metrics are scraped via the HTTP(s) clickhouse interface + ## https://clickhouse.tech/docs/en/interfaces/http/ + ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] + servers = ["http://127.0.0.1:8123"] + + ## If "auto_discovery" is "true", the plugin tries to connect to all servers available in the cluster + ## using the same "user:password" described in the "username" and "password" parameters, + ## and gets the server hostname list from the "system.clusters" table + ## see + ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters + ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers + ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ + ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables + ## example: auto_discovery = false + # auto_discovery = true + + ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" + ## when this filter is present, a "WHERE cluster IN (...)" filter will be applied + ## please use only full cluster names here; regexp and glob filters are not allowed + ## for "/etc/clickhouse-server/config.d/remote.xml" + ## <remote_servers> + ##   <my-own-cluster> + ##     <shard> + ##       <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica> + ##       <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica> + ##     </shard> + ##     <shard> + ##       <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica> + ##       <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica> + ##     </shard> + ##   </my-own-cluster> + ## </remote_servers> + ## + ## example: cluster_include = ["my-own-cluster"] + # cluster_include = [] + + ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" + ## when this filter is present, a "WHERE cluster NOT IN (...)" filter will be applied + ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] + # cluster_exclude = [] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md index a4244b881cb62..22226c1434396 100644 --- a/plugins/inputs/cloud_pubsub/README.md +++ b/plugins/inputs/cloud_pubsub/README.md @@ -3,10 +3,10 @@ The GCP PubSub plugin ingests metrics from [Google Cloud PubSub][pubsub] and creates metrics using one of the supported [input data formats][]. +## Configuration -### Configuration - -```toml +```toml @sample.conf +# Read metrics from Google PubSub [[inputs.cloud_pubsub]] ## Required. Name of Google Cloud Platform (GCP) Project that owns ## the given PubSub subscription. @@ -26,8 +26,8 @@ and creates metrics using one of the supported [input data formats][]. ## Application Default Credentials, which is preferred. # credentials_file = "path/to/my/creds.json" - ## Optional. Number of seconds to wait before attempting to restart the - ## PubSub subscription receiver after an unexpected error. + ## Optional.
If true, Telegraf will attempt to base64 decode the + ## Optional. If true, Telegraf will attempt to base64 decode the ## PubSub message data before parsing. Many GCP services that ## output JSON to Google PubSub base64-encode the JSON payload. # base64_data = false @@ -85,14 +85,13 @@ and creates metrics using one of the supported [input data formats][]. ### Multiple Subscriptions and Topics This plugin assumes you have already created a PULL subscription for a given -PubSub topic. To learn how to do so, see [how to create a subscription][pubsub create sub]. +PubSub topic. To learn how to do so, see [how to create a subscription][pubsub +create sub]. Each plugin agent can listen to one subscription at a time, so you will need to run multiple instances of the plugin to pull messages from multiple subscriptions/topics. - - [pubsub]: https://cloud.google.com/pubsub [pubsub create sub]: https://cloud.google.com/pubsub/docs/admin#create_a_pull_subscription [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/cloud_pubsub.go similarity index 63% rename from plugins/inputs/cloud_pubsub/pubsub.go rename to plugins/inputs/cloud_pubsub/cloud_pubsub.go index b418274f3b34a..98858cfc9f599 100644 --- a/plugins/inputs/cloud_pubsub/pubsub.go +++ b/plugins/inputs/cloud_pubsub/cloud_pubsub.go @@ -1,22 +1,29 @@ +//go:generate ../../../tools/readme_config_includer/generator package cloud_pubsub import ( "context" + _ "embed" + "encoding/base64" "fmt" "sync" - - "encoding/base64" "time" "cloud.google.com/go/pubsub" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - "golang.org/x/oauth2/google" - "google.golang.org/api/option" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type empty struct{} type semaphore chan empty @@ -31,10 +38,10 @@ type PubSub struct { Subscription string `toml:"subscription"` // Subscription ReceiveSettings - MaxExtension internal.Duration `toml:"max_extension"` - MaxOutstandingMessages int `toml:"max_outstanding_messages"` - MaxOutstandingBytes int `toml:"max_outstanding_bytes"` - MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` + MaxExtension config.Duration `toml:"max_extension"` + MaxOutstandingMessages int `toml:"max_outstanding_messages"` + MaxOutstandingBytes int `toml:"max_outstanding_bytes"` + MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` // Agent settings MaxMessageLen int `toml:"max_message_len"` @@ -58,16 +65,12 @@ type PubSub struct { sem semaphore } -func (ps *PubSub) Description() string { - return "Read metrics from Google PubSub" -} - -func (ps *PubSub) SampleConfig() string { - return fmt.Sprintf(sampleConfig, defaultMaxUndeliveredMessages) +func (*PubSub) SampleConfig() string { + return sampleConfig } // Gather does nothing for this service input. 
-func (ps *PubSub) Gather(acc telegraf.Accumulator) error { +func (ps *PubSub) Gather(_ telegraf.Accumulator) error { return nil } @@ -180,7 +183,7 @@ func (ps *PubSub) onMessage(ctx context.Context, msg message) error { if err != nil { return fmt.Errorf("unable to base64 decode message: %v", err) } - data = []byte(strData) + data = strData } else { data = msg.Data() } @@ -269,15 +272,15 @@ func (ps *PubSub) getPubSubClient() (*pubsub.Client, error) { return client, nil } -func (ps *PubSub) getGCPSubscription(subId string) (subscription, error) { +func (ps *PubSub) getGCPSubscription(subID string) (subscription, error) { client, err := ps.getPubSubClient() if err != nil { return nil, err } - s := client.Subscription(subId) + s := client.Subscription(subID) s.ReceiveSettings = pubsub.ReceiveSettings{ NumGoroutines: ps.MaxReceiverGoRoutines, - MaxExtension: ps.MaxExtension.Duration, + MaxExtension: time.Duration(ps.MaxExtension), MaxOutstandingMessages: ps.MaxOutstandingMessages, MaxOutstandingBytes: ps.MaxOutstandingBytes, } @@ -292,77 +295,3 @@ func init() { return ps }) } - -const sampleConfig = ` - ## Required. Name of Google Cloud Platform (GCP) Project that owns - ## the given PubSub subscription. - project = "my-project" - - ## Required. Name of PubSub subscription to ingest metrics from. - subscription = "my-subscription" - - ## Required. Data format to consume. - ## Each data format has its own unique set of configuration options. - ## Read more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" - - ## Optional. Filepath for GCP credentials JSON file to authorize calls to - ## PubSub APIs. If not set explicitly, Telegraf will attempt to use - ## Application Default Credentials, which is preferred. - # credentials_file = "path/to/my/creds.json" - - ## Optional. Number of seconds to wait before attempting to restart the - ## PubSub subscription receiver after an unexpected error. - ## If the streaming pull for a PubSub Subscription fails (receiver), - ## the agent attempts to restart receiving messages after this many seconds. - # retry_delay_seconds = 5 - - ## Optional. Maximum byte length of a message to consume. - ## Larger messages are dropped with an error. If less than 0 or unspecified, - ## treated as no limit. - # max_message_len = 1000000 - - ## Optional. Maximum messages to read from PubSub that have not been written - ## to an output. Defaults to %d. - ## For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. - ## - ## For example, if each message contains 10 metrics and the output - ## metric_batch_size is 1000, setting this to 100 will ensure that a - ## full batch is collected and the write is triggered immediately without - ## waiting until the next flush_interval. - # max_undelivered_messages = 1000 - - ## The following are optional Subscription ReceiveSettings in PubSub. - ## Read more about these values: - ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings - - ## Optional. Maximum number of seconds for which a PubSub subscription - ## should auto-extend the PubSub ACK deadline for each message. If less than - ## 0, auto-extension is disabled. - # max_extension = 0 - - ## Optional. Maximum number of unprocessed messages in PubSub - ## (unacknowledged but not yet expired in PubSub). - ## A value of 0 is treated as the default PubSub value. - ## Negative values will be treated as unlimited. 
- # max_outstanding_messages = 0 - - ## Optional. Maximum size in bytes of unprocessed messages in PubSub - ## (unacknowledged but not yet expired in PubSub). - ## A value of 0 is treated as the default PubSub value. - ## Negative values will be treated as unlimited. - # max_outstanding_bytes = 0 - - ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn - ## to pull messages from PubSub concurrently. This limit applies to each - ## subscription separately and is treated as the PubSub default if less than - ## 1. Note this setting does not limit the number of messages that can be - ## processed concurrently (use "max_outstanding_messages" instead). - # max_receiver_go_routines = 0 - - ## Optional. If true, Telegraf will attempt to base64 decode the - ## PubSub message data before parsing - # base64_data = false -` diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/cloud_pubsub_test.go similarity index 84% rename from plugins/inputs/cloud_pubsub/pubsub_test.go rename to plugins/inputs/cloud_pubsub/cloud_pubsub_test.go index 2045cf4ccbc89..e27c1e8104bcf 100644 --- a/plugins/inputs/cloud_pubsub/pubsub_test.go +++ b/plugins/inputs/cloud_pubsub/cloud_pubsub_test.go @@ -5,9 +5,10 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) const ( @@ -16,12 +17,12 @@ const ( // Test ingesting InfluxDB-format PubSub message func TestRunParse(t *testing.T) { - subId := "sub-run-parse" + subID := "sub-run-parse" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -31,7 +32,7 @@ func TestRunParse(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } @@ -53,19 +54,19 @@ func TestRunParse(t *testing.T) { sub.messages <- msg acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) metric := acc.Metrics[0] validateTestInfluxMetric(t, metric) } // Test ingesting InfluxDB-format PubSub message func TestRunBase64(t *testing.T) { - subId := "sub-run-base64" + subID := "sub-run-base64" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -75,7 +76,7 @@ func TestRunBase64(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, Base64Data: true, } @@ -98,18 +99,18 @@ func TestRunBase64(t *testing.T) { sub.messages <- msg acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) metric := acc.Metrics[0] validateTestInfluxMetric(t, metric) } func TestRunInvalidMessages(t *testing.T) { - subId := "sub-invalid-messages" + subID := "sub-invalid-messages" testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -119,7 +120,7 @@ func TestRunInvalidMessages(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + 
Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } @@ -145,18 +146,18 @@ func TestRunInvalidMessages(t *testing.T) { // Make sure we acknowledged message so we don't receive it again. testTracker.WaitForAck(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } func TestRunOverlongMessages(t *testing.T) { - subId := "sub-message-too-long" + subID := "sub-message-too-long" acc := &testutil.Accumulator{} testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } sub.receiver = testMessagesReceive(sub) @@ -166,7 +167,7 @@ func TestRunOverlongMessages(t *testing.T) { parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, // Add MaxMessageLen Param MaxMessageLen: 1, @@ -192,29 +193,29 @@ func TestRunOverlongMessages(t *testing.T) { // Make sure we acknowledged message so we don't receive it again. testTracker.WaitForAck(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } func TestRunErrorInSubscriber(t *testing.T) { - subId := "sub-unexpected-error" + subID := "sub-unexpected-error" acc := &testutil.Accumulator{} testParser, _ := parsers.NewInfluxParser() sub := &stubSub{ - id: subId, + id: subID, messages: make(chan *testMsg, 100), } fakeErrStr := "a fake error" - sub.receiver = testMessagesError(sub, errors.New("a fake error")) + sub.receiver = testMessagesError(errors.New("a fake error")) ps := &PubSub{ Log: testutil.Logger{}, parser: testParser, stubSub: func() subscription { return sub }, Project: "projectIDontMatterForTests", - Subscription: subId, + Subscription: subID, MaxUndeliveredMessages: defaultMaxUndeliveredMessages, RetryReceiveDelaySeconds: 1, } @@ -228,12 +229,12 @@ func TestRunErrorInSubscriber(t *testing.T) { t.Fatal("expected plugin subscription to be non-nil") } acc.WaitError(1) - assert.Regexp(t, fakeErrStr, acc.Errors[0]) + require.Regexp(t, fakeErrStr, acc.Errors[0]) } func validateTestInfluxMetric(t *testing.T, m *testutil.Metric) { - assert.Equal(t, "cpu_load_short", m.Measurement) - assert.Equal(t, "server01", m.Tags["host"]) - assert.Equal(t, 23422.0, m.Fields["value"]) - assert.Equal(t, int64(1422568543702900257), m.Time.UnixNano()) + require.Equal(t, "cpu_load_short", m.Measurement) + require.Equal(t, "server01", m.Tags["host"]) + require.Equal(t, 23422.0, m.Fields["value"]) + require.Equal(t, int64(1422568543702900257), m.Time.UnixNano()) } diff --git a/plugins/inputs/cloud_pubsub/sample.conf b/plugins/inputs/cloud_pubsub/sample.conf new file mode 100644 index 0000000000000..81a8290917b01 --- /dev/null +++ b/plugins/inputs/cloud_pubsub/sample.conf @@ -0,0 +1,74 @@ +# Read metrics from Google PubSub +[[inputs.cloud_pubsub]] + ## Required. Name of Google Cloud Platform (GCP) Project that owns + ## the given PubSub subscription. + project = "my-project" + + ## Required. Name of PubSub subscription to ingest metrics from. + subscription = "my-subscription" + + ## Required. Data format to consume. + ## Each data format has its own unique set of configuration options. + ## Read more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. 
If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. + # credentials_file = "path/to/my/creds.json" + + ## Optional. Number of seconds to wait before attempting to restart the + ## PubSub subscription receiver after an unexpected error. + ## If the streaming pull for a PubSub Subscription fails (receiver), + ## the agent attempts to restart receiving messages after this many seconds. + # retry_delay_seconds = 5 + + ## Optional. Maximum byte length of a message to consume. + ## Larger messages are dropped with an error. If less than 0 or unspecified, + ## treated as no limit. + # max_message_len = 1000000 + + ## Optional. Maximum messages to read from PubSub that have not been written + ## to an output. Defaults to 1000. + ## For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message contains 10 metrics and the output + ## metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## The following are optional Subscription ReceiveSettings in PubSub. + ## Read more about these values: + ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings + + ## Optional. Maximum number of seconds for which a PubSub subscription + ## should auto-extend the PubSub ACK deadline for each message. If less than + ## 0, auto-extension is disabled. + # max_extension = 0 + + ## Optional. Maximum number of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. + ## Negative values will be treated as unlimited. + # max_outstanding_messages = 0 + + ## Optional. Maximum size in bytes of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. + ## Negative values will be treated as unlimited. + # max_outstanding_bytes = 0 + + ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn + ## to pull messages from PubSub concurrently. This limit applies to each + ## subscription separately and is treated as the PubSub default if less than + ## 1. Note this setting does not limit the number of messages that can be + ## processed concurrently (use "max_outstanding_messages" instead). + # max_receiver_go_routines = 0 + + ## Optional. If true, Telegraf will attempt to base64 decode the + ## PubSub message data before parsing. Many GCP services that + ## output JSON to Google PubSub base64-encode the JSON payload.
+ # base64_data = false diff --git a/plugins/inputs/cloud_pubsub/subscription_stub.go b/plugins/inputs/cloud_pubsub/subscription_stub.go index e061728caf7fe..1e5bd009bc138 100644 --- a/plugins/inputs/cloud_pubsub/subscription_stub.go +++ b/plugins/inputs/cloud_pubsub/subscription_stub.go @@ -22,7 +22,7 @@ func (s *stubSub) Receive(ctx context.Context, f func(context.Context, message)) type receiveFunc func(ctx context.Context, f func(context.Context, message)) error -func testMessagesError(s *stubSub, expectedErr error) receiveFunc { +func testMessagesError(expectedErr error) receiveFunc { return func(ctx context.Context, f func(context.Context, message)) error { return expectedErr } diff --git a/plugins/inputs/cloud_pubsub_push/README.md b/plugins/inputs/cloud_pubsub_push/README.md index 3173b43361fb6..5c04e6b0f6f3f 100644 --- a/plugins/inputs/cloud_pubsub_push/README.md +++ b/plugins/inputs/cloud_pubsub_push/README.md @@ -1,20 +1,22 @@ # Google Cloud PubSub Push Input Plugin -The Google Cloud PubSub Push listener is a service input plugin that listens for messages sent via an HTTP POST from [Google Cloud PubSub][pubsub]. -The plugin expects messages in Google's Pub/Sub JSON Format ONLY. -The intent of the plugin is to allow Telegraf to serve as an endpoint of the Google Pub/Sub 'Push' service. -Google's PubSub service will **only** send over HTTPS/TLS so this plugin must be behind a valid proxy or must be configured to use TLS. +The Google Cloud PubSub Push listener is a service input plugin that listens for +messages sent via an HTTP POST from [Google Cloud PubSub][pubsub]. The plugin +expects messages in Google's Pub/Sub JSON Format ONLY. The intent of the plugin +is to allow Telegraf to serve as an endpoint of the Google Pub/Sub 'Push' +service. Google's PubSub service will **only** send over HTTPS/TLS, so this +plugin must be behind a valid proxy or must be configured to use TLS. Enable TLS by specifying the file names of a service TLS certificate and key. -Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in `tls_allowed_cacerts`. +Enable mutually authenticated TLS and authorize client connections against a +signing certificate authority by including a list of allowed CA certificate +file names in `tls_allowed_cacerts`. +## Configuration -### Configuration: - -This is a sample configuration for the plugin.
- -```toml +```toml @sample.conf +# Google Cloud Pub/Sub Push HTTP listener [[inputs.cloud_pubsub_push]] ## Address and port to host HTTP listener on service_address = ":8080" diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go similarity index 63% rename from plugins/inputs/cloud_pubsub_push/pubsub_push.go rename to plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go index b320daedbacc1..f81adb508d1b2 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push.go +++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push.go @@ -1,23 +1,28 @@ +//go:generate ../../../tools/readme_config_includer/generator package cloud_pubsub_push import ( "context" "crypto/subtle" + _ "embed" "encoding/base64" "encoding/json" - "io/ioutil" - "net" + "io" "net/http" "sync" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // defaultMaxBodySize is the default maximum request body size, in bytes. // if the request body is over this size, we will return an HTTP 413 error. // 500 MB @@ -28,9 +33,9 @@ type PubSubPush struct { ServiceAddress string Token string Path string - ReadTimeout internal.Duration - WriteTimeout internal.Duration - MaxBodySize internal.Size + ReadTimeout config.Duration + WriteTimeout config.Duration + MaxBodySize config.Size AddMeta bool Log telegraf.Logger @@ -39,13 +44,12 @@ type PubSubPush struct { tlsint.ServerConfig parsers.Parser - listener net.Listener - server *http.Server - acc telegraf.TrackingAccumulator - ctx context.Context - cancel context.CancelFunc - wg *sync.WaitGroup - mu *sync.Mutex + server *http.Server + acc telegraf.TrackingAccumulator + ctx context.Context + cancel context.CancelFunc + wg *sync.WaitGroup + mu *sync.Mutex undelivered map[telegraf.TrackingID]chan bool sem chan struct{} @@ -63,64 +67,10 @@ type Payload struct { Subscription string `json:"subscription"` } -const sampleConfig = ` - ## Address and port to host HTTP listener on - service_address = ":8080" - - ## Application secret to verify messages originate from Cloud Pub/Sub - # token = "" - - ## Path to listen to. - # path = "/" - - ## Maximum duration before timing out read of the request - # read_timeout = "10s" - ## Maximum duration before timing out write of the response. This should be set to a value - ## large enough that you can send at least 'metric_batch_size' number of messages within the - ## duration. - # write_timeout = "10s" - - ## Maximum allowed http request body size in bytes. - ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) - # max_body_size = "500MB" - - ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. - # add_meta = false - - ## Optional. Maximum messages to read from PubSub that have not been written - ## to an output. Defaults to 1000. - ## For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. 
-  ##
-  ## For example, if each message contains 10 metrics and the output
-  ## metric_batch_size is 1000, setting this to 100 will ensure that a
-  ## full batch is collected and the write is triggered immediately without
-  ## waiting until the next flush_interval.
-  # max_undelivered_messages = 1000
-
-  ## Set one or more allowed client CA certificate file names to
-  ## enable mutually authenticated TLS connections
-  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
-
-  ## Add service certificate and key
-  # tls_cert = "/etc/telegraf/cert.pem"
-  # tls_key = "/etc/telegraf/key.pem"
-
-  ## Data format to consume.
-  ## Each data format has its own unique set of configuration options, read
-  ## more about them here:
-  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
-  data_format = "influx"
-`
-
-func (p *PubSubPush) SampleConfig() string {
+func (*PubSubPush) SampleConfig() string {
     return sampleConfig
 }

-func (p *PubSubPush) Description() string {
-    return "Google Cloud Pub/Sub Push HTTP listener"
-}
-
 func (p *PubSubPush) Gather(_ telegraf.Accumulator) error {
     return nil
 }
@@ -131,15 +81,15 @@ func (p *PubSubPush) SetParser(parser parsers.Parser) {
 // Start starts the http listener service.
 func (p *PubSubPush) Start(acc telegraf.Accumulator) error {
-    if p.MaxBodySize.Size == 0 {
-        p.MaxBodySize.Size = defaultMaxBodySize
+    if p.MaxBodySize == 0 {
+        p.MaxBodySize = config.Size(defaultMaxBodySize)
     }

-    if p.ReadTimeout.Duration < time.Second {
-        p.ReadTimeout.Duration = time.Second * 10
+    if p.ReadTimeout < config.Duration(time.Second) {
+        p.ReadTimeout = config.Duration(time.Second * 10)
     }

-    if p.WriteTimeout.Duration < time.Second {
-        p.WriteTimeout.Duration = time.Second * 10
+    if p.WriteTimeout < config.Duration(time.Second) {
+        p.WriteTimeout = config.Duration(time.Second * 10)
     }

     tlsConf, err := p.ServerConfig.TLSConfig()
@@ -149,8 +99,8 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error {
     p.server = &http.Server{
         Addr:        p.ServiceAddress,
-        Handler:     http.TimeoutHandler(p, p.WriteTimeout.Duration, "timed out processing metric"),
-        ReadTimeout: p.ReadTimeout.Duration,
+        Handler:     http.TimeoutHandler(p, time.Duration(p.WriteTimeout), "timed out processing metric"),
+        ReadTimeout: time.Duration(p.ReadTimeout),
         TLSConfig:   tlsConf,
     }
@@ -171,9 +121,13 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error {
     go func() {
         defer p.wg.Done()
         if tlsConf != nil {
-            p.server.ListenAndServeTLS("", "")
+            if err := p.server.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed {
+                p.Log.Errorf("listening and serving TLS failed: %v", err)
+            }
         } else {
-            p.server.ListenAndServe()
+            if err := p.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+                p.Log.Errorf("listening and serving failed: %v", err)
+            }
         }
     }()
@@ -183,6 +137,7 @@ func (p *PubSubPush) Start(acc telegraf.Accumulator) error {
 // Stop cleans up all resources
 func (p *PubSubPush) Stop() {
     p.cancel()
+    //nolint:errcheck,revive // we cannot do anything if the shutdown fails
     p.server.Shutdown(p.ctx)
     p.wg.Wait()
 }
@@ -208,7 +163,7 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) {
     }

     // Check that the content length is not too large for us to handle.
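The `Start`/`Stop` changes above wrap the handler in `http.TimeoutHandler`, log `ListenAndServe` failures instead of dropping them, and shut the server down via `Shutdown`. A compressed, standalone sketch of that lifecycle, using only the standard library; the address, timeouts, and handler here are placeholders:

```go
package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"time"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(50 * time.Millisecond) // simulated work
		w.WriteHeader(http.StatusNoContent)
	})

	srv := &http.Server{
		Addr: ":0", // random free port, just for the sketch
		// TimeoutHandler replies 503 with the given body if the wrapped
		// handler does not finish within the write timeout.
		Handler:     http.TimeoutHandler(handler, 10*time.Second, "timed out processing metric"),
		ReadTimeout: time.Second,
	}

	go func() {
		// ErrServerClosed is the expected result of Shutdown, not a failure.
		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Printf("listening and serving failed: %v", err)
		}
	}()

	time.Sleep(time.Second) // serve briefly, then stop
	if err := srv.Shutdown(context.Background()); err != nil {
		log.Printf("shutdown failed: %v", err)
	}
}
```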
- if req.ContentLength > p.MaxBodySize.Size { + if req.ContentLength > int64(p.MaxBodySize) { res.WriteHeader(http.StatusRequestEntityTooLarge) return } @@ -218,8 +173,8 @@ func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { return } - body := http.MaxBytesReader(res, req.Body, p.MaxBodySize.Size) - bytes, err := ioutil.ReadAll(body) + body := http.MaxBytesReader(res, req.Body, int64(p.MaxBodySize)) + bytes, err := io.ReadAll(body) if err != nil { res.WriteHeader(http.StatusRequestEntityTooLarge) return diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go similarity index 92% rename from plugins/inputs/cloud_pubsub_push/pubsub_push_test.go rename to plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go index ae7601b20cccc..3c922f2d526a3 100644 --- a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go +++ b/plugins/inputs/cloud_pubsub_push/cloud_pubsub_push_test.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -119,15 +119,13 @@ func TestServeHTTP(t *testing.T) { rr := httptest.NewRecorder() pubPush := &PubSubPush{ - Log: testutil.Logger{}, - Path: "/", - MaxBodySize: internal.Size{ - Size: test.maxsize, - }, + Log: testutil.Logger{}, + Path: "/", + MaxBodySize: config.Size(test.maxsize), sem: make(chan struct{}, 1), undelivered: make(map[telegraf.TrackingID]chan bool), mu: &sync.Mutex{}, - WriteTimeout: internal.Duration{Duration: time.Second * 1}, + WriteTimeout: config.Duration(time.Millisecond * 10), } pubPush.ctx, pubPush.cancel = context.WithCancel(context.Background()) @@ -144,7 +142,7 @@ func TestServeHTTP(t *testing.T) { pubPush.SetParser(p) dst := make(chan telegraf.Metric, 1) - ro := models.NewRunningOutput("test", &testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1) + ro := models.NewRunningOutput(&testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1) pubPush.acc = agent.NewAccumulator(&testMetricMaker{}, dst).WithTracking(1) wg.Add(1) @@ -154,15 +152,16 @@ func TestServeHTTP(t *testing.T) { }() wg.Add(1) - go func(status int, d chan telegraf.Metric) { + go func(d chan telegraf.Metric) { defer wg.Done() for m := range d { ro.AddMetric(m) + //nolint:errcheck,revive // test will fail anyway if the write fails ro.Write() } - }(test.status, dst) + }(dst) - ctx, cancel := context.WithTimeout(req.Context(), pubPush.WriteTimeout.Duration) + ctx, cancel := context.WithTimeout(req.Context(), time.Duration(pubPush.WriteTimeout)) req = req.WithContext(ctx) pubPush.ServeHTTP(rr, req) @@ -218,7 +217,7 @@ func (*testOutput) SampleConfig() string { return "" } -func (t *testOutput) Write(metrics []telegraf.Metric) error { +func (t *testOutput) Write(_ []telegraf.Metric) error { if t.failWrite { return fmt.Errorf("failed write") } diff --git a/plugins/inputs/cloud_pubsub_push/sample.conf b/plugins/inputs/cloud_pubsub_push/sample.conf new file mode 100644 index 0000000000000..c058fd96c9e93 --- /dev/null +++ b/plugins/inputs/cloud_pubsub_push/sample.conf @@ -0,0 +1,49 @@ +# Google Cloud Pub/Sub Push HTTP listener +[[inputs.cloud_pubsub_push]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Application secret to verify messages originate from Cloud Pub/Sub + # token = "" 
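The body-size handling above pairs `http.MaxBytesReader` with `io.ReadAll`: the reader errors once the limit is exceeded, and the handler maps that to HTTP 413. A runnable sketch of the same pattern using `httptest`, with a deliberately tiny limit so the error path fires:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	const maxBodySize = 16 // tiny limit so the example trips it

	h := http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
		// MaxBytesReader errors once more than maxBodySize bytes are read,
		// which the handler maps to 413 Request Entity Too Large.
		body := http.MaxBytesReader(res, req.Body, maxBodySize)
		b, err := io.ReadAll(body)
		if err != nil {
			res.WriteHeader(http.StatusRequestEntityTooLarge)
			return
		}
		log.Printf("accepted %d bytes", len(b))
		res.WriteHeader(http.StatusNoContent)
	})

	req := httptest.NewRequest(http.MethodPost, "/", strings.NewReader(strings.Repeat("x", 64)))
	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, req)
	log.Printf("status: %d", rr.Code) // 413
}
```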
+
+  ## Path to listen to.
+  # path = "/"
+
+  ## Maximum duration before timing out read of the request
+  # read_timeout = "10s"
+  ## Maximum duration before timing out write of the response. This should be set to a value
+  ## large enough that you can send at least 'metric_batch_size' number of messages within the
+  ## duration.
+  # write_timeout = "10s"
+
+  ## Maximum allowed http request body size in bytes.
+  ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
+  # max_body_size = "500MB"
+
+  ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag.
+  # add_meta = false
+
+  ## Optional. Maximum messages to read from PubSub that have not been written
+  ## to an output. Defaults to 1000.
+  ## For best throughput set based on the number of metrics within
+  ## each message and the size of the output's metric_batch_size.
+  ##
+  ## For example, if each message contains 10 metrics and the output
+  ## metric_batch_size is 1000, setting this to 100 will ensure that a
+  ## full batch is collected and the write is triggered immediately without
+  ## waiting until the next flush_interval.
+  # max_undelivered_messages = 1000
+
+  ## Set one or more allowed client CA certificate file names to
+  ## enable mutually authenticated TLS connections
+  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]
+
+  ## Add service certificate and key
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md
index bc7b9b50c5d80..05220925068f3 100644
--- a/plugins/inputs/cloudwatch/README.md
+++ b/plugins/inputs/cloudwatch/README.md
@@ -2,10 +2,11 @@

 This plugin will pull Metric Statistics from Amazon CloudWatch.

-### Amazon Authentication
+## Amazon Authentication

 This plugin uses a credential chain for Authentication with the CloudWatch
 API endpoint. In the following order the plugin will attempt to authenticate.
+
 1. Assumed credentials via STS if `role_arn` attribute is specified (source credentials are evaluated from subsequent rules)
 2. Explicit credentials from `access_key`, `secret_key`, and `token` attributes
 3. Shared profile from `profile` attribute
@@ -13,25 +14,29 @@ API endpoint. In the following order the plugin will attempt to authenticate.
 5. [Shared Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#shared-credentials-file)
 6.
[EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Pull Metric Statistics from Amazon CloudWatch [[inputs.cloudwatch]] ## Amazon Region region = "us-east-1" ## Amazon Credentials ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile # access_key = "" # secret_key = "" # token = "" # role_arn = "" + # web_identity_token_file = "" + # role_session_name = "" # profile = "" # shared_credential_file = "" @@ -41,6 +46,10 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## ex: endpoint_url = "http://localhost:8000" # endpoint_url = "" + ## Set http_proxy + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" + # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. @@ -68,8 +77,10 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## Configure the TTL for the internal cache of metrics. # cache_ttl = "1h" - ## Metric Statistic Namespace (required) - namespace = "AWS/ELB" + ## Metric Statistic Namespaces (required) + namespaces = ["AWS/ELB"] + # A single metric statistic namespace that will be appended to namespaces on startup + # namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a @@ -98,13 +109,16 @@ API endpoint. In the following order the plugin will attempt to authenticate. # # ## Dimension filters for Metric. All dimensions defined for the metric names # ## must be specified in order to retrieve the metric statistics. + # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. # [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" ``` -#### Requirements and Terminology -Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html) and access pattern to allow monitoring of any CloudWatch Metric. +## Requirements and Terminology + +Plugin Configuration utilizes [CloudWatch concepts][1] and access pattern to +allow monitoring of any CloudWatch Metric. 
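The new `'value' has wildcard / 'glob' matching support` noted in the config above is implemented with Telegraf's internal `filter` package (see the `valueMatcher` field later in this patch). As a dependency-free approximation, the stdlib `path.Match` demonstrates the same pattern semantics:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// The CloudWatch input now treats dimension values such as "p-*" as
	// glob patterns. path.Match supports the same *, ?, [...] wildcards.
	for _, lb := range []string{"p-example", "p-other", "q-example"} {
		ok, err := path.Match("p-*", lb)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-10s matches p-*: %v\n", lb, ok)
	}
}
```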
 - `region` must be a valid AWS [Region](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchRegions) value
 - `period` must be a valid CloudWatch [Period](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#CloudWatchPeriods) value
@@ -112,12 +126,14 @@ Plugin Configuration utilizes [CloudWatch concepts](http://docs.aws.amazon.com/A
 - `names` must be valid CloudWatch [Metric](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Metric) names
 - `dimensions` must be valid CloudWatch [Dimension](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html#Dimension) name/value pairs

-Omitting or specifying a value of `'*'` for a dimension value configures all available metrics that contain a dimension with the specified name
-to be retrieved. If specifying >1 dimension, then the metric must contain *all* the configured dimensions where the the value of the
-wildcard dimension is ignored.
+Omitting or specifying a value of `'*'` for a dimension value configures all
+available metrics that contain a dimension with the specified name to be
+retrieved. If specifying >1 dimension, then the metric must contain *all* the
+configured dimensions, where the value of the wildcard dimension is ignored.

 Example:
-```
+
+```toml
 [[inputs.cloudwatch]]
   period = "1m"
   interval = "5m"
@@ -136,29 +152,38 @@ Example:
 ```

 If the following ELBs are available:
+
 - name: `p-example`, availabilityZone: `us-east-1a`
 - name: `p-example`, availabilityZone: `us-east-1b`
 - name: `q-example`, availabilityZone: `us-east-1a`
 - name: `q-example`, availabilityZone: `us-east-1b`
-
 Then 2 metrics will be output:
+
 - name: `p-example`, availabilityZone: `us-east-1a`
 - name: `p-example`, availabilityZone: `us-east-1b`

-If the `AvailabilityZone` wildcard dimension was omitted, then a single metric (name: `p-example`)
-would be exported containing the aggregate values of the ELB across availability zones.
+If the `AvailabilityZone` wildcard dimension was omitted, then a single metric
+(name: `p-example`) would be exported containing the aggregate values of the ELB
+across availability zones.
+
+To maximize efficiency and savings, consider making fewer requests by increasing
+`interval` but keeping `period` at the duration you would like metrics to be
+reported. The above example will request metrics from Cloudwatch every 5 minutes
+but will output five metrics timestamped one minute apart.

-To maximize efficiency and savings, consider making fewer requests by increasing `interval` but keeping `period` at the duration you would like metrics to be reported. The above example will request metrics from Cloudwatch every 5 minutes but will output five metrics timestamped one minute apart.
+
+[1]: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_concepts.html
+
+## Restrictions and Limitations

-#### Restrictions and Limitations
 - CloudWatch metrics are not available instantly via the CloudWatch API.
You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) - CloudWatch API usage incurs cost - see [GetMetricData Pricing](https://aws.amazon.com/cloudwatch/pricing/) -### Measurements & Fields: +## Metrics -Each CloudWatch Namespace monitored records a measurement with fields for each available Metric Statistic. -Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) +Each CloudWatch Namespace monitored records a measurement with fields for each +available Metric Statistic. Namespace and Metrics are represented in [snake +case](https://en.wikipedia.org/wiki/Snake_case) - cloudwatch_{namespace} - {metric}_sum (metric Sum value) @@ -167,26 +192,29 @@ Namespace and Metrics are represented in [snake case](https://en.wikipedia.org/w - {metric}_maximum (metric Maximum value) - {metric}_sample_count (metric SampleCount value) +### Tags -### Tags: -Each measurement is tagged with the following identifiers to uniquely identify the associated metric -Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wiki/Snake_case) +Each measurement is tagged with the following identifiers to uniquely identify +the associated metric Tag Dimension names are represented in [snake +case](https://en.wikipedia.org/wiki/Snake_case) - All measurements have the following tags: - region (CloudWatch Region) - {dimension-name} (Cloudwatch Dimension value - one for each metric dimension) -### Troubleshooting: +## Troubleshooting You can use the aws cli to get a list of available metrics and dimensions: -``` + +```shell aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 --metric-name CPUCreditBalance ``` If the expected metrics are not returned, you can try getting them manually for a short period of time: -``` + +```shell aws cloudwatch get-metric-data \ --start-time 2018-07-01T00:00:00Z \ --end-time 2018-07-01T00:15:00Z \ @@ -212,9 +240,9 @@ aws cloudwatch get-metric-data \ ]' ``` -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter cloudwatch --test > cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000 ``` diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index d1f5661a03eba..7f9c646995749 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -1,6 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package cloudwatch import ( + "context" + _ "embed" "fmt" "net" "net/http" @@ -9,35 +12,45 @@ import ( "sync" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/aws" + cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/limiter" - "github.com/influxdata/telegraf/metric" + internalMetric 
"github.com/influxdata/telegraf/metric" + internalProxy "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +const ( + StatisticAverage = "Average" + StatisticMaximum = "Maximum" + StatisticMinimum = "Minimum" + StatisticSum = "Sum" + StatisticSampleCount = "SampleCount" +) + // CloudWatch contains the configuration and cache for the cloudwatch plugin. type CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - CredentialPath string `toml:"shared_credential_file"` - Token string `toml:"token"` - EndpointURL string `toml:"endpoint_url"` StatisticExclude []string `toml:"statistic_exclude"` StatisticInclude []string `toml:"statistic_include"` Timeout config.Duration `toml:"timeout"` + internalProxy.HTTPProxy + Period config.Duration `toml:"period"` Delay config.Duration `toml:"delay"` Namespace string `toml:"namespace"` + Namespaces []string `toml:"namespaces"` Metrics []*Metric `toml:"metrics"` CacheTTL config.Duration `toml:"cache_ttl"` RateLimit int `toml:"ratelimit"` @@ -51,6 +64,8 @@ type CloudWatch struct { queryDimensions map[string]*map[string]string windowStart time.Time windowEnd time.Time + + internalaws.CredentialConfig } // Metric defines a simplified Cloudwatch metric. @@ -63,8 +78,9 @@ type Metric struct { // Dimension defines a simplified Cloudwatch dimension (provides metric filtering). type Dimension struct { - Name string `toml:"name"` - Value string `toml:"value"` + Name string `toml:"name"` + Value string `toml:"value"` + valueMatcher filter.Filter } // metricCache caches metrics, their filters, and generated queries. @@ -72,138 +88,49 @@ type metricCache struct { ttl time.Duration built time.Time metrics []filteredMetric - queries []*cloudwatch.MetricDataQuery + queries map[string][]types.MetricDataQuery } type cloudwatchClient interface { - ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) - GetMetricData(*cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) -} - -// SampleConfig returns the default configuration of the Cloudwatch input plugin. -func (c *CloudWatch) SampleConfig() string { - return ` - ## Amazon Region - region = "us-east-1" - - ## Amazon Credentials - ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile - # access_key = "" - # secret_key = "" - # token = "" - # role_arn = "" - # profile = "" - # shared_credential_file = "" - - ## Endpoint to make request against, the correct endpoint is automatically - ## determined and this option should only be set if you wish to override the - ## default. - ## ex: endpoint_url = "http://localhost:8000" - # endpoint_url = "" - - # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all - # metrics are made available to the 1 minute period. Some are collected at - # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. 
- # Note that if a period is configured that is smaller than the minimum for a - # particular metric, that metric will not be returned by the Cloudwatch API - # and will not be collected by Telegraf. - # - ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) - period = "5m" - - ## Collection Delay (required - must account for metrics availability via CloudWatch API) - delay = "5m" - - ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid - ## gaps or overlap in pulled data - interval = "5m" - - ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. - ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. - ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. - ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. - ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html - #recently_active = "PT3H" - - ## Configure the TTL for the internal cache of metrics. - # cache_ttl = "1h" - - ## Metric Statistic Namespace (required) - namespace = "AWS/ELB" - - ## Maximum requests per second. Note that the global default AWS rate limit is - ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a - ## maximum of 50. - ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html - # ratelimit = 25 - - ## Timeout for http requests made by the cloudwatch client. - # timeout = "5s" - - ## Namespace-wide statistic filters. These allow fewer queries to be made to - ## cloudwatch. - # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] - # statistic_exclude = [] - - ## Metrics to Pull - ## Defaults to all Metrics in Namespace if nothing is provided - ## Refreshes Namespace available metrics every 1h - #[[inputs.cloudwatch.metrics]] - # names = ["Latency", "RequestCount"] - # - # ## Statistic filters for Metric. These allow for retrieving specific - # ## statistics for an individual metric. - # # statistic_include = [ "average", "sum", "minimum", "maximum", sample_count" ] - # # statistic_exclude = [] - # - # ## Dimension filters for Metric. All dimensions defined for the metric names - # ## must be specified in order to retrieve the metric statistics. - # [[inputs.cloudwatch.metrics.dimensions]] - # name = "LoadBalancerName" - # value = "p-example" -` + ListMetrics(context.Context, *cwClient.ListMetricsInput, ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) + GetMetricData(context.Context, *cwClient.GetMetricDataInput, ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) } -// Description returns a one-sentence description on the Cloudwatch input plugin. -func (c *CloudWatch) Description() string { - return "Pull Metric Statistics from Amazon CloudWatch" +func (*CloudWatch) SampleConfig() string { + return sampleConfig } -// Gather takes in an accumulator and adds the metrics that the Input -// gathers. This is called every "interval". -func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { - if c.statFilter == nil { - var err error - // Set config level filter (won't change throughout life of plugin). 
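The next hunk moves this lazy setup out of `Gather` and into `Init`, which Telegraf calls once at startup for plugins implementing the optional `Init() error` interface (`telegraf.Initializer`). A sketch of the shape of that pattern; the plugin type here is a stand-in, not the real one:

```go
package main

import (
	"errors"
	"fmt"
)

// Initializer mirrors the optional interface Telegraf checks for: plugins
// that implement it get a one-time Init call before metrics are gathered.
type Initializer interface {
	Init() error
}

type cloudWatchLike struct {
	Namespace  string
	Namespaces []string
}

// Init performs the one-time setup this patch moves out of Gather:
// folding the deprecated singular option into the plural one and
// validating configuration up front.
func (c *cloudWatchLike) Init() error {
	if c.Namespace != "" {
		c.Namespaces = append(c.Namespaces, c.Namespace)
	}
	if len(c.Namespaces) == 0 {
		return errors.New("at least one namespace is required")
	}
	return nil
}

func main() {
	var plugin Initializer = &cloudWatchLike{Namespace: "AWS/ELB"}
	if err := plugin.Init(); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", plugin)
}
```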
- c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude) - if err != nil { - return err - } +func (c *CloudWatch) Init() error { + if len(c.Namespace) != 0 { + c.Namespaces = append(c.Namespaces, c.Namespace) } - if c.client == nil { - c.initializeCloudWatch() + err := c.initializeCloudWatch() + if err != nil { + return err } - filteredMetrics, err := getFilteredMetrics(c) + // Set config level filter (won't change throughout life of plugin). + c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude) if err != nil { return err } - c.updateWindow(time.Now()) + return nil +} - // Get all of the possible queries so we can send groups of 100. - queries, err := c.getDataQueries(filteredMetrics) +// Gather takes in an accumulator and adds the metrics that the Input +// gathers. This is called every "interval". +func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { + filteredMetrics, err := getFilteredMetrics(c) if err != nil { return err } + c.updateWindow(time.Now()) + + // Get all of the possible queries so we can send groups of 100. + queries := c.getDataQueries(filteredMetrics) if len(queries) == 0 { return nil } @@ -216,32 +143,34 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { wg := sync.WaitGroup{} rLock := sync.Mutex{} - results := []*cloudwatch.MetricDataResult{} + results := map[string][]types.MetricDataResult{} - // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. - batchSize := 500 - var batches [][]*cloudwatch.MetricDataQuery + for namespace, namespacedQueries := range queries { + // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. + batchSize := 500 + var batches [][]types.MetricDataQuery - for batchSize < len(queries) { - queries, batches = queries[batchSize:], append(batches, queries[0:batchSize:batchSize]) - } - batches = append(batches, queries) - - for i := range batches { - wg.Add(1) - <-lmtr.C - go func(inm []*cloudwatch.MetricDataQuery) { - defer wg.Done() - result, err := c.gatherMetrics(c.getDataInputs(inm)) - if err != nil { - acc.AddError(err) - return - } + for batchSize < len(namespacedQueries) { + namespacedQueries, batches = namespacedQueries[batchSize:], append(batches, namespacedQueries[0:batchSize:batchSize]) + } + batches = append(batches, namespacedQueries) + + for i := range batches { + wg.Add(1) + <-lmtr.C + go func(n string, inm []types.MetricDataQuery) { + defer wg.Done() + result, err := c.gatherMetrics(c.getDataInputs(inm)) + if err != nil { + acc.AddError(err) + return + } - rLock.Lock() - results = append(results, result...) - rLock.Unlock() - }(batches[i]) + rLock.Lock() + results[n] = append(results[n], result...) 
+ rLock.Unlock() + }(namespace, batches[i]) + } } wg.Wait() @@ -249,24 +178,24 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { return c.aggregateMetrics(acc, results) } -func (c *CloudWatch) initializeCloudWatch() { - credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.CredentialPath, - Token: c.Token, - EndpointURL: c.EndpointURL, +func (c *CloudWatch) initializeCloudWatch() error { + proxy, err := c.HTTPProxy.Proxy() + if err != nil { + return err } - configProvider := credentialConfig.Credentials() - cfg := &aws.Config{ - HTTPClient: &http.Client{ + cfg, err := c.CredentialConfig.Credentials() + if err != nil { + return err + } + c.client = cwClient.NewFromConfig(cfg, func(options *cwClient.Options) { + // Disable logging + options.ClientLogMode = 0 + + options.HTTPClient = &http.Client{ // use values from DefaultTransport Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, + Proxy: proxy, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, @@ -278,15 +207,26 @@ func (c *CloudWatch) initializeCloudWatch() { ExpectContinueTimeout: 1 * time.Second, }, Timeout: time.Duration(c.Timeout), - }, + } + }) + + // Initialize regex matchers for each Dimension value. + for _, m := range c.Metrics { + for _, dimension := range m.Dimensions { + matcher, err := filter.NewIncludeExcludeFilter([]string{dimension.Value}, nil) + if err != nil { + return err + } + + dimension.valueMatcher = matcher + } } - loglevel := aws.LogOff - c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel)) + return nil } type filteredMetric struct { - metrics []*cloudwatch.Metric + metrics []types.Metric statFilter filter.Filter } @@ -301,21 +241,23 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { // check for provided metric filter if c.Metrics != nil { for _, m := range c.Metrics { - metrics := []*cloudwatch.Metric{} + metrics := []types.Metric{} if !hasWildcard(m.Dimensions) { - dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions)) + dimensions := make([]types.Dimension, len(m.Dimensions)) for k, d := range m.Dimensions { - dimensions[k] = &cloudwatch.Dimension{ + dimensions[k] = types.Dimension{ Name: aws.String(d.Name), Value: aws.String(d.Value), } } for _, name := range m.MetricNames { - metrics = append(metrics, &cloudwatch.Metric{ - Namespace: aws.String(c.Namespace), - MetricName: aws.String(name), - Dimensions: dimensions, - }) + for _, namespace := range c.Namespaces { + metrics = append(metrics, types.Metric{ + Namespace: aws.String(namespace), + MetricName: aws.String(name), + Dimensions: dimensions, + }) + } } } else { allMetrics, err := c.fetchNamespaceMetrics() @@ -325,11 +267,13 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { for _, name := range m.MetricNames { for _, metric := range allMetrics { if isSelected(name, metric, m.Dimensions) { - metrics = append(metrics, &cloudwatch.Metric{ - Namespace: aws.String(c.Namespace), - MetricName: aws.String(name), - Dimensions: metric.Dimensions, - }) + for _, namespace := range c.Namespaces { + metrics = append(metrics, types.Metric{ + Namespace: aws.String(namespace), + MetricName: aws.String(name), + Dimensions: metric.Dimensions, + }) + } } } } @@ -357,10 +301,12 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { return nil, err } - fMetrics = []filteredMetric{{ - metrics: metrics, - 
statFilter: c.statFilter, - }} + fMetrics = []filteredMetric{ + { + metrics: metrics, + statFilter: c.statFilter, + }, + } } c.metricCache = &metricCache{ @@ -373,40 +319,38 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { } // fetchNamespaceMetrics retrieves available metrics for a given CloudWatch namespace. -func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { - metrics := []*cloudwatch.Metric{} +func (c *CloudWatch) fetchNamespaceMetrics() ([]types.Metric, error) { + metrics := []types.Metric{} var token *string - var params *cloudwatch.ListMetricsInput - var recentlyActive *string = nil - - switch c.RecentlyActive { - case "PT3H": - recentlyActive = &c.RecentlyActive - default: - recentlyActive = nil + + params := &cwClient.ListMetricsInput{ + Dimensions: []types.DimensionFilter{}, + NextToken: token, + MetricName: nil, } - params = &cloudwatch.ListMetricsInput{ - Namespace: aws.String(c.Namespace), - Dimensions: []*cloudwatch.DimensionFilter{}, - NextToken: token, - MetricName: nil, - RecentlyActive: recentlyActive, + if c.RecentlyActive == "PT3H" { + params.RecentlyActive = types.RecentlyActivePt3h } - for { - resp, err := c.client.ListMetrics(params) - if err != nil { - return nil, err - } - metrics = append(metrics, resp.Metrics...) - if resp.NextToken == nil { - break - } + for _, namespace := range c.Namespaces { + params.Namespace = aws.String(namespace) + for { + resp, err := c.client.ListMetrics(context.Background(), params) + if err != nil { + c.Log.Errorf("failed to list metrics with namespace %s: %v", namespace, err) + // skip problem namespace on error and continue to next namespace + break + } - params.NextToken = resp.NextToken - } + metrics = append(metrics, resp.Metrics...) + if resp.NextToken == nil { + break + } + params.NextToken = resp.NextToken + } + } return metrics, nil } @@ -425,75 +369,75 @@ func (c *CloudWatch) updateWindow(relativeTo time.Time) { } // getDataQueries gets all of the possible queries so we can maximize the request payload. 
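The batching loop in `Gather` above splits each namespace's queries into groups of at most 500, the `GetMetricData` per-request limit. The three-index slice expression it relies on is easy to miss; a standalone sketch of the same idiom:

```go
package main

import "fmt"

// batch splits queries into chunks of at most size elements, mirroring the
// batching loop in Gather (500 is the GetMetricData per-request limit).
func batch(queries []int, size int) [][]int {
	var batches [][]int
	for size < len(queries) {
		// The three-index slice caps capacity so a later append to one
		// batch cannot clobber the backing array of the next.
		queries, batches = queries[size:], append(batches, queries[0:size:size])
	}
	return append(batches, queries)
}

func main() {
	queries := make([]int, 1203)
	for i := range queries {
		queries[i] = i
	}
	for i, b := range batch(queries, 500) {
		fmt.Printf("batch %d: %d queries\n", i, len(b)) // 500, 500, 203
	}
}
```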
-func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudwatch.MetricDataQuery, error) { +func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string][]types.MetricDataQuery { if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() { - return c.metricCache.queries, nil + return c.metricCache.queries } c.queryDimensions = map[string]*map[string]string{} - dataQueries := []*cloudwatch.MetricDataQuery{} + dataQueries := map[string][]types.MetricDataQuery{} for i, filtered := range filteredMetrics { for j, metric := range filtered.metrics { id := strconv.Itoa(j) + "_" + strconv.Itoa(i) dimension := ctod(metric.Dimensions) if filtered.statFilter.Match("average") { c.queryDimensions["average_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("average_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_average")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticAverage), + MetricStat: &types.MetricStat{ + Metric: &filtered.metrics[j], + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticAverage), }, }) } if filtered.statFilter.Match("maximum") { c.queryDimensions["maximum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("maximum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_maximum")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticMaximum), + MetricStat: &types.MetricStat{ + Metric: &filtered.metrics[j], + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticMaximum), }, }) } if filtered.statFilter.Match("minimum") { c.queryDimensions["minimum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("minimum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_minimum")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticMinimum), + MetricStat: &types.MetricStat{ + Metric: &filtered.metrics[j], + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticMinimum), }, }) } if filtered.statFilter.Match("sum") { c.queryDimensions["sum_"+id] = dimension - dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("sum_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sum")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticSum), + MetricStat: &types.MetricStat{ + Metric: &filtered.metrics[j], + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticSum), }, }) } if filtered.statFilter.Match("sample_count") { c.queryDimensions["sample_count_"+id] = dimension - dataQueries = append(dataQueries, 
&cloudwatch.MetricDataQuery{ + dataQueries[*metric.Namespace] = append(dataQueries[*metric.Namespace], types.MetricDataQuery{ Id: aws.String("sample_count_" + id), Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")), - MetricStat: &cloudwatch.MetricStat{ - Metric: metric, - Period: aws.Int64(int64(time.Duration(c.Period).Seconds())), - Stat: aws.String(cloudwatch.StatisticSampleCount), + MetricStat: &types.MetricStat{ + Metric: &filtered.metrics[j], + Period: aws.Int32(int32(time.Duration(c.Period).Seconds())), + Stat: aws.String(StatisticSampleCount), }, }) } @@ -502,7 +446,7 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw if len(dataQueries) == 0 { c.Log.Debug("no metrics found to collect") - return nil, nil + return nil } if c.metricCache == nil { @@ -515,17 +459,17 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudw c.metricCache.queries = dataQueries } - return dataQueries, nil + return dataQueries } // gatherMetrics gets metric data from Cloudwatch. func (c *CloudWatch) gatherMetrics( - params *cloudwatch.GetMetricDataInput, -) ([]*cloudwatch.MetricDataResult, error) { - results := []*cloudwatch.MetricDataResult{} + params *cwClient.GetMetricDataInput, +) ([]types.MetricDataResult, error) { + results := []types.MetricDataResult{} for { - resp, err := c.client.GetMetricData(params) + resp, err := c.client.GetMetricData(context.Background(), params) if err != nil { return nil, fmt.Errorf("failed to get metric data: %v", err) } @@ -542,23 +486,28 @@ func (c *CloudWatch) gatherMetrics( func (c *CloudWatch) aggregateMetrics( acc telegraf.Accumulator, - metricDataResults []*cloudwatch.MetricDataResult, + metricDataResults map[string][]types.MetricDataResult, ) error { var ( - grouper = metric.NewSeriesGrouper() - namespace = sanitizeMeasurement(c.Namespace) + grouper = internalMetric.NewSeriesGrouper() ) - for _, result := range metricDataResults { - tags := map[string]string{} + for namespace, results := range metricDataResults { + namespace = sanitizeMeasurement(namespace) - if dimensions, ok := c.queryDimensions[*result.Id]; ok { - tags = *dimensions - } - tags["region"] = c.Region + for _, result := range results { + tags := map[string]string{} + + if dimensions, ok := c.queryDimensions[*result.Id]; ok { + tags = *dimensions + } + tags["region"] = c.Region - for i := range result.Values { - grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]) + for i := range result.Values { + if err := grouper.Add(namespace, tags, result.Timestamps[i], *result.Label, result.Values[i]); err != nil { + acc.AddError(err) + } + } } } @@ -585,25 +534,20 @@ func New() *CloudWatch { } func sanitizeMeasurement(namespace string) string { - namespace = strings.Replace(namespace, "/", "_", -1) + namespace = strings.ReplaceAll(namespace, "/", "_") namespace = snakeCase(namespace) return "cloudwatch_" + namespace } func snakeCase(s string) string { s = internal.SnakeCase(s) - s = strings.Replace(s, " ", "_", -1) - s = strings.Replace(s, "__", "_", -1) + s = strings.ReplaceAll(s, " ", "_") + s = strings.ReplaceAll(s, "__", "_") return s } -type dimension struct { - name string - value string -} - // ctod converts cloudwatch dimensions to regular dimensions. 
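The surrounding refactor changes `getDataQueries` from returning a flat query slice to a map keyed by namespace, so results can later be aggregated into one measurement per namespace. Reduced to its essentials, the grouping looks like this, with toy types in place of the SDK's:

```go
package main

import "fmt"

type query struct {
	namespace string
	id        string
}

func main() {
	queries := []query{
		{"AWS/ELB", "latency_average"},
		{"AWS/EC2", "cpu_average"},
		{"AWS/ELB", "latency_sum"},
	}

	// Mirrors the change from a flat []MetricDataQuery to a map keyed by
	// namespace: grouping up front lets aggregation emit one measurement
	// name per namespace later.
	byNamespace := map[string][]query{}
	for _, q := range queries {
		byNamespace[q.namespace] = append(byNamespace[q.namespace], q)
	}

	for ns, qs := range byNamespace {
		fmt.Printf("%s: %d queries\n", ns, len(qs))
	}
}
```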
-func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { +func ctod(cDimensions []types.Dimension) *map[string]string { dimensions := map[string]string{} for i := range cDimensions { dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value @@ -611,8 +555,8 @@ func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { return &dimensions } -func (c *CloudWatch) getDataInputs(dataQueries []*cloudwatch.MetricDataQuery) *cloudwatch.GetMetricDataInput { - return &cloudwatch.GetMetricDataInput{ +func (c *CloudWatch) getDataInputs(dataQueries []types.MetricDataQuery) *cwClient.GetMetricDataInput { + return &cwClient.GetMetricDataInput{ StartTime: aws.Time(c.windowStart), EndTime: aws.Time(c.windowEnd), MetricDataQueries: dataQueries, @@ -626,14 +570,14 @@ func (f *metricCache) isValid() bool { func hasWildcard(dimensions []*Dimension) bool { for _, d := range dimensions { - if d.Value == "" || d.Value == "*" { + if d.Value == "" || strings.ContainsAny(d.Value, "*?[") { return true } } return false } -func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) bool { +func isSelected(name string, metric types.Metric, dimensions []*Dimension) bool { if name != *metric.MetricName { return false } @@ -644,7 +588,7 @@ func isSelected(name string, metric *cloudwatch.Metric, dimensions []*Dimension) selected := false for _, d2 := range metric.Dimensions { if d.Name == *d2.Name { - if d.Value == "" || d.Value == "*" || d.Value == *d2.Value { + if d.Value == "" || d.valueMatcher.Match(*d2.Value) { selected = true } } diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index 2983773ad1bb5..939f146de7e6b 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -1,28 +1,33 @@ package cloudwatch import ( + "context" + "net/http" + "net/url" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/stretchr/testify/assert" + "github.com/aws/aws-sdk-go-v2/aws" + cwClient "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/config" + internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/common/proxy" "github.com/influxdata/telegraf/testutil" ) type mockGatherCloudWatchClient struct{} -func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { - return &cloudwatch.ListMetricsOutput{ - Metrics: []*cloudwatch.Metric{ +func (m *mockGatherCloudWatchClient) ListMetrics(_ context.Context, params *cwClient.ListMetricsInput, _ ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) { + return &cwClient.ListMetricsOutput{ + Metrics: []types.Metric{ { Namespace: params.Namespace, MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), @@ -33,78 +38,70 @@ func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsI }, nil } -func (m *mockGatherCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { - return &cloudwatch.GetMetricDataOutput{ - MetricDataResults: []*cloudwatch.MetricDataResult{ +func (m *mockGatherCloudWatchClient) 
GetMetricData(_ context.Context, params *cwClient.GetMetricDataInput, _ ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) { + return &cwClient.GetMetricDataOutput{ + MetricDataResults: []types.MetricDataResult{ { Id: aws.String("minimum_0_0"), Label: aws.String("latency_minimum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.1), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.1}, }, { Id: aws.String("maximum_0_0"), Label: aws.String("latency_maximum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.3), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.3}, }, { Id: aws.String("average_0_0"), Label: aws.String("latency_average"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(0.2), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{0.2}, }, { Id: aws.String("sum_0_0"), Label: aws.String("latency_sum"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(123), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{123}, }, { Id: aws.String("sample_count_0_0"), Label: aws.String("latency_sample_count"), - StatusCode: aws.String("completed"), - Timestamps: []*time.Time{ - params.EndTime, - }, - Values: []*float64{ - aws.Float64(100), + StatusCode: types.StatusCodeComplete, + Timestamps: []time.Time{ + *params.EndTime, }, + Values: []float64{100}, }, }, }, nil } func TestSnakeCase(t *testing.T) { - assert.Equal(t, "cluster_name", snakeCase("Cluster Name")) - assert.Equal(t, "broker_id", snakeCase("Broker ID")) + require.Equal(t, "cluster_name", snakeCase("Cluster Name")) + require.Equal(t, "broker_id", snakeCase("Broker ID")) } func TestGather(t *testing.T) { duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Region: "us-east-1", + CredentialConfig: internalaws.CredentialConfig{ + Region: "us-east-1", + }, Namespace: "AWS/ELB", Delay: internalDuration, Period: internalDuration, @@ -112,9 +109,10 @@ func TestGather(t *testing.T) { } var acc testutil.Accumulator - c.client = &mockGatherCloudWatchClient{} - assert.NoError(t, acc.GatherError(c.Gather)) + require.NoError(t, c.Init()) + c.client = &mockGatherCloudWatchClient{} + require.NoError(t, acc.GatherError(c.Gather)) fields := map[string]interface{}{} fields["latency_minimum"] = 0.1 @@ -127,14 +125,34 @@ func TestGather(t *testing.T) { tags["region"] = "us-east-1" tags["load_balancer_name"] = "p-example" - assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) + require.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags) } +func TestGather_MultipleNamespaces(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := config.Duration(duration) + c := &CloudWatch{ + Namespaces: []string{"AWS/ELB", "AWS/EC2"}, + Delay: internalDuration, + Period: internalDuration, + RateLimit: 200, + } + + var acc testutil.Accumulator + + require.NoError(t, c.Init()) + c.client = &mockGatherCloudWatchClient{} + require.NoError(t, acc.GatherError(c.Gather)) + + 
require.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) + require.True(t, acc.HasMeasurement("cloudwatch_aws_ec2")) +} + type mockSelectMetricsCloudWatchClient struct{} -func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { - metrics := []*cloudwatch.Metric{} +func (m *mockSelectMetricsCloudWatchClient) ListMetrics(_ context.Context, params *cwClient.ListMetricsInput, _ ...func(*cwClient.Options)) (*cwClient.ListMetricsOutput, error) { + metrics := []types.Metric{} // 4 metrics are available metricNames := []string{"Latency", "RequestCount", "HealthyHostCount", "UnHealthyHostCount"} // for 3 ELBs @@ -144,10 +162,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM for _, m := range metricNames { for _, lb := range loadBalancers { // For each metric/ELB pair, we get an aggregate value across all AZs. - metrics = append(metrics, &cloudwatch.Metric{ + metrics = append(metrics, types.Metric{ Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), @@ -156,10 +174,10 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM }) for _, az := range availabilityZones { // We get a metric for each metric/ELB/AZ triplet. - metrics = append(metrics, &cloudwatch.Metric{ + metrics = append(metrics, types.Metric{ Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), - Dimensions: []*cloudwatch.Dimension{ + Dimensions: []types.Dimension{ { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), @@ -174,13 +192,13 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM } } - result := &cloudwatch.ListMetricsOutput{ + result := &cwClient.ListMetricsOutput{ Metrics: metrics, } return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { +func (m *mockSelectMetricsCloudWatchClient) GetMetricData(_ context.Context, params *cwClient.GetMetricDataInput, _ ...func(*cwClient.Options)) (*cwClient.GetMetricDataOutput, error) { return nil, nil } @@ -188,7 +206,9 @@ func TestSelectMetrics(t *testing.T) { duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Region: "us-east-1", + CredentialConfig: internalaws.CredentialConfig{ + Region: "us-east-1", + }, Namespace: "AWS/ELB", Delay: internalDuration, Period: internalDuration, @@ -199,96 +219,101 @@ func TestSelectMetrics(t *testing.T) { Dimensions: []*Dimension{ { Name: "LoadBalancerName", - Value: "*", + Value: "lb*", }, { Name: "AvailabilityZone", - Value: "*", + Value: "us-east*", }, }, }, }, } + require.NoError(t, c.Init()) c.client = &mockSelectMetricsCloudWatchClient{} filtered, err := getFilteredMetrics(c) // We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2 // AZs. We should get 12 metrics. 
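These tests can inject mocks because the plugin depends on the two-method `cloudwatchClient` interface rather than the concrete SDK client. A self-contained sketch of that seam; the names here are hypothetical and the interface is narrowed further for brevity:

```go
package main

import (
	"context"
	"fmt"
)

// lister is the narrow seam the code under test depends on, in the spirit
// of the cloudwatchClient interface in this patch (two methods instead of
// the full AWS SDK client).
type lister interface {
	ListMetrics(ctx context.Context, namespace string) ([]string, error)
}

type mockLister struct{}

func (mockLister) ListMetrics(_ context.Context, namespace string) ([]string, error) {
	return []string{namespace + ":Latency", namespace + ":RequestCount"}, nil
}

// countMetrics is the "code under test": it only sees the interface.
func countMetrics(ctx context.Context, c lister, namespace string) (int, error) {
	metrics, err := c.ListMetrics(ctx, namespace)
	if err != nil {
		return 0, err
	}
	return len(metrics), nil
}

func main() {
	n, err := countMetrics(context.Background(), mockLister{}, "AWS/ELB")
	if err != nil {
		panic(err)
	}
	fmt.Println("metrics:", n) // 2
}
```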
- assert.Equal(t, 12, len(filtered[0].metrics)) - assert.NoError(t, err) + require.Equal(t, 12, len(filtered[0].metrics)) + require.NoError(t, err) } func TestGenerateStatisticsInputParams(t *testing.T) { - d := &cloudwatch.Dimension{ + d := types.Dimension{ Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), } - m := &cloudwatch.Metric{ + namespace := "AWS/ELB" + m := types.Metric{ MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{d}, + Dimensions: []types.Dimension{d}, + Namespace: aws.String(namespace), } duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Namespace: "AWS/ELB", - Delay: internalDuration, - Period: internalDuration, + Namespaces: []string{namespace}, + Delay: internalDuration, + Period: internalDuration, } - c.initializeCloudWatch() + require.NoError(t, c.initializeCloudWatch()) now := time.Now() c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil) - queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) - params := c.getDataInputs(queries) + queries := c.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}}) + params := c.getDataInputs(queries[namespace]) - assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) require.Len(t, params.MetricDataQueries, 5) - assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) - assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) + require.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + require.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) } func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { - d := &cloudwatch.Dimension{ + d := types.Dimension{ Name: aws.String("LoadBalancerName"), Value: aws.String("p-example"), } - m := &cloudwatch.Metric{ + namespace := "AWS/ELB" + m := types.Metric{ MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{d}, + Dimensions: []types.Dimension{d}, + Namespace: aws.String(namespace), } duration, _ := time.ParseDuration("1m") internalDuration := config.Duration(duration) c := &CloudWatch{ - Namespace: "AWS/ELB", - Delay: internalDuration, - Period: internalDuration, + Namespaces: []string{namespace}, + Delay: internalDuration, + Period: internalDuration, } - c.initializeCloudWatch() + require.NoError(t, c.initializeCloudWatch()) now := time.Now() c.updateWindow(now) statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil) - queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) - params := c.getDataInputs(queries) + queries := c.getDataQueries([]filteredMetric{{metrics: []types.Metric{m}, statFilter: statFilter}}) + params := c.getDataInputs(queries[namespace]) - assert.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, *params.StartTime, now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.EndTime, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, *params.StartTime, 
now.Add(-time.Duration(c.Period)).Add(-time.Duration(c.Delay))) require.Len(t, params.MetricDataQueries, 2) - assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) - assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) + require.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + require.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) } func TestMetricsCacheTimeout(t *testing.T) { @@ -298,9 +323,9 @@ func TestMetricsCacheTimeout(t *testing.T) { ttl: time.Minute, } - assert.True(t, cache.isValid()) + require.True(t, cache.isValid()) cache.built = time.Now().Add(-time.Minute) - assert.False(t, cache.isValid()) + require.False(t, cache.isValid()) } func TestUpdateWindow(t *testing.T) { @@ -315,21 +340,46 @@ func TestUpdateWindow(t *testing.T) { now := time.Now() - assert.True(t, c.windowEnd.IsZero()) - assert.True(t, c.windowStart.IsZero()) + require.True(t, c.windowEnd.IsZero()) + require.True(t, c.windowStart.IsZero()) c.updateWindow(now) newStartTime := c.windowEnd // initial window just has a single period - assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, c.windowStart, now.Add(-time.Duration(c.Delay)).Add(-time.Duration(c.Period))) + require.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, c.windowStart, now.Add(-time.Duration(c.Delay)).Add(-time.Duration(c.Period))) now = time.Now() c.updateWindow(now) // subsequent window uses previous end time as start time - assert.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) - assert.EqualValues(t, c.windowStart, newStartTime) + require.EqualValues(t, c.windowEnd, now.Add(-time.Duration(c.Delay))) + require.EqualValues(t, c.windowStart, newStartTime) +} + +func TestProxyFunction(t *testing.T) { + c := &CloudWatch{ + HTTPProxy: proxy.HTTPProxy{ + HTTPProxyURL: "http://www.penguins.com", + }, + } + + proxyFunction, err := c.HTTPProxy.Proxy() + require.NoError(t, err) + + u, err := url.Parse("https://monitoring.us-west-1.amazonaws.com/") + require.NoError(t, err) + + proxyResult, err := proxyFunction(&http.Request{URL: u}) + require.NoError(t, err) + require.Equal(t, "www.penguins.com", proxyResult.Host) +} + +func TestCombineNamespaces(t *testing.T) { + c := &CloudWatch{Namespace: "AWS/ELB", Namespaces: []string{"AWS/EC2", "AWS/Billing"}} + + require.NoError(t, c.Init()) + require.Equal(t, []string{"AWS/EC2", "AWS/Billing", "AWS/ELB"}, c.Namespaces) } diff --git a/plugins/inputs/cloudwatch/sample.conf b/plugins/inputs/cloudwatch/sample.conf new file mode 100644 index 0000000000000..51fb6142e9473 --- /dev/null +++ b/plugins/inputs/cloudwatch/sample.conf @@ -0,0 +1,96 @@ +# Pull Metric Statistics from Amazon CloudWatch +[[inputs.cloudwatch]] + ## Amazon Region + region = "us-east-1" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # web_identity_token_file = "" + # role_session_name = "" + # profile = "" + # shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## 
determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Set http_proxy + # use_system_proxy = false + # http_proxy_url = "http://localhost:8888" + + # The minimum period for CloudWatch metrics is 1 minute (60s). However not all + # metrics are made available to the 1 minute period. Some are collected at + # 3 minute, 5 minute, or larger intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. + # Note that if a period is configured that is smaller than the minimum for a + # particular metric, that metric will not be returned by the CloudWatch API + # and will not be collected by Telegraf. + # + ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) + period = "5m" + + ## Collection Delay (required - must account for metrics availability via CloudWatch API) + delay = "5m" + + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid + ## gaps or overlap in pulled data + interval = "5m" + + ## Recommended if "delay" and "period" are both within 3 hours of request time. Invalid values will be ignored. + ## Recently Active feature will only poll for CloudWatch ListMetrics values that occurred within the last 3 Hours. + ## If enabled, it will reduce total API usage of the CloudWatch ListMetrics API and require less memory to retain. + ## Do not enable if "period" or "delay" is longer than 3 hours, as it will not return data more than 3 hours old. + ## See https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_ListMetrics.html + # recently_active = "PT3H" + + ## Configure the TTL for the internal cache of metrics. + # cache_ttl = "1h" + + ## Metric Statistic Namespaces (required) + namespaces = ["AWS/ELB"] + # A single metric statistic namespace that will be appended to namespaces on startup + # namespace = "AWS/ELB" + + ## Maximum requests per second. Note that the global default AWS rate limit is + ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 50. + ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html + # ratelimit = 25 + + ## Timeout for HTTP requests made by the CloudWatch client. + # timeout = "5s" + + ## Namespace-wide statistic filters. These allow fewer queries to be made to + ## CloudWatch. + # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] + # statistic_exclude = [] + + ## Metrics to Pull + ## Defaults to all Metrics in Namespace if nothing is provided + ## Refreshes Namespace available metrics every 1h + #[[inputs.cloudwatch.metrics]] + # names = ["Latency", "RequestCount"] + # + # ## Statistic filters for Metric. These allow for retrieving specific + # ## statistics for an individual metric. + # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] + # # statistic_exclude = [] + # + # ## Dimension filters for Metric. All dimensions defined for the metric names + # ## must be specified in order to retrieve the metric statistics. + # ## 'value' has wildcard / 'glob' matching support such as 'p-*'. 
+ # [[inputs.cloudwatch.metrics.dimensions]] + # name = "LoadBalancerName" + # value = "p-example" diff --git a/plugins/inputs/cloudwatch_metric_streams/README.md b/plugins/inputs/cloudwatch_metric_streams/README.md new file mode 100644 index 0000000000000..73c2ba859676e --- /dev/null +++ b/plugins/inputs/cloudwatch_metric_streams/README.md @@ -0,0 +1,142 @@ +# CloudWatch Metric Streams Input Plugin + +The CloudWatch Metric Streams plugin is a service input plugin that +listens for metrics sent via HTTP and performs the required +processing for +[Metric Streams from AWS](#troubleshooting-documentation). + +For cost, see the Metric Streams example in +[CloudWatch pricing](#troubleshooting-documentation). + +## Configuration + +```toml @sample.conf +[[inputs.cloudwatch_metric_streams]] + ## Address and port to host HTTP listener on + service_address = ":443" + + ## Paths to listen to. + # paths = ["/telegraf"] + + ## maximum duration before timing out read of the request + # read_timeout = "10s" + + ## maximum duration before timing out write of the response + # write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) + # max_body_size = "500MB" + + ## Optional access key for Firehose security. + # access_key = "test-key" + + ## An optional flag to keep Metric Streams metrics compatible with CloudWatch's API naming + # api_compatability = false + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +``` + +## Metrics + +Metrics sent by AWS are Base64 encoded blocks of JSON data. +The JSON block below is the Base64 decoded data in the `data` +field of a `record`. +There can be multiple blocks of JSON for each `data` field +in each `record` and there can be multiple `record` entries in +each request. + +The metric when decoded may look like this: + +```json +{ + "metric_stream_name": "sandbox-dev-cloudwatch-metric-stream", + "account_id": "541737779709", + "region": "us-west-2", + "namespace": "AWS/EC2", + "metric_name": "CPUUtilization", + "dimensions": { + "InstanceId": "i-0efc7ghy09c123428" + }, + "timestamp": 1651679580000, + "value": { + "max": 10.011666666666667, + "min": 10.011666666666667, + "sum": 10.011666666666667, + "count": 1 + }, + "unit": "Percent" +} +``` + +### Tags + +All tags in the `dimensions` list are added as tags to the metric. + +The `account_id` and `region` tags are added to each metric as well. + +### Measurements and Fields + +The metric name is a combination of `namespace` and `metric_name`, +separated by `_` and lowercased. + +Each aggregate in the `value` map (`max`, `min`, `sum`, `count`) +becomes a field. + +These fields are optionally renamed to match the CloudWatch API for +easier transition from the API to Metric Streams. This relies on +setting the `api_compatability` flag in the configuration. + +The timestamp applied is the timestamp from the metric, +typically 3-5 minutes older than the time processed due +to CloudWatch delays. 
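+
+As a quick, standalone illustration of the decoding described under
+Metrics above (this snippet is not part of the plugin; the `payload`
+value is a hypothetical stand-in for the Base64 `data` field of a
+`record`):
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+func main() {
+	// Hypothetical "data" value: Base64 of newline-separated JSON blocks.
+	payload := base64.StdEncoding.EncodeToString([]byte(
+		`{"namespace":"AWS/EC2","metric_name":"CPUUtilization","value":{"max":10.0,"min":10.0,"sum":10.0,"count":1}}` + "\n"))
+
+	decoded, err := base64.StdEncoding.DecodeString(payload)
+	if err != nil {
+		panic(err)
+	}
+
+	// Each non-empty line of the decoded data is one JSON metric block.
+	for _, line := range strings.Split(strings.TrimSpace(string(decoded)), "\n") {
+		var block map[string]interface{}
+		if err := json.Unmarshal([]byte(line), &block); err != nil {
+			panic(err)
+		}
+		fmt.Println(block["namespace"], block["metric_name"])
+	}
+}
+```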
+ +## Example Output + +Example output based on the above JSON, with and without the `api_compatability` flag: + +**Standard Metric Streams format:** + +```text +aws_ec2_cpuutilization,accountId=541737779709,region=us-west-2,InstanceId=i-0efc7ghy09c123428 max=10.011666666666667,min=10.011666666666667,sum=10.011666666666667,count=1 1651679580000 +``` + +**API Compatability format:** + +```text +aws_ec2_cpuutilization,accountId=541737779709,region=us-west-2,InstanceId=i-0efc7ghy09c123428 maximum=10.011666666666667,minimum=10.011666666666667,sum=10.011666666666667,samplecount=1 1651679580000 +``` + +## Troubleshooting + +The plugin has its own internal metrics for troubleshooting: + +* Requests Received + * The number of requests received by the listener. +* Writes Served + * The number of writes served by the listener. +* Bad Requests + * The number of bad requests, separated by the error code as a tag. +* Request Time + * The duration of the request measured in ns. +* Age Max + * The maximum age of a metric in this interval. This is useful for offsetting any lag or latency measurements in a metrics pipeline that measures based on the timestamp. +* Age Min + * The minimum age of a metric in this interval. + +Specific errors will be logged and an error will be returned to AWS. + +### Troubleshooting Documentation + +Additional troubleshooting for a Metric Stream can be found +in AWS's documentation: + +* [CloudWatch Metric Streams](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Metric-Streams.html) +* [AWS HTTP Specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html) +* [Firehose Troubleshooting](https://docs.aws.amazon.com/firehose/latest/dev/http_troubleshooting.html) +* [CloudWatch Pricing](https://aws.amazon.com/cloudwatch/pricing/) diff --git a/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go new file mode 100644 index 0000000000000..17fbd9d5b7086 --- /dev/null +++ b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams.go @@ -0,0 +1,434 @@ +package cloudwatch_metric_streams + +import ( + "compress/gzip" + "crypto/tls" + _ "embed" + "encoding/base64" + "encoding/json" + "errors" + "math" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/selfstat" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +// defaultMaxBodySize is the default maximum request body size, in bytes. +// if the request body is over this size, we will return an HTTP 413 error. 
+// 500 MB +const defaultMaxBodySize = 500 * 1024 * 1024 + +type CloudWatchMetricStreams struct { + ServiceAddress string `toml:"service_address"` + Paths []string `toml:"paths"` + MaxBodySize config.Size `toml:"max_body_size"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + AccessKey string `toml:"access_key"` + APICompatability bool `toml:"api_compatability"` + + requestsReceived selfstat.Stat + writesServed selfstat.Stat + requestTime selfstat.Stat + ageMax selfstat.Stat + ageMin selfstat.Stat + + Log telegraf.Logger + tlsint.ServerConfig + wg sync.WaitGroup + close chan struct{} + listener net.Listener + acc telegraf.Accumulator +} + +type Request struct { + RequestID string `json:"requestId"` + Timestamp int64 `json:"timestamp"` + Records []struct { + Data string `json:"data"` + } `json:"records"` +} + +type Data struct { + MetricStreamName string `json:"metric_stream_name"` + AccountID string `json:"account_id"` + Region string `json:"region"` + Namespace string `json:"namespace"` + MetricName string `json:"metric_name"` + Dimensions map[string]string `json:"dimensions"` + Timestamp int64 `json:"timestamp"` + Value map[string]float64 `json:"value"` + Unit string `json:"unit"` +} + +type Response struct { + RequestID string `json:"requestId"` + Timestamp int64 `json:"timestamp"` +} + +type age struct { + max time.Duration + min time.Duration +} + +func (*CloudWatchMetricStreams) SampleConfig() string { + return sampleConfig +} + +func (a *age) Record(t time.Duration) { + if t > a.max { + a.max = t + } + + if t < a.min { + a.min = t + } +} + +func (a *age) SubmitMax(stat selfstat.Stat) { + stat.Incr(a.max.Nanoseconds()) +} + +func (a *age) SubmitMin(stat selfstat.Stat) { + stat.Incr(a.min.Nanoseconds()) +} + +func (cms *CloudWatchMetricStreams) Description() string { + return "HTTP listener & parser for AWS Metric Streams" +} + +func (cms *CloudWatchMetricStreams) Gather(_ telegraf.Accumulator) error { + return nil +} + +// Start starts the http listener service. 
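+// It applies the optional TLS server configuration, opens the TCP listener,
+// and serves incoming requests in a background goroutine until Stop closes
+// the listener.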
+func (cms *CloudWatchMetricStreams) Start(acc telegraf.Accumulator) error { + cms.acc = acc + server := cms.createHTTPServer() + + var err error + server.TLSConfig, err = cms.ServerConfig.TLSConfig() + if err != nil { + return err + } + if server.TLSConfig != nil { + cms.listener, err = tls.Listen("tcp", cms.ServiceAddress, server.TLSConfig) + } else { + cms.listener, err = net.Listen("tcp", cms.ServiceAddress) + } + if err != nil { + return err + } + + cms.wg.Add(1) + go func() { + defer cms.wg.Done() + if err := server.Serve(cms.listener); err != nil { + if !errors.Is(err, net.ErrClosed) { + cms.Log.Errorf("Serve failed: %v", err) + } + close(cms.close) + } + }() + + cms.Log.Infof("Listening on %s", cms.listener.Addr().String()) + + return nil +} + +func (cms *CloudWatchMetricStreams) createHTTPServer() *http.Server { + return &http.Server{ + Addr: cms.ServiceAddress, + Handler: cms, + ReadTimeout: time.Duration(cms.ReadTimeout), + WriteTimeout: time.Duration(cms.WriteTimeout), + } +} + +func (cms *CloudWatchMetricStreams) ServeHTTP(res http.ResponseWriter, req *http.Request) { + cms.requestsReceived.Incr(1) + start := time.Now() + defer cms.recordRequestTime(start) + + handler := cms.serveWrite + + if !choice.Contains(req.URL.Path, cms.Paths) { + handler = http.NotFound + } + + cms.authenticateIfSet(handler, res, req) +} + +func (cms *CloudWatchMetricStreams) recordRequestTime(start time.Time) { + elapsed := time.Since(start) + cms.requestTime.Incr(elapsed.Nanoseconds()) +} + +func (cms *CloudWatchMetricStreams) serveWrite(res http.ResponseWriter, req *http.Request) { + select { + case <-cms.close: + res.WriteHeader(http.StatusGone) + return + default: + } + + defer cms.writesServed.Incr(1) + + // Check that the content length is not too large for us to handle. 
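+	// Oversized bodies are rejected up front with HTTP 413 (see tooLarge)
+	// before any gzip or JSON decoding takes place.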
+ if req.ContentLength > int64(cms.MaxBodySize) { + cms.Log.Errorf("content length exceeded maximum body size") + if err := tooLarge(res); err != nil { + cms.Log.Debugf("error in too-large: %v", err) + } + return + } + + // Check that the method is a POST + if req.Method != "POST" { + cms.Log.Errorf("incompatible request method") + if err := methodNotAllowed(res); err != nil { + cms.Log.Debugf("error in method-not-allowed: %v", err) + } + return + } + + // Decode GZIP + var body = req.Body + encoding := req.Header.Get("Content-Encoding") + + if encoding == "gzip" { + reader, err := gzip.NewReader(req.Body) + if err != nil { + cms.Log.Errorf("unable to uncompress metric-streams data: %v", err) + if err := badRequest(res); err != nil { + cms.Log.Debugf("error in bad-request: %v", err) + } + return + } + body = reader + defer reader.Close() + } + + // Decode the request + var r Request + err := json.NewDecoder(body).Decode(&r) + if err != nil { + cms.Log.Errorf("unable to decode metric-streams request: %v", err) + if err := badRequest(res); err != nil { + cms.Log.Debugf("error in bad-request: %v", err) + } + return + } + + agesInRequest := &age{max: 0, min: math.MaxInt32} + defer agesInRequest.SubmitMax(cms.ageMax) + defer agesInRequest.SubmitMin(cms.ageMin) + + // For each record, decode the base64 data and store it in a Data struct + // Metrics from Metric Streams are Base64 encoded JSON + // https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html + for _, record := range r.Records { + b, err := base64.StdEncoding.DecodeString(record.Data) + if err != nil { + cms.Log.Errorf("unable to base64 decode metric-streams data: %v", err) + if err := badRequest(res); err != nil { + cms.Log.Debugf("error in bad-request: %v", err) + } + return + } + + list := strings.Split(string(b), "\n") + + // If the last element is empty, remove it to avoid unexpected JSON + if len(list) > 0 { + if list[len(list)-1] == "" { + list = list[:len(list)-1] + } + } + + for _, js := range list { + var d Data + err = json.Unmarshal([]byte(js), &d) + if err != nil { + cms.Log.Errorf("unable to unmarshal metric-streams data: %v", err) + if err := badRequest(res); err != nil { + cms.Log.Debugf("error in bad-request: %v", err) + } + return + } + cms.composeMetrics(d) + agesInRequest.Record(time.Since(time.Unix(d.Timestamp/1000, 0))) + } + } + + // Compose the response to AWS using the request's requestId + // https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html#responseformat + response := Response{ + RequestID: r.RequestID, + Timestamp: time.Now().UnixNano() / 1000000, + } + + marshalled, err := json.Marshal(response) + if err != nil { + cms.Log.Errorf("unable to compose response: %v", err) + if err := badRequest(res); err != nil { + cms.Log.Debugf("error in bad-request: %v", err) + } + return + } + + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusOK) + _, err = res.Write(marshalled) + if err != nil { + cms.Log.Debugf("Error writing response to AWS: %s", err.Error()) + return + } +} + +func (cms *CloudWatchMetricStreams) composeMetrics(data Data) { + fields := make(map[string]interface{}) + tags := make(map[string]string) + timestamp := time.Unix(data.Timestamp/1000, 0) + + namespace := strings.Replace(data.Namespace, "/", "_", -1) + measurement := strings.ToLower(namespace + "_" + data.MetricName) + + for field, value := range data.Value { + fields[field] = value + } + + // Rename Statistics to match the CloudWatch API if in API 
Compatability mode + if cms.APICompatability { + max, ok := fields["max"] + if ok { + fields["maximum"] = max + delete(fields, "max") + } + + min, ok := fields["min"] + if ok { + fields["minimum"] = min + delete(fields, "min") + } + + count, ok := fields["count"] + if ok { + fields["samplecount"] = count + delete(fields, "count") + } + } + + tags["accountId"] = data.AccountID + tags["region"] = data.Region + + for dimension, value := range data.Dimensions { + tags[dimension] = value + } + + cms.acc.AddFields(measurement, fields, tags, timestamp) +} + +func tooLarge(res http.ResponseWriter) error { + tags := map[string]string{ + "status_code": strconv.Itoa(http.StatusRequestEntityTooLarge), + } + selfstat.Register("cloudwatch_metric_streams", "bad_requests", tags).Incr(1) + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusRequestEntityTooLarge) + _, err := res.Write([]byte(`{"error":"http: request body too large"}`)) + return err +} + +func methodNotAllowed(res http.ResponseWriter) error { + tags := map[string]string{ + "status_code": strconv.Itoa(http.StatusMethodNotAllowed), + } + selfstat.Register("cloudwatch_metric_streams", "bad_requests", tags).Incr(1) + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusMethodNotAllowed) + _, err := res.Write([]byte(`{"error":"http: method not allowed"}`)) + return err +} + +func badRequest(res http.ResponseWriter) error { + tags := map[string]string{ + "status_code": strconv.Itoa(http.StatusBadRequest), + } + selfstat.Register("cloudwatch_metric_streams", "bad_requests", tags).Incr(1) + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusBadRequest) + _, err := res.Write([]byte(`{"error":"http: bad request"}`)) + return err +} + +func (cms *CloudWatchMetricStreams) authenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { + if cms.AccessKey != "" { + auth := req.Header.Get("X-Amz-Firehose-Access-Key") + if auth == "" || auth != cms.AccessKey { + http.Error(res, "Unauthorized.", http.StatusUnauthorized) + return + } + handler(res, req) + } else { + handler(res, req) + } +} + +// Stop cleans up all resources +func (cms *CloudWatchMetricStreams) Stop() { + if cms.listener != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive + cms.listener.Close() + } + cms.wg.Wait() +} + +func (cms *CloudWatchMetricStreams) Init() error { + tags := map[string]string{ + "address": cms.ServiceAddress, + } + cms.requestsReceived = selfstat.Register("cloudwatch_metric_streams", "requests_received", tags) + cms.writesServed = selfstat.Register("cloudwatch_metric_streams", "writes_served", tags) + cms.requestTime = selfstat.Register("cloudwatch_metric_streams", "request_time", tags) + cms.ageMax = selfstat.Register("cloudwatch_metric_streams", "age_max", tags) + cms.ageMin = selfstat.Register("cloudwatch_metric_streams", "age_min", tags) + + if cms.MaxBodySize == 0 { + cms.MaxBodySize = config.Size(defaultMaxBodySize) + } + + if cms.ReadTimeout < config.Duration(time.Second) { + cms.ReadTimeout = config.Duration(time.Second * 10) + } + + if cms.WriteTimeout < config.Duration(time.Second) { + cms.WriteTimeout = config.Duration(time.Second * 10) + } + + return nil +} + +func init() { + inputs.Add("cloudwatch_metric_streams", func() telegraf.Input { + return &CloudWatchMetricStreams{ + ServiceAddress: ":443", + Paths: []string{"/telegraf"}, + } + }) +} diff --git 
a/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams_test.go b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams_test.go new file mode 100644 index 0000000000000..9e07018336a48 --- /dev/null +++ b/plugins/inputs/cloudwatch_metric_streams/cloudwatch_metric_streams_test.go @@ -0,0 +1,397 @@ +package cloudwatch_metric_streams + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "net/http" + "net/url" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +const ( + badMsg = "blahblahblah: 42\n" + emptyMsg = "" + accessKey = "super-secure-password!" + badAccessKey = "super-insecure-password!" + maxBodySize = 524288000 +) + +var ( + pki = testutil.NewPKI("../../../testutil/pki") +) + +func newTestCloudWatchMetricStreams() *CloudWatchMetricStreams { + metricStream := &CloudWatchMetricStreams{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:8080", + Paths: []string{"/write"}, + MaxBodySize: config.Size(maxBodySize), + close: make(chan struct{}), + } + return metricStream +} + +func newTestMetricStreamAuth() *CloudWatchMetricStreams { + metricStream := newTestCloudWatchMetricStreams() + metricStream.AccessKey = accessKey + return metricStream +} + +func newTestMetricStreamHTTPS() *CloudWatchMetricStreams { + metricStream := newTestCloudWatchMetricStreams() + metricStream.ServerConfig = *pki.TLSServerConfig() + + return metricStream +} + +func newTestCompatibleCloudWatchMetricStreams() *CloudWatchMetricStreams { + metricStream := newTestCloudWatchMetricStreams() + metricStream.APICompatability = true + return metricStream +} + +func getHTTPSClient() *http.Client { + tlsConfig, err := pki.TLSClientConfig().TLSConfig() + if err != nil { + panic(err) + } + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } +} + +func createURL(metricStream *CloudWatchMetricStreams, scheme string, path string, rawquery string) string { + u := url.URL{ + Scheme: scheme, + Host: "localhost:8080", + Path: path, + RawQuery: rawquery, + } + return u.String() +} + +func TestInvalidListenerConfig(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + metricStream.ServiceAddress = "address_without_port" + + acc := &testutil.Accumulator{} + require.Error(t, metricStream.Start(acc)) + + // Stop is called when any ServiceInput fails to start; it must succeed regardless of state + metricStream.Stop() +} + +func TestWriteHTTPSNoClientAuth(t *testing.T) { + metricStream := newTestMetricStreamHTTPS() + metricStream.TLSAllowedCACerts = nil + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + cas := x509.NewCertPool() + cas.AppendCertsFromPEM([]byte(pki.ReadServerCert())) + noClientAuthClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: cas, + }, + }, + } + + // post single message to the metric stream listener + resp, err := noClientAuthClient.Post(createURL(metricStream, "https", "/write", ""), "", bytes.NewBuffer([]byte(record))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 200, resp.StatusCode) +} + +func TestWriteHTTPSWithClientAuth(t *testing.T) { + metricStream := newTestMetricStreamHTTPS() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // 
post single message to the metric stream listener + resp, err := getHTTPSClient().Post(createURL(metricStream, "https", "/write", ""), "", bytes.NewBuffer([]byte(record))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 200, resp.StatusCode) +} + +func TestWriteHTTPSuccessfulAuth(t *testing.T) { + metricStream := newTestMetricStreamAuth() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + client := &http.Client{} + + req, err := http.NewRequest("POST", createURL(metricStream, "http", "/write", ""), bytes.NewBuffer([]byte(record))) + require.NoError(t, err) + req.Header.Set("X-Amz-Firehose-Access-Key", accessKey) + + // post single message to the metric stream listener + resp, err := client.Do(req) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, http.StatusOK, resp.StatusCode) +} + +func TestWriteHTTPFailedAuth(t *testing.T) { + metricStream := newTestMetricStreamAuth() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + client := &http.Client{} + + req, err := http.NewRequest("POST", createURL(metricStream, "http", "/write", ""), bytes.NewBuffer([]byte(record))) + require.NoError(t, err) + req.Header.Set("X-Amz-Firehose-Access-Key", badAccessKey) + + // post single message to the metric stream listener + resp, err := client.Do(req) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, http.StatusUnauthorized, resp.StatusCode) +} + +func TestWriteHTTP(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // post single message to the metric stream listener + resp, err := http.Post(createURL(metricStream, "http", "/write", ""), "", bytes.NewBuffer([]byte(record))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 200, resp.StatusCode) +} + +func TestWriteHTTPMultipleRecords(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // post multiple records to the metric stream listener + resp, err := http.Post(createURL(metricStream, "http", "/write", ""), "", bytes.NewBuffer([]byte(records))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 200, resp.StatusCode) +} + +func TestWriteHTTPExactMaxBodySize(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + metricStream.MaxBodySize = config.Size(len(record)) + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // post single message to the metric stream listener + resp, err := http.Post(createURL(metricStream, "http", "/write", ""), "", bytes.NewBuffer([]byte(record))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 200, resp.StatusCode) +} + +func TestWriteHTTPVerySmallMaxBody(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + metricStream.MaxBodySize = config.Size(512) + + acc := &testutil.Accumulator{} + require.NoError(t, 
metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // post single message to the metric stream listener + resp, err := http.Post(createURL(metricStream, "http", "/write", ""), "", bytes.NewBuffer([]byte(record))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 413, resp.StatusCode) +} + +func TestReceive404ForInvalidEndpoint(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // post single message to the metric stream listener + resp, err := http.Post(createURL(metricStream, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(record))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 404, resp.StatusCode) +} + +func TestWriteHTTPInvalid(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // post a badly formatted message to the metric stream listener + resp, err := http.Post(createURL(metricStream, "http", "/write", ""), "", bytes.NewBuffer([]byte(badMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 400, resp.StatusCode) +} + +func TestWriteHTTPEmpty(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // post empty message to the metric stream listener + resp, err := http.Post(createURL(metricStream, "http", "/write", ""), "", bytes.NewBuffer([]byte(emptyMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 400, resp.StatusCode) +} + +func TestComposeMetrics(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // compose a Data object for writing + data := Data{ + MetricStreamName: "cloudwatch-metric-stream", + AccountID: "546734499701", + Region: "us-west-2", + Namespace: "AWS/EC2", + MetricName: "CPUUtilization", + Dimensions: map[string]string{"AutoScalingGroupName": "test-autoscaling-group"}, + Timestamp: 1651679400000, + Value: map[string]float64{"max": 0.4366666666666666, "min": 0.3683333333333333, "sum": 1.9399999999999997, "count": 5.0}, + Unit: "Percent", + } + + // Compose the metrics from data + metricStream.composeMetrics(data) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "aws_ec2_cpuutilization", + map[string]interface{}{"max": 0.4366666666666666, "min": 0.3683333333333333, "sum": 1.9399999999999997, "count": 5.0}, + map[string]string{"AutoScalingGroupName": "test-autoscaling-group", "accountId": "546734499701", "region": "us-west-2"}, + ) +} + +func TestComposeAPICompatibleMetrics(t *testing.T) { + metricStream := newTestCompatibleCloudWatchMetricStreams() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + // compose a Data object for writing + data := Data{ + MetricStreamName: "cloudwatch-metric-stream", + AccountID: "546734499701", + Region: "us-west-2", + Namespace: "AWS/EC2", 
+ MetricName: "CPUUtilization", + Dimensions: map[string]string{"AutoScalingGroupName": "test-autoscaling-group"}, + Timestamp: 1651679400000, + Value: map[string]float64{"max": 0.4366666666666666, "min": 0.3683333333333333, "sum": 1.9399999999999997, "count": 5.0}, + Unit: "Percent", + } + + // Compose the metrics from data + metricStream.composeMetrics(data) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "aws_ec2_cpuutilization", + map[string]interface{}{"maximum": 0.4366666666666666, "minimum": 0.3683333333333333, "sum": 1.9399999999999997, "samplecount": 5.0}, + map[string]string{"AutoScalingGroupName": "test-autoscaling-group", "accountId": "546734499701", "region": "us-west-2"}, + ) +} + +// post GZIP encoded data to the metric stream listener +func TestWriteHTTPGzippedData(t *testing.T) { + metricStream := newTestCloudWatchMetricStreams() + + acc := &testutil.Accumulator{} + require.NoError(t, metricStream.Init()) + require.NoError(t, metricStream.Start(acc)) + defer metricStream.Stop() + + data, err := os.ReadFile("./testdata/records.gz") + require.NoError(t, err) + + req, err := http.NewRequest("POST", createURL(metricStream, "http", "/write", ""), bytes.NewBuffer(data)) + require.NoError(t, err) + req.Header.Set("Content-Encoding", "gzip") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 200, resp.StatusCode) +} + +const record = `{ + "requestId": "c8291d2e-8c46-4f2a-a8df-2562550287ad", + "timestamp": 1651679861072, + "records": [ + { + "data": "eyJtZXRyaWNfc3RyZWFtX25hbWUiOiJncnBuLXNhbmRib3gtZGV2LWNsb3Vkd2F0Y2gtbWV0cmljLXN0cmVhbSIsImFjY291bnRfaWQiOiI1NDk3MzQzOTk3MDkiLCJyZWdpb24iOiJ1cy13ZXN0LTIiLCJuYW1lc3BhY2UiOiJBV1MvRUMyIiwibWV0cmljX25hbWUiOiJDUFVVdGlsaXphdGlvbiIsImRpbWVuc2lvbnMiOnsiSW5zdGFuY2VJZCI6ImktMGVmYzdmZGYwOWMxMjM0MjgifSwidGltZXN0YW1wIjoxNjUxNjc5NTgwMDAwLCJ2YWx1ZSI6eyJtYXgiOjEwLjAxMTY2NjY2NjY2NjY2NywibWluIjoxMC4wMTE2NjY2NjY2NjY2NjcsInN1bSI6MTAuMDExNjY2NjY2NjY2NjY3LCJjb3VudCI6MS4wfSwidW5pdCI6IlBlcmNlbnQifQ==" + } + ] + } +` + +const records = `{ + "requestId": "c8291d2e-8c46-4f2a-a8df-2562550287ad", + "timestamp": 1651679861072, + "records": [ + { + "data": "{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":3.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0cbbc7e021a19be2e"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679340000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":499.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":9421725.0,"min":9421725.0,"sum":9421725.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"m5n.2xlarge"},"timestamp":1651679640000,"value":{"max":6.0,"min":0.0,"sum":6.0,"count":2.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679340000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":8.77130197E8,"min":3.597669E7,"sum":9.13106887E8,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":2009411.0,"min":1682187.0,"sum":9042632.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":3534584.0,"min":2665334.0,"sum":1.5898017E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0a2a9750f4427abe7"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":2520576.0,"min":1998336.0,"sum":1.1177984E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":5055203.0,"min":5055203.0,"sum":5055203.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679400000,"value":{"max":18395.0,"min":16113.0,"sum":85901.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":762.0,"min":762.0,"sum":3810.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679580000,"value":{"max":1.93087E7,"min":1.93087E7,"sum":1.93087E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":41.0,"min":30.0,"sum":174.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":123.0,"min":119.0,"sum":242.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":8109835.0,"min":8109835.0,"sum":8109835.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":3.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":371.0,"min":267.0,"sum":1491.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m5.4xlarge"},"timestamp":1651679580000,"value":{"max":5.5249999999999995,"min":5.5249999999999995,"sum":5.5249999999999995,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":2357760.0,"min":1806848.0,"sum":1.0000384E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679340000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679400000,"value":{"max":10136.0,"min":8629.0,"sum":46723.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditsCharged","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":3935092.0,"min":3935092.0,"sum":3935092.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679640000,"value":{"max":8.77130197E8,"min":8.77130197E8,"sum":8.77130197E8,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":3046400.0,"min":1566208.0,"sum":1.0927104E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":437.0,"min":410.0,"sum":2106.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"m5n.8xlarge"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679400000,"value":{"max":35137.0,"min":34604.0,"sum":174750.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":458752.0,"min":0.0,"sum":458752.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679340000,"value":{"max":14353.0,"min":11173.0,"sum":62911.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"t2.micro"},"timestamp":1651679580000,"value":{"max":2.06896551724263,"min":2.06896551724263,"sum":2.06896551724263,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":5753273.0,"min":5753273.0,"sum":5753273.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":16.563333333333333,"min":16.563333333333333,"sum":16.563333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679640000,"value":{"max":14.726421226312892,"min":14.726421226312892,"sum":14.726421226312892,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":286.0,"min":286.0,"sum":286.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679400000,"value":{"max":18395.0,"min":16113.0,"sum":85901.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditUsage","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679400000,"value":{"max":0.005495,"min":0.005495,"sum":0.005495,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":0.4866666666666667,"min":0.4266595556740721,"sum":2.2533262223407386,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":8842.0,"min":7379.0,"sum":39478.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":5.096501E7,"min":2809264.0,"sum":2.0422487E8,"count":16.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679580000,"value":{"max":5.5249999999999995,"min":5.5249999999999995,"sum":5.5249999999999995,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679580000,"value":{"max":1.0818748E7,"min":1.0818748E7,"sum":1.0818748E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":8109835.0,"min":4409059.0,"sum":1.2518894E7,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":14.726421226312892,"min":5.285,"sum":20.01142122631289,"count":2.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"ImageId":"ami-0940babbb54d69874"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679640000,"value":{"max":3.90386688E8,"min":3.90386688E8,"sum":3.90386688E8,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":7.0,"min":7.0,"sum":7.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m5.2xlarge"},"timestamp":1651679640000,"value":{"max":4409059.0,"min":2809264.0,"sum":7218323.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":3077288.0,"min":1945655.0,"sum":1.1540728E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":648054.0,"min":297531.0,"sum":1889831.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{},"timestamp":1651679640000,"value":{"max":5.096501E7,"min":147506.0,"sum":2.04372376E8,"count":17.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":496.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":320.0,"min":320.0,"sum":320.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":2567309.0,"min":1980599.0,"sum":1.074249E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":0.4866666666666667,"min":0.4266595556740721,"sum":2.2533262223407386,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":762.0,"min":762.0,"sum":3810.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":3046400.0,"min":1566208.0,"sum":1.0927104E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":8.49576271185947,"min":5.071666666666666,"sum":19.17576271185947,"count":3.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679580000,"value":{"max":1.49877E7,"min":1.49877E7,"sum":1.49877E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":3.373333333333333,"min":3.373333333333333,"sum":3.373333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":7.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":2.1287936E7,"min":1694208.0,"sum":2.9919744E7,"count":3.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679400000,"value":{"max":42959.0,"min":25416.0,"sum":149133.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":4380540.0,"min":3187890.0,"sum":1.8409361E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":0.4583333333333333,"min":0.4333333333333333,"sum":2.2416666666666663,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679400000,"value":{"max":12252.0,"min":10185.0,"sum":56661.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":10602.0,"min":9250.0,"sum":48210.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":497.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m4.2xlarge"},"timestamp":1651679580000,"value":{"max":8.42213114753621,"min":5.75000000000007,"sum":14.17213114753628,"count":2.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":869449.0,"min":699094.0,"sum":3691215.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679640000,"value":{"max":5254.0,"min":5254.0,"sum":5254.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":6.0,"min":0.0,"sum":6.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":612.0,"min":265.0,"sum":1988.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m5.2xlarge"},"timestamp":1651679640000,"value":{"max":8.77130197E8,"min":1.0640164E7,"sum":8.87770361E8,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":50361.0,"min":50361.0,"sum":50361.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679400000,"value":{"max":35137.0,"min":34604.0,"sum":174750.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679400000,"value":{"max":29712.0,"min":27251.0,"sum":140472.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":5254.0,"min":0.0,"sum":5254.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":15.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679400000,"value":{"max":54618.0,"min":53618.0,"sum":270052.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":1.135,"min":1.0616489725171248,"sum":5.475000722824074,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"m5n.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":8915.0,"min":7338.0,"sum":38903.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":99.0,"min":0.0,"sum":294.0,"count":15.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":3905024.0,"min":0.0,"sum":3905024.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679640000,"value":{"max":9198080.0,"min":9198080.0,"sum":9198080.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":5.071666666666666,"min":5.071666666666666,"sum":5.071666666666666,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"m5.4xlarge"},"timestamp":1651679640000,"value":{"max":3825152.0,"min":3825152.0,"sum":3825152.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":624613.0,"min":311210.0,"sum":1904884.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"ImageId":"ami-0940babbb54d69874"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":2520576.0,"min":1998336.0,"sum":1.1177984E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":7.0,"min":0.0,"sum":7.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":865640.0,"min":691932.0,"sum":3680170.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":2150912.0,"min":1775104.0,"sum":9355776.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679580000,"value":{"max":5.75000000000007,"min":5.75000000000007,"sum":5.75000000000007,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m5a.2xlarge"},"timestamp":1651679640000,"value":{"max":5.608333333333333,"min":5.608333333333333,"sum":5.608333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":8.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679400000,"value":{"max":7815.0,"min":6629.0,"sum":35607.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":497.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":7565824.0,"min":7565824.0,"sum":7565824.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditBalance","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679400000,"value":{"max":144.0,"min":144.0,"sum":144.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":577536.0,"min":0.0,"sum":1398272.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":119.0,"min":119.0,"sum":119.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceType":"m4.2xlarge"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0cbbc7e021a19be2e"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"ImageId":"ami-0940babbb54d69874"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":8433664.0,"min":0.0,"sum":1.1728384E7,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679400000,"value":{"max":21194.0,"min":20914.0,"sum":105449.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679340000,"value":{"max":0.169491525423719,"min":0.0,"sum":0.502824858757033,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"m5n.2xlarge"},"timestamp":1651679640000,"value":{"max":7565824.0,"min":7049728.0,"sum":1.4615552E7,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":286.0,"min":286.0,"sum":286.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679400000,"value":{"max":7815.0,"min":6629.0,"sum":35607.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":8.9841664E7,"min":8.9841664E7,"sum":8.9841664E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":901.0,"min":67.0,"sum":1538.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":2225.0,"min":1548.0,"sum":8474.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679640000,"value":{"max":3502829.0,"min":3502829.0,"sum":3502829.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679340000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":0.4416666666666667,"min":0.40833333333333327,"sum":2.1166666666666667,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0a2a9750f4427abe7"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":901.0,"min":901.0,"sum":901.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":4720.0,"min":2058.0,"sum":13629.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":383.0,"min":265.0,"sum":1584.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":3.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditBalance","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":8842.0,"min":7379.0,"sum":39478.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":496.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":624613.0,"min":311210.0,"sum":1904884.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditUsage","dimensions":{"InstanceId":"i-0cbbc7e021a19be2e"},"timestamp":1651679400000,"value":{"max":0.05867,"min":0.05867,"sum":0.05867,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":1496064.0,"min":1496064.0,"sum":1496064.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679580000,"value":{"max":3406678.0,"min":3406678.0,"sum":3406678.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{},"timestamp":1651679640000,"value":{"max":3.90386688E8,"min":0.0,"sum":3.90976512E8,"count":20.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":1.0,"min":1.0,"sum":1.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"m5a.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":160.0,"min":160.0,"sum":160.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":2.1287936E7,"min":1496064.0,"sum":4.5969408E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":3.90386688E8,"min":0.0,"sum":3.90976512E8,"count":20.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0a2a9750f4427abe7"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":8915.0,"min":7338.0,"sum":38903.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceType":"t2.micro"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":3.1478885E7,"min":3.1478885E7,"sum":3.1478885E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":3675136.0,"min":3675136.0,"sum":3675136.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679340000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditUsage","dimensions":{"InstanceId":"i-0a2a9750f4427abe7"},"timestamp":1651679400000,"value":{"max":0.057749,"min":0.057749,"sum":0.057749,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceType":"m4.2xlarge"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":2480.0,"count":25.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":6.0,"min":6.0,"sum":30.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":2772480.0,"min":1694208.0,"sum":4466688.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679340000,"value":{"max":1.0,"min":1.0,"sum":5.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":7441.0,"min":6527.0,"sum":35200.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":0.4383333333333333,"min":0.3933333333333333,"sum":2.0833333333333335,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":3.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679400000,"value":{"max":9279.0,"min":7861.0,"sum":43397.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":8.0,"min":8.0,"sum":8.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":495.0,"min":401.0,"sum":2153.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":10602.0,"min":9250.0,"sum":48210.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":6.0,"min":6.0,"sum":30.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":7899935.0,"min":7899935.0,"sum":7899935.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditsCharged","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":0.945,"min":0.8716666666666666,"sum":4.481666666666667,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"c5n.9xlarge"},"timestamp":1651679640000,"value":{"max":3935092.0,"min":3935092.0,"sum":3935092.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":499.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":6937600.0,"min":6937600.0,"sum":6937600.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679580000,"value":{"max":23.813333333333333,"min":23.813333333333333,"sum":23.813333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":516.0,"min":395.0,"sum":2179.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":15.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m5n.8xlarge"},"timestamp":1651679640000,"value":{"max":5055203.0,"min":5055203.0,"sum":5055203.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":15.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":2357760.0,"min":1806848.0,"sum":1.0000384E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":131072.0,"min":131072.0,"sum":131072.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":3825152.0,"min":3825152.0,"sum":3825152.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":101.0,"min":0.0,"sum":212.0,"count":15.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"c5n.9xlarge"},"timestamp":1651679640000,"value":{"max":1514.0,"min":1514.0,"sum":1514.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"ImageId":"ami-0940babbb54d69874"},"timestamp":1651679580000,"value":{"max":92504.0,"min":92504.0,"sum":92504.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":496.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":3.1478885E7,"min":3.1478885E7,"sum":3.1478885E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679340000,"value":{"max":33389.0,"min":21745.0,"sum":131619.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":1.0971367E7,"min":1.0971367E7,"sum":1.0971367E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"m5.2xlarge"},"timestamp":1651679640000,"value":{"max":2.549751808E9,"min":9198080.0,"sum":2.558949888E9,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{},"timestamp":1651679640000,"value":{"max":8.77130197E8,"min":51159.0,"sum":1.141072761E9,"count":17.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m4.2xlarge"},"timestamp":1651679580000,"value":{"max":1.49877E7,"min":1.0818748E7,"sum":2.5806448E7,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":4380540.0,"min":3187890.0,"sum":1.8409361E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"c5n.9xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":496.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":458752.0,"min":458752.0,"sum":458752.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679340000,"value":{"max":9.13793103451287,"min":1.69491525423729,"sum":16.213597214119492,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":383.0,"min":265.0,"sum":1584.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":3675136.0,"min":3675136.0,"sum":3675136.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0a2a9750f4427abe7"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"m5.2xlarge"},"timestamp":1651679640000,"value":{"max":5254.0,"min":0.0,"sum":5254.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":5.096501E7,"min":6104036.0,"sum":7.0718361E7,"count":3.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"m5n.8xlarge"},"timestamp":1651679640000,"value":{"max":8433664.0,"min":0.0,"sum":1.1728384E7,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m5.2xlarge"},"timestamp":1651679640000,"value":{"max":14.726421226312892,"min":8.754999999999999,"sum":23.48142122631289,"count":2.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679640000,"value":{"max":3.5049999999999994,"min":3.5049999999999994,"sum":3.5049999999999994,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679580000,"value":{"max":3.358333333333333,"min":3.358333333333333,"sum":3.358333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":7.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":0.7333333333333333,"min":0.4166666666666666,"sum":2.558342916826391,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"m5n.8xlarge"},"timestamp":1651679640000,"value":{"max":315.0,"min":0.0,"sum":371.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":2.663333333333333,"min":2.663333333333333,"sum":2.663333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679400000,"value":{"max":25485.0,"min":9155.0,"sum":74011.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679340000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceType":"m4.2xlarge"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":131072.0,"min":131072.0,"sum":131072.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"c5.9xlarge"},"timestamp":1651679640000,"value":{"max":308.0,"min":0.0,"sum":308.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":1496064.0,"min":1496064.0,"sum":1496064.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":27233.0,"min":169.0,"sum":27402.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679580000,"value":{"max":6.035,"min":6.035,"sum":6.035,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0cbbc7e021a19be2e"},"timestamp":1651679580000,"value":{"max":5.0,"min":5.0,"sum":5.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":315.0,"min":0.0,"sum":371.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":92.0,"min":0.0,"sum":201.0,"count":15.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679580000,"value":{"max":2.8087745E7,"min":2.8087745E7,"sum":2.8087745E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m5a.2xlarge"},"timestamp":1651679580000,"value":{"max":7784805.0,"min":7784805.0,"sum":7784805.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m5.4xlarge"},"timestamp":1651679640000,"value":{"max":7899935.0,"min":7899935.0,"sum":7899935.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679640000,"value":{"max":5055203.0,"min":5055203.0,"sum":5055203.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679400000,"value":{"max":13284.0,"min":12457.0,"sum":64556.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":5.096501E7,"min":5.096501E7,"sum":5.096501E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679640000,"value":{"max":2.549751808E9,"min":2.549751808E9,"sum":2.549751808E9,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":631022.0,"min":320118.0,"sum":1956814.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m5n.2xlarge"},"timestamp":1651679640000,"value":{"max":8109835.0,"min":5234073.0,"sum":1.3343908E7,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679400000,"value":{"max":18008.0,"min":15642.0,"sum":83688.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":7441.0,"min":6527.0,"sum":35200.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":496.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":4636640.0,"min":3935092.0,"sum":8571732.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":547328.0,"min":0.0,"sum":1346048.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679400000,"value":{"max":16874.0,"min":13835.0,"sum":76950.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{},"timestamp":1651679640000,"value":{"max":5254.0,"min":0.0,"sum":5262.0,"count":20.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":169.0,"min":169.0,"sum":169.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":1650.0,"min":1650.0,"sum":8250.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"m5a.2xlarge"},"timestamp":1651679640000,"value":{"max":2772480.0,"min":2772480.0,"sum":2772480.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679580000,"value":{"max":10.011666666666667,"min":10.011666666666667,"sum":10.011666666666667,"count":1.0},"unit":"Percent"}
" + }, + { + "data": "{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0de4941b77e817a13"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-06d0e7fd61dff234a"},"timestamp":1651679580000,"value":{"max":60.0,"min":60.0,"sum":60.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-062b0d51a311d325c"},"timestamp":1651679580000,"value":{"max":59.95,"min":59.95,"sum":59.95,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0225681d9409b6a5c"},"timestamp":1651679640000,"value":{"max":0.250789,"min":0.250789,"sum":0.250789,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0de4941b77e817a13"},"timestamp":1651679640000,"value":{"max":320.0,"min":320.0,"sum":320.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0e6cb3bb1fa5d0a4a"},"timestamp":1651679640000,"value":{"max":0.314007,"min":0.314007,"sum":0.314007,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":0.029599,"min":0.029599,"sum":0.029599,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0f39343b34261fce4"},"timestamp":1651679640000,"value":{"max":0.007667,"min":0.007667,"sum":0.007667,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":60.092946,"min":60.092946,"sum":60.092946,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":371.0,"min":371.0,"sum":371.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0fa4d4ec5763824a0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0031d2fae5d6173ca"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-09aadec74cd779f49"},"timestamp":1651679640000,"value":{"max":5.553364,"min":5.553364,"sum":5.553364,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0b0599dd4d14127a1"},"timestamp":1651679580000,"value":{"max":1.66666666666667E-4,"min":1.66666666666667E-4,"sum":1.66666666666667E-4,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0141691131493b6ce"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-081a322538b797ec0"},"timestamp":1651679640000,"value":{"max":0.126106,"min":0.126106,"sum":0.126106,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-054650c08387fbacc"},"timestamp":1651679640000,"value":{"max":59.959377,"min":59.959377,"sum":59.959377,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":0.253456,"min":0.253456,"sum":0.253456,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0d4a61f5e443c76db"},"timestamp":1651679640000,"value":{"max":59.983183,"min":59.983183,"sum":59.983183,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0b0599dd4d14127a1"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.01,"count":15.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-000928c7d42a37f9b"},"timestamp":1651679640000,"value":{"max":0.099924,"min":0.099924,"sum":0.099924,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-081a322538b797ec0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-081a322538b797ec0"},"timestamp":1651679640000,"value":{"max":0.001953125,"min":0.001953125,"sum":0.001953125,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":60.133701,"min":60.133701,"sum":60.133701,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-03565f5cfb3a07bfa"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-054650c08387fbacc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0225681d9409b6a5c"},"timestamp":1651679640000,"value":{"max":0.001215,"min":0.001215,"sum":0.001215,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-08a641cdbe7464fd8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0dc807c09faeb31d3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0de4941b77e817a13"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0de4941b77e817a13"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0c127e7b050031266"},"timestamp":1651679580000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0f39343b34261fce4"},"timestamp":1651679640000,"value":{"max":7.0,"min":7.0,"sum":7.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0151f3979c0ff109b"},"timestamp":1651679640000,"value":{"max":0.141003,"min":0.141003,"sum":0.141003,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-000928c7d42a37f9b"},"timestamp":1651679640000,"value":{"max":123.0,"min":123.0,"sum":123.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0c364d1ce71aa41d9"},"timestamp":1651679640000,"value":{"max":60.021456,"min":60.021456,"sum":60.021456,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":0.005859375,"min":0.005859375,"sum":0.005859375,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0fa4d4ec5763824a0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":60.171109,"min":60.171109,"sum":60.171109,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-07bb1f6e9465aa26d"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0141691131493b6ce"},"timestamp":1651679580000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0151f3979c0ff109b"},"timestamp":1651679640000,"value":{"max":59.966875,"min":59.966875,"sum":59.966875,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679640000,"value":{"max":59.91,"min":59.91,"sum":59.91,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679580000,"value":{"max":59.86,"min":59.86,"sum":59.86,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0c127e7b050031266"},"timestamp":1651679580000,"value":{"max":0.003,"min":0.003,"sum":0.003,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-062b0d51a311d325c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0031d2fae5d6173ca"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0141691131493b6ce"},"timestamp":1651679580000,"value":{"max":59.87,"min":59.87,"sum":59.87,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-06d0e7fd61dff234a"},"timestamp":1651679580000,"value":{"max":1.66666666666667E-4,"min":1.66666666666667E-4,"sum":1.66666666666667E-4,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-000928c7d42a37f9b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0e6cb3bb1fa5d0a4a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0031d2fae5d6173ca"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0141691131493b6ce"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":5971456.0,"count":203.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0fa4d4ec5763824a0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-062b0d51a311d325c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":167424.0,"count":33.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-06d0e7fd61dff234a"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.01,"count":60.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":0.0029296875,"min":0.0029296875,"sum":0.0029296875,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0dc807c09faeb31d3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0e6cb3bb1fa5d0a4a"},"timestamp":1651679640000,"value":{"max":308.0,"min":308.0,"sum":308.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0d4a61f5e443c76db"},"timestamp":1651679640000,"value":{"max":9.765625E-4,"min":9.765625E-4,"sum":9.765625E-4,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0225681d9409b6a5c"},"timestamp":1651679640000,"value":{"max":0.00390625,"min":0.00390625,"sum":0.00390625,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679580000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-091b03752c38ef352"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":152064.0,"count":21.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-000928c7d42a37f9b"},"timestamp":1651679640000,"value":{"max":59.991084,"min":59.991084,"sum":59.991084,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-08a641cdbe7464fd8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":0.375822,"min":0.375822,"sum":0.375822,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0748c5fbb31150cc3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":60.165428,"min":60.165428,"sum":60.165428,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0852deaa95789749e"},"timestamp":1651679640000,"value":{"max":59.945323,"min":59.945323,"sum":59.945323,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-03565f5cfb3a07bfa"},"timestamp":1651679640000,"value":{"max":0.142198,"min":0.142198,"sum":0.142198,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":60.171108,"min":60.171108,"sum":60.171108,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-091b03752c38ef352"},"timestamp":1651679580000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-062b0d51a311d325c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.022,"count":33.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0b0599dd4d14127a1"},"timestamp":1651679580000,"value":{"max":15.0,"min":15.0,"sum":15.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":60.092946,"min":60.092946,"sum":60.092946,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-07bb1f6e9465aa26d"},"timestamp":1651679640000,"value":{"max":1514.0,"min":1514.0,"sum":1514.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":588800.0,"count":107.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":462336.0,"count":79.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-03565f5cfb3a07bfa"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0c364d1ce71aa41d9"},"timestamp":1651679640000,"value":{"max":137.0,"min":137.0,"sum":137.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0748c5fbb31150cc3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0141691131493b6ce"},"timestamp":1651679580000,"value":{"max":203.0,"min":203.0,"sum":203.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0d4a61f5e443c76db"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":0.219896,"min":0.219896,"sum":0.219896,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0de4941b77e817a13"},"timestamp":1651679640000,"value":{"max":0.00390625,"min":0.00390625,"sum":0.00390625,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":59.926143,"min":59.926143,"sum":59.926143,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":59.834087,"min":59.834087,"sum":59.834087,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0151f3979c0ff109b"},"timestamp":1651679640000,"value":{"max":8433664.0,"min":8433664.0,"sum":8433664.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":403968.0,"min":403968.0,"sum":403968.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-054650c08387fbacc"},"timestamp":1651679640000,"value":{"max":0.161657,"min":0.161657,"sum":0.161657,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":17.0,"min":17.0,"sum":17.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-09aadec74cd779f49"},"timestamp":1651679640000,"value":{"max":112.892409,"min":112.892409,"sum":112.892409,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0fa4d4ec5763824a0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0031d2fae5d6173ca"},"timestamp":1651679640000,"value":{"max":315.0,"min":315.0,"sum":315.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-03565f5cfb3a07bfa"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":401.0,"min":401.0,"sum":401.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0748c5fbb31150cc3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":0.0029296875,"min":0.0029296875,"sum":0.0029296875,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":0.00390625,"min":0.00390625,"sum":0.00390625,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":2053632.0,"min":2053632.0,"sum":2053632.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":0.038939,"min":0.038939,"sum":0.038939,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-091b03752c38ef352"},"timestamp":1651679580000,"value":{"max":21.0,"min":21.0,"sum":21.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-03565f5cfb3a07bfa"},"timestamp":1651679640000,"value":{"max":169.0,"min":169.0,"sum":169.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0c127e7b050031266"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-069f29f510c67f9a8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0852deaa95789749e"},"timestamp":1651679640000,"value":{"max":273.0,"min":273.0,"sum":273.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-081a322538b797ec0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0031d2fae5d6173ca"},"timestamp":1651679640000,"value":{"max":0.0048828125,"min":0.0048828125,"sum":0.0048828125,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-069f29f510c67f9a8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-054650c08387fbacc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-081a322538b797ec0"},"timestamp":1651679640000,"value":{"max":119.0,"min":119.0,"sum":119.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-069f29f510c67f9a8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":17.0,"min":17.0,"sum":17.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0e6cb3bb1fa5d0a4a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0dc807c09faeb31d3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":60.133701,"min":60.133701,"sum":60.133701,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-08a641cdbe7464fd8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0141691131493b6ce"},"timestamp":1651679580000,"value":{"max":0.00583333333333333,"min":0.00583333333333333,"sum":0.00583333333333333,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0031d2fae5d6173ca"},"timestamp":1651679640000,"value":{"max":0.333578,"min":0.333578,"sum":0.333578,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0d4a61f5e443c76db"},"timestamp":1651679640000,"value":{"max":67.0,"min":67.0,"sum":67.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0de4941b77e817a13"},"timestamp":1651679640000,"value":{"max":0.283491,"min":0.283491,"sum":0.283491,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":0.011277,"min":0.011277,"sum":0.011277,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0748c5fbb31150cc3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0fa4d4ec5763824a0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0225681d9409b6a5c"},"timestamp":1651679640000,"value":{"max":3825152.0,"min":3825152.0,"sum":3825152.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0d4a61f5e443c76db"},"timestamp":1651679640000,"value":{"max":0.088938,"min":0.088938,"sum":0.088938,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0f39343b34261fce4"},"timestamp":1651679640000,"value":{"max":0.0146484375,"min":0.0146484375,"sum":0.0146484375,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":0.0048828125,"min":0.0048828125,"sum":0.0048828125,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0151f3979c0ff109b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":0.331596,"min":0.331596,"sum":0.331596,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-06d0e7fd61dff234a"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0c127e7b050031266"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":2730496.0,"count":114.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":60.124818,"min":60.124818,"sum":60.124818,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0fa4d4ec5763824a0"},"timestamp":1651679640000,"value":{"max":60.004415,"min":60.004415,"sum":60.004415,"count":1.0},"unit":"Seconds"}
" + }, + { + "data": "{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0e6cb3bb1fa5d0a4a"},"timestamp":1651679640000,"value":{"max":0.0048828125,"min":0.0048828125,"sum":0.0048828125,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-08a641cdbe7464fd8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0c364d1ce71aa41d9"},"timestamp":1651679640000,"value":{"max":0.001953125,"min":0.001953125,"sum":0.001953125,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0dc807c09faeb31d3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0b0599dd4d14127a1"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":103424.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0c364d1ce71aa41d9"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0748c5fbb31150cc3"},"timestamp":1651679640000,"value":{"max":60.004415,"min":60.004415,"sum":60.004415,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0852deaa95789749e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":1838080.0,"min":1838080.0,"sum":1838080.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-000928c7d42a37f9b"},"timestamp":1651679640000,"value":{"max":1694208.0,"min":1694208.0,"sum":1694208.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0151f3979c0ff109b"},"timestamp":1651679640000,"value":{"max":0.001953125,"min":0.001953125,"sum":0.001953125,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0151f3979c0ff109b"},"timestamp":1651679640000,"value":{"max":56.0,"min":56.0,"sum":56.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":1843200.0,"min":1843200.0,"sum":1843200.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0f39343b34261fce4"},"timestamp":1651679640000,"value":{"max":59.874908,"min":59.874908,"sum":59.874908,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0b0599dd4d14127a1"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-091b03752c38ef352"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-08a641cdbe7464fd8"},"timestamp":1651679640000,"value":{"max":60.003072,"min":60.003072,"sum":60.003072,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0852deaa95789749e"},"timestamp":1651679640000,"value":{"max":0.269079,"min":0.269079,"sum":0.269079,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-09aadec74cd779f49"},"timestamp":1651679640000,"value":{"max":3.90386688E8,"min":3.90386688E8,"sum":3.90386688E8,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-081a322538b797ec0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0e6cb3bb1fa5d0a4a"},"timestamp":1651679640000,"value":{"max":3905024.0,"min":3905024.0,"sum":3905024.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":60.087097,"min":60.087097,"sum":60.087097,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0852deaa95789749e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0dc807c09faeb31d3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0031d2fae5d6173ca"},"timestamp":1651679640000,"value":{"max":59.923161,"min":59.923161,"sum":59.923161,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0c127e7b050031266"},"timestamp":1651679580000,"value":{"max":114.0,"min":114.0,"sum":114.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":59.830304,"min":59.830304,"sum":59.830304,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0225681d9409b6a5c"},"timestamp":1651679640000,"value":{"max":131072.0,"min":131072.0,"sum":131072.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-069f29f510c67f9a8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-07bb1f6e9465aa26d"},"timestamp":1651679640000,"value":{"max":3.436863,"min":3.436863,"sum":3.436863,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-08a641cdbe7464fd8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":295.0,"min":295.0,"sum":295.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0c364d1ce71aa41d9"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-07bb1f6e9465aa26d"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0fa4d4ec5763824a0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-07bb1f6e9465aa26d"},"timestamp":1651679640000,"value":{"max":0.056640625,"min":0.056640625,"sum":0.056640625,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":59.944735,"min":59.944735,"sum":59.944735,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":56.0,"min":56.0,"sum":56.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0748c5fbb31150cc3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-08a641cdbe7464fd8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":516.0,"min":516.0,"sum":516.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0748c5fbb31150cc3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0fa4d4ec5763824a0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-09aadec74cd779f49"},"timestamp":1651679640000,"value":{"max":5254.0,"min":5254.0,"sum":5254.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-069f29f510c67f9a8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0d4a61f5e443c76db"},"timestamp":1651679640000,"value":{"max":1496064.0,"min":1496064.0,"sum":1496064.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0dc807c09faeb31d3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":2150912.0,"min":2150912.0,"sum":2150912.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0b0599dd4d14127a1"},"timestamp":1651679580000,"value":{"max":59.99,"min":59.99,"sum":59.99,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-054650c08387fbacc"},"timestamp":1651679640000,"value":{"max":160.0,"min":160.0,"sum":160.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-069f29f510c67f9a8"},"timestamp":1651679640000,"value":{"max":60.004415,"min":60.004415,"sum":60.004415,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":355840.0,"min":355840.0,"sum":355840.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.165,"count":107.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.105,"count":79.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0852deaa95789749e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0d4a61f5e443c76db"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0c364d1ce71aa41d9"},"timestamp":1651679640000,"value":{"max":0.12813,"min":0.12813,"sum":0.12813,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-054650c08387fbacc"},"timestamp":1651679640000,"value":{"max":6937600.0,"min":6937600.0,"sum":6937600.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0e6cb3bb1fa5d0a4a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-07bb1f6e9465aa26d"},"timestamp":1651679640000,"value":{"max":8.9841664E7,"min":8.9841664E7,"sum":8.9841664E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":59.959999,"min":59.959999,"sum":59.959999,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-060d7e5d0e4771a58"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-091b03752c38ef352"},"timestamp":1651679580000,"value":{"max":59.99,"min":59.99,"sum":59.99,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0748c5fbb31150cc3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0141691131493b6ce"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.35,"count":203.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0c364d1ce71aa41d9"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0f39343b34261fce4"},"timestamp":1651679640000,"value":{"max":458752.0,"min":458752.0,"sum":458752.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-069f29f510c67f9a8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-054650c08387fbacc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":60.096503,"min":60.096503,"sum":60.096503,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-062b0d51a311d325c"},"timestamp":1651679580000,"value":{"max":33.0,"min":33.0,"sum":33.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0a6e6c0e293fe7380"},"timestamp":1651679640000,"value":{"max":147968.0,"min":147968.0,"sum":147968.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-069f29f510c67f9a8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0151f3979c0ff109b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-000928c7d42a37f9b"},"timestamp":1651679640000,"value":{"max":9.765625E-4,"min":9.765625E-4,"sum":9.765625E-4,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0b0599dd4d14127a1"},"timestamp":1651679580000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0c127e7b050031266"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.18,"count":114.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-03565f5cfb3a07bfa"},"timestamp":1651679640000,"value":{"max":7565824.0,"min":7565824.0,"sum":7565824.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-054650c08387fbacc"},"timestamp":1651679640000,"value":{"max":0.001953125,"min":0.001953125,"sum":0.001953125,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0031d2fae5d6173ca"},"timestamp":1651679640000,"value":{"max":3294720.0,"min":3294720.0,"sum":3294720.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0151f3979c0ff109b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":56.0,"min":56.0,"sum":56.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0dc2d89576991bda8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":0.246245,"min":0.246245,"sum":0.246245,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0c876911b1410147f"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-081a322538b797ec0"},"timestamp":1651679640000,"value":{"max":2772480.0,"min":2772480.0,"sum":2772480.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-07e55b6e81929109a"},"timestamp":1651679640000,"value":{"max":0.00390625,"min":0.00390625,"sum":0.00390625,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-062b0d51a311d325c"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-091b03752c38ef352"},"timestamp":1651679580000,"value":{"max":1.66666666666667E-4,"min":1.66666666666667E-4,"sum":1.66666666666667E-4,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-09aadec74cd779f49"},"timestamp":1651679640000,"value":{"max":85.299326,"min":85.299326,"sum":85.299326,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0c127e7b050031266"},"timestamp":1651679580000,"value":{"max":59.95,"min":59.95,"sum":59.95,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679640000,"value":{"max":107.0,"min":107.0,"sum":107.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679580000,"value":{"max":79.0,"min":79.0,"sum":79.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0225681d9409b6a5c"},"timestamp":1651679640000,"value":{"max":286.0,"min":286.0,"sum":286.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0dc807c09faeb31d3"},"timestamp":1651679640000,"value":{"max":60.003072,"min":60.003072,"sum":60.003072,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-00281ceb8682b3b8e"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-07bb1f6e9465aa26d"},"timestamp":1651679640000,"value":{"max":59.391939,"min":59.391939,"sum":59.391939,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":0.195864,"min":0.195864,"sum":0.195864,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0f39343b34261fce4"},"timestamp":1651679640000,"value":{"max":2.1287936E7,"min":2.1287936E7,"sum":2.1287936E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0225681d9409b6a5c"},"timestamp":1651679640000,"value":{"max":59.945921,"min":59.945921,"sum":59.945921,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0e6cb3bb1fa5d0a4a"},"timestamp":1651679640000,"value":{"max":59.918918,"min":59.918918,"sum":59.918918,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0f39343b34261fce4"},"timestamp":1651679640000,"value":{"max":0.927894,"min":0.927894,"sum":0.927894,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":60.082502,"min":60.082502,"sum":60.082502,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0de4941b77e817a13"},"timestamp":1651679640000,"value":{"max":59.939991,"min":59.939991,"sum":59.939991,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-094878ac3331924e8"},"timestamp":1651679640000,"value":{"max":410.0,"min":410.0,"sum":410.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0a6a7b384680eb434"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":123392.0,"min":123392.0,"sum":123392.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-03565f5cfb3a07bfa"},"timestamp":1651679640000,"value":{"max":59.982212,"min":59.982212,"sum":59.982212,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-05a952be40be232ff"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":1998336.0,"min":1998336.0,"sum":1998336.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-09aadec74cd779f49"},"timestamp":1651679640000,"value":{"max":3.3046875,"min":3.3046875,"sum":3.3046875,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0852deaa95789749e"},"timestamp":1651679640000,"value":{"max":9198080.0,"min":9198080.0,"sum":9198080.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-03565f5cfb3a07bfa"},"timestamp":1651679640000,"value":{"max":0.001953125,"min":0.001953125,"sum":0.001953125,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0d28501e1527a5b46"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-09aadec74cd779f49"},"timestamp":1651679640000,"value":{"max":2.549751808E9,"min":2.549751808E9,"sum":2.549751808E9,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-0f84098239659e2d2"},"timestamp":1651679640000,"value":{"max":60.096503,"min":60.096503,"sum":60.096503,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-000928c7d42a37f9b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-091b03752c38ef352"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.01,"count":21.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0dc807c09faeb31d3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-062b0d51a311d325c"},"timestamp":1651679580000,"value":{"max":99.999962962963,"min":99.999962962963,"sum":99.999962962963,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-07bb1f6e9465aa26d"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-087b4c0b83fbefd35"},"timestamp":1651679640000,"value":{"max":59.9025,"min":59.9025,"sum":59.9025,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-0bd161379617c17c3"},"timestamp":1651679640000,"value":{"max":99.0,"min":99.0,"sum":99.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalReadTime","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeTotalWriteTime","dimensions":{"VolumeId":"vol-0d362378048f7106b"},"timestamp":1651679640000,"value":{"max":0.009348,"min":0.009348,"sum":0.009348,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0f39343b34261fce4"},"timestamp":1651679640000,"value":{"max":901.0,"min":901.0,"sum":901.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0d4a61f5e443c76db"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0de4941b77e817a13"},"timestamp":1651679640000,"value":{"max":3675136.0,"min":3675136.0,"sum":3675136.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-000928c7d42a37f9b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0a2e12faaee216587"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-09aadec74cd779f49"},"timestamp":1651679640000,"value":{"max":27233.0,"min":27233.0,"sum":27233.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeQueueLength","dimensions":{"VolumeId":"vol-0852deaa95789749e"},"timestamp":1651679640000,"value":{"max":0.00390625,"min":0.00390625,"sum":0.00390625,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadBytes","dimensions":{"VolumeId":"vol-00990532c2573d0e2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeReadOps","dimensions":{"VolumeId":"vol-0225681d9409b6a5c"},"timestamp":1651679640000,"value":{"max":1.0,"min":1.0,"sum":1.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-08a641cdbe7464fd8"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-0c364d1ce71aa41d9"},"timestamp":1651679640000,"value":{"max":7049728.0,"min":7049728.0,"sum":7049728.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteOps","dimensions":{"VolumeId":"vol-0e904018b2416f8e8"},"timestamp":1651679640000,"value":{"max":294.0,"min":294.0,"sum":294.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"BurstBalance","dimensions":{"VolumeId":"vol-05cb8d1e7f31eb843"},"timestamp":1651679640000,"value":{"max":100.0,"min":100.0,"sum":100.0,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeIdleTime","dimensions":{"VolumeId":"vol-081a322538b797ec0"},"timestamp":1651679640000,"value":{"max":59.995993,"min":59.995993,"sum":59.995993,"count":1.0},"unit":"Seconds"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EBS","metric_name":"VolumeWriteBytes","dimensions":{"VolumeId":"vol-012bf6f8df1a7d3bd"},"timestamp":1651679640000,"value":{"max":2546688.0,"min":2546688.0,"sum":2546688.0,"count":1.0},"unit":"Bytes"}
" + }, + { + "data": "{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":2225664.0,"min":2053632.0,"sum":1.0626048E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679400000,"value":{"max":8191.0,"min":6980.0,"sum":37261.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":0.4333333333333333,"min":0.3916666666666666,"sum":2.083333333333333,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":542720.0,"min":0.0,"sum":1795072.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679640000,"value":{"max":27233.0,"min":27233.0,"sum":27233.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":1.0971367E7,"min":1.0971367E7,"sum":1.0971367E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":320.0,"min":320.0,"sum":320.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679640000,"value":{"max":4409059.0,"min":4409059.0,"sum":4409059.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":3502829.0,"min":3502829.0,"sum":3502829.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditBalance","dimensions":{"InstanceId":"i-0a2a9750f4427abe7"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":6.0,"min":0.0,"sum":6.0,"count":14.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":6.963333333333334,"min":6.963333333333334,"sum":6.963333333333334,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"m5.2xlarge"},"timestamp":1651679640000,"value":{"max":27233.0,"min":273.0,"sum":27506.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":5.421666666666667,"min":5.421666666666667,"sum":5.421666666666667,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":67.0,"min":67.0,"sum":67.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"t2.micro"},"timestamp":1651679580000,"value":{"max":92504.0,"min":92504.0,"sum":92504.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679640000,"value":{"max":273.0,"min":273.0,"sum":273.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":3.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":3675136.0,"min":3675136.0,"sum":3675136.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":27233.0,"min":0.0,"sum":31981.0,"count":20.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":2224.0,"min":1513.0,"sum":8389.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679640000,"value":{"max":1.0640164E7,"min":1.0640164E7,"sum":1.0640164E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditBalance","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679400000,"value":{"max":151.725482,"min":151.725482,"sum":151.725482,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":80.0,"min":0.0,"sum":270.0,"count":15.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":865640.0,"min":691932.0,"sum":3680170.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679640000,"value":{"max":2809264.0,"min":2809264.0,"sum":2809264.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":693.0,"count":7.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":2567309.0,"min":1980599.0,"sum":1.074249E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":0.4383333333333333,"min":0.3933333333333333,"sum":2.0833333333333335,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"c5.9xlarge"},"timestamp":1651679640000,"value":{"max":4605221.0,"min":4605221.0,"sum":4605221.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":991.0,"count":10.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":4605221.0,"min":4605221.0,"sum":4605221.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":619520.0,"min":0.0,"sum":1953792.0,"count":15.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":648054.0,"min":297531.0,"sum":1889831.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":10993.0,"min":9393.0,"sum":50023.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m5n.8xlarge"},"timestamp":1651679640000,"value":{"max":3502829.0,"min":3502829.0,"sum":3502829.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":6471.0,"min":3735.0,"sum":24061.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"c5n.9xlarge"},"timestamp":1651679640000,"value":{"max":9421725.0,"min":9421725.0,"sum":9421725.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679400000,"value":{"max":15830.0,"min":13116.0,"sum":71896.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679400000,"value":{"max":16874.0,"min":13835.0,"sum":76950.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditBalance","dimensions":{"InstanceId":"i-0cbbc7e021a19be2e"},"timestamp":1651679400000,"value":{"max":144.0,"min":144.0,"sum":144.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":20.0,"min":18.0,"sum":94.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":4.670077834630577,"min":3.373333333333333,"sum":8.04341116796391,"count":2.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":50361.0,"min":50361.0,"sum":50361.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":5.096501E7,"min":2809264.0,"sum":1.05778184E8,"count":6.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":6.963333333333334,"min":6.963333333333334,"sum":6.963333333333334,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":42.0,"min":30.0,"sum":177.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":1.6496908E7,"min":5753273.0,"sum":2.9848319E7,"count":3.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":15.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":1.135,"min":1.0616489725171248,"sum":5.475000722824074,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":2150912.0,"min":1775104.0,"sum":9355776.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceType":"m4.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":3825152.0,"min":3825152.0,"sum":3825152.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":1834.0,"min":1650.0,"sum":8618.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"c5n.9xlarge"},"timestamp":1651679640000,"value":{"max":3.373333333333333,"min":3.373333333333333,"sum":3.373333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":3210299.0,"min":3210299.0,"sum":3210299.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679400000,"value":{"max":29807.0,"min":27487.0,"sum":141477.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":2.549751808E9,"min":0.0,"sum":2.720729088E9,"count":20.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":5.285,"min":5.285,"sum":5.285,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditBalance","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679400000,"value":{"max":144.0,"min":144.0,"sum":144.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679400000,"value":{"max":8191.0,"min":6980.0,"sum":37261.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":1.176647055882402,"min":1.0516491391810137,"sum":5.518332973454166,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":699.0,"count":7.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":3109652.0,"min":3109652.0,"sum":3109652.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m5.4xlarge"},"timestamp":1651679640000,"value":{"max":7728072.0,"min":7728072.0,"sum":7728072.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":3.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":990.0,"count":10.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m5n.2xlarge"},"timestamp":1651679640000,"value":{"max":3.597669E7,"min":3.0637316E7,"sum":6.6614006E7,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"m5n.2xlarge"},"timestamp":1651679640000,"value":{"max":169.0,"min":137.0,"sum":306.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679640000,"value":{"max":8433664.0,"min":0.0,"sum":1.1728384E7,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditsCharged","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"c5n.9xlarge"},"timestamp":1651679640000,"value":{"max":8.9841664E7,"min":8.9841664E7,"sum":8.9841664E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":6.963333333333334,"min":6.963333333333334,"sum":6.963333333333334,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679400000,"value":{"max":54618.0,"min":53618.0,"sum":270052.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":3109652.0,"min":3109652.0,"sum":3109652.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":2.1287936E7,"min":2.1287936E7,"sum":2.1287936E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":320.0,"min":320.0,"sum":320.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":3077288.0,"min":1945655.0,"sum":1.1540728E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":18.0,"min":18.0,"sum":90.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"m5.4xlarge"},"timestamp":1651679640000,"value":{"max":131072.0,"min":131072.0,"sum":131072.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679400000,"value":{"max":26556.0,"min":14040.0,"sum":85302.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":1.0,"min":1.0,"sum":1.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":3534584.0,"min":2665334.0,"sum":1.5898017E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"c5.9xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-088bf656e3ecfd30c"},"timestamp":1651679640000,"value":{"max":315.0,"min":0.0,"sum":371.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":6.533333333333332,"min":5.071666666666666,"sum":17.613333333333333,"count":3.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":2546688.0,"min":1961472.0,"sum":1.099264E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"c5n.9xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":2224.0,"min":1513.0,"sum":8389.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":5234073.0,"min":5234073.0,"sum":5234073.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":2225664.0,"min":2053632.0,"sum":1.0626048E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditBalance","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679400000,"value":{"max":6054.0,"min":4862.0,"sum":27826.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":3210299.0,"min":3210299.0,"sum":3210299.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"m5.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceType":"t2.micro"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":858751.0,"min":695103.0,"sum":3675190.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0f7835a54ea38473a"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":1514.0,"min":1514.0,"sum":1514.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":16.563333333333333,"min":2.663333333333333,"sum":44.87749999999992,"count":6.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":3.5049999999999994,"min":3.5049999999999994,"sum":3.5049999999999994,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":3.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":5254.0,"min":0.0,"sum":5262.0,"count":20.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"m5.4xlarge"},"timestamp":1651679640000,"value":{"max":1.0,"min":1.0,"sum":1.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":2.06896551724263,"min":2.06896551724263,"sum":2.06896551724263,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"m5n.8xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":1996.0,"min":1085.0,"sum":6500.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":2487.0,"count":25.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":612.0,"min":265.0,"sum":1988.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":5.608333333333333,"min":5.608333333333333,"sum":5.608333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"ImageId":"ami-0940babbb54d69874"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":10993.0,"min":9393.0,"sum":50023.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":3109652.0,"min":3109652.0,"sum":3109652.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"m5.2xlarge"},"timestamp":1651679640000,"value":{"max":3.90386688E8,"min":0.0,"sum":3.90386688E8,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":1514.0,"min":0.0,"sum":1822.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-01a736af3b8a5bf4a"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":496.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":4967.0,"min":2058.0,"sum":15779.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"m5a.2xlarge"},"timestamp":1651679640000,"value":{"max":1.9861962E7,"min":1.9861962E7,"sum":1.9861962E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":2.06896551724263,"min":2.06896551724263,"sum":2.06896551724263,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":1694208.0,"min":1694208.0,"sum":1694208.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":495.0,"min":401.0,"sum":2153.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceType":"t2.micro"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":9456.0,"min":8298.0,"sum":45069.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":371.0,"min":267.0,"sum":1491.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679400000,"value":{"max":25270.0,"min":12683.0,"sum":77222.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":9456.0,"min":8298.0,"sum":45069.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":4.670077834630577,"min":4.670077834630577,"sum":4.670077834630577,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditBalance","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":2772480.0,"min":2772480.0,"sum":2772480.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"m5.4xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":1922.0,"min":1120.0,"sum":6514.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679400000,"value":{"max":15335.0,"min":14536.0,"sum":74846.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditBalance","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679400000,"value":{"max":144.0,"min":144.0,"sum":144.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":2.7679346E7,"min":1.3649315E7,"sum":6.1190623E7,"count":3.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{},"timestamp":1651679640000,"value":{"max":6.0,"min":0.0,"sum":6.0,"count":14.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceType":"c5.9xlarge"},"timestamp":1651679640000,"value":{"max":3905024.0,"min":0.0,"sum":3905024.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":4275743.0,"min":3327748.0,"sum":1.8540927E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":7.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679400000,"value":{"max":10874.0,"min":8145.0,"sum":46380.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditsCharged","dimensions":{"InstanceId":"i-0cbbc7e021a19be2e"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":8.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceType":"c5.9xlarge"},"timestamp":1651679640000,"value":{"max":4636640.0,"min":4636640.0,"sum":4636640.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":8.77130197E8,"min":3109652.0,"sum":1.141021602E9,"count":16.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":3.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":7.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679640000,"value":{"max":8.49576271185947,"min":8.49576271185947,"sum":8.49576271185947,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":9421725.0,"min":4605221.0,"sum":1.4026946E7,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":458752.0,"min":0.0,"sum":458752.0,"count":3.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditUsage","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679400000,"value":{"max":0.087678,"min":0.087678,"sum":0.087678,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceType":"t2.micro"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":437.0,"min":410.0,"sum":2106.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":308.0,"min":0.0,"sum":308.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679400000,"value":{"max":253904.0,"min":25416.0,"sum":403037.0,"count":6.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":3.0637316E7,"min":3.0637316E7,"sum":3.0637316E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditUsage","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679400000,"value":{"max":0.107583,"min":0.107583,"sum":0.107583,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"m5n.8xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":0.4366666666666666,"min":0.3683333333333333,"sum":1.9399999999999997,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":137.0,"min":137.0,"sum":137.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadBytes","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditsCharged","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"t2.micro"},"timestamp":1651679580000,"value":{"max":50361.0,"min":50361.0,"sum":50361.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":3.90386688E8,"min":0.0,"sum":3.90386688E8,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditBalance","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-large-spot202202091317061067000000e3"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m4.2xlarge"},"timestamp":1651679640000,"value":{"max":2.6717217E7,"min":1.6496908E7,"sum":4.3214125E7,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditBalance","dimensions":{"InstanceId":"i-0a2a9750f4427abe7"},"timestamp":1651679400000,"value":{"max":144.0,"min":144.0,"sum":144.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679400000,"value":{"max":25485.0,"min":9155.0,"sum":145907.0,"count":10.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":7899935.0,"min":7899935.0,"sum":7899935.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":17.0,"min":6.0,"sum":53.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"c5.9xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":1.176647055882402,"min":1.0516491391810137,"sum":5.518332973454166,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679400000,"value":{"max":253904.0,"min":253904.0,"sum":253904.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":4636640.0,"min":4636640.0,"sum":4636640.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"c5n.9xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"m5n.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":16.0,"min":6.0,"sum":43.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{},"timestamp":1651679640000,"value":{"max":2.549751808E9,"min":0.0,"sum":2.720729088E9,"count":20.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0539f8ff77376e52e"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":5.3001619E7,"min":5.3001619E7,"sum":5.3001619E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0ae21ab1d3dcd74b4"},"timestamp":1651679640000,"value":{"max":7728072.0,"min":7728072.0,"sum":7728072.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":6104036.0,"min":6104036.0,"sum":6104036.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0efc7fdf09c123428"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679400000,"value":{"max":58454.0,"min":14040.0,"sum":143756.0,"count":6.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679640000,"value":{"max":7049728.0,"min":7049728.0,"sum":7049728.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679580000,"value":{"max":92504.0,"min":92504.0,"sum":92504.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":901.0,"min":123.0,"sum":1184.0,"count":3.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m5n.8xlarge"},"timestamp":1651679640000,"value":{"max":3.5049999999999994,"min":3.5049999999999994,"sum":3.5049999999999994,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"c5.9xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679640000,"value":{"max":8.9841664E7,"min":0.0,"sum":9.3746688E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":2272.0,"min":1502.0,"sum":8432.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":5.3001619E7,"min":5753273.0,"sum":9.6486106E7,"count":3.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":1996.0,"min":1085.0,"sum":6500.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot202202091315515599000000c0"},"timestamp":1651679640000,"value":{"max":7728072.0,"min":7728072.0,"sum":7728072.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":2225.0,"min":1548.0,"sum":8474.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679640000,"value":{"max":8.0,"min":8.0,"sum":8.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":7598138.0,"min":7598138.0,"sum":7598138.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":1.9861962E7,"min":1.9861962E7,"sum":1.9861962E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":1921.0,"min":1095.0,"sum":6386.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0ebe6ab3e379f48af"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"InstanceId":"i-0219c7b86248450b2"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditsCharged","dimensions":{"InstanceId":"i-0a2a9750f4427abe7"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2c.ami-0e74bce6c73b03b2e.20220503223301039000000002"},"timestamp":1651679400000,"value":{"max":631022.0,"min":320118.0,"sum":1956814.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2b.ami-0e74bce6c73b03b2e.20220503222059149300000005"},"timestamp":1651679400000,"value":{"max":4275743.0,"min":3327748.0,"sum":1.8540927E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":5.3001619E7,"min":1.0640164E7,"sum":1.69698897E8,"count":6.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":869449.0,"min":699094.0,"sum":3691215.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"m5a.2xlarge"},"timestamp":1651679640000,"value":{"max":119.0,"min":119.0,"sum":119.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":2546688.0,"min":1961472.0,"sum":1.099264E7,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679400000,"value":{"max":99.0,"min":99.0,"sum":495.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":858751.0,"min":695103.0,"sum":3675190.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0ca5801fc2002b099"},"timestamp":1651679400000,"value":{"max":6166.0,"min":3594.0,"sum":23626.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":123.0,"min":123.0,"sum":123.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":2272.0,"min":1502.0,"sum":8432.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-055c75f015f390582"},"timestamp":1651679640000,"value":{"max":3.7731214E7,"min":3.7731214E7,"sum":3.7731214E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2b.ami-0e74bce6c73b03b2e.20220503223301038800000001"},"timestamp":1651679400000,"value":{"max":0.4366666666666666,"min":0.3683333333333333,"sum":1.9399999999999997,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"m5n.2xlarge"},"timestamp":1651679640000,"value":{"max":16.563333333333333,"min":5.285,"sum":21.848333333333333,"count":2.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679400000,"value":{"max":0.945,"min":0.8716666666666666,"sum":4.481666666666667,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-029e8b91893510672"},"timestamp":1651679640000,"value":{"max":1.3649315E7,"min":1.3649315E7,"sum":1.3649315E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-013e95c2db558957b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceId":"i-0aa3731e95e64bb33"},"timestamp":1651679640000,"value":{"max":2.663333333333333,"min":2.663333333333333,"sum":2.663333333333333,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-07dbb4dfb68b056a6"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":499.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-0e75f3c45bdc29890"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"InstanceType":"c5.9xlarge"},"timestamp":1651679640000,"value":{"max":4.670077834630577,"min":4.670077834630577,"sum":4.670077834630577,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"InstanceId":"i-0193dfbe27386aaa1"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"m5ad.4xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679400000,"value":{"max":1921.0,"min":1095.0,"sum":6386.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceId":"i-0c57bc60d35ef1836"},"timestamp":1651679400000,"value":{"max":516.0,"min":395.0,"sum":2179.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceType":"m5a.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSByteBalance%","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":100.0,"min":99.0,"sum":496.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskReadOps","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091317245805000000eb"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":2.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"ImageId":"ami-0940babbb54d69874"},"timestamp":1651679580000,"value":{"max":2.06896551724263,"min":2.06896551724263,"sum":2.06896551724263,"count":1.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadBytes","dimensions":{"InstanceId":"i-02b5d491fdfc6c1d1"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"m5.4xlarge"},"timestamp":1651679640000,"value":{"max":286.0,"min":286.0,"sum":286.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkOut","dimensions":{"InstanceId":"i-0632008fd1891f6ba"},"timestamp":1651679580000,"value":{"max":92504.0,"min":92504.0,"sum":92504.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSIOBalance%","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2a.ami-0e74bce6c73b03b2e.20220503222059148300000001"},"timestamp":1651679400000,"value":{"max":100.0,"min":100.0,"sum":500.0,"count":5.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUSurplusCreditBalance","dimensions":{"InstanceId":"i-0cbbc7e021a19be2e"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{},"timestamp":1651679640000,"value":{"max":27233.0,"min":0.0,"sum":31981.0,"count":20.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-0f4906924ef9b78ba"},"timestamp":1651679400000,"value":{"max":32477.0,"min":12995.0,"sum":106578.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteOps","dimensions":{},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":8.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-compute-large-spot202202091317132830000000e6"},"timestamp":1651679400000,"value":{"max":32477.0,"min":12995.0,"sum":190266.0,"count":10.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{},"timestamp":1651679640000,"value":{"max":16.563333333333333,"min":2.09677419354721,"sum":110.0948692996834,"count":17.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-general-medium-spot20220209131829117500000109"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":2.549751808E9,"min":7565824.0,"sum":2.557317632E9,"count":2.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSWriteOps","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":67.0,"min":67.0,"sum":67.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"m5ad.2xlarge"},"timestamp":1651679640000,"value":{"max":7.0,"min":0.0,"sum":7.0,"count":3.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"InstanceId":"i-07388eeabc8b76323"},"timestamp":1651679400000,"value":{"max":12303.0,"min":10523.0,"sum":56331.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-0cbbc7e021a19be2e"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":6.0,"min":6.0,"sum":6.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":8.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceId":"i-0ee25dd52bcde7729"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot2022020913183532220000010c"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"InstanceId":"i-0de3c36dcb1210d28"},"timestamp":1651679640000,"value":{"max":3.597669E7,"min":3.597669E7,"sum":3.597669E7,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceType":"m5d.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"InstanceType":"m5a.2xlarge"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"ImageId":"ami-0940babbb54d69874"},"timestamp":1651679580000,"value":{"max":50361.0,"min":50361.0,"sum":50361.0,"count":1.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0bb3c70800f789c6b"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"InstanceId":"i-0c931a63f8b5ea898"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUCreditUsage","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679400000,"value":{"max":0.107583,"min":0.107583,"sum":0.107583,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_System","dimensions":{"InstanceId":"i-02ec62d3c402e9eef"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"EBSReadOps","dimensions":{"InstanceId":"i-0c2cfecbf98d32578"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"CPUUtilization","dimensions":{"ImageId":"ami-0bce5265bc5705a19"},"timestamp":1651679640000,"value":{"max":16.563333333333333,"min":2.663333333333333,"sum":107.99809510613619,"count":16.0},"unit":"Percent"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.public.us-west-2a.ami-0e74bce6c73b03b2e.20220503223301039400000005"},"timestamp":1651679400000,"value":{"max":1922.0,"min":1120.0,"sum":6514.0,"count":5.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"DiskWriteBytes","dimensions":{"AutoScalingGroupName":"conveyor-rapid69-conveyor-platform-spot202202091316166173000000cc"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":4.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"MetadataNoToken","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":5.0},"unit":"None"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkPacketsOut","dimensions":{"InstanceId":"i-0f898c9b1c511eb41"},"timestamp":1651679400000,"value":{"max":58454.0,"min":58454.0,"sum":58454.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"NetworkIn","dimensions":{"AutoScalingGroupName":"edge-proxy.gensandbox.default.us-west-2c.ami-0e74bce6c73b03b2e.20220503222059148400000002"},"timestamp":1651679400000,"value":{"max":2009411.0,"min":1682187.0,"sum":9042632.0,"count":5.0},"unit":"Bytes"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed","dimensions":{"InstanceId":"i-023df7a827cbc765c"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
{"metric_stream_name":"grpn-sandbox-dev-cloudwatch-metric-stream","account_id":"549734399709","region":"us-west-2","namespace":"AWS/EC2","metric_name":"StatusCheckFailed_Instance","dimensions":{"AutoScalingGroupName":"dse-ops-dev-autoscaling-host"},"timestamp":1651679640000,"value":{"max":0.0,"min":0.0,"sum":0.0,"count":1.0},"unit":"Count"}
" + } + ] + } +` diff --git a/plugins/inputs/cloudwatch_metric_streams/sample.conf b/plugins/inputs/cloudwatch_metric_streams/sample.conf new file mode 100644 index 0000000000000..26424cb4acaba --- /dev/null +++ b/plugins/inputs/cloudwatch_metric_streams/sample.conf @@ -0,0 +1,31 @@ +# AWS Metric Streams listener +[[inputs.cloudwatch_metric_streams]] + ## Address and port to host HTTP listener on + service_address = ":443" + + ## Paths to listen to. + # paths = ["/telegraf"] + + ## maximum duration before timing out read of the request + # read_timeout = "10s" + + ## maximum duration before timing out write of the response + # write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) + # max_body_size = "500MB" + + ## Optional access key for Firehose security. + # access_key = "test-key" + + ## An optional flag to keep Metric Streams metrics compatible with CloudWatch's API naming + # api_compatability = false + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" diff --git a/plugins/inputs/cloudwatch_metric_streams/testdata/records.gz b/plugins/inputs/cloudwatch_metric_streams/testdata/records.gz new file mode 100644 index 0000000000000000000000000000000000000000..674059c57e8aef8dec147ed99f56aeb4d3aa00d5 GIT binary patch literal 38037 zcmXuJcQhN`|36MCwTqUjRkYNqrfL%}rL<<18Z{duwMS}%5UN(yuG(}`wW~IX9csj$ zA!cF+K|+FG-=FjS{d3Q`f8BG>eVqGzEa8MJSL!iQAR0P<7bhQoX9&%9s}1DabQ;4c zhGO~VkZ4G+w3D3^2Tfu5u8Z;j^xkTjgi0B--jV$rSZEi>GTB8XrN;-*&hYTuvbo&F zzT+-3^Qku@+hGAg#UjrR=8)&)i-L3V{%g5#N~Hrl_oDdtvE6Gpl>#41L|!!SbL`9^ z;S_YY=D8g|)sCO_+K%SM%G`22JgP9n7PY61UDjMG9!GMFBQH>y7u4oo)YxO6naaEi zqC4fgP^hirt17UA!v{%kda-;K26H zo}4>{G5SXphJj{1JND1Jmc;nR6cXZxT3_j3lJu9xsH=8Ny45uV?{{?vEyK!sXF|$@Zxm2iq@+mB7V{<9|3Tb}yCnE=+KJ#zd zleRE?X)G3;Dtk`lP_wXv!{+-J_iY@vT@$OBazbOenuyC$1hcS%spa#Y_L>M%4C5GqYCJ!%BG)4v6h*jeLk<&(!q@LV zT;6{YMxGCQiAhIAE2bw@>i?Z2TbnY5=ra-rR)_Y-+MUZ{US{-lz+-2p`xEo^mQsTcKNeJ$&V0u6Xf=#I7APdxk2+svsV zR5gLc#z-VdFo4Mcja@O6Fr6g5p#w`(WOhB5=B4C`CoS9P{qvfTeM9Tl4x>KI(^c|s zGFXECGgQ2FX;t>CLPn`Vvsy{%tfXCly3(Bm^&df(RU~z+R5`=#gR~|CAN?p7>XX@s zVecf}NPa{%l$JYR)_k2j(%Cf5cnZg0oe#qITulZYuOAl?zHNzN>w(8%QAnX-LY@A- zF-`pJipDg3GzCuq-or7Z)?7a-;v~jt4K~V*6fcnM?0wj$M{FZx>#qZG9#R&I^{_-} zS&T`DX5e?ZDkuIUFb&pee@Z<(ujLLeDZEJir+Hf|l_+9QWhZOr+k|dQ(jZ?`fu|k_ z3$s8tQG)S;UYRevKo3suFgi!c72eBVVGKLCq6{T|jzR;xV4ec#v@RR{O^uag@W3lC z{XM)};qk<9$vdpaOS)g=9xdTM?nQna;<`x70!%c?UeL+RPbP5ZzSPM6#Lm0`_RGGzV_kA4SmCJ&D-)u=PgIx^zm*3 zbWJ$Xhsukv%|W&XC0|YXnXfX!_+ViT27f12#}2T@I|?!)%Ow)7*|{J_=S&--M$hkY z2YqgM#=Ucunn#;8^4U$Czmdk4P1&C=%T#Q1$oT|@T!e^JPz5i}w>Rd(j#ZH7ewv9m zHygk?Z-8IYy+o!ib@jJa>VYjUu8>TA_*-}4BZ2ys0ftJIW|Grd4aOhede8IiA%iF=oj2!M zM$+#By5w}!MwjS8Ayq2|KXO%DP=79JXfB@LnPLE+x^bczIGWCsllAzO=W-y$pds0} zcunspXN6?mk*(lBb}O#^RhTp>H*UPGW}L1uG$L^f=DFeX6W@uweR6|)s2~}y6DY4& zliADyr#S32sdGrPv4Pd&wS*ORP4|!jFhb(b$P6wntLJY>jn~z49R?5%R0q?=^wvyuum_Prg~N7a?glfWm>FpD$RW0@o^E9t(Ir6 zc}nU#WkAf0YMBdvfZ(Npvo6Byox&2${a=%=*{(~i#=(a7ca5`eu;-1PYzQut$>wIB zpe=9+Dcp;gJ-#?ykmUoW8@G-b)w8;btKa(Mbuiao+N$XH!n6*SV)*DpW3w{HIqHT& z{M9doG~XC`=tC$oo^Fl;hRsCmn)g}j!9Gl%F@QQI7I}QtdqaaP+9lASKutDS)~=g;jg9|%+}9UDD;E=^QuT=-spbQHkKgg5*BG1r_%(Xf&CvHIPT zv!lj8?;HR8t@{&siWLAU9VVUw9XgEX{-&RBEzkV6%B^A!)6;3#NF6k6ekEVcA=~Ne zp!JEbAiHG*WsQOl!3)cUdwlDNg-Uh%o*)gWul~$g-BVe4233uFSvvJrl)+PU4A*%^ 
z1e6QJ)j5MVj(wV_&N_XR9`+igVI;V%tyZtAkVtc;f79eLw}pO7Zc1F*&51rl(Vbp? z$V~Wxu9Kmrui;l7q6^P-;tvdN!gzYyqDML-w5D=6Nhw1wCBGWGDSE~L$S=m)e%-x( z5K+ma^>w^z{stY}m1)_kCAGQo$*TD@{jHzg8tB5y+m^;nd~~Kkin)>Aci`uo$sY$K z^5iuk;Jfe-gFE^Whiwr(eGfap&H zG+*GE&m=h3kwyFK=-7Rk7RwD9pyi?OP|L^0hgPglhF2o0UEiWAZaa;lT7|LN2O)za z-%?iw=_3hG+ZErIj~Vz5ZD0y+No7FFf12@3+qyo3SnmbgQln~&0*dt-vEJiNMJrFj z=RlwBI675SLcbb0{c3N$&JrmfZ$6;ENzqYRq9{hIYl~o~rbJ7}MH{Wy^8kGJ0bHNe zm^eJ6$FqDkJyIW^kbN(Bkm&EF<)TFzN!C}-7TXVn88ywl6!)VjKSDeY+H1L#(_`cd z+BLGSfIXw;Z@a~RFs7*cWTk-QzG+2=eav)HCHEK~KW^L{9GC+8_HcE5^bIV6-wQ|O zaSU+`b+Dj=lqR)@0cWZsKBt?Wx~$5)+xA(b`!a!-Y=U~yC*0IKjF&jG2N$wly==Rq zT(i!`sCV|9PBg3viK^iX<7mH*H^}W!6)Xvw$Y0AUZ$w!d4#)MLM6gUbx*Xxdj}H%8 ze>gD!Ncs`2hFOQ)TmzHie>TSp3T~YnSK-!!6H?PVZ`Xq=xRd`I5Z*NNf&SEL%>(Sb z6l8|n4ca^DWcCY=22#?)2_KX(J0IY4xRiQqrC6B1|4-*8`+*-zO7D;-qM83j_93FQ z@lPjTi9J}k`|07Cu8ZF#Y)#Ke%InZr$Ih61KMS3GsSvKp=IP7 zz;sQrhvNBx+<8*?`V3(N#}{;WvvcNK`nL0l_6iY|UG;PQ1Z;qKR|&i}{@*_IxofqIFl?(%`6R zdMm(HVvkL{qYD5a2C``2v-hXkPIY*cceMu4_bb)ZUfY^X}o`Ou!j&Ijd# z=G=vjR_D*(6t?e7A|F<@MkI!{C30_!z;qxt3A-gZPPqK8>S zh}E|Si~I_!0TP#>s^#PD z3#pw0P5vx_{L6o@h4X~nZt+pMwPk$Tp^hsvGh@N$T#!Fwdv~nums%!aN|hPEE?_ho zt9XwKEK|t24*w z`j7_rLN~_U)#-20Qp-)t+ev!bLMEagE(@erS~(uI&_sIe?2*ZUYf9-EhmRueSX=ZjKpc7VA&YC&;C4&+-dHHomC z5$xO!lK56$@wMReGjEMTI*|Np=6&$1#1Ni!=%j&?A#<~27uSi{!*3B+&`US)%qw((~N>7xTORfmB zfZg&U5WZE?f43Bry4*TONz_)Smk)BW=9>Cb2dSAe4v=Un<&5vA0)*^7?>I<*Sq4Zy z-yC0J%bvBm72dPX94M9*xJ<7D29>xdUs-k*-@b|9-K}pl6o6DHThuXnPBr_&!jQt- z>mgGOZpRQ7iC}iQT|Gc--C(l%oNgr#%E*tmrvTY;nH6ZB0O0Nw9?EdK$9QOUzGd8d zab!EouS(_iUpzdiG3hI`%Qw2avmcu!9WbEHs{31@IYZ!xTDZ#K>5;s`XS*y1k#YT* z0)XLyFBL-GwV)(gjY0^ z5B#D15{%GN!y*-&)XKH(Sxk56GDay_NlesLMCt4r415K6&C@oYx~5)f6Y>S6w&N^( z9=f-0qZTzv@`L4%OGM`^au~s%#i=+9N}$b_W&f#V-)3&rH*s3uYFt0v#F~o6_NF^3 zKeq3t?W$9Df_Ig^El!qr5ZVuI=h4i}$KL%P`pU zHr|ENwy^hWwxZ=-d>NE`_UJPj21ZZ92j~@2z<-yDsvGe5CU)CtWx2Xs zhqGdEa==YANwIpS zaW*#&rGwvpDDHaO7p|(Xqbh%wspV7V$mgs=27dFNYRqI4Tucq-fYt_S3diC($aa1X z&-N_XM`Pf!tGIdGt=IrE`nKfe%K6D851}xqKAh|-FkELhD0%GIL6GV>vXArYU3{GQ zH&1$+;}ByO__e+=5uy+fG?taFz-CMS@GUF3@iX8lxp<$Z-cQL$qoRTtGU1X3V8*2- zhkSvw(o9Qxbt(w2ZkOKjA%{i42?trz7OAZXUl&*~9`6Nimd#Olz_{1J_v2ryzvLdw zG}i?vSW4SV=!wmkUJ#E@j7h5xBnw};yHMZ$gRJBPnHV1@3jUxBjt||XNq+0_OGJX4 zRJ`wLy<0{XT=yv7I3=q`{jHA2D)VfrGAlX`;50_+SG@poT!{d<}Y(s zgtC*?d55kH(MVItffo|fxNY@6Adat5h*4rdN+Y{pl2+@pzO{rZxIPM|)1Fzual|sb z^X_k^COppJiKQ|Jx|AEYgq;8k=n-%u4j7H7n}&B)|zNP zqtK?wZW~}p?%K+G?}4qUu5y80LGoW(&at?J*G6l$#6EVv1@DojR#DF5ryQ_L zg-UQlDD^k`O#g>~yzqUFz@C<~1lv##VkO=$Yc*8rt!c2dc+V%sN_N63_wj`(h9tnV z{hgsc-?TlR zIEtKFsm-XKcp{#312Y2yO&Dr?A;Mw1ZT<(B=CI6xbjVQM# ztxS#|+;rB|&mIa->4#d^e0u1*cM=o9=@rfB+17=*yJOvK1=r>Gcv@45NSmQn`c^(D zRA{8h9NU6mEixJjtzPBU9J?nInyy&q%k~A~Jk$p@Al|h%fZzGa4J6P0z6j-A$o3m) zfM?wbUeLDvPMDfmA7LDcL#FQp4Gz7-Y4{o{!3^WwIMfW^4)qYk7+w?I=0K zo}MnuETmUQMdp?1IC^zH_w|vx9_SxOdseJn?nKJM6BkUTIo^hC?qQWne{z~;VK>+{ zqc!(;sd7J5qNXTO)%B3;?bf#PjW5mOA@O(Ql_ZEIlSNrUbDG#!*+y=KAe*C#NDag5 zr~0AnpWQ<_jVD7K(8etHuKnEwMyy=fQ0ey;?+fp1i4wR3NYQ^#Ui=i}%vFL~XLgO0_*joiYOnSw~b!FCk z#n#d;iNW77-u8}qb}!xZ?7TBgnPN$OR3?MoreR{qbq}q{wwYsaPh$05(^J_kC0x`)cLoM`R6x17X-b!Rhlu65`@N+U8v+RU<#|}=;ScWn zQ7A)HYt6&s7hF?wmegH=#wj)j67vw$gf*+uNk`{{h2e)sUyUv+;hDMbw2n!*tV&0y zBt8n`4gJuv?5U1P7_5NTdyrSpC?1;c#+XP)4{SC_nLd;v)eX!l=dn3GruMlgHmYn8R!r)RRxQG@@vC z74g#d)@S)_q!O~9clZUeE4?-q?ymE>kA#0!1CXJ4R+(iY&KioH9}(qxT%`54=R*CZ zVhYiuBRz-g!R^K>U{Yy)M^Z}qNm)D5KEmY-FY~Gc{U?5UpTIlN+s7@$g=c;PYkt&`)UYRv;q6@WA$>kxk6=IsTOJdd1fyzWk7qN) zEn5Q_EcxE;rIm#T`8=m3BHnF+J-@3MuX*TQH5n2qQpPpx_TJkhpa!CyuPP&vkvt$@qI;NIKm6@QmYZaI?LV*jzjQTgo3U4?K+j0FmK3wOfIA=AcP 
zVf-gtICw6k_3o@@WI(jnkdxuoVaSFL2|B1Ylm5IKe0+)W0=+yozhkN3PkCVgiVyex zBPF*r)3PdrYybHf#~L2NDSH)}cWnr-CJF1lsM6Q~(yB@wb7K(_+I}O%VHS-+mqRFA} zYt^3m@qSY;2q5TE?J6?^uaH!@%xl`iXPCR#rTFFo8dBSmb{ac_k}(BUW0((zLv7jR z9b4Ube^~}bCR?*!xT`ld;%0tjhxy!+1Hh_Rif*C*4fv_Rh2YlE+I*t20338qvcS>g ztPTC@$e-=sdg-rt6^*_&jNHL={QE#u_iru8_bnv*C5H_F$Gv+=VWAHXlNZ%&0xyB> zISboP)ZC4SwgwYDEgtaJ)A0yhqX@v*JOd?&UyqbKqz~WbcXOVDu$c=JVGb%oHPVZP z1*Lnyyo7s^7YYYwYUm**dO|MfSXw3xiOKlJ~UR;Zv_0!7QYMc|)ITU9b{Slm*r z5$}Q|2+%a+W6zl6a7L=B!p#G$R?%Szff->5F;2%vUrKA56N%c6LgSg;h#w%1TNuWw ze&QdPYgxu|pfIn+M&%k|!2trew`S49Ls;<$(|`|l$$VcYExQfe=DqD}us!#5=0980 ztXhmFb_;zB#cyLy!kimne<=0^u|@8I^NY}*aZl}&Y1ea>sDhL4{x6m@-%L2oTU9~C zc95+00Wfj+-jpYG&4Jp|NW1N&7<}I*GLKapceQ%vAf1{*SE%5g4O?jTS^A5ods(7J z$mjU^aSVk}XGaSoKCK=aF;rh0<4`$E%Ngtw=;MMZjJyWuJ^y%=pZd&=|1;_Dcy3U# z{G#kdk-+v!bkKd0hR%et&`|gOKNGEMS{tQt6Oh80TH7Fl-t+R7*rPy?a*IIazlG#H zo~;`61*;|#k3BB^^=v+5|5n>2&u-*R{X#G@hp!&^>9mh5VZB&<`$jo!p#iW+WHR0@zfrzg zX7>h00}FY!Yfs4euu0hcd0#B}H$3X^nm3qik(s5dW0j{xoQ^5V-{vpxmhXsGyCu$l z6_D5vH)&q+H^l4jaSxlo*yG(&uW+A!(_mOwDOe7RwF~d#8C>qn{{8-}*o}6pR3{MP z1Ey0cEo!Vwfj^f$Mzv?lIrjf}w6G%F9ud7PR3KbN)?PiTQmskyLwPLTS&3e5o$-S33vv6AD?(S%EXZj;O zaX^47aPTXAtSOXz>a0z2JCBYMU@7{|0)MykAeb}5ojdM<_ajkogq>_uZyNMZ!p1Xc zVk#k5kJ6y}p9_0S;Avw>Ku!?}8UefZUadVL4;5nXIu27&;<$1A8SEiO+j2TfSTFGM z{KGM-&o_N#bG~c*joGinNlY%O)mB(7_lyB8ohaZVeg&qtc@nd764E#HDZwwgPQK7u z13y5g`J?^e_XYaph5xbP@*11&$m!OGdMB&to$%mRClx#~SjWDQ?_-y5k9-3o_tFP1 zja(7+#2L!$VC>$g)`0Z4X+GkOL&jw(p-5oH_3yiLY-6YWW|j!`p(*1g@Q&fpQti&FbfAh<5ES+)H2W zFvR@*&)@4SB=fQYAMGmh6J&QKav8U+hy5&6pW9aX;fb;$$WdRqK!w{@4s2OH%Uns@ zMaNY^)_T}QKxN&EoI`!{32pf8@L>s`?Gq;}F(VqhC$U@U>-U1;Ylw{}dA>v88+8*L zpBNNanp6L)#OcAbL>4Q&Cq4Z>sdc^&uMK$=J^lS1+Bx{oMfI}_It|ISL@#O7y4tbj z#PB>pNh*O`IXMc0&nxp@Qum@1Tw|!_NQ^gr7)qD?5zA+zw(Ar34n7hds@<6Em%>q} zyKr5_Te3^-a-MAYyDO#ZovpswgKKJgfzlp)S%v-XhKyxvg%c31hw%UC(hdMHi#D6y zT6?h=!E0M&Ic4N{g@3pRiO9~2Ui1{1BB+Gz$rN1q^{TS#_2ZV;gIjAipT*MXUEjJv z4>mXmU>TNF=6=ak3GoPX3P@5+NzF6h`s{sjbFn~S8g-M1;yFbOj!*&1x7n6|I1R~> z?X=6k33v_qUs%kPOiX101B*YoNf`h4M*LHQ4fh@b`D!`JUu`~RL?{$d>JY9dIDnc# zYm7QxLxiGto@PhnE49;kuKbx_FpoQXNVfiV1Kr49R<1Vx2TsVTEv@z;_i26L`0YC| z(F-*&dG17mH9}#ZKEmGZr9CR8BifwUnP0~ViS5ov2{%j8Vo)O-PeX^%Qwj-$U&j%q zKH@*~y1V|E-|MCEgY<=e>)&Daa4IJ}pqBgKe-V7fyQkrp(~>Ftlka!pt%MHJoRH4o z=Vq9)K&9n!)n*?fvP$1`?`6-0`)l^s{^1E+hdl|quBPe_v)`i>!QZmCRKN?8IjWS@ z&ct$6F<&m9HA^2Tyh)RPL*PJq4z1C=c7?wX4pNv1eOkMb6YtjRWCQ83GFvG{&yFn& z<+vZ&fA6XOhI(%WBEbqdoaeWjPt7G#`v^@bXlVcGRD$Ri^3Yol*_@;wWg zA5b4{lf@axHORv95G;4e)0QuB?+zkA*vZdAxg|#LqoNj7l zT@N7{qpp93T$O%ev8}}gNzst>FVioIO4v44F2}9zUloxCzp|Dk%8BD zU_)7u(i(DJrLXn@>{yR1oZ_&F^!Xu!cLDjNt0`}4W9`(suE%=V^_RBYF4Gr!q|(PSsB8DiXyg0*AH~NohrF5&Aer|8 zVX^ExM|J!E)O`9%-J^2Sp^Vcjl#=wSuIhX3dre{oGU2{%(yuKy5S9N})kW}w9)nba zgq;4B0@h)06(QQTr1TcYXD1>Ean=Jam}YDBLGv@3_F_d)H2W*z9IrT}iQ1 z^rgzsNMzYZR9(JmNRnW=Spo;N%`)MZsW(xjAZrmn=qlnX!3=X{bC$`z(c#23{~oNt z(3-I)hpX2r^gH*kOb{+oz&QoP`7mpI^^F-7ozgVrtPiNUC>PR`PXBV7)=@N(&u~C= zcuA`d`1N^`#p19s}#^BuB49r2iAi)xAf_K3z5{lleOQ;TDj@G(S+*dTk<%L^PEH^H38X&J&@9j zWq1%tVXKaF>h5;40bzB6Dc>>3L@V$r#HjWN36g3L%3+K{yjCVDF4mFin8i&VOjUp>-X=9=Xl-sVF*!|jW^Ai9q|r` zqny8OzOct%T$>vpsgeNoWl^eG6#{bFkD{d!M!ZGnSOv@;!iqS@p{(%tPG7 zjC6gu3bJovD=4G1{`rsu*5~gfQvCVj%qnCgl3Lk6R`EL5V~+))s;P zvx&2U4Xk4=zhDbz=)q@OR5uw03eR*}s7}zOBIOlRTe|eu(JvR9o-uC!)cnxV8ndN)f1ruTgM$Y_nLB07!djk|YSEo~|aQNn+A*sHa(C9mwn z)U)Zb?mP6KOAg!rYa3ag0ai@k*)IJ*HBZ~g zLP{rR&sr4*2XW}k`AgoSJo^%KQd2eVFyj-tUU=$$cPK%yiD-Omu}xX{D7X2w#Q~F&dd+^f^S;n--~_mFUE&4LWi1w+93DJ<2rTlr z6wC;Ud0C4(m{)#fxw1n#=7L`l=D+?~K27ilUqgGG+KFCuDT*B0ICSB7rKBA0aeUQc 
z`W39n{#^^8DBx@C@TWz5nwiGC+X}49t-+|LGj$W_e3Z?>*jfc;)`7oh2)4nSk>TdIsRc&VZ5yKt9vN^U*k0nzAOQEfhAoWgHcT6&XJtgK*x!@ zML73v%_583H$Mx%MxOK(-czEtVus#KQYp#gm-?jdcRmslavs|C*`g<5u68~@08D3o z^Xh5??Kbm%DXX8QQ{TW`cObpWp7pm+efWG0(~OX8KurAN3CAJDk#}6_G^-uCBqp#8 zrE?Z!7hC1y-|Pl9bkyd1<0=CND{6dm zENYlktHUFKEL7x=B<28*7HUnqPrsiCT0mwGsR>Kkf<-H-cn1kfXAWY= zOuQ>2RBp}M1dPcVw%I)V!=8Vrm&9l_^oHfEiyl&PS;a-(L?67rpoW*Y*ERyT*YHP= z5ESJN%x2X(nr77w7OapZLhr&k4jGTK)rUzCu~xFl|=avQLn z7)nm-iW@_e^Khh2`#hIdu8CI+VzIeXHL{i-N?QkmI}5q`;l<~dg9AJ$H3E&8wO*g7 z2?s~+)8O4pwuyx7^rkj%mH~(M>_4}!C$xmY{KAUCsHtXGVd?F<-lLZh8m9|wu=UyA zp*t;8ZDHSr?Cu5bJ!+_v+2lb8gTlysoihkzO-rqh-Q2s;Mn`M#H!Zk6atKe+0ytmNeF zte0^HZJJ}={%ML;PN|WyH{FxK(&~IPnG$X?1NW=ttX>6>*MAIWto;*% zUs{3v5!FEQ*9kfh-6|K0eW4SL1IxwB({NM)qjH6=ETRe$Dvaa}^d>Z%Fsn5;TR4T3 z!cQHgv9qzKAtsmOWyyKxF)WrM5nrsfx6bb6nB*q-vIb?jquk zL-np51w>Z0_hAObuP>hq>}){ zjxL$5ti*pQ^V54z7AO0ZINqy8*{|A<-^1JgBK`NUTdBG$-{|J`-MMKY5G)R@Rru*| zDo`?ixL6k)I)t5@Lpcj;)`v&u*<{Y~j_Br>r%4PF6k0rrPjVvy!Ar12i{- zk6-c7+UsdXwO|@iTCta*UvUq0^yeG+eI=M_j&1oOD$S)3bzF&YOj@x?>C#UvxJ}u{U zn%+CWgdE%5BDwqsK6O{sY2N%Eg#I$uKxlRAtk7lF5QndHXo~x6H!dI+;m8z@qXqU37$r6AvB1c4zz}707Vkeg zEsCm9&8AaMmLN~Gytxvrd$muyNJRIKBIZvbP%ey3fS%d^>J7Fo&a`Vm1fHF*c@lbn zh5i;pY2s)$_SZm|$#V8*5EmEQP?Ab~o+D4jUW9!GM^$pWL(d}H1bZ9ywRuaA0kYQ5 zr!{=f(uWEo1@AaydjNwJH5gj{eCh256m%OqZe7isn#s)V2vU4q-IMaH^8QqrSzdtT zoW%4HY~4m)azS`c?S!b(v}oTh&^FdQ8Zr=60f~CQeA4H9$kC+mVG0H#li)XmH<< zz^GXrREbWt-5228f7FM94QlXv9&k}v$i>MsKjso&l5M87RM!8Rg+zwkygv^3|yNsgTO zuYNqhB1o2G()HkKSVX^$!?54Ut1CY07Gnb7qoLCswPzn}&%^Bc0fmjITj&wrsVka) zZsQJgyj)+usZoTa7(6v;e+)hGtt-shjzjW$#@%q&6Ike_YneIH3i7hp3{x1=$CyI} zenW30UlW2q#jX~ooqr;__M5zz>xPdMuq{!OPp3hwoS8niPqjet12`k7YqwUol#{l-t2eO z&~xl3z=*@42y92ksH`3MuIKD!vYPv``s5a;iiV~wX3eUX|1G->>HwNO=HmiuE$l;* z?c>L4;gmi=EJFq)`V=a(&O1d?AJ&^{u5%!<=2?9!mMG9w7b@XEHk&|g+9soRFfo;5 zo`F#;QX4s0dvUb9K-7vkKm}vEdtj3=fbNC*+=KmNY}=0d{4C z)wQo?jpqO9v8a1~u5^_C#2;cgi5}C>*GOhmMyIMA-$~b5aT=CG8~08d83aB)hP4TI z89}6r*!I8!YAJv`!7`!?7qY?HN!g>sCSnF0BXE?xws3AH%^#rFESLTHU zt4juV58Uz!zZW+COp=pJv1}|&v+SF(tr-eWsH&PeS_|J=331?6Ig;M4zk0PYu`eP% zwN3DydR0am;_w$md~6P{%tzhwU$F}J*1NPjsC}_2k)@O$vb1pnnV}JW9WsAQEsodH ze0Z8~PKx8|+&zzVzJvNmA>K_+dXKB|X4`k@<_1}p1xabG!_5|xkW#`A9pkYeoPr4? zXW3qR<*o!BKx{~QfW=RYtb5`Ke^$?oDUu5vo-4PxN8N$9Zyji;eS|=>6}UgY^1q_E zBiVj zL*qv%))xbM$_rOKD_(Sr4fhwnhb^=V*49L$t2={IrYv2w>6!w2ygOPSf6&-vCHYTg zYwd2{dmGo%9@#LFU@d7mzI6~k&-OPaS7ymc$VrY?Es@Uz#PPE0;H7D-RWlg+*O|U< z>yacPr({RXZ%I@YD0b!rOuuZo ze|||>1De$Cx^A$r-UyqJdsVjWk)*;5nA)YU>uGsVvGoAyti7$Fkg@jHbW3;*QX2>3$y^fP+1Gd1y(&s||hl zGp{lD!~Z~dI54pwY>Jhz&)69ybVb*f{w4H>? 
zHUIaqq<{sKOJf2tfCC@6DF61{bZoK;*rrimup0@yPBi~_k9jrQt%}_j-DzwnC%y%x z&TGC&fhjgIM8?lf%yz=AqNW4^w9vEk02_kTnre(H36{Z>1bx&}&jk3EHZM?o5>_H} zShri~vsMdVn8?%X&NClqaeIQ2*J^FlU43xb@b?Df`BZwksQA+cm3ZsL(k#+vu9m0y zs@8?+?&EqY?XSB{;j8Apkn@3B3vJ*bJLv&;%J1AlHlXd?ng%mrs{(#N0FQ`fai&a= zx`UxfJ2u>7!pgL>nr+8L8P5mx-4CyHw!*7@Yzh}7^(#J|9QsR;KyS7LSDB~x$U(2o z_l4c?Uij=2PHvEWGTW0L*-dsDibolbiKQjDRW6SgkjX@>GC^eyO*07E=oqqZb-Nbi zAKl=kVwPrUlLab{{CNvq6ij7Hc)}%O`^%Z?4+F1&1anJK?pn`YC4U#ZE0d{!9Xv zXWlE;%uKZE=$83fB%Ph-0kcg=W(XMQJr4gKtKu~dU<7dl3rx&@;-ciMdCAu|uCiu+ z7@BORRmg0jgUio35|-ElFKSNzo2_tC@KCfS>_tQk?X#N9P7?zGtWS51A+Jg2+u!ZI zeq(mC)-i9)R!dezrI!A?J0ZQwY~*I@O7UoH={tWJ8&D2%*ck2&dHlez>Ls1=IP^cD zG~>)JV)UiLpQl$yz;>veluZ6p*!YIhPfnA1A%_p<;hG68``htbWAK8EXNV}dJ& zOo+UPlxI?X5;bs%lT`bu(h26n+plKd47N5t>VC*zNPZf=2`;ckS`k`(2SmD%owUi- za-5-WfRa4kN=bKDz)-Q6M`sLYoRIv`etL>3&?~8O;<(&caP=3iZ!RbItf?V3)Nx0} z1f81P7Q#s4nU=^n65%2r2AqC-l1c};fT36R-saYMh4#ydNVK)@-!1~kle6v&*u2xp zeW!4^zG_?{XyV1_Vvd$zT5S)yqkRr(=Mox=o9x^A0p+`Xtg_K+@(YN6tgfkLZOX^= zRvstRReJ>bhQGl=xyz2&_*aW2`V+Q%fnR4^o{rTz>;-&ZFReq95bl%~LZ60&wu z;Z>hM;t}X-y-fFjhFVHPiCa>!^zu))0NL|&Eh|VoQ=-pIWvdM9U%rck0i$Q{VoJ}OH&d?-n&OIuFOJue>u5^ zjiFSC)Faq}I=r>?cwYmSQHdYYVE3>2;nbAlw4h_y@S8n}fZsR94WaYFc95~rD~p;{ zp-dB3fI#Wuac`<+*ltCk$~luW{jt|daR0~8X9r*7y1Zxm*WG<@?b5f|wR7&n@@!tT zb7ZW#8u;DX{oIou_E_MGGB@PbV}Urev@#6-Rd?gh3~{> zbIkma=;sZ|4zx;O%9o*jI#H}ymb042z_SPKkC`-~eCU7?hTQ_f!F9?Mw$FNAR7GK| z(A$mG0bmBpFj1D8<`no`EJ*>3svYJZ{&CJXRG%{3ctBfmy;Xyf|+^&rv-}Gag20xG=fjE z|6{LJZQ=cCVo?6mq2Jct_oAiwxT>dxeRueCPGya!!{9SfiN}5SO2(AMa*>kbHFQlg0CuVFAn%XH`h=)EN@P?e2qbJxEyccvD(i7gB_BwsyQjx6= z(ny{+$;95DiOEffV`!l3o+uGms^|wI{SwLFph9Uv@P{)ATYiy#0Pb&Dw7R#QNYj~=9 zz3UdJ=~sX0xKoU7z*DhmHb7c3Y?O(7_o>c|Q1h>*cLxDxA)ch$8|ECLt-*mM@__5k zp8Ps19zuI5y=kg12!%iw@rsSs(mQo>ef?5fT3Z%|Hckk~x=XpAeotHW26IK`(G*YR zRd$3t*lnLZHd{SpA&vfje0^s)TwmO65JYcLqeeuci{2xIh#oy^2&3097(LOV6TJt~ zM~mLOC^L!P2ZK>(Fc^$-{om)k&;52ko%4A=XP>>wZ>`Pom(tVCRwQ{M8pRQy!w5=A3Q58Y6v^JezJ2jT@Z_v z7bHh|UrpSfIknw2IN$)Vx3lf}##KD;`XCm>n8&NM8BPF8abNa!R-t4wwUwhstu=7r zS=XP2E2OMP^uc7Z((x2<>fX|~Ix8K8!^$`AlMWuyExv4vYJAo%-@Bae0_am*Mm=sd zMH3vR$99u%!a8#VI7@%+ci%tLW7?=Fv7i0SpS!y~+V-ROfEKw6cV!SWXLF3$z2NZC z%zQ>bkx^`+7FfLJN+0z{F*uczIo~JNsey-E?coK6cUp>E)5$1ulpL$g@G6~-kh5R+8#m( zXS3*ihA5YwYO!fQySB4nRBn5GoNyJUOpewZu)=#CP6`WAYOM%93`P@PR-bj`jtIo1W>P^Qw zLjCln(v=fA`A`*B=37*0MlXAKJK`{UY^^rj|oF8orcdzad>;ze24y;)O3WlWEKmr9Z$8hl+AoTbri_JhR z{UHOVJo=~i6Hdc3%enr_MU^{)%Jcaz)x|Y(799YLP3!x3Pc7y1mJJr;zodi@ZHh9A zA448Pj#H$M-4l2Pep_x50IkS!9{9zW=PbN|%oLLkP_#gp=1aJE(s2G@rAF z6B4H>#&06CqKeJ>HA!IOJ(zC?w#=?CTjVn0C7P5a_RN-VKN&EsdVrnT;`%dTOYg&S z0BH9L;#F8$nenaIkWcz0Wm_-b{lhv~#sBx+TcIdI@^+p)>_Zo_4Sc^gx@FJXU!$_f z3G049p3f1@_6G&40p~lC<*$W`Q+>Crc#zXy`yI#`gdRVw5Yha%%aA|W86-~dhu-dR z>Mq`YYjS?q_B{*u0i(o?RI$fV-(+T8WK9B9zrcQn`(`>x!rK4I`fO zIj=wJlGl2}ijLoo29T37-9s#K@?Iq)M}saW5JLJw^sK>-eW_YW z3N8ZIf$YE#kq7spHIy*txYp-{rWoH%4#xN}?*qlu~+GR~_BCL6lU^c3x#vba@ z4&VCjZLO{p|L)bf-f)pHA?PrpU)tNcbACz+iY+gOi*0;Fm(A(RINSrAbSz7))#H

Pr)%2@wn;O9+$A?kYZp1V*@v3|3fg4EUTTOlEN(krWKW{pGC zA{3xf0B&t2K^Jd{jnmYeF}S3=^EkKMbFRN@kQR$D1-)org~Wj6T1zV**1-wSDv zeV(w04MKfDxAS(m+Qs%3i2cG+Or?lE_8d|4KKYvjbxCWKh2vC z<3amd>7uWEyz>sx$Fw{MBD8#*P1~VVF%xz|pjM<7#d%$O)d_E1Q#Is`SRio-70jAN zJ6M-dNAGa4ffkI{JDSN!wExx{lWVyHNO=f zn`k~%5cu}d#sVdPKT(@R*yqiIK?^%o(C~c@m`*Y0?Q1}n;Lun@3#7%tVSv-Vow|Z5 zAYgVJSZS+I#cQSq{yB3}zW22(${LsANbt#XMr8lTv%tN%eua&UpwZz5S_q9^6mR(# z!p4iSf$ON?&p`tUuTYYABVMZFhdWnR7_H~mzp44!2t$tB!u@E^WfK3D>M<03LU z0NqO$A#B^p4q}&MyldI^^V0))@EC%fV)Ze4y=)=)E(XdK`Lj+3Rny(JEr%7r#wlvltx1 zF+KMbdu7U%UP=4$FSx#8Gkc+V^iE-KyFPV7b@eB7mz70-IsMd}eYVCM72Ip7xj*fO zWgZOQcrO2(K$t!R^R$P$!CM?N3yTE8H(a6i6v zYdYPjR1JujJh&hFhgeqO=vFpu+CML!$#*lWg~c>Mb#Ke>l0kvnN>!K;)~MkQwVSx$ zdke81ZaC@M{|^EzW80oI7#zfqM;liRFuxr@dq`A_elwk%Td;{s7-pL9MmlM%Dl30W z9+sa++OAkIZB9-)P{IFGQaG?qEJI8Nmkox`&S?gs_%cEd{+{!n1;t-fINnH$8HEu9 z+(-BukZ#5>iNHAVqpX&lMORb>s+#tQagZA5+}D{fdrud22?>AZc+h|TWSGJ5@VLt; zgElSRp)$(mDkzRgay1A10(nXv5T2#4^QZq~qx$lo>jvpjeSyOyW@FrgKM z1H>gQzmT1Cyk!i7%;kD$l>K?U%zHABCL}bk2;p z^W1!@*}tPo>1c4|?JFwxZttJ1PmqDL@lZoVXv4+jb>LJ`Uy|wvm9SVpjYRE^=XB-E z@TEOzTe!*yL%==qsY7y-r z#@c*gxq_rFrkg`C4LX$>xvY`j*j+ahROWi;-Ow7{b$5jedXSV(bdVcimNlPmSEB!8 z=YxR;_jYoIyTYuh^p?r{(psfK7FZmL339dJGAPUsFEBE(40u0bo4n5!fUKkkD|W;q z7AprdZHzv|rfzyFO&DmDG!}Z~i?oGwXB~NbCe*$sZ(J z*psY;tda<16gWkA4?Fwxe%Kq`Dceqhfpp9KHvLv(&h7&gbx*q&88)r^U`+oDgD)Je zfz@f+-f}j{A!hC>&^a}skD(oJN}KXbCu6j4YI4|+# z1;_J&pM7#jA_JLJMA>iyN7QFxRp<2y8aiWQBF94<6bP6dpZx?T=)2p}`c|4Q?4Vn3 zFx(3g7Ah$S`zMM5+_yBP>1`sJB!RYG$rAr$f9WQ?v^aRJrzRVo|MoyuVoiicOUH}w zW5P?O!N;O06Q3vKfc@4AT%Oc(eOs)adgO6Wn`%jOR=)1z$F66LxcxrB3h^B7D>Ht3 zc_Ema?K&zOwtTan1t!jNa%`=1HDwW2Sv7&wi`*#?0mo6^q;nrdF1{ED>7WnL@riU`$7a@b@I#aFxlMs zH_XABbuLQ%Z0vtZP+~Y7JrgVopN&U=KD)j1}8K8rH}>F4=$;`65C0pxqWFlH}I0+ zNU+yP#It!Vdf}!4={o+U3sx&z_%r&0rIm9w8PF@5%MWt3eZLfSL8ul^JaK zBuP{b&Z(56r{&NKq3ZFxwVQKmN1Q9?2}~JEoA=j9J1q-=K@cOpYH3VZkvVzjY55+-lBXuSgr4I?qmL>EF* zBSia0pWYXjB=yVk%pMaeQ*Qe0{64H~$yT=NOb5kyM-XZp+*or-EvGalee*^Bvv6$A z=s&XJ_^IrK^v_b*zZJG_u9}3a! z@T9aFO9i_)(U8owSwN*FROi2Rc0;MezZ2buqTWPBb5n`A%`$uVj>DNw)Nn+$4`KJQ zZ|%c;pn;YF!e5|QZ*q80bM`R%T>D1W5#Cxd|Dh$ilXfkpKG$e!rgVnJ^d9S9IW_*t zdZ+=nDAN%(6^73GYBK63tb=~T7h3wxa$IR)uzy+ZgV$D5%VE$4j}+hJ|89hV;nL;TYpuKfZZ%sfDn%A-uCe zPJ7ltuA-mb*T8GRrG~{7O@o*%tokuXiTQ3YGHCx+%&7k3C8w`8RWP?|$J0MC=A0`P zss=ywraIySJCB}XQ^7Ch=FS6@8r$w8h^|5JV{YE@;K0nrrFs?)y(?76Zom6;EoK4B zhRw4*DOiu~2k{i%@*y(NOeb~XZ`%PZXfv15t!W=Ki_Cn!VoF$YNcplC?N6%QFqZPc zI)@0Row5&`+<0v45lsA}WJt25zv;{eD|__q`EO&aR32ytSjMIU{=~0$y^3?r3Hb_M zNPg&=4|CE{L1IjZuWo3asV|KDR|B-Bu&+C8Al;w-868?GgyIZ0{BSr-(K}?xZqa4{ zP7ig3c0%F-p<1c#$+@Q{>|UEVC&Y3MFSpm$fu)1F*=fYayc{JS?(|nLA%iH9-T|Ix zp*3F1h%m2T`At(LGZ~#5t3$t9Z|k!_@mSkCSqT|RpB!`vQ@+r=JsJAlfN~J_-YTh@ zDSzvCFk)H989dl#f+|~Ddipb?2k-L(hOZplLHt2OMRNR?t+1QC^y_6*5L7+VOLN9! z$USkQV=S7zZ!UU!-Q+l=sru$de8q#jq zf930EfMCVBe7spAV}1|H_b~Tp-0Lg;!jG5djyxTP0?jor%i16}9XIMeps`e5jf@pu z8Dk*b+y_->%fYrHDf4v*F4INGe%XK08nFG;d)N+i*FqHZ=gITs2K{BP_y6INM!}Yu z&S0pxTg(C!Vd5CH&Hu+|q12D>)A}mNq&7Iug)-sG$L6M+U1)1BT8dBq*8+irD%H&E z?>L?hgFXfpAKq461r0@@we-IwzxUA>vAZ{s7OA?Mw|$jMe|ES|x?+vJ^V@1dgVg?? 
z6$sW7*#&4mnX8UKFyz(@_4MWxSdEIpZnSc+sshm4Kcw3;de8Ptxgr%O=5S`10BsRL>_hw0mjM`QfmR~ zhkyXEAYJ>fjMi$O8ZD^alozJ``fzsj@T7Su!zq3L(6w97z;!qRlVWR(Q`Zkh1Y%?p zxQ_fH;qdqE-1b9Ma{r@=&1*;J3^e*b0(F0(>Q*js5|WDHpi6rgIF%8!$P6^S`Jg1F zcrZA_41ELsAZhwV^WU(!ql8To-gBn`S}rC#tF?S#0`>qz-d)i#Ve z8A>@zd7!_Oy(kJKz89tFG(%k{!L$k|^SWL(aTDFi?dBMkN&4%W#Zt3i1izVj=j_(9 z%RSCX1QDjzZE9G1CXoJ&|5SM_>@cM`1s&UXGc`ZyZprNV>dnDpuUhH>1< z!AsGek-mii*sIKjI5~)Y8(ZQ#%OV`T z%%M5uhFS$7@L)_rH`J8%&_Az|0mydfKkG1rH*;(b_4fZEkO+GEd^Y{xFFN3d$6&-4 z@WpNX_AeeUmry?U1ov^H4Mer5iQ0w z$+M!Q3;sspUy*IzsH%nMe#2mI-g`)T*~?2U=C#%y`%y!4vl zpfE|D*ZVEmlI?m1r5LF-#kn0{w%2h1s#7^hj1=?%uyh$*qKhxq^}p>oh4UUsSaHvg zBi>Y;c|`rmiZxGtnD&fy;Pa%Ry7lgq-&$8f(PgLlsy!LPqI6Z6vh_Rr?T0@OcitaW zfDtO|Kj>KB7jEO$e;P*pD+PW}u2fR!ykM~wZaZ5>{%N68?cnWoRqSZ`f8L|fGFrZq z+pcm1D{5tBQzAsTFrO-Pwi%a#zdBI(HtZ?k*Zv}1evJ@(*- z|2TW7Ob^SX#t|*q-jQ`k6-k5Az0764sDR4K=@LM$vl^$&^!uKEQ}7}{>SfYbJj+v_ z?zrFGY0tJjaxI#x!Oph72M=Vx1^jjjp~q%H3OJpvxv`Gc9pxb{MgULAwc{6k=CBUB zVbZ1TvCoq|HmuzW!ES#~q8SaRWKR6*#B=T5XU=;1jm)merZnet+3yqiIu^k9nme zNbBLeXAj3RsbcU?vAF8`tP`1*kLEMS2b(Wo;2x)!QrnJVy(%P?Q-P8*M}IVxZE6!3 zjb3dRE|Z~ud0#ER@}qQ<^3CIxUZd&ZBan~ZoRF6{n?t%{q@VWo{k$E{fe< z;IN~Lc&;g^%DoyItUnSgx+=K1Zv>aDb8rnMHR98ITE(n2?vB5vEc0i501)Iir4>Er zl4I=Mby?xNEh7XoyZ`hGvZ{sr8hkXOe!xw}HGj??n}?K=_N0AG>V8z)srI=JI2s13 zf5<#4ec0_dU7C%|Pj3zjVf##w+A38yofzUIx;ZRA5-&?7rL0krK0evh=6d5y4~^{| za@}J`=s)o7YvAz7t6tB;1gIDd03M#Gtz)ZewW(@3ayWoJPKQr$O#$R_KkxeT!nTH9 zBFP8yaHBZWSTKN83?aijh9pDtDWIMvgn0tGLF{x^r4)B*_w$2rjKc$8rN6Yo^iHgE zd2R&eCtPb^8bc^z<|sPZ2KUVy6B>QluX6nq@FxIG}i4+dO*C10BO-g%*kZQKBd1_<5;&Al$9 z5ALokXuqs{-+3XC1SjLzt|xu&9PTUYGt~7D<7cVHM}S#4cK-V3?YoIT-$&jaJd|rb zn3?Gv7y4xVq8C-HLH(6bW3pa;Fr^{AbW6XAe`%%v$yrOIQpDNg|FC0?ua)|8`-z%( zO?BV{?`^^C)o78Cy=SaBYM>a#al{0H&70k@8h8S$;gQ^E^Ebw6YyptSADv*1tUQ_Qd}FN#zLHm_gKqmqqF zdt;7=X!{IIGfX1*4{JQhc;cBHbNafyIMIdgPnm5Uk0A~2khgm$xnBh%P@Q~h{Wpz$ z@u4f&HUP=~p^kIFEXC;z&wT$1I*`m6+X1!BF)%XB7^W7mqY^X!vuOs0%iq&vZ(Dg4 znZxHA_dL5~72#e}3Z+D3L>m)Tv=ZOf*}Bs^zyQ8)10#$|l&X>3RW01@yB2*v-ec{J znyORafeY4oK7jzAxLfV7{xpdT_FKO45aRvm*gem8TdjBKK+(t>obZgjT7`m!lgfjJ ztlZ$kK*J(t{}!3IUgNrtd;{VC4Vcw4{n_oh2guctGi*viBI%=UR_tzZV2PTW?%8#^01-R{r9g5=HKOiH*D?Ys=6c%JzHV)UxLZSWk1 zq)hpi{4h;Ll%P#ACZn18$3+vs?CzJ_m=C%Ap!rz~eWZN>8o!Gmk$>PXX)8gzB=3#* zR@pe<3HAehr|&Pm--5dmw6-^oYNr15UwNB%VDFZ&P1^%W0!xU?puvGl{CDqY^e7NG z=^@nX8jZCAT5oeAU!6X9S>7EF_6&VxeT!Xq!l~m)(`MEF>@DFxvKAK|N7$SN3B;3+#?&dDHgXh%=cv zdwS@YA*(Ek5n{)DL*F0#^(L>nuz$zfNrDm!>_WQ@bT!O}JGp-E;RqZ9$T`W5{`2!i zXQyNJn%GP4K4W`EU;#gefg_(ZHXq4cl9|BXXQxd)Lcc*03G4mYjWd%p;^|=MFip5h zNhH#1j&)y|=-+FQp$A`HXLCDyGlu3fizr-C^_&U1tK>+dX?@?{mu7`R#nTk0k+FL+ zoCn#<$$EJSHeXs-w2uls`}07~U=E`S%HiczpES=_!VX22sp`B@-Q2(pRM5V~EWZ@o z)@$HVyvvhqe(Z*lRFP?jpYMHUBCp_syUE%xeq(&O(u$O8MgmzxjnWz9Vj` zI91-CZ5uAyey!u{-`gzGX)_`l2MEc#6{dnm^C-5Sg*sK^*L7~uQux-6u77IJZ@NS+ zH*{wt{BNPUI`{2H*XzK5a`C48&ea&))VS_TELUD|_MqjD1@~ac{IK3v#Lk>^$+LsY zbySblRgpr1g|*!1vT*-$g-$K8)IA35)`Ne${9D&d< z6qVRGLCiJ>G9OA|3cK0}SqYl)4~~}@Gm)(cLsKf}c_Z^njddp{bAn*`dx5&!^pY;C z+^WCVulvMOF%kYFIEti3lg-5uohoGM0iE+IL!!hm8^+jmk=83gx9hF^0%ORCng+b5 zv@#@;X?iqj@XtD6WM6k>52fWFrFjaJ*gWC1mA~B(@;-&P*W8I6Y6nf^vsFz$_sDl- z1zoAbuBBpBoi3Iu&Wf+`rPLongJrMWZYjGES0!%TWjLf1wb6y_l%+5C%+U)=nH)nE z)T^s+HG7n2v$@JR3#eE}eL;k0KPQ$~y6m=JO`pqF^aNhn+-i`u-1VWLt?d~Igc#&= z>lQha$WgFe{-<2`8&aoA5~gs)G7eP0Nvdsa0;ni|2_1?qjxYZZ-1l-ZTVYwh@1jZ| z5Bdj53|Zb}D82KKUWv%1SWG6I^F(9vkIR}Tydg`_{%gOhF#`^P*rK@t3GFp6aPAH8 zmwUY8Y3Uu8UqBuo0REy__L>NB$<%|UzwWv{b&E0p?Bkx=4ltgVH&l}3DAEQ<=Gd08 zLOBp@Jb&WW*U7zO`68R01P-t_bNMIU&4^Z3qt_XeU(SB|3UmY^8`dhaQHW6?HT zLscBzi=IA&3W8fwzqh>DN>OK?P7mEGphHV{HL8Zvn=zt(trqlVhpMLJd%X*<1aTrW 
zxGo1M&$dqLM&qh>J8E17uizmDT|H7hI3m;o#cTsp84Oh15JT;T_xZL;q~(#V!FgAm zrIj6^bq>9VaKcw%Q&V}xz;f1g10Ton+dt&?$&IBq=%&lIioh}Gw_ZnUSp85= z=Xp-;*g}E_A^cr{ktZ-0vPG^?u0YIosk}6y3{Q&8aFdb-{dlSsSw_6At zvZEZE_G4f`9RK4@)mvhK4Hn33m;GiqL^3 z;ZctuFQUpHEYag!^sAAplnOBS>Nu$06^`WGr}p=5&>b}G4jTa6My;ccgMUg4WEFEZt1IfL%lUkMmWi){D5{MTup4tPBmTaJFJ?*j|P{ zvC^QWzO7tjmK329eq%~`(qjr|S!Gl)dN#R5#RXdOhYplfitvyxjm)v(DA@k6GjB>= zx1+1*W!E?75R9)q>Q8`K&)1gTXr#bwSyB+kNVrq%G0Nmuu>xwSD$_euaytWC{xnsc_mPO@$(?SPW5-+p<@ zuD9mnyjPkZaLu{z8`9tG<@w*mldlav8{$tTV-V%9_WuPF%HIH(5^jSSLF->foj6>ZZYnYThfT01NsCw;o41^IOkC0aFoox1E(C;ZCWXetr!(qm~-r z?<8=IVjX1Yn307}_Y&X*<6z=96I^JKFQaB!ukho30}m`+Zfe_63+Yuw5R~nEDY2aW z0OsqgK?G2wTnIGZu#B>c?=*;E^1pn8S|()sqVv;vB3UV^1%J$Mw;|Yn_yib6^(hkXNsO4EKb@N(dVk*zg>Jn%t++UW>vkFHFwc8)^q_JJeWPZKON zptfA6IpoHg?AiKK3P11%!)Vun@lKke_~AhIA&TV5?YY8H*9+;f5*|-p?S6J~1E*L7 zBcGR0=p5)BLK{Hf?K0jZPtg)cMQ@O6#0z^b(W+mwkFogcEVxpz?H8`Mg#Ku{ySldJs8SY zc(@6y{uJq2|CA>b+GLy}JRe8p!Gt*d0N&12!uKTfyWL78n=h_X*yB5e%Wb3`{4-Dx zu_-LvX5FNv(v@Q!Xex^!WUNRN-e<{WwALyXDU#JcaSB(-%UqA|aFngydw30(4a<38 zy`VGPCcupt`9pMAgs#6IM83iDSGFqm15lzGD^V13q8=5tOYxWM{EGrx+qx##kba6ID&b`cdb7LkhE_yL~87MdVDc4&h7Dm9GSxPZlFbj|#IIg-2HzNjS+D%@6 zuM%B(K(_do8%O5Xi0jITFst#s#o zUPBCd_ByW?fEvDb!UsQ)*I~aSc=8_YS{7J&`YVZq5M#(yUGXV+OvLG3J3^XDV0LzR z(SSd*0U3A;g0T(y{G7!0*X}Py?@5BSfb?Ttior9R-$CLEm@{r5%*RzbDMaBl-BeA%qSx2`F@mTy3RI7oa(q%5 ztJyk_#Y$nf&%(HEBR%BD_@}waFZAF#SbTC2o2g4=m%{?1{YnOcEa#ZYF7~A-d8>{d z>1gULh|}xlRok!CX8RkvU}Zm(nLa@dR;lTAK+Ee7=6UL<(%Z91tV>>?`2y@RSF{(u zHwzWj9{0#gX$>{P{P-D13!onE?Zlr}C<=jw-!t#H-iRyG){ugrnh)(ow`;E_e{%)Q zHpFJiVnri5n;;72N9Aa@yxJMthX8TApi?~O{a|v0-ZwOh=M!%xJ?}?SJ2?#*r!O(S zkyz}bu2A4#B~ynbV_b#G^{NH`w!~m?Vm=$$IRi%?9X`%Vr4XQDO>&twQ-NtGb{y$yM{67Y#9kqQ2jYpgaSigmx@D@_PIH8UcwJlkU)bOV<1 zDeg;;WhI{#P3&~JI6{k2Qd}o2LauiWI}tF=qICvT-9EO&Z@!D7C1L-jY4cO~ zMG!KYgR;N*=BXdDH598Tmj|*&(L*|Oy`aefuLQ|2TA3~~%QDs>MwarWCTwEK?6;%L z^ew86;~)LC2+8nE?{wZ&9_OAFr5J9iMV)0xb`Bkv?Z2|jVusZU`Z|C0y*(Up7`;;8 zgjMtt5N)P&Ot(8X$27t~mQ#lTer2wf+FnVizdpzhlVwrh79I4$IJ$0;UG!b#hx&`j?I6cSQ#NxG&dv&cy;d(E0dt_Kaf& zJ(+K8jCxFlLltKYF7WvJ=^pN%^2%-}nCA6`*T(Mp44?gNZ!isQe7*c*3Dd*o{C8;e zsv4HqPH3Z}NGiW!7#m7Aa6S~4CB4@B-C(!ji`;RH$DU!_ZyBrQ->R%(sBdWa)Pcpt zpZ)d*X}?Zy-{%*sdS_X!r*Hsr&{`8fi`oIl*norpft%kMWwWy)cd_ z;%Z1=|2b4>)n{ldyY=v3)RTbrg#MhkDJWL_&~}?8eHE6wK8O)S$i-D(BD+bhiNuI1 z=7A!CH>H{3bVC61iNUqDLmhGG#3?1* zS5J?i!F5;}VKSspEBy))iGZHj$ob9ig=}#2UOwG`wbRS=Y)&djLc`^o{(N|2_gWM+*+b|B4QcD}!V_TYe+mWZRdWqZs_ zG7QeWdxptq#xi!5A5!0K9`6(80xr**W?D0I!+tW`C${@X{W{PMroL~h5?^my1kb#V zH?0pm%x@;EGz!v2&}_K1tzNfpmzI19&40g(8}n7bofiC|KBzzbo%a}lGV8pVqfsRnZwZOd zh|bC820|Hl-_+I(s^)5P+Bj|nZV8(|6(HPueV=LGe^XZK<%rpz*E`G-P*WExc)ypd z9~B)e_kwS==jP?7x%BYb{gDzB=|C4nc7i#|5dvIyVeRQ``-=-Rt3mqat;he7z&QOtc*x;4BanYE>9eJisjKJnTiWT!z(;tIo=?LP%eM*qKH&CG z^Ng2JkE6=RElCm1>fQymuR9%>^G#$fq{*AkYy&f~>t^6r-ygB@GqQy%=brS0Fbo|M zdtd3)SKDZ&)6MQmYR_LqU+0zq>q;sm-CF40vm(6oFd3sulzWb(`eVWzsL#@yF=( zcbCqrZe_^f?GPut_^X7~Wa+6WsfP!o&UKZ!Fc!2=s11{67`ye}v!)!|M~w|{-jWZW z&g9J<3fFSUgBb+2)?kum`O@wi_*abF?R0U~t)mmegjr(uwIzf+o99*{IP+slwDQUW zgceQo!b{AT4Hh!j3x~`8^yg#BLUw`MeAE?_w3|UZXYGvVE1-+;Qz3QtejmW;s`PK~ z9ggyZWjTXO3znm?gb}X2*N@jShk5!DsrEb`kezRq|C*dVYvAHt^9`+eZwy9wFYpHV z1&JlC13g8F!D!)rx$$W|Gs306Kd_urO5d=uQY&&!!}4#!8HtBieD+%_y-*_*pZa09 zgsb$xJ-uqg?IvM>C1XnJysEdD7s>Ja2w zpDG$*DzfXv^_G$tu)3k>FO^twC4opd4HLhxNi05W4=-(B z-)Z5hUIzReH~0l#Ka!?#6rv3@Os^FZ7`VTRqQf!~*s5&qm#dC@!jhRXG1OV;9GIyy zUF;&2Fp!$}SLWD8eev5%g{ERn-}%xB$0tqIf;x__FOENVi{$Nmllh&*^f5BG)f7O7 z70GSJaBG=nQdM`ZHZqzh}S&+9hXNJ}zSp2g{Viu74Q#!fY zPTTw50>)N*!9CrO?lb@1l=?A%;He}_nRe_~R-?7eHAUnsd^)&uz_oubDa@aM+HQ5W 
zuxpMeUj-+@P@fMi+2QqtZ0)+Z?eilaP;vGSDd=zj3aerP1-mfFYH#*w`{fV+Chqc8 z(w)lA?}r=)92kGFti|e0m z8Dr>x`UbIOwxf)afO@L(f{6!>6q|p2ww;?3NHL<0^Q8W&sD05XFaJ&C$1a7VPnKt# znw_eYh@;J*OX50@Y&}ilv$g#p0IL(*z2KSg!&`v7@KSc&$meyC3?#Gdi>y{1L0l}> zv`SGpH%Gg4W^Uwm(?!PXcBe)ZuzCoBgPY$-;w|A>@(>VvMHsXI)`6{^V~v&B#prsa z8%OZcCz-*EEb=P}xg-85Cg)#p-j^_Rxg3~w3P&5BRVR7sr0@bP=CSGn%JrnC3zy1Usea^y~le)^d~a%{waaJ2&@92&jy6m?^k3>w$c%FAZo@_(HFRfbMwJ5wfG( zs`>jTR%v+lo;25k%M5tE%V@lr8W=n^6-Aj6(h{e98tS!6Np6V%%jRxnhS;03%m*8u z&GI=Y$V&Nu)Z~P<`nmJi?tm@y4#%_B;d(OP?C*CQtL7uhUD@+c#Et-BU?=p;tg5l@ zwALOQoXXS&KHt6L0s?S>&Q^)9&yqcusD!8RT%E>iCMzk_swuelCK+|C!7nh%`wtRC)V0DYwY zvQ-G4#i=C7&$6{=(H(Z@FWwy>Fv8RQ20pSfTKid2R_X^C%W-3+<@4vBGP#Y13(vE( zNPmXXI?n!`5tyeKsHL>tep3=qsF^!BbF-v^e4C!5nz|2s1Ev8Hq30BU!YEmYIjMe> zx%<#d$jaef9}poaI96C(g|I?PNuH5ymM;b+^!i5G(~=LQ+|d3RlAyajbe#>zH=%{R z!t=cigDvFWlKFVA{yL6jrtbncP8EOm2>?uaBk88|C|IBg+NUGaO?yc!j)P-2Uon|4 zdoi2$>{d~8)SRW6qN_R8j1V8HwlP#gh*1b=%)#GOEPUTreQWf6P% zEk@?)Ipu8oCeT;T;jt6U z=G_8peiHn|{nL1W*07H_2O%%q8m(v{X@!nb#$QK1K2Gd@is;oHx1p3M)NOouoFR>((!Y9*J$o26$OxViV`CXD# zM)bdm#;VP^$nd83uq{>Lp`rz`yw`IEwjLX5xs&Eh5^sXABYsv=B)+LSw&!SM*W;k& zGX#;SL*HKUJ2IpL{z1TjcSL<|)WnoOK-k-=S6Y65(MLH}YFgNiVjnP?&Rff!y5(VI zk=vTsH7@K@p}&!?GQM4#$}oy`R}Yqk4B~qG%0O-kZj8PJJNuAD zuegON5TGrV7`N=-)5xc|j0hWyu`ewD)~3RPd75XP01Pn}pXZCn#}?C6e9Qj$QUb|> zT2pWDm#Mwj6#K+u*r)L(^{T+0b@bthj4_;jdG=gh&m`w7bnm=<)d*UvQT=y|vqTaC zcIuvOZ1-=Z|G<^t_RIpj7=PFQz3_P+gFPis$5I*aw5;BA8MH}?>{Se|Gzhz$a>t(5 z!h?@*e(1sP+47(XZP~wPe4mdD_HU$u#$?(qy*tmh&*esf#U){CP4n3GywWNLI<4Fx zeU>*C@(8}?fuo%{tg7Z+bu0h_G7=B-Ph?z}BPv8$@ zHqkNHr_e8&9ezfwuk2tQw42#xc}e6ke0U+vK%HF~`K&Wwkx zGhu3A*{AzSRRP5v5o$_J__mO`<7N(X=-sC&h5DlL(#JJeTYz4d6OPEtydaZxnQ`Bf z3h)=0W&wA^*tb}hX4b{Hi7cy6Y^m9ASN&Mb1377x12YoLU4=8BSbW%d)pV zTh$CO{xk%>@mI|u<)3owkZ*8#2Hw&~RLn@-tJHI5v&qTeOOJr%ZOOwmqktvz`@2Lv z=BKz5m6W~RA$v=02}|i%v8My_PM*oYyc}K5K$twLqn`7NIINh*b6uNbPV``zEQcyW$0L1zuaU z8)4?c1%xVd8~cBaB7GUaeFGXq7gk?I2*wyg%=Q1k+85rebS6>fkobMXD>LUnkw`W@ zMUnJ$CMu}9JVUPYA@`qbqzUKqj-(fA8ETAkmZggCpljJOr?|oDPVJf=yKMqqcXAu! 
zKWUqE(;8BUFf;yOd2Yq-hb9)ICcvhG z`>FZ=3Kkt7w-ar#F46=DxT)pTz5zG+mf-qr5Iz~pOsJAI&*pYf(#2zRPGv|*I(zo% z>5Sq04ENM?l^kJY;XgSX-D1;HAjn|I9E945y=g~su@nUYJ9llKX$M5_u?YiwyYE6?s4|<606A+#jet5tmR^jPBH!q!VhG%~LGLe*Q zN_E3qjBJC#(+BdSTRFR!e^U#dwwz6<-LiOLvzGJ-RWxpkwxp*W;r&a4ZRbRNj-|K# zTlS$(zY@p1K8*KYmCJ9Y%9hswzM#y1khE0~9!-_IR(_fb%a4%Wjp7Bc+u&kQ&DxQr zTC1tlEmzk5T`$3_#>~i=cy5fmZ+{YZZL{wG2_X~S?2LDnh5?^1EyL_Hbpm-9)Kyhx zAaFk5&*A45QcbmgEt6{UsRr7?x7w}ntwnbB z?!Py?5Y&0|n{U2yxUN($a2K(0@*Wc8-(<(Tn)ZOeS3^BHJ34>XuJbR5Z^QjeYwW@2 zo^Z&2`jo@DRoNd&P`?xGT@2N!GdrBu@$0ESJ^A?#D{{MU#ht8yYB^>v@R`o|Ov~j> z?c!ch&0Pcje)#=)>cRa4dFGvYZS$}^&Y%yqp^)O=;kCm|`tVxYa>>Fxs#%TCmzAcy zFQ4N{e$vPpMMeHpj(oOEuytS%sL|QRGKgA@!_h`8qk8nno*BHq5aj5eM@f$yV?roB za(0T^h8sBK97X)*r;2Nh^^qmH+GoNtmJxh+u*cxSU2~qckby?1#JEY#-?2!kNxLues@X# zAg4bP&Q$m3OaDRa@~tJ}jAtp%I8pxn0=ZhpFa1Hijhc0=kw=y8Fk!p=0cQd#k+}L0 z$G|q7AG{AHAGJdvZ(hFOcSa0@e;$vIdY!}>_V;*vXYfv#?{?T>7LR`izMBu^Jt|Ni ztXk3cx+kwuk#p9FaCch2+jHKKag|zwK+TRgGp0fflij(J&pgp|@bMaC0Gc&)wHBU# zb-k!hg+Xd$eTcnjXZ{TF12GkS#P3GE1BY^#+x`wQwWKca`I;YUd?p~r!+6MNn&lJq zo%o7dQcvz@b)~tJ8pHM1#PhvS%RDPohtCzg6zXA5Bd8Juh1kdVFd@!ENO%seu#121 zqv_D;jedfC!L#$%UOoF_<}3UR?&oi4Pv)_uxF^iRHRSaf9$WjB;LjSvWXEsYwtPQ} zbJ*{Z70NxF$r60Bg0nm1V`Tm$qLp~EI^OB?Imn)O+J?XV54oQbAJ=b#UQ#33(42>J}%|Q~iKlKH9eo#8(idl|A-i zHh)165d3jGS`pP1R75c~mdl{h?(Hm_^QiLPEotR`ONCu|4mO~7xU@%T))j5&amSosAh|B9zMYt!I^T0dLC*D$g{qMuQ<;?QW$P}AWf)YBd| z6CFnBzWOUSX&2nlK8I@7K4nc>TeMS;_BOq}O7qxl3~)D)M`}|iyb!-oe-vsuVS+s_ zc0YH}L?`Xg)kEyRHZ>lh+SD$)lfdTWxiNZqa+_N8tJLVSnGV8dLs3v!6!R z1!e7%fqfExv?9JOf-w!T|41v%xxv1TL0{&tp7jY|V62no+!T4vjakylo=2_tO9C+` zh?qKwEoT`L0dZS&_{-1GQ>;DyAtu@w2w6v)Z3yy=3yC94CC&hO!cKIxTVY=Xdsmsc zcND~=AH>s`7!fCq37^r_x>?&N>0yoOa*I#)1CF?65+hzKN1N<=jl7oo^ZVzAedj#w zxac98!S`rht^6+d%bRmZ%OJ{5TVc2J{E(AFUaj>M%di!BmNx96A3eg}LS5+|`_NMj zx)%3JF5y#%SZIOoEm=%7#%C&=ov7ePA+DoeGBv5EB?fp65=*k#*?OA~@MC_|6aU&s z99Rw5*&22uUF}+*g@9j`Z7)n{%6pn>l{uyYXWq&)#2C5bh%P*PV?JE7zp88CTFzi2 z?)KU6nc-UCkD`XzH-+!9494)8T>Y0f{_;;yXtxh{H#$7Z$K|8Hkv`*BO@}%Jane1B zLtL`K@Rf*JZ^zNnzgmknb52-&wmv2bMLjS65E9`<0Y|=_N;`Z>+Rsk$@!*>!9dkHW zvFn3hIj=_MM`d@SYs+0P&IxgESD1a0$o2((Sljki!=s((lYF)<_Em6>B-}pK`6?OP zg8E*()y1f^a|e0sO76>9hO{TC-lBQg*uG3J`Xe7R@!M%#b{eSnoB!UGkCHkrKHz>& z=lf(`rPn9VhSOf3G+R!4ebQ{XZGECMJ1T$7pWjXUc3H|FI0sE+TE!n=4kqEXhRCas zjS(6(X2;waEr}7Ob}BMhRQT?KC4oUX?hqFBxX>fdgzQViEUSyA5xdmqKY zIN;zTH$mPW;B_5|S2zb1a%7c+GUwCL>lytP>Gig0Tb0&qslU%#XxBIG^J>9=Puu1~t+# z7U{&#wzlr6k{f~lirKUGWB#>5uNm$U%Nar}B(?OGkk^&M-zAb>AKZU@)<5boK@PP5 zKRVFCCyjD6F&ln=)Z2Uh!2L4ik>h4>&3piASGY%P zsdfSzh59*^`LzWDA7XBHwQt+7HAVGtvpCjeJskFsZ^?6<^m&?prO(?oBgmZ1Ri54Y z^YT@hynLY@^73jtcXX>;Ce4gC5VJQloe8J53-{ z2VoEsm5EVU1*4Jnrgmqh%XVz*xjweB*z#NA@7cZwhni-?9GvtmSJd~GkYncTqh9TM zXw+FiZ6oQts+QwoZ#i4n2(VN5ePKSvs*(9@`SW7i-KSB^1mro!1bk+gqs@Btyr2eQ z5|0xRu71S#31?*zioa~c+WBT`raI)-5lVWKc$WbcICxjFG|31P7QIeia$*>C<1R2jC3(BqS=_U&bV|d@st;&gpbj3&lced!7f^a`X3}@u;`8 z*;d5C!7E%-l|qD(9;q|TkGrS^Nx0vukXV(ww}ECK02qt^ppBpEqhiF zUtcNIrpKptuz9?%7D}d8!FgHl2_G9{66| z<+S(Et1GIXZ0dotT%q0-YBCaM06y`fFk+3Y0ZVf$*51E)(gbQIBUipuZydjV$VZNT zzNA8Es2;3;=4Nap(+_qR*jzls!msoPs1QKRfw%?>IBPbJPz{C{^{;Q*w)>J zk{UthIg7V_SC#ar5Ajo&I>gT^`}bY_P=i)){d{0H#Jm~2KkHitLCrw0&sk>w^Wys{ z{^HX6UCb=iD=&3ZdN;kh~6Q`uM!g>#uigwKR!1WV=*b9QI1?@JOx$~E@ScW0!e z&s!MFFz3kAn7Po09)*v5`oUa@jm!gU;PeWi-W#gm*)+zzh&c=0x~>hA$LW=jrz1`; zN9Ob*Vv&zyYO1d*iDRmpdkud^#+&VQ&r*Bh!~QWIyCry)y`Q_44`1RoeEM*_N{4|y zYd;W^V~?|#=@!@!;cma0pUbOxb2$~7nkQ^$WCi9h*hw{GtF}#2@{Y z`o}kZnt@J@2l(&Fioxf^v$c#_75xvyzp9o2XNbI!!t4>wq~1R|v z9MDQzQ@#K@ca^vM##Cke$VuF5sy{89kKB<*uFFem)NrY%CN?#hLwWx^ouQ@A+ZOXm 
zV-Dz+A7r2fvAwa-c6hL2*L!}7b#-6vpmCJ$%w@EZQhxG-Hd5O#2JgXj9Z#bm9w{&1 z>et7-E*}Ss{K^RX~aB2E->Z{br!ox z4j%S;?br42i}$tqrE?D4tr9Xvk95tUt_U@9%pHI~(}J005pK)#m>&lB2IT3E*$aH8 zGd|N;T*apm$kk{4qa8{8s0WNVKGd&nC-3Aq65sDhP4dT?p2<}{-~0EY7hsQXbzOSSEU9qL zD{bq~;Ab^rEJxPUzTSt|m|_{G%t?kn5B5YH)Xko3fBwpTT-QIl4>y1+$57Un^rrZ% zl~~AvyHpZiTutZFznsS&p9yKjnN+Uz48+JSBZ%)SCYCEIAaCJ0`yJQ&R|ye=BR-sg zLb${+1s)tSqIP0Q&f~ueX`0d%i%CB~4r_n7vHVFy;%+wn(K^r>Tz(ihc!vieQ%&S0 zSqFxh!`*g09!XrL(k$l~Q*siEnyAx#qy9sM8t(S|Tx-WXrYg^~7ZL7-f#D`BreGha z-xzip=^wn4``gu1-JInT$6mo6|1=^{VbXZY8Rl#dvk1@OUB7CN;_Oo3 zd#jX3S>Ale4_(NInQFPM)TOJiH&KsHdS^$q#J4+eqIYqvxPMcY!QQvM=?>tWgvC(P z#$d~1TeXn;_&T=xDqaV*_$ZgSlM(-+lBXkVhG3i4;&l}K{L_LQNDnK3v_;7|YO;Kz#WSC-)=?{?0M?wqHZc^>35 z!Nv2*!M=T?(W8&O+MVCPT)&^@`m>+k$KZ3S?7Mu`HpG~L-z3@dJG^#?TbZA0Szb{q zS5n>D>|NilQeJ+~U9#Kr^Oo$%R@q~DK#gUEaHg=BDB!ad#C%qWSsRLTT7HJJHuZxm zO+EYm9^*0SKk`tAoKuJUVGX$@=9ju;bGR?qj(wIQJL|t6kJa^gnNGN@6F?kuEzg_b z_vC8VHPRp7tn+;TJ|;sx+zl0}&gcVwr#@z{9ra^vIIl9Av%Hk%tZ~pQ%;6usTdZ#) z7VHeBp{N#9j5R3}(Te!U-8+gH&)adlimQd%#a5_vPA=-kOV};yEPC`rE4m#u>zy^k z!@v)gbAQu&%=(#_yzcV5h>9uZcq$V!!T10kljxDZVXd)u2meO<)rn(kbRV||Nrig? z_W>cc4>vN_g=vHjwH Tested on Consul 1.10.4 . + +## Configuration + +```toml @sample.conf +# Read metrics from the Consul Agent API +[[inputs.consul_agent]] + ## URL for the Consul agent + # url = "http://127.0.0.1:8500" + + ## Use auth token for authorization. + ## If both are set, an error is thrown. + ## If both are empty, no token will be used. + # token_file = "/path/to/auth/token" + ## OR + # token = "a1234567-40c7-9048-7bae-378687048181" + + ## Set timeout (default 5 seconds) + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile +``` + +## Metrics + +Consul collects various metrics. For every details, please have a look at Consul +following documentation: + +- [https://www.consul.io/api/agent#view-metrics](https://www.consul.io/api/agent#view-metrics) diff --git a/plugins/inputs/consul_agent/consul_agent.go b/plugins/inputs/consul_agent/consul_agent.go new file mode 100644 index 0000000000000..d82c2f7f7d7c5 --- /dev/null +++ b/plugins/inputs/consul_agent/consul_agent.go @@ -0,0 +1,177 @@ +//go:generate ../../../tools/readme_config_includer/generator +package consul_agent + +import ( + _ "embed" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf
+var sampleConfig string
+
+// consul_agent configuration object
+type ConsulAgent struct {
+	URL string `toml:"url"`
+
+	TokenFile string `toml:"token_file"`
+	Token     string `toml:"token"`
+
+	ResponseTimeout config.Duration `toml:"timeout"`
+
+	tls.ClientConfig
+
+	roundTripper http.RoundTripper
+}
+
+const timeLayout = "2006-01-02 15:04:05 -0700 MST"
+
+func init() {
+	inputs.Add("consul_agent", func() telegraf.Input {
+		return &ConsulAgent{
+			ResponseTimeout: config.Duration(5 * time.Second),
+		}
+	})
+}
+
+func (*ConsulAgent) SampleConfig() string {
+	return sampleConfig
+}
+
+func (n *ConsulAgent) Init() error {
+	if n.URL == "" {
+		n.URL = "http://127.0.0.1:8500"
+	}
+
+	if n.TokenFile != "" && n.Token != "" {
+		return errors.New("config error: both token_file and token are set")
+	}
+
+	if n.TokenFile != "" {
+		token, err := os.ReadFile(n.TokenFile)
+		if err != nil {
+			return fmt.Errorf("reading file failed: %v", err)
+		}
+		n.Token = strings.TrimSpace(string(token))
+	}
+
+	tlsCfg, err := n.ClientConfig.TLSConfig()
+	if err != nil {
+		return fmt.Errorf("setting up TLS configuration failed: %v", err)
+	}
+
+	n.roundTripper = &http.Transport{
+		TLSHandshakeTimeout:   time.Duration(n.ResponseTimeout),
+		TLSClientConfig:       tlsCfg,
+		ResponseHeaderTimeout: time.Duration(n.ResponseTimeout),
+	}
+
+	return nil
+}
+
+// Gather collects metrics from the Consul agent endpoint
+func (n *ConsulAgent) Gather(acc telegraf.Accumulator) error {
+	summaryMetrics, err := n.loadJSON(n.URL + "/v1/agent/metrics")
+	if err != nil {
+		return err
+	}
+
+	return buildConsulAgent(acc, summaryMetrics)
+}
+
+func (n *ConsulAgent) loadJSON(url string) (*AgentInfo, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("X-Consul-Token", n.Token)
+	req.Header.Add("Accept", "application/json")
+
+	resp, err := n.roundTripper.RoundTrip(req)
+	if err != nil {
+		return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status)
+	}
+
+	var metrics AgentInfo
+	err = json.NewDecoder(resp.Body).Decode(&metrics)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing json response: %s", err)
+	}
+
+	return &metrics, nil
+}
+
+// buildConsulAgent builds all the metrics and adds them to the accumulator
+func buildConsulAgent(acc telegraf.Accumulator, agentInfo *AgentInfo) error {
+	t, err := time.Parse(timeLayout, agentInfo.Timestamp)
+	if err != nil {
+		return fmt.Errorf("error parsing time: %s", err)
+	}
+
+	for _, counters := range agentInfo.Counters {
+		fields := map[string]interface{}{
+			"count":  counters.Count,
+			"sum":    counters.Sum,
+			"max":    counters.Max,
+			"mean":   counters.Mean,
+			"min":    counters.Min,
+			"rate":   counters.Rate,
+			"stddev": counters.Stddev,
+		}
+		tags := counters.Labels
+
+		acc.AddCounter(counters.Name, fields, tags, t)
+	}
+
+	for _, gauges := range agentInfo.Gauges {
+		fields := map[string]interface{}{
+			"value": gauges.Value,
+		}
+		tags := gauges.Labels
+
+		acc.AddGauge(gauges.Name, fields, tags, t)
+	}
+
+	for _, points := range agentInfo.Points {
+		fields := map[string]interface{}{
+			"value": points.Points,
+		}
+		tags := make(map[string]string)
+
+		acc.AddFields(points.Name, fields, tags, t)
+	}
+
+	for _, samples := range agentInfo.Samples {
+		fields := map[string]interface{}{
+			"count":  samples.Count,
+			"sum":    samples.Sum,
+			"max":    samples.Max,
+			"mean":   samples.Mean,
"min": samples.Min, + "rate": samples.Rate, + "stddev": samples.Stddev, + } + tags := samples.Labels + + acc.AddCounter(samples.Name, fields, tags, t) + } + + return nil +} diff --git a/plugins/inputs/consul_agent/consul_agent_test.go b/plugins/inputs/consul_agent/consul_agent_test.go new file mode 100644 index 0000000000000..5b4a2eac3ea84 --- /dev/null +++ b/plugins/inputs/consul_agent/consul_agent_test.go @@ -0,0 +1,97 @@ +package consul_agent + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestConsulStats(t *testing.T) { + var applyTests = []struct { + name string + expected []telegraf.Metric + }{ + { + name: "Metrics", + expected: []telegraf.Metric{ + testutil.MustMetric( + "consul.rpc.request", + map[string]string{}, + map[string]interface{}{ + "count": int(5), + "max": float64(1), + "mean": float64(1), + "min": float64(1), + "rate": float64(0.5), + "stddev": float64(0), + "sum": float64(5), + }, + time.Unix(1639218930, 0), + 1, + ), + testutil.MustMetric( + "consul.consul.members.clients", + map[string]string{ + "datacenter": "dc1", + }, + map[string]interface{}{ + "value": float64(0), + }, + time.Unix(1639218930, 0), + 2, + ), + testutil.MustMetric( + "consul.api.http", + map[string]string{ + "method": "GET", + "path": "v1_agent_self", + }, + map[string]interface{}{ + "count": int(1), + "max": float64(4.14815616607666), + "mean": float64(4.14815616607666), + "min": float64(4.14815616607666), + "rate": float64(0.414815616607666), + "stddev": float64(0), + "sum": float64(4.14815616607666), + }, + time.Unix(1639218930, 0), + 1, + ), + }, + }, + } + + for _, tt := range applyTests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v1/agent/metrics" { + w.WriteHeader(http.StatusOK) + responseKeyMetrics, _ := ioutil.ReadFile("testdata/response_key_metrics.json") + _, err := fmt.Fprintln(w, string(responseKeyMetrics)) + require.NoError(t, err) + } + })) + defer ts.Close() + + plugin := &ConsulAgent{ + URL: ts.URL, + } + err := plugin.Init() + require.NoError(t, err) + + acc := testutil.Accumulator{} + err = plugin.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/consul_agent/consul_structs.go b/plugins/inputs/consul_agent/consul_structs.go new file mode 100644 index 0000000000000..c17509189ed46 --- /dev/null +++ b/plugins/inputs/consul_agent/consul_structs.go @@ -0,0 +1,32 @@ +package consul_agent + +type AgentInfo struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Value float32 + Labels map[string]string +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Count int + Sum float64 + Min float64 + Max float64 + Mean float64 + Rate float64 + Stddev float64 + Labels map[string]string +} diff --git a/plugins/inputs/consul_agent/sample.conf b/plugins/inputs/consul_agent/sample.conf new file mode 100644 index 0000000000000..f49c70f5380ef --- /dev/null +++ b/plugins/inputs/consul_agent/sample.conf @@ -0,0 +1,19 @@ +# Read metrics from the Consul Agent API +[[inputs.consul_agent]] + ## URL for the Consul agent + # url = "http://127.0.0.1:8500" + + 
## Use auth token for authorization. + ## If both are set, an error is thrown. + ## If both are empty, no token will be used. + # token_file = "/path/to/auth/token" + ## OR + # token = "a1234567-40c7-9048-7bae-378687048181" + + ## Set timeout (default 5 seconds) + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/path/to/cafile" + # tls_cert = "/path/to/certfile" + # tls_key = "/path/to/keyfile" diff --git a/plugins/inputs/consul_agent/testdata/response_key_metrics.json b/plugins/inputs/consul_agent/testdata/response_key_metrics.json new file mode 100644 index 0000000000000..0234d17f4f0ba --- /dev/null +++ b/plugins/inputs/consul_agent/testdata/response_key_metrics.json @@ -0,0 +1,42 @@ +{ + "Timestamp": "2021-12-11 10:35:30 +0000 UTC", + "Gauges": [ + { + "Name": "consul.consul.members.clients", + "Value": 0, + "Labels": { + "datacenter": "dc1" + } + } + ], + "Points": [], + "Counters": [ + { + "Name": "consul.rpc.request", + "Count": 5, + "Rate": 0.5, + "Sum": 5, + "Min": 1, + "Max": 1, + "Mean": 1, + "Stddev": 0, + "Labels": {} + } + ], + "Samples": [ + { + "Name": "consul.api.http", + "Count": 1, + "Rate": 0.414815616607666, + "Sum": 4.14815616607666, + "Min": 4.14815616607666, + "Max": 4.14815616607666, + "Mean": 4.14815616607666, + "Stddev": 0, + "Labels": { + "method": "GET", + "path": "v1_agent_self" + } + } + ] + } diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 659b87c3a1fb6..fadad1b806a1b 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -1,8 +1,12 @@ # Couchbase Input Plugin -## Configuration: +Couchbase is a distributed NoSQL database. This plugin gets metrics for each +Couchbase node, as well as detailed metrics for each bucket, for a given +Couchbase server. -```toml +## Configuration + +```toml @sample.conf # Read per-node and per-bucket metrics from Couchbase [[inputs.couchbase]] ## specify servers via a url matching: @@ -15,27 +19,51 @@ ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. servers = ["http://localhost:8091"] + + ## Filter bucket fields to include only here. + # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification (defaults to false) + ## If set to false, tls_cert and tls_key are required + # insecure_skip_verify = false + + ## Whether to collect cluster-wide bucket statistics + ## It is recommended to disable this in favor of node_bucket_stats + ## to get a better view of the cluster. 
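+ ## cluster_bucket_stats and node_bucket_stats are independent options; enabling both reports bucket stats in both the couchbase_bucket and couchbase_node_bucket measurements.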
+ cluster_bucket_stats = true + + ## Whether to collect bucket stats for each individual node + node_bucket_stats = false ``` -## Measurements: +## Metrics ### couchbase_node Tags: + - cluster: sanitized string from `servers` configuration field e.g.: `http://user:password@couchbase-0.example.com:8091/endpoint` -> `http://couchbase-0.example.com:8091/endpoint` - hostname: Couchbase's name for the node and port, e.g., `172.16.10.187:8091` Fields: + - memory_free (unit: bytes, example: 23181365248.0) - memory_total (unit: bytes, example: 64424656896.0) -### couchbase_bucket +### couchbase_bucket and couchbase_node_bucket Tags: + - cluster: whatever you called it in `servers` in the configuration, e.g.: `http://couchbase-0.example.com/`) - bucket: the name of the couchbase bucket, e.g., `blastro-df` +- hostname: the hostname of the node the bucket metrics were collected from, e.g., `172.16.10.187:8091` (only present in `couchbase_node_bucket`) + +Default bucket fields: -Fields: - quota_percent_used (unit: percent, example: 68.85424936294555) - ops_per_sec (unit: count, example: 5686.789686789687) - disk_fetches (unit: count, example: 0.0) @@ -44,10 +72,228 @@ Fields: - data_used (unit: bytes, example: 212179309111.0) - mem_used (unit: bytes, example: 202156957464.0) +Additional fields that can be configured with the `bucket_stats_included` +option: -## Example output +- couch_total_disk_size +- couch_docs_fragmentation +- couch_views_fragmentation +- hit_ratio +- ep_cache_miss_rate +- ep_resident_items_rate +- vb_avg_active_queue_age +- vb_avg_replica_queue_age +- vb_avg_pending_queue_age +- vb_avg_total_queue_age +- vb_active_resident_items_ratio +- vb_replica_resident_items_ratio +- vb_pending_resident_items_ratio +- avg_disk_update_time +- avg_disk_commit_time +- avg_bg_wait_time +- avg_active_timestamp_drift +- avg_replica_timestamp_drift +- ep_dcp_views+indexes_count +- ep_dcp_views+indexes_items_remaining +- ep_dcp_views+indexes_producer_count +- ep_dcp_views+indexes_total_backlog_size +- ep_dcp_views+indexes_items_sent +- ep_dcp_views+indexes_total_bytes +- ep_dcp_views+indexes_backoff +- bg_wait_count +- bg_wait_total +- bytes_read +- bytes_written +- cas_badval +- cas_hits +- cas_misses +- cmd_get +- cmd_lookup +- cmd_set +- couch_docs_actual_disk_size +- couch_docs_data_size +- couch_docs_disk_size +- couch_spatial_data_size +- couch_spatial_disk_size +- couch_spatial_ops +- couch_views_actual_disk_size +- couch_views_data_size +- couch_views_disk_size +- couch_views_ops +- curr_connections +- curr_items +- curr_items_tot +- decr_hits +- decr_misses +- delete_hits +- delete_misses +- disk_commit_count +- disk_commit_total +- disk_update_count +- disk_update_total +- disk_write_queue +- ep_active_ahead_exceptions +- ep_active_hlc_drift +- ep_active_hlc_drift_count +- ep_bg_fetched +- ep_clock_cas_drift_threshold_exceeded +- ep_data_read_failed +- ep_data_write_failed +- ep_dcp_2i_backoff +- ep_dcp_2i_count +- ep_dcp_2i_items_remaining +- ep_dcp_2i_items_sent +- ep_dcp_2i_producer_count +- ep_dcp_2i_total_backlog_size +- ep_dcp_2i_total_bytes +- ep_dcp_cbas_backoff +- ep_dcp_cbas_count +- ep_dcp_cbas_items_remaining +- ep_dcp_cbas_items_sent +- ep_dcp_cbas_producer_count +- ep_dcp_cbas_total_backlog_size +- ep_dcp_cbas_total_bytes +- ep_dcp_eventing_backoff +- ep_dcp_eventing_count +- ep_dcp_eventing_items_remaining +- ep_dcp_eventing_items_sent +- ep_dcp_eventing_producer_count +- ep_dcp_eventing_total_backlog_size +- ep_dcp_eventing_total_bytes +- ep_dcp_fts_backoff +- 
ep_dcp_fts_count +- ep_dcp_fts_items_remaining +- ep_dcp_fts_items_sent +- ep_dcp_fts_producer_count +- ep_dcp_fts_total_backlog_size +- ep_dcp_fts_total_bytes +- ep_dcp_other_backoff +- ep_dcp_other_count +- ep_dcp_other_items_remaining +- ep_dcp_other_items_sent +- ep_dcp_other_producer_count +- ep_dcp_other_total_backlog_size +- ep_dcp_other_total_bytes +- ep_dcp_replica_backoff +- ep_dcp_replica_count +- ep_dcp_replica_items_remaining +- ep_dcp_replica_items_sent +- ep_dcp_replica_producer_count +- ep_dcp_replica_total_backlog_size +- ep_dcp_replica_total_bytes +- ep_dcp_views_backoff +- ep_dcp_views_count +- ep_dcp_views_items_remaining +- ep_dcp_views_items_sent +- ep_dcp_views_producer_count +- ep_dcp_views_total_backlog_size +- ep_dcp_views_total_bytes +- ep_dcp_xdcr_backoff +- ep_dcp_xdcr_count +- ep_dcp_xdcr_items_remaining +- ep_dcp_xdcr_items_sent +- ep_dcp_xdcr_producer_count +- ep_dcp_xdcr_total_backlog_size +- ep_dcp_xdcr_total_bytes +- ep_diskqueue_drain +- ep_diskqueue_fill +- ep_diskqueue_items +- ep_flusher_todo +- ep_item_commit_failed +- ep_kv_size +- ep_max_size +- ep_mem_high_wat +- ep_mem_low_wat +- ep_meta_data_memory +- ep_num_non_resident +- ep_num_ops_del_meta +- ep_num_ops_del_ret_meta +- ep_num_ops_get_meta +- ep_num_ops_set_meta +- ep_num_ops_set_ret_meta +- ep_num_value_ejects +- ep_oom_errors +- ep_ops_create +- ep_ops_update +- ep_overhead +- ep_queue_size +- ep_replica_ahead_exceptions +- ep_replica_hlc_drift +- ep_replica_hlc_drift_count +- ep_tmp_oom_errors +- ep_vb_total +- evictions +- get_hits +- get_misses +- incr_hits +- incr_misses +- mem_used +- misses +- ops +- timestamp +- vb_active_eject +- vb_active_itm_memory +- vb_active_meta_data_memory +- vb_active_num +- vb_active_num_non_resident +- vb_active_ops_create +- vb_active_ops_update +- vb_active_queue_age +- vb_active_queue_drain +- vb_active_queue_fill +- vb_active_queue_size +- vb_active_sync_write_aborted_count +- vb_active_sync_write_accepted_count +- vb_active_sync_write_committed_count +- vb_pending_curr_items +- vb_pending_eject +- vb_pending_itm_memory +- vb_pending_meta_data_memory +- vb_pending_num +- vb_pending_num_non_resident +- vb_pending_ops_create +- vb_pending_ops_update +- vb_pending_queue_age +- vb_pending_queue_drain +- vb_pending_queue_fill +- vb_pending_queue_size +- vb_replica_curr_items +- vb_replica_eject +- vb_replica_itm_memory +- vb_replica_meta_data_memory +- vb_replica_num +- vb_replica_num_non_resident +- vb_replica_ops_create +- vb_replica_ops_update +- vb_replica_queue_age +- vb_replica_queue_drain +- vb_replica_queue_fill +- vb_replica_queue_size +- vb_total_queue_age +- xdc_ops +- allocstall +- cpu_cores_available +- cpu_irq_rate +- cpu_stolen_rate +- cpu_sys_rate +- cpu_user_rate +- cpu_utilization_rate +- hibernated_requests +- hibernated_waked +- mem_actual_free +- mem_actual_used +- mem_free +- mem_limit +- mem_total +- mem_used_sys +- odp_report_failed +- rest_requests +- swap_total +- swap_used -``` +## Example Output + +```shell couchbase_node,cluster=http://localhost:8091/,hostname=172.17.0.2:8091 memory_free=7705575424,memory_total=16558182400 1547829754000000000 couchbase_bucket,bucket=beer-sample,cluster=http://localhost:8091/ quota_percent_used=27.09285736083984,ops_per_sec=0,disk_fetches=0,item_count=7303,disk_used=21662946,data_used=9325087,mem_used=28408920 1547829754000000000 ``` diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index de7f0bec0c9fa..e3af5bbfeb232 100644 --- 
a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -1,56 +1,59 @@ +//go:generate ../../../tools/readme_config_includer/generator package couchbase import ( + _ "embed" + "encoding/json" + "net/http" "regexp" "sync" + "time" + + couchbaseClient "github.com/couchbase/go-couchbase" - couchbase "github.com/couchbase/go-couchbase" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Couchbase struct { Servers []string -} -var sampleConfig = ` - ## specify servers via a url matching: - ## [protocol://][:password]@address[:port] - ## e.g. - ## http://couchbase-0.example.com/ - ## http://admin:secret@couchbase-0.example.com:8091/ - ## - ## If no servers are specified, then localhost is used as the host. - ## If no protocol is specified, HTTP is used. - ## If no port is specified, 8091 is used. - servers = ["http://localhost:8091"] -` + BucketStatsIncluded []string `toml:"bucket_stats_included"` -var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`) + ClusterBucketStats bool `toml:"cluster_bucket_stats"` + NodeBucketStats bool `toml:"node_bucket_stats"` -func (r *Couchbase) SampleConfig() string { - return sampleConfig + bucketInclude filter.Filter + client *http.Client + + tls.ClientConfig } -func (r *Couchbase) Description() string { - return "Read metrics from one or many couchbase clusters" +var regexpURI = regexp.MustCompile(`(\S+://)?(\S+\:\S+@)`) + +func (*Couchbase) SampleConfig() string { + return sampleConfig } // Reads stats from all configured clusters. Accumulates stats. // Returns one of the errors encountered while gathering stats (if any). -func (r *Couchbase) Gather(acc telegraf.Accumulator) error { - if len(r.Servers) == 0 { - r.gatherServer("http://localhost:8091/", acc, nil) - return nil +func (cb *Couchbase) Gather(acc telegraf.Accumulator) error { + if len(cb.Servers) == 0 { + return cb.gatherServer(acc, "http://localhost:8091/") } var wg sync.WaitGroup - - for _, serv := range r.Servers { + for _, serv := range cb.Servers { wg.Add(1) go func(serv string) { defer wg.Done() - acc.AddError(r.gatherServer(serv, acc, nil)) + acc.AddError(cb.gatherServer(acc, serv)) }(serv) } @@ -59,50 +62,376 @@ func (r *Couchbase) Gather(acc telegraf.Accumulator) error { return nil } -func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *couchbase.Pool) error { - if pool == nil { - client, err := couchbase.Connect(addr) - if err != nil { - return err - } +func (cb *Couchbase) gatherServer(acc telegraf.Accumulator, addr string) error { + escapedAddr := regexpURI.ReplaceAllString(addr, "${1}") - // `default` is the only possible pool name. It's a - // placeholder for a possible future Couchbase feature. See - // http://stackoverflow.com/a/16990911/17498. - p, err := client.GetPool("default") - if err != nil { - return err - } - pool = &p + client, err := couchbaseClient.Connect(addr) + if err != nil { + return err } + // `default` is the only possible pool name. It's a + // placeholder for a possible future Couchbase feature. See + // http://stackoverflow.com/a/16990911/17498. 
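+ // The pool handle exposes the node list and the bucket map that the loops below iterate over.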
+ pool, err := client.GetPool("default") + if err != nil { + return err + } + defer pool.Close() + for i := 0; i < len(pool.Nodes); i++ { node := pool.Nodes[i] - tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "hostname": node.Hostname} + tags := map[string]string{"cluster": escapedAddr, "hostname": node.Hostname} fields := make(map[string]interface{}) fields["memory_free"] = node.MemoryFree fields["memory_total"] = node.MemoryTotal acc.AddFields("couchbase_node", fields, tags) } - for bucketName := range pool.BucketMap { - tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "bucket": bucketName} - bs := pool.BucketMap[bucketName].BasicStats - fields := make(map[string]interface{}) - fields["quota_percent_used"] = bs["quotaPercentUsed"] - fields["ops_per_sec"] = bs["opsPerSec"] - fields["disk_fetches"] = bs["diskFetches"] - fields["item_count"] = bs["itemCount"] - fields["disk_used"] = bs["diskUsed"] - fields["data_used"] = bs["dataUsed"] - fields["mem_used"] = bs["memUsed"] - acc.AddFields("couchbase_bucket", fields, tags) + for name, bucket := range pool.BucketMap { + cluster := regexpURI.ReplaceAllString(addr, "${1}") + + if cb.ClusterBucketStats { + fields := cb.basicBucketStats(bucket.BasicStats) + tags := map[string]string{"cluster": cluster, "bucket": name} + + err := cb.gatherDetailedBucketStats(addr, name, nil, fields) + if err != nil { + return err + } + + acc.AddFields("couchbase_bucket", fields, tags) + } + + if cb.NodeBucketStats { + for _, node := range bucket.Nodes() { + fields := cb.basicBucketStats(bucket.BasicStats) + tags := map[string]string{"cluster": cluster, "bucket": name, "hostname": node.Hostname} + + err := cb.gatherDetailedBucketStats(addr, name, &node.Hostname, fields) + if err != nil { + return err + } + + acc.AddFields("couchbase_node_bucket", fields, tags) + } + } } + + return nil +} + +// basicBucketStats gets the basic bucket statistics +func (cb *Couchbase) basicBucketStats(basicStats map[string]interface{}) map[string]interface{} { + fields := make(map[string]interface{}) + cb.addBucketField(fields, "quota_percent_used", basicStats["quotaPercentUsed"]) + cb.addBucketField(fields, "ops_per_sec", basicStats["opsPerSec"]) + cb.addBucketField(fields, "disk_fetches", basicStats["diskFetches"]) + cb.addBucketField(fields, "item_count", basicStats["itemCount"]) + cb.addBucketField(fields, "disk_used", basicStats["diskUsed"]) + cb.addBucketField(fields, "data_used", basicStats["dataUsed"]) + cb.addBucketField(fields, "mem_used", basicStats["memUsed"]) + return fields +} + +func (cb *Couchbase) gatherDetailedBucketStats(server, bucket string, nodeHostname *string, fields map[string]interface{}) error { + extendedBucketStats := &BucketStats{} + err := cb.queryDetailedBucketStats(server, bucket, nodeHostname, extendedBucketStats) + if err != nil { + return err + } + + cb.addBucketFieldChecked(fields, "couch_total_disk_size", extendedBucketStats.Op.Samples.CouchTotalDiskSize) + cb.addBucketFieldChecked(fields, "couch_docs_fragmentation", extendedBucketStats.Op.Samples.CouchDocsFragmentation) + cb.addBucketFieldChecked(fields, "couch_views_fragmentation", extendedBucketStats.Op.Samples.CouchViewsFragmentation) + cb.addBucketFieldChecked(fields, "hit_ratio", extendedBucketStats.Op.Samples.HitRatio) + cb.addBucketFieldChecked(fields, "ep_cache_miss_rate", extendedBucketStats.Op.Samples.EpCacheMissRate) + cb.addBucketFieldChecked(fields, "ep_resident_items_rate", 
extendedBucketStats.Op.Samples.EpResidentItemsRate) + cb.addBucketFieldChecked(fields, "vb_avg_active_queue_age", extendedBucketStats.Op.Samples.VbAvgActiveQueueAge) + cb.addBucketFieldChecked(fields, "vb_avg_replica_queue_age", extendedBucketStats.Op.Samples.VbAvgReplicaQueueAge) + cb.addBucketFieldChecked(fields, "vb_avg_pending_queue_age", extendedBucketStats.Op.Samples.VbAvgPendingQueueAge) + cb.addBucketFieldChecked(fields, "vb_avg_total_queue_age", extendedBucketStats.Op.Samples.VbAvgTotalQueueAge) + cb.addBucketFieldChecked(fields, "vb_active_resident_items_ratio", extendedBucketStats.Op.Samples.VbActiveResidentItemsRatio) + cb.addBucketFieldChecked(fields, "vb_replica_resident_items_ratio", extendedBucketStats.Op.Samples.VbReplicaResidentItemsRatio) + cb.addBucketFieldChecked(fields, "vb_pending_resident_items_ratio", extendedBucketStats.Op.Samples.VbPendingResidentItemsRatio) + cb.addBucketFieldChecked(fields, "avg_disk_update_time", extendedBucketStats.Op.Samples.AvgDiskUpdateTime) + cb.addBucketFieldChecked(fields, "avg_disk_commit_time", extendedBucketStats.Op.Samples.AvgDiskCommitTime) + cb.addBucketFieldChecked(fields, "avg_bg_wait_time", extendedBucketStats.Op.Samples.AvgBgWaitTime) + cb.addBucketFieldChecked(fields, "avg_active_timestamp_drift", extendedBucketStats.Op.Samples.AvgActiveTimestampDrift) + cb.addBucketFieldChecked(fields, "avg_replica_timestamp_drift", extendedBucketStats.Op.Samples.AvgReplicaTimestampDrift) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesCount) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsIndexesProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBacklogSize) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsIndexesItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsIndexesTotalBytes) + cb.addBucketFieldChecked(fields, "ep_dcp_views+indexes_backoff", extendedBucketStats.Op.Samples.EpDcpViewsIndexesBackoff) + cb.addBucketFieldChecked(fields, "bg_wait_count", extendedBucketStats.Op.Samples.BgWaitCount) + cb.addBucketFieldChecked(fields, "bg_wait_total", extendedBucketStats.Op.Samples.BgWaitTotal) + cb.addBucketFieldChecked(fields, "bytes_read", extendedBucketStats.Op.Samples.BytesRead) + cb.addBucketFieldChecked(fields, "bytes_written", extendedBucketStats.Op.Samples.BytesWritten) + cb.addBucketFieldChecked(fields, "cas_badval", extendedBucketStats.Op.Samples.CasBadval) + cb.addBucketFieldChecked(fields, "cas_hits", extendedBucketStats.Op.Samples.CasHits) + cb.addBucketFieldChecked(fields, "cas_misses", extendedBucketStats.Op.Samples.CasMisses) + cb.addBucketFieldChecked(fields, "cmd_get", extendedBucketStats.Op.Samples.CmdGet) + cb.addBucketFieldChecked(fields, "cmd_lookup", extendedBucketStats.Op.Samples.CmdLookup) + cb.addBucketFieldChecked(fields, "cmd_set", extendedBucketStats.Op.Samples.CmdSet) + cb.addBucketFieldChecked(fields, "couch_docs_actual_disk_size", extendedBucketStats.Op.Samples.CouchDocsActualDiskSize) + cb.addBucketFieldChecked(fields, "couch_docs_data_size", extendedBucketStats.Op.Samples.CouchDocsDataSize) + 
cb.addBucketFieldChecked(fields, "couch_docs_disk_size", extendedBucketStats.Op.Samples.CouchDocsDiskSize) + cb.addBucketFieldChecked(fields, "couch_spatial_data_size", extendedBucketStats.Op.Samples.CouchSpatialDataSize) + cb.addBucketFieldChecked(fields, "couch_spatial_disk_size", extendedBucketStats.Op.Samples.CouchSpatialDiskSize) + cb.addBucketFieldChecked(fields, "couch_spatial_ops", extendedBucketStats.Op.Samples.CouchSpatialOps) + cb.addBucketFieldChecked(fields, "couch_views_actual_disk_size", extendedBucketStats.Op.Samples.CouchViewsActualDiskSize) + cb.addBucketFieldChecked(fields, "couch_views_data_size", extendedBucketStats.Op.Samples.CouchViewsDataSize) + cb.addBucketFieldChecked(fields, "couch_views_disk_size", extendedBucketStats.Op.Samples.CouchViewsDiskSize) + cb.addBucketFieldChecked(fields, "couch_views_ops", extendedBucketStats.Op.Samples.CouchViewsOps) + cb.addBucketFieldChecked(fields, "curr_connections", extendedBucketStats.Op.Samples.CurrConnections) + cb.addBucketFieldChecked(fields, "curr_items", extendedBucketStats.Op.Samples.CurrItems) + cb.addBucketFieldChecked(fields, "curr_items_tot", extendedBucketStats.Op.Samples.CurrItemsTot) + cb.addBucketFieldChecked(fields, "decr_hits", extendedBucketStats.Op.Samples.DecrHits) + cb.addBucketFieldChecked(fields, "decr_misses", extendedBucketStats.Op.Samples.DecrMisses) + cb.addBucketFieldChecked(fields, "delete_hits", extendedBucketStats.Op.Samples.DeleteHits) + cb.addBucketFieldChecked(fields, "delete_misses", extendedBucketStats.Op.Samples.DeleteMisses) + cb.addBucketFieldChecked(fields, "disk_commit_count", extendedBucketStats.Op.Samples.DiskCommitCount) + cb.addBucketFieldChecked(fields, "disk_commit_total", extendedBucketStats.Op.Samples.DiskCommitTotal) + cb.addBucketFieldChecked(fields, "disk_update_count", extendedBucketStats.Op.Samples.DiskUpdateCount) + cb.addBucketFieldChecked(fields, "disk_update_total", extendedBucketStats.Op.Samples.DiskUpdateTotal) + cb.addBucketFieldChecked(fields, "disk_write_queue", extendedBucketStats.Op.Samples.DiskWriteQueue) + cb.addBucketFieldChecked(fields, "ep_active_ahead_exceptions", extendedBucketStats.Op.Samples.EpActiveAheadExceptions) + cb.addBucketFieldChecked(fields, "ep_active_hlc_drift", extendedBucketStats.Op.Samples.EpActiveHlcDrift) + cb.addBucketFieldChecked(fields, "ep_active_hlc_drift_count", extendedBucketStats.Op.Samples.EpActiveHlcDriftCount) + cb.addBucketFieldChecked(fields, "ep_bg_fetched", extendedBucketStats.Op.Samples.EpBgFetched) + cb.addBucketFieldChecked(fields, "ep_clock_cas_drift_threshold_exceeded", extendedBucketStats.Op.Samples.EpClockCasDriftThresholdExceeded) + cb.addBucketFieldChecked(fields, "ep_data_read_failed", extendedBucketStats.Op.Samples.EpDataReadFailed) + cb.addBucketFieldChecked(fields, "ep_data_write_failed", extendedBucketStats.Op.Samples.EpDataWriteFailed) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_backoff", extendedBucketStats.Op.Samples.EpDcp2IBackoff) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_count", extendedBucketStats.Op.Samples.EpDcp2ICount) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_remaining", extendedBucketStats.Op.Samples.EpDcp2IItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_items_sent", extendedBucketStats.Op.Samples.EpDcp2IItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_producer_count", extendedBucketStats.Op.Samples.EpDcp2IProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_backlog_size", extendedBucketStats.Op.Samples.EpDcp2ITotalBacklogSize) + 
cb.addBucketFieldChecked(fields, "ep_dcp_2i_total_bytes", extendedBucketStats.Op.Samples.EpDcp2ITotalBytes) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_backoff", extendedBucketStats.Op.Samples.EpDcpCbasBackoff) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_count", extendedBucketStats.Op.Samples.EpDcpCbasCount) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_remaining", extendedBucketStats.Op.Samples.EpDcpCbasItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_items_sent", extendedBucketStats.Op.Samples.EpDcpCbasItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_producer_count", extendedBucketStats.Op.Samples.EpDcpCbasProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpCbasTotalBacklogSize) + cb.addBucketFieldChecked(fields, "ep_dcp_cbas_total_bytes", extendedBucketStats.Op.Samples.EpDcpCbasTotalBytes) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_backoff", extendedBucketStats.Op.Samples.EpDcpEventingBackoff) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_count", extendedBucketStats.Op.Samples.EpDcpEventingCount) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_remaining", extendedBucketStats.Op.Samples.EpDcpEventingItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_items_sent", extendedBucketStats.Op.Samples.EpDcpEventingItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_producer_count", extendedBucketStats.Op.Samples.EpDcpEventingProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpEventingTotalBacklogSize) + cb.addBucketFieldChecked(fields, "ep_dcp_eventing_total_bytes", extendedBucketStats.Op.Samples.EpDcpEventingTotalBytes) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_backoff", extendedBucketStats.Op.Samples.EpDcpFtsBackoff) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_count", extendedBucketStats.Op.Samples.EpDcpFtsCount) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_remaining", extendedBucketStats.Op.Samples.EpDcpFtsItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_items_sent", extendedBucketStats.Op.Samples.EpDcpFtsItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_producer_count", extendedBucketStats.Op.Samples.EpDcpFtsProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpFtsTotalBacklogSize) + cb.addBucketFieldChecked(fields, "ep_dcp_fts_total_bytes", extendedBucketStats.Op.Samples.EpDcpFtsTotalBytes) + cb.addBucketFieldChecked(fields, "ep_dcp_other_backoff", extendedBucketStats.Op.Samples.EpDcpOtherBackoff) + cb.addBucketFieldChecked(fields, "ep_dcp_other_count", extendedBucketStats.Op.Samples.EpDcpOtherCount) + cb.addBucketFieldChecked(fields, "ep_dcp_other_items_remaining", extendedBucketStats.Op.Samples.EpDcpOtherItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_other_items_sent", extendedBucketStats.Op.Samples.EpDcpOtherItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_other_producer_count", extendedBucketStats.Op.Samples.EpDcpOtherProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_other_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpOtherTotalBacklogSize) + cb.addBucketFieldChecked(fields, "ep_dcp_other_total_bytes", extendedBucketStats.Op.Samples.EpDcpOtherTotalBytes) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_backoff", extendedBucketStats.Op.Samples.EpDcpReplicaBackoff) + cb.addBucketFieldChecked(fields, 
"ep_dcp_replica_count", extendedBucketStats.Op.Samples.EpDcpReplicaCount) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_remaining", extendedBucketStats.Op.Samples.EpDcpReplicaItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_items_sent", extendedBucketStats.Op.Samples.EpDcpReplicaItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_producer_count", extendedBucketStats.Op.Samples.EpDcpReplicaProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBacklogSize) + cb.addBucketFieldChecked(fields, "ep_dcp_replica_total_bytes", extendedBucketStats.Op.Samples.EpDcpReplicaTotalBytes) + cb.addBucketFieldChecked(fields, "ep_dcp_views_backoff", extendedBucketStats.Op.Samples.EpDcpViewsBackoff) + cb.addBucketFieldChecked(fields, "ep_dcp_views_count", extendedBucketStats.Op.Samples.EpDcpViewsCount) + cb.addBucketFieldChecked(fields, "ep_dcp_views_items_remaining", extendedBucketStats.Op.Samples.EpDcpViewsItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_views_items_sent", extendedBucketStats.Op.Samples.EpDcpViewsItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_views_producer_count", extendedBucketStats.Op.Samples.EpDcpViewsProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_views_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpViewsTotalBacklogSize) + cb.addBucketFieldChecked(fields, "ep_dcp_views_total_bytes", extendedBucketStats.Op.Samples.EpDcpViewsTotalBytes) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_backoff", extendedBucketStats.Op.Samples.EpDcpXdcrBackoff) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_count", extendedBucketStats.Op.Samples.EpDcpXdcrCount) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_remaining", extendedBucketStats.Op.Samples.EpDcpXdcrItemsRemaining) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_items_sent", extendedBucketStats.Op.Samples.EpDcpXdcrItemsSent) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_producer_count", extendedBucketStats.Op.Samples.EpDcpXdcrProducerCount) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_backlog_size", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBacklogSize) + cb.addBucketFieldChecked(fields, "ep_dcp_xdcr_total_bytes", extendedBucketStats.Op.Samples.EpDcpXdcrTotalBytes) + cb.addBucketFieldChecked(fields, "ep_diskqueue_drain", extendedBucketStats.Op.Samples.EpDiskqueueDrain) + cb.addBucketFieldChecked(fields, "ep_diskqueue_fill", extendedBucketStats.Op.Samples.EpDiskqueueFill) + cb.addBucketFieldChecked(fields, "ep_diskqueue_items", extendedBucketStats.Op.Samples.EpDiskqueueItems) + cb.addBucketFieldChecked(fields, "ep_flusher_todo", extendedBucketStats.Op.Samples.EpFlusherTodo) + cb.addBucketFieldChecked(fields, "ep_item_commit_failed", extendedBucketStats.Op.Samples.EpItemCommitFailed) + cb.addBucketFieldChecked(fields, "ep_kv_size", extendedBucketStats.Op.Samples.EpKvSize) + cb.addBucketFieldChecked(fields, "ep_max_size", extendedBucketStats.Op.Samples.EpMaxSize) + cb.addBucketFieldChecked(fields, "ep_mem_high_wat", extendedBucketStats.Op.Samples.EpMemHighWat) + cb.addBucketFieldChecked(fields, "ep_mem_low_wat", extendedBucketStats.Op.Samples.EpMemLowWat) + cb.addBucketFieldChecked(fields, "ep_meta_data_memory", extendedBucketStats.Op.Samples.EpMetaDataMemory) + cb.addBucketFieldChecked(fields, "ep_num_non_resident", extendedBucketStats.Op.Samples.EpNumNonResident) + cb.addBucketFieldChecked(fields, "ep_num_ops_del_meta", extendedBucketStats.Op.Samples.EpNumOpsDelMeta) + 
cb.addBucketFieldChecked(fields, "ep_num_ops_del_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsDelRetMeta) + cb.addBucketFieldChecked(fields, "ep_num_ops_get_meta", extendedBucketStats.Op.Samples.EpNumOpsGetMeta) + cb.addBucketFieldChecked(fields, "ep_num_ops_set_meta", extendedBucketStats.Op.Samples.EpNumOpsSetMeta) + cb.addBucketFieldChecked(fields, "ep_num_ops_set_ret_meta", extendedBucketStats.Op.Samples.EpNumOpsSetRetMeta) + cb.addBucketFieldChecked(fields, "ep_num_value_ejects", extendedBucketStats.Op.Samples.EpNumValueEjects) + cb.addBucketFieldChecked(fields, "ep_oom_errors", extendedBucketStats.Op.Samples.EpOomErrors) + cb.addBucketFieldChecked(fields, "ep_ops_create", extendedBucketStats.Op.Samples.EpOpsCreate) + cb.addBucketFieldChecked(fields, "ep_ops_update", extendedBucketStats.Op.Samples.EpOpsUpdate) + cb.addBucketFieldChecked(fields, "ep_overhead", extendedBucketStats.Op.Samples.EpOverhead) + cb.addBucketFieldChecked(fields, "ep_queue_size", extendedBucketStats.Op.Samples.EpQueueSize) + cb.addBucketFieldChecked(fields, "ep_replica_ahead_exceptions", extendedBucketStats.Op.Samples.EpReplicaAheadExceptions) + cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift", extendedBucketStats.Op.Samples.EpReplicaHlcDrift) + cb.addBucketFieldChecked(fields, "ep_replica_hlc_drift_count", extendedBucketStats.Op.Samples.EpReplicaHlcDriftCount) + cb.addBucketFieldChecked(fields, "ep_tmp_oom_errors", extendedBucketStats.Op.Samples.EpTmpOomErrors) + cb.addBucketFieldChecked(fields, "ep_vb_total", extendedBucketStats.Op.Samples.EpVbTotal) + cb.addBucketFieldChecked(fields, "evictions", extendedBucketStats.Op.Samples.Evictions) + cb.addBucketFieldChecked(fields, "get_hits", extendedBucketStats.Op.Samples.GetHits) + cb.addBucketFieldChecked(fields, "get_misses", extendedBucketStats.Op.Samples.GetMisses) + cb.addBucketFieldChecked(fields, "incr_hits", extendedBucketStats.Op.Samples.IncrHits) + cb.addBucketFieldChecked(fields, "incr_misses", extendedBucketStats.Op.Samples.IncrMisses) + cb.addBucketFieldChecked(fields, "misses", extendedBucketStats.Op.Samples.Misses) + cb.addBucketFieldChecked(fields, "ops", extendedBucketStats.Op.Samples.Ops) + cb.addBucketFieldChecked(fields, "timestamp", extendedBucketStats.Op.Samples.Timestamp) + cb.addBucketFieldChecked(fields, "vb_active_eject", extendedBucketStats.Op.Samples.VbActiveEject) + cb.addBucketFieldChecked(fields, "vb_active_itm_memory", extendedBucketStats.Op.Samples.VbActiveItmMemory) + cb.addBucketFieldChecked(fields, "vb_active_meta_data_memory", extendedBucketStats.Op.Samples.VbActiveMetaDataMemory) + cb.addBucketFieldChecked(fields, "vb_active_num", extendedBucketStats.Op.Samples.VbActiveNum) + cb.addBucketFieldChecked(fields, "vb_active_num_non_resident", extendedBucketStats.Op.Samples.VbActiveNumNonResident) + cb.addBucketFieldChecked(fields, "vb_active_ops_create", extendedBucketStats.Op.Samples.VbActiveOpsCreate) + cb.addBucketFieldChecked(fields, "vb_active_ops_update", extendedBucketStats.Op.Samples.VbActiveOpsUpdate) + cb.addBucketFieldChecked(fields, "vb_active_queue_age", extendedBucketStats.Op.Samples.VbActiveQueueAge) + cb.addBucketFieldChecked(fields, "vb_active_queue_drain", extendedBucketStats.Op.Samples.VbActiveQueueDrain) + cb.addBucketFieldChecked(fields, "vb_active_queue_fill", extendedBucketStats.Op.Samples.VbActiveQueueFill) + cb.addBucketFieldChecked(fields, "vb_active_queue_size", extendedBucketStats.Op.Samples.VbActiveQueueSize) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_aborted_count", 
extendedBucketStats.Op.Samples.VbActiveSyncWriteAbortedCount) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_accepted_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteAcceptedCount) + cb.addBucketFieldChecked(fields, "vb_active_sync_write_committed_count", extendedBucketStats.Op.Samples.VbActiveSyncWriteCommittedCount) + cb.addBucketFieldChecked(fields, "vb_pending_curr_items", extendedBucketStats.Op.Samples.VbPendingCurrItems) + cb.addBucketFieldChecked(fields, "vb_pending_eject", extendedBucketStats.Op.Samples.VbPendingEject) + cb.addBucketFieldChecked(fields, "vb_pending_itm_memory", extendedBucketStats.Op.Samples.VbPendingItmMemory) + cb.addBucketFieldChecked(fields, "vb_pending_meta_data_memory", extendedBucketStats.Op.Samples.VbPendingMetaDataMemory) + cb.addBucketFieldChecked(fields, "vb_pending_num", extendedBucketStats.Op.Samples.VbPendingNum) + cb.addBucketFieldChecked(fields, "vb_pending_num_non_resident", extendedBucketStats.Op.Samples.VbPendingNumNonResident) + cb.addBucketFieldChecked(fields, "vb_pending_ops_create", extendedBucketStats.Op.Samples.VbPendingOpsCreate) + cb.addBucketFieldChecked(fields, "vb_pending_ops_update", extendedBucketStats.Op.Samples.VbPendingOpsUpdate) + cb.addBucketFieldChecked(fields, "vb_pending_queue_age", extendedBucketStats.Op.Samples.VbPendingQueueAge) + cb.addBucketFieldChecked(fields, "vb_pending_queue_drain", extendedBucketStats.Op.Samples.VbPendingQueueDrain) + cb.addBucketFieldChecked(fields, "vb_pending_queue_fill", extendedBucketStats.Op.Samples.VbPendingQueueFill) + cb.addBucketFieldChecked(fields, "vb_pending_queue_size", extendedBucketStats.Op.Samples.VbPendingQueueSize) + cb.addBucketFieldChecked(fields, "vb_replica_curr_items", extendedBucketStats.Op.Samples.VbReplicaCurrItems) + cb.addBucketFieldChecked(fields, "vb_replica_eject", extendedBucketStats.Op.Samples.VbReplicaEject) + cb.addBucketFieldChecked(fields, "vb_replica_itm_memory", extendedBucketStats.Op.Samples.VbReplicaItmMemory) + cb.addBucketFieldChecked(fields, "vb_replica_meta_data_memory", extendedBucketStats.Op.Samples.VbReplicaMetaDataMemory) + cb.addBucketFieldChecked(fields, "vb_replica_num", extendedBucketStats.Op.Samples.VbReplicaNum) + cb.addBucketFieldChecked(fields, "vb_replica_num_non_resident", extendedBucketStats.Op.Samples.VbReplicaNumNonResident) + cb.addBucketFieldChecked(fields, "vb_replica_ops_create", extendedBucketStats.Op.Samples.VbReplicaOpsCreate) + cb.addBucketFieldChecked(fields, "vb_replica_ops_update", extendedBucketStats.Op.Samples.VbReplicaOpsUpdate) + cb.addBucketFieldChecked(fields, "vb_replica_queue_age", extendedBucketStats.Op.Samples.VbReplicaQueueAge) + cb.addBucketFieldChecked(fields, "vb_replica_queue_drain", extendedBucketStats.Op.Samples.VbReplicaQueueDrain) + cb.addBucketFieldChecked(fields, "vb_replica_queue_fill", extendedBucketStats.Op.Samples.VbReplicaQueueFill) + cb.addBucketFieldChecked(fields, "vb_replica_queue_size", extendedBucketStats.Op.Samples.VbReplicaQueueSize) + cb.addBucketFieldChecked(fields, "vb_total_queue_age", extendedBucketStats.Op.Samples.VbTotalQueueAge) + cb.addBucketFieldChecked(fields, "xdc_ops", extendedBucketStats.Op.Samples.XdcOps) + cb.addBucketFieldChecked(fields, "allocstall", extendedBucketStats.Op.Samples.Allocstall) + cb.addBucketFieldChecked(fields, "cpu_cores_available", extendedBucketStats.Op.Samples.CPUCoresAvailable) + cb.addBucketFieldChecked(fields, "cpu_irq_rate", extendedBucketStats.Op.Samples.CPUIrqRate) + cb.addBucketFieldChecked(fields, "cpu_stolen_rate", 
extendedBucketStats.Op.Samples.CPUStolenRate) + cb.addBucketFieldChecked(fields, "cpu_sys_rate", extendedBucketStats.Op.Samples.CPUSysRate) + cb.addBucketFieldChecked(fields, "cpu_user_rate", extendedBucketStats.Op.Samples.CPUUserRate) + cb.addBucketFieldChecked(fields, "cpu_utilization_rate", extendedBucketStats.Op.Samples.CPUUtilizationRate) + cb.addBucketFieldChecked(fields, "hibernated_requests", extendedBucketStats.Op.Samples.HibernatedRequests) + cb.addBucketFieldChecked(fields, "hibernated_waked", extendedBucketStats.Op.Samples.HibernatedWaked) + cb.addBucketFieldChecked(fields, "mem_actual_free", extendedBucketStats.Op.Samples.MemActualFree) + cb.addBucketFieldChecked(fields, "mem_actual_used", extendedBucketStats.Op.Samples.MemActualUsed) + cb.addBucketFieldChecked(fields, "mem_free", extendedBucketStats.Op.Samples.MemFree) + cb.addBucketFieldChecked(fields, "mem_limit", extendedBucketStats.Op.Samples.MemLimit) + cb.addBucketFieldChecked(fields, "mem_total", extendedBucketStats.Op.Samples.MemTotal) + cb.addBucketFieldChecked(fields, "mem_used_sys", extendedBucketStats.Op.Samples.MemUsedSys) + cb.addBucketFieldChecked(fields, "odp_report_failed", extendedBucketStats.Op.Samples.OdpReportFailed) + cb.addBucketFieldChecked(fields, "rest_requests", extendedBucketStats.Op.Samples.RestRequests) + cb.addBucketFieldChecked(fields, "swap_total", extendedBucketStats.Op.Samples.SwapTotal) + cb.addBucketFieldChecked(fields, "swap_used", extendedBucketStats.Op.Samples.SwapUsed) + + return nil +} + +func (cb *Couchbase) addBucketField(fields map[string]interface{}, fieldKey string, value interface{}) { + if !cb.bucketInclude.Match(fieldKey) { + return + } + + fields[fieldKey] = value +} + +func (cb *Couchbase) addBucketFieldChecked(fields map[string]interface{}, fieldKey string, values []float64) { + if values == nil { + return + } + + cb.addBucketField(fields, fieldKey, values[len(values)-1]) +} + +func (cb *Couchbase) queryDetailedBucketStats(server, bucket string, nodeHostname *string, bucketStats *BucketStats) error { + url := server + "/pools/default/buckets/" + bucket + if nodeHostname != nil { + url += "/nodes/" + *nodeHostname + } + url += "/stats?" + + // Set up an HTTP request to get the complete set of bucket stats. 
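+ // When nodeHostname is non-nil, this queries the per-node endpoint /pools/default/buckets/<bucket>/nodes/<hostname>/stats instead of the cluster-wide stats endpoint.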
+ req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + r, err := cb.client.Do(req) + if err != nil { + return err + } + + defer r.Body.Close() + + return json.NewDecoder(r.Body).Decode(bucketStats) +} + +func (cb *Couchbase) Init() error { + f, err := filter.NewIncludeExcludeFilter(cb.BucketStatsIncluded, []string{}) + if err != nil { + return err + } + + cb.bucketInclude = f + + tlsConfig, err := cb.TLSConfig() + if err != nil { + return err + } + + cb.client = &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + MaxIdleConnsPerHost: couchbaseClient.MaxIdleConnsPerHost, + TLSClientConfig: tlsConfig, + }, + } + + couchbaseClient.SetSkipVerify(cb.ClientConfig.InsecureSkipVerify) + couchbaseClient.SetCertFile(cb.ClientConfig.TLSCert) + couchbaseClient.SetKeyFile(cb.ClientConfig.TLSKey) + couchbaseClient.SetRootFile(cb.ClientConfig.TLSCA) + return nil } func init() { inputs.Add("couchbase", func() telegraf.Input { - return &Couchbase{} + return &Couchbase{ + BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"}, + ClusterBucketStats: true, + } }) } diff --git a/plugins/inputs/couchbase/couchbase_data.go b/plugins/inputs/couchbase/couchbase_data.go new file mode 100644 index 0000000000000..2b1227f5c8cdc --- /dev/null +++ b/plugins/inputs/couchbase/couchbase_data.go @@ -0,0 +1,228 @@ +package couchbase + +type BucketStats struct { + Op struct { + Samples struct { + CouchTotalDiskSize []float64 `json:"couch_total_disk_size"` + CouchDocsFragmentation []float64 `json:"couch_docs_fragmentation"` + CouchViewsFragmentation []float64 `json:"couch_views_fragmentation"` + HitRatio []float64 `json:"hit_ratio"` + EpCacheMissRate []float64 `json:"ep_cache_miss_rate"` + EpResidentItemsRate []float64 `json:"ep_resident_items_rate"` + VbAvgActiveQueueAge []float64 `json:"vb_avg_active_queue_age"` + VbAvgReplicaQueueAge []float64 `json:"vb_avg_replica_queue_age"` + VbAvgPendingQueueAge []float64 `json:"vb_avg_pending_queue_age"` + VbAvgTotalQueueAge []float64 `json:"vb_avg_total_queue_age"` + VbActiveResidentItemsRatio []float64 `json:"vb_active_resident_items_ratio"` + VbReplicaResidentItemsRatio []float64 `json:"vb_replica_resident_items_ratio"` + VbPendingResidentItemsRatio []float64 `json:"vb_pending_resident_items_ratio"` + AvgDiskUpdateTime []float64 `json:"avg_disk_update_time"` + AvgDiskCommitTime []float64 `json:"avg_disk_commit_time"` + AvgBgWaitTime []float64 `json:"avg_bg_wait_time"` + AvgActiveTimestampDrift []float64 `json:"avg_active_timestamp_drift"` + AvgReplicaTimestampDrift []float64 `json:"avg_replica_timestamp_drift"` + EpDcpViewsIndexesCount []float64 `json:"ep_dcp_views+indexes_count"` + EpDcpViewsIndexesItemsRemaining []float64 `json:"ep_dcp_views+indexes_items_remaining"` + EpDcpViewsIndexesProducerCount []float64 `json:"ep_dcp_views+indexes_producer_count"` + EpDcpViewsIndexesTotalBacklogSize []float64 `json:"ep_dcp_views+indexes_total_backlog_size"` + EpDcpViewsIndexesItemsSent []float64 `json:"ep_dcp_views+indexes_items_sent"` + EpDcpViewsIndexesTotalBytes []float64 `json:"ep_dcp_views+indexes_total_bytes"` + EpDcpViewsIndexesBackoff []float64 `json:"ep_dcp_views+indexes_backoff"` + BgWaitCount []float64 `json:"bg_wait_count"` + BgWaitTotal []float64 `json:"bg_wait_total"` + BytesRead []float64 `json:"bytes_read"` + BytesWritten []float64 `json:"bytes_written"` + CasBadval []float64 `json:"cas_badval"` + CasHits []float64 `json:"cas_hits"` + CasMisses 
[]float64 `json:"cas_misses"` + CmdGet []float64 `json:"cmd_get"` + CmdLookup []float64 `json:"cmd_lookup"` + CmdSet []float64 `json:"cmd_set"` + CouchDocsActualDiskSize []float64 `json:"couch_docs_actual_disk_size"` + CouchDocsDataSize []float64 `json:"couch_docs_data_size"` + CouchDocsDiskSize []float64 `json:"couch_docs_disk_size"` + CouchSpatialDataSize []float64 `json:"couch_spatial_data_size"` + CouchSpatialDiskSize []float64 `json:"couch_spatial_disk_size"` + CouchSpatialOps []float64 `json:"couch_spatial_ops"` + CouchViewsActualDiskSize []float64 `json:"couch_views_actual_disk_size"` + CouchViewsDataSize []float64 `json:"couch_views_data_size"` + CouchViewsDiskSize []float64 `json:"couch_views_disk_size"` + CouchViewsOps []float64 `json:"couch_views_ops"` + CurrConnections []float64 `json:"curr_connections"` + CurrItems []float64 `json:"curr_items"` + CurrItemsTot []float64 `json:"curr_items_tot"` + DecrHits []float64 `json:"decr_hits"` + DecrMisses []float64 `json:"decr_misses"` + DeleteHits []float64 `json:"delete_hits"` + DeleteMisses []float64 `json:"delete_misses"` + DiskCommitCount []float64 `json:"disk_commit_count"` + DiskCommitTotal []float64 `json:"disk_commit_total"` + DiskUpdateCount []float64 `json:"disk_update_count"` + DiskUpdateTotal []float64 `json:"disk_update_total"` + DiskWriteQueue []float64 `json:"disk_write_queue"` + EpActiveAheadExceptions []float64 `json:"ep_active_ahead_exceptions"` + EpActiveHlcDrift []float64 `json:"ep_active_hlc_drift"` + EpActiveHlcDriftCount []float64 `json:"ep_active_hlc_drift_count"` + EpBgFetched []float64 `json:"ep_bg_fetched"` + EpClockCasDriftThresholdExceeded []float64 `json:"ep_clock_cas_drift_threshold_exceeded"` + EpDataReadFailed []float64 `json:"ep_data_read_failed"` + EpDataWriteFailed []float64 `json:"ep_data_write_failed"` + EpDcp2IBackoff []float64 `json:"ep_dcp_2i_backoff"` + EpDcp2ICount []float64 `json:"ep_dcp_2i_count"` + EpDcp2IItemsRemaining []float64 `json:"ep_dcp_2i_items_remaining"` + EpDcp2IItemsSent []float64 `json:"ep_dcp_2i_items_sent"` + EpDcp2IProducerCount []float64 `json:"ep_dcp_2i_producer_count"` + EpDcp2ITotalBacklogSize []float64 `json:"ep_dcp_2i_total_backlog_size"` + EpDcp2ITotalBytes []float64 `json:"ep_dcp_2i_total_bytes"` + EpDcpCbasBackoff []float64 `json:"ep_dcp_cbas_backoff"` + EpDcpCbasCount []float64 `json:"ep_dcp_cbas_count"` + EpDcpCbasItemsRemaining []float64 `json:"ep_dcp_cbas_items_remaining"` + EpDcpCbasItemsSent []float64 `json:"ep_dcp_cbas_items_sent"` + EpDcpCbasProducerCount []float64 `json:"ep_dcp_cbas_producer_count"` + EpDcpCbasTotalBacklogSize []float64 `json:"ep_dcp_cbas_total_backlog_size"` + EpDcpCbasTotalBytes []float64 `json:"ep_dcp_cbas_total_bytes"` + EpDcpEventingBackoff []float64 `json:"ep_dcp_eventing_backoff"` + EpDcpEventingCount []float64 `json:"ep_dcp_eventing_count"` + EpDcpEventingItemsRemaining []float64 `json:"ep_dcp_eventing_items_remaining"` + EpDcpEventingItemsSent []float64 `json:"ep_dcp_eventing_items_sent"` + EpDcpEventingProducerCount []float64 `json:"ep_dcp_eventing_producer_count"` + EpDcpEventingTotalBacklogSize []float64 `json:"ep_dcp_eventing_total_backlog_size"` + EpDcpEventingTotalBytes []float64 `json:"ep_dcp_eventing_total_bytes"` + EpDcpFtsBackoff []float64 `json:"ep_dcp_fts_backoff"` + EpDcpFtsCount []float64 `json:"ep_dcp_fts_count"` + EpDcpFtsItemsRemaining []float64 `json:"ep_dcp_fts_items_remaining"` + EpDcpFtsItemsSent []float64 `json:"ep_dcp_fts_items_sent"` + EpDcpFtsProducerCount []float64 `json:"ep_dcp_fts_producer_count"` + 
EpDcpFtsTotalBacklogSize []float64 `json:"ep_dcp_fts_total_backlog_size"` + EpDcpFtsTotalBytes []float64 `json:"ep_dcp_fts_total_bytes"` + EpDcpOtherBackoff []float64 `json:"ep_dcp_other_backoff"` + EpDcpOtherCount []float64 `json:"ep_dcp_other_count"` + EpDcpOtherItemsRemaining []float64 `json:"ep_dcp_other_items_remaining"` + EpDcpOtherItemsSent []float64 `json:"ep_dcp_other_items_sent"` + EpDcpOtherProducerCount []float64 `json:"ep_dcp_other_producer_count"` + EpDcpOtherTotalBacklogSize []float64 `json:"ep_dcp_other_total_backlog_size"` + EpDcpOtherTotalBytes []float64 `json:"ep_dcp_other_total_bytes"` + EpDcpReplicaBackoff []float64 `json:"ep_dcp_replica_backoff"` + EpDcpReplicaCount []float64 `json:"ep_dcp_replica_count"` + EpDcpReplicaItemsRemaining []float64 `json:"ep_dcp_replica_items_remaining"` + EpDcpReplicaItemsSent []float64 `json:"ep_dcp_replica_items_sent"` + EpDcpReplicaProducerCount []float64 `json:"ep_dcp_replica_producer_count"` + EpDcpReplicaTotalBacklogSize []float64 `json:"ep_dcp_replica_total_backlog_size"` + EpDcpReplicaTotalBytes []float64 `json:"ep_dcp_replica_total_bytes"` + EpDcpViewsBackoff []float64 `json:"ep_dcp_views_backoff"` + EpDcpViewsCount []float64 `json:"ep_dcp_views_count"` + EpDcpViewsItemsRemaining []float64 `json:"ep_dcp_views_items_remaining"` + EpDcpViewsItemsSent []float64 `json:"ep_dcp_views_items_sent"` + EpDcpViewsProducerCount []float64 `json:"ep_dcp_views_producer_count"` + EpDcpViewsTotalBacklogSize []float64 `json:"ep_dcp_views_total_backlog_size"` + EpDcpViewsTotalBytes []float64 `json:"ep_dcp_views_total_bytes"` + EpDcpXdcrBackoff []float64 `json:"ep_dcp_xdcr_backoff"` + EpDcpXdcrCount []float64 `json:"ep_dcp_xdcr_count"` + EpDcpXdcrItemsRemaining []float64 `json:"ep_dcp_xdcr_items_remaining"` + EpDcpXdcrItemsSent []float64 `json:"ep_dcp_xdcr_items_sent"` + EpDcpXdcrProducerCount []float64 `json:"ep_dcp_xdcr_producer_count"` + EpDcpXdcrTotalBacklogSize []float64 `json:"ep_dcp_xdcr_total_backlog_size"` + EpDcpXdcrTotalBytes []float64 `json:"ep_dcp_xdcr_total_bytes"` + EpDiskqueueDrain []float64 `json:"ep_diskqueue_drain"` + EpDiskqueueFill []float64 `json:"ep_diskqueue_fill"` + EpDiskqueueItems []float64 `json:"ep_diskqueue_items"` + EpFlusherTodo []float64 `json:"ep_flusher_todo"` + EpItemCommitFailed []float64 `json:"ep_item_commit_failed"` + EpKvSize []float64 `json:"ep_kv_size"` + EpMaxSize []float64 `json:"ep_max_size"` + EpMemHighWat []float64 `json:"ep_mem_high_wat"` + EpMemLowWat []float64 `json:"ep_mem_low_wat"` + EpMetaDataMemory []float64 `json:"ep_meta_data_memory"` + EpNumNonResident []float64 `json:"ep_num_non_resident"` + EpNumOpsDelMeta []float64 `json:"ep_num_ops_del_meta"` + EpNumOpsDelRetMeta []float64 `json:"ep_num_ops_del_ret_meta"` + EpNumOpsGetMeta []float64 `json:"ep_num_ops_get_meta"` + EpNumOpsSetMeta []float64 `json:"ep_num_ops_set_meta"` + EpNumOpsSetRetMeta []float64 `json:"ep_num_ops_set_ret_meta"` + EpNumValueEjects []float64 `json:"ep_num_value_ejects"` + EpOomErrors []float64 `json:"ep_oom_errors"` + EpOpsCreate []float64 `json:"ep_ops_create"` + EpOpsUpdate []float64 `json:"ep_ops_update"` + EpOverhead []float64 `json:"ep_overhead"` + EpQueueSize []float64 `json:"ep_queue_size"` + EpReplicaAheadExceptions []float64 `json:"ep_replica_ahead_exceptions"` + EpReplicaHlcDrift []float64 `json:"ep_replica_hlc_drift"` + EpReplicaHlcDriftCount []float64 `json:"ep_replica_hlc_drift_count"` + EpTmpOomErrors []float64 `json:"ep_tmp_oom_errors"` + EpVbTotal []float64 `json:"ep_vb_total"` + Evictions []float64 
`json:"evictions"` + GetHits []float64 `json:"get_hits"` + GetMisses []float64 `json:"get_misses"` + IncrHits []float64 `json:"incr_hits"` + IncrMisses []float64 `json:"incr_misses"` + MemUsed []float64 `json:"mem_used"` + Misses []float64 `json:"misses"` + Ops []float64 `json:"ops"` + Timestamp []float64 `json:"timestamp"` + VbActiveEject []float64 `json:"vb_active_eject"` + VbActiveItmMemory []float64 `json:"vb_active_itm_memory"` + VbActiveMetaDataMemory []float64 `json:"vb_active_meta_data_memory"` + VbActiveNum []float64 `json:"vb_active_num"` + VbActiveNumNonResident []float64 `json:"vb_active_num_non_resident"` + VbActiveOpsCreate []float64 `json:"vb_active_ops_create"` + VbActiveOpsUpdate []float64 `json:"vb_active_ops_update"` + VbActiveQueueAge []float64 `json:"vb_active_queue_age"` + VbActiveQueueDrain []float64 `json:"vb_active_queue_drain"` + VbActiveQueueFill []float64 `json:"vb_active_queue_fill"` + VbActiveQueueSize []float64 `json:"vb_active_queue_size"` + VbActiveSyncWriteAbortedCount []float64 `json:"vb_active_sync_write_aborted_count"` + VbActiveSyncWriteAcceptedCount []float64 `json:"vb_active_sync_write_accepted_count"` + VbActiveSyncWriteCommittedCount []float64 `json:"vb_active_sync_write_committed_count"` + VbPendingCurrItems []float64 `json:"vb_pending_curr_items"` + VbPendingEject []float64 `json:"vb_pending_eject"` + VbPendingItmMemory []float64 `json:"vb_pending_itm_memory"` + VbPendingMetaDataMemory []float64 `json:"vb_pending_meta_data_memory"` + VbPendingNum []float64 `json:"vb_pending_num"` + VbPendingNumNonResident []float64 `json:"vb_pending_num_non_resident"` + VbPendingOpsCreate []float64 `json:"vb_pending_ops_create"` + VbPendingOpsUpdate []float64 `json:"vb_pending_ops_update"` + VbPendingQueueAge []float64 `json:"vb_pending_queue_age"` + VbPendingQueueDrain []float64 `json:"vb_pending_queue_drain"` + VbPendingQueueFill []float64 `json:"vb_pending_queue_fill"` + VbPendingQueueSize []float64 `json:"vb_pending_queue_size"` + VbReplicaCurrItems []float64 `json:"vb_replica_curr_items"` + VbReplicaEject []float64 `json:"vb_replica_eject"` + VbReplicaItmMemory []float64 `json:"vb_replica_itm_memory"` + VbReplicaMetaDataMemory []float64 `json:"vb_replica_meta_data_memory"` + VbReplicaNum []float64 `json:"vb_replica_num"` + VbReplicaNumNonResident []float64 `json:"vb_replica_num_non_resident"` + VbReplicaOpsCreate []float64 `json:"vb_replica_ops_create"` + VbReplicaOpsUpdate []float64 `json:"vb_replica_ops_update"` + VbReplicaQueueAge []float64 `json:"vb_replica_queue_age"` + VbReplicaQueueDrain []float64 `json:"vb_replica_queue_drain"` + VbReplicaQueueFill []float64 `json:"vb_replica_queue_fill"` + VbReplicaQueueSize []float64 `json:"vb_replica_queue_size"` + VbTotalQueueAge []float64 `json:"vb_total_queue_age"` + XdcOps []float64 `json:"xdc_ops"` + Allocstall []float64 `json:"allocstall"` + CPUCoresAvailable []float64 `json:"cpu_cores_available"` + CPUIrqRate []float64 `json:"cpu_irq_rate"` + CPUStolenRate []float64 `json:"cpu_stolen_rate"` + CPUSysRate []float64 `json:"cpu_sys_rate"` + CPUUserRate []float64 `json:"cpu_user_rate"` + CPUUtilizationRate []float64 `json:"cpu_utilization_rate"` + HibernatedRequests []float64 `json:"hibernated_requests"` + HibernatedWaked []float64 `json:"hibernated_waked"` + MemActualFree []float64 `json:"mem_actual_free"` + MemActualUsed []float64 `json:"mem_actual_used"` + MemFree []float64 `json:"mem_free"` + MemLimit []float64 `json:"mem_limit"` + MemTotal []float64 `json:"mem_total"` + MemUsedSys []float64 
`json:"mem_used_sys"` + OdpReportFailed []float64 `json:"odp_report_failed"` + RestRequests []float64 `json:"rest_requests"` + SwapTotal []float64 `json:"swap_total"` + SwapUsed []float64 `json:"swap_used"` + } `json:"samples"` + Samplescount int `json:"samplesCount"` + Ispersistent bool `json:"isPersistent"` + Lasttstamp int64 `json:"lastTStamp"` + Interval int `json:"interval"` + } `json:"op"` + HotKeys []interface{} `json:"hot_keys"` +} diff --git a/plugins/inputs/couchbase/couchbase_test.go b/plugins/inputs/couchbase/couchbase_test.go index df7f1b4c14cf7..9363e12c7f37e 100644 --- a/plugins/inputs/couchbase/couchbase_test.go +++ b/plugins/inputs/couchbase/couchbase_test.go @@ -2,31 +2,47 @@ package couchbase import ( "encoding/json" + "net/http" + "net/http/httptest" "testing" - "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/telegraf/plugins/common/tls" - "github.com/couchbase/go-couchbase" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGatherServer(t *testing.T) { - var pool couchbase.Pool - if err := json.Unmarshal([]byte(poolsDefaultResponse), &pool); err != nil { - t.Fatal("parse poolsDefaultResponse", err) - } + bucket := "blastro-df" + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/pools" { + _, _ = w.Write([]byte(poolsResponse)) + } else if r.URL.Path == "/pools/default" { + _, _ = w.Write([]byte(poolsDefaultResponse)) + } else if r.URL.Path == "/pools/default/buckets" { + _, _ = w.Write([]byte(bucketsResponse)) + } else if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" { + _, _ = w.Write([]byte(bucketStatsResponse)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) - if err := json.Unmarshal([]byte(bucketResponse), &pool.BucketMap); err != nil { - t.Fatal("parse bucketResponse", err) + cb := Couchbase{ + ClusterBucketStats: true, + BucketStatsIncluded: []string{"quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"}, } - var cb Couchbase + require.NoError(t, cb.Init()) + var acc testutil.Accumulator - cb.gatherServer("mycluster", &acc, &pool) + require.NoError(t, cb.gatherServer(&acc, fakeServer.URL)) + acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23181365248.0, "memory_total": 64424656896.0}, - map[string]string{"cluster": "mycluster", "hostname": "172.16.10.187:8091"}) + map[string]string{"cluster": fakeServer.URL, "hostname": "172.16.10.187:8091"}) acc.AssertContainsTaggedFields(t, "couchbase_node", map[string]interface{}{"memory_free": 23665811456.0, "memory_total": 64424656896.0}, - map[string]string{"cluster": "mycluster", "hostname": "172.16.10.65:8091"}) + map[string]string{"cluster": fakeServer.URL, "hostname": "172.16.10.65:8091"}) acc.AssertContainsTaggedFields(t, "couchbase_bucket", map[string]interface{}{ "quota_percent_used": 68.85424936294555, @@ -37,11 +53,10 @@ func TestGatherServer(t *testing.T) { "data_used": 212179309111.0, "mem_used": 202156957464.0, }, - map[string]string{"cluster": "mycluster", "bucket": "blastro-df"}) + map[string]string{"cluster": fakeServer.URL, "bucket": "blastro-df"}) } func TestSanitizeURI(t *testing.T) { - var sanitizeTest = []struct { input string expected string @@ -64,8 +79,103 @@ func TestSanitizeURI(t *testing.T) { } } +func TestGatherDetailedBucketMetrics(t *testing.T) { + bucket := "Ducks" + node := "172.94.77.2:8091" + + tests := []struct { + name string + node *string + response 
+func TestGatherDetailedBucketMetrics(t *testing.T) {
+	bucket := "Ducks"
+	node := "172.94.77.2:8091"
+
+	tests := []struct {
+		name     string
+		node     *string
+		response string
+	}{
+		{
+			name:     "cluster-level with all fields",
+			response: bucketStatsResponse,
+		},
+		{
+			name:     "cluster-level with missing fields",
+			response: bucketStatsResponseWithMissing,
+		},
+		{
+			name:     "node-level with all fields",
+			response: nodeBucketStatsResponse,
+			node:     &node,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				if r.URL.Path == "/pools/default/buckets/"+bucket+"/stats" || r.URL.Path == "/pools/default/buckets/"+bucket+"/nodes/"+node+"/stats" {
+					_, _ = w.Write([]byte(test.response))
+				} else {
+					w.WriteHeader(http.StatusNotFound)
+				}
+			}))
+
+			var err error
+			var cb Couchbase
+			cb.BucketStatsIncluded = []string{"couch_total_disk_size"}
+			cb.ClientConfig = tls.ClientConfig{
+				InsecureSkipVerify: true,
+			}
+			err = cb.Init()
+			require.NoError(t, err)
+			var acc testutil.Accumulator
+			bucketStats := &BucketStats{}
+			if err := json.Unmarshal([]byte(test.response), bucketStats); err != nil {
+				t.Fatal("parse bucketResponse", err)
+			}
+
+			fields := make(map[string]interface{})
+			err = cb.gatherDetailedBucketStats(fakeServer.URL, bucket, test.node, fields)
+			require.NoError(t, err)
+
+			acc.AddFields("couchbase_bucket", fields, nil)
+
+			// Ensure we gathered only one metric (the one that we configured).
+			require.Equal(t, len(acc.Metrics), 1)
+			require.Equal(t, len(acc.Metrics[0].Fields), 1)
+		})
+	}
+}
+
+func TestGatherNodeOnly(t *testing.T) {
+	faker := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/pools" {
+			_, _ = w.Write([]byte(poolsResponse))
+		} else if r.URL.Path == "/pools/default" {
+			_, _ = w.Write([]byte(poolsDefaultResponse))
+		} else if r.URL.Path == "/pools/default/buckets" {
+			_, _ = w.Write([]byte(bucketsResponse))
+		} else {
+			w.WriteHeader(http.StatusNotFound)
+		}
+	}))
+
+	cb := Couchbase{
+		Servers: []string{faker.URL},
+	}
+	require.NoError(t, cb.Init())
+
+	var acc testutil.Accumulator
+	require.NoError(t, cb.gatherServer(&acc, faker.URL))
+
+	require.Equal(t, 0, len(acc.Errors))
+	require.Equal(t, 7, len(acc.Metrics))
+	acc.AssertDoesNotContainMeasurement(t, "couchbase_bucket")
+}
+
+// From `/pools`
+const poolsResponse string = `{"pools":[{"name":"default","uri":"/pools/default"}]}`
+
 // From `/pools/default` on a real cluster
-const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups?v=98656394","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. 
Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts?token=2814&uuid=2bec87861652b990cf6aa5c7ee58c253","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0
,"swap_used":0,"mem_total":64424656896,"mem_free":23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"
version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets?v=74117050&uuid=2bec87861652b990cf6aa5c7ee58c253","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"rebalance":{"uri":"/controller/rebalance?uuid=2bec87861652b990cf6aa5c7ee58c253"},"failOver":{"uri":"/controller/failOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reAddNode":{"uri":"/controller/reAddNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"reFailOver":{"uri":"/controller/reFailOver?uuid=2bec87861652b990cf6aa5c7ee58c253"},"ejectNode":{"uri":"/controller/ejectNode?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setRecoveryType":{"uri":"/controller/setRecoveryType?uuid=2bec87861652b990cf6aa5c7ee58c253"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253","cancelURI":"/controller/cancelLogsCollection?uuid=2bec87861652b990cf6aa5c7ee58c253"},"replication":{"createURI":"/controller/createReplication?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup?uuid=2bec87861652b990cf6aa5c7ee58c253","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance?uuid=2bec87861652b990cf6aa5c7ee58c253","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks?v=97479372"},"visualSettingsUri":"/internalSettings/visual?v=7111573","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` +const poolsDefaultResponse string = `{"storageTotals":{"ram":{"total":450972598272,"quotaTotal":360777252864,"quotaUsed":360777252864,"used":446826622976,"usedByData":255061495696,"quotaUsedPerNode":51539607552,"quotaTotalPerNode":51539607552},"hdd":{"total":1108766539776,"quotaTotal":1108766539776,"used":559135126484,"usedByData":515767865143,"free":498944942902}},"serverGroupsUri":"/pools/default/serverGroups","name":"default","alerts":["Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.148\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.65\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.173\" is taken up by keys and metadata.","Metadata overhead warning. Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.75\" is taken up by keys and metadata.","Metadata overhead warning. 
Over 65% of RAM allocated to bucket \"blastro-df\" on node \"172.16.13.105\" is taken up by keys and metadata.","Metadata overhead warning. Over 64% of RAM allocated to bucket \"blastro-df\" on node \"172.16.8.127\" is taken up by keys and metadata.","Metadata overhead warning. Over 63% of RAM allocated to bucket \"blastro-df\" on node \"172.16.15.120\" is taken up by keys and metadata.","Metadata overhead warning. Over 66% of RAM allocated to bucket \"blastro-df\" on node \"172.16.10.187\" is taken up by keys and metadata."],"alertsSilenceURL":"/controller/resetAlerts","nodes":[{"systemStats":{"cpu_utilization_rate":35.43307086614173,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23181365248},"interestingStats":{"cmd_get":17.98201798201798,"couch_docs_actual_disk_size":68506048063,"couch_docs_data_size":38718796110,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140158886,"curr_items_tot":279374646,"ep_bg_fetched":0.999000999000999,"get_hits":10.98901098901099,"mem_used":36497390640,"ops":829.1708291708292,"vb_replica_curr_items":139215760},"uptime":"341236","memoryTotal":64424656896,"memoryFree":23181365248,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.187:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.38255033557047,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23665811456},"interestingStats":{"cmd_get":172.8271728271728,"couch_docs_actual_disk_size":79360565405,"couch_docs_data_size":38736382876,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140174377,"curr_items_tot":279383025,"ep_bg_fetched":0.999000999000999,"get_hits":167.8321678321678,"mem_used":36650059656,"ops":1685.314685314685,"vb_replica_curr_items":139208648},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23665811456,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.10.65:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":25.5586592178771,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23726600192},"interestingStats":{"cmd_get":63.06306306306306,"couch_docs_actual_disk_size":79345105217,"couch_docs_data_size":38728086130,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139195268,"curr_items_tot":279349113,"ep_bg_fetched":0,"get_hits":53.05305305305306,"mem_used":36476665576,"ops":1878.878878878879,"vb_replica_curr_items":140153845},"uptime":"341210","memoryTotal":64424656896,"memoryFree":23726600192,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.105:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":26.45803698435277,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free"
:23854841856},"interestingStats":{"cmd_get":51.05105105105105,"couch_docs_actual_disk_size":74465931949,"couch_docs_data_size":38723830730,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139209869,"curr_items_tot":279380019,"ep_bg_fetched":0,"get_hits":47.04704704704704,"mem_used":36471784896,"ops":1831.831831831832,"vb_replica_curr_items":140170150},"uptime":"340526","memoryTotal":64424656896,"memoryFree":23854841856,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.13.173:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":47.31034482758621,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23773573120},"interestingStats":{"cmd_get":77.07707707707708,"couch_docs_actual_disk_size":74743093945,"couch_docs_data_size":38594660087,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139215932,"curr_items_tot":278427644,"ep_bg_fetched":0,"get_hits":53.05305305305305,"mem_used":36306500344,"ops":1981.981981981982,"vb_replica_curr_items":139211712},"uptime":"340495","memoryTotal":64424656896,"memoryFree":23773573120,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.15.120:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":17.60660247592847,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23662190592},"interestingStats":{"cmd_get":146.8531468531468,"couch_docs_actual_disk_size":72932847344,"couch_docs_data_size":38581771457,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139226879,"curr_items_tot":278436540,"ep_bg_fetched":0,"get_hits":144.8551448551448,"mem_used":36421860496,"ops":1495.504495504495,"vb_replica_curr_items":139209661},"uptime":"337174","memoryTotal":64424656896,"memoryFree":23662190592,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.127:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"systemStats":{"cpu_utilization_rate":21.68831168831169,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":24049729536},"interestingStats":{"cmd_get":11.98801198801199,"couch_docs_actual_disk_size":66414273220,"couch_docs_data_size":38587642702,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139193759,"curr_items_tot":278398926,"ep_bg_fetched":0,"get_hits":9.990009990009991,"mem_used":36237234088,"ops":883.1168831168832,"vb_replica_curr_items":139205167},"uptime":"341228","memoryTotal":64424656896,"memoryFree":24049729536,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"couchApiBase":"http://172.16.8.148:8092/","clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-
unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"buckets":{"uri":"/pools/default/buckets","terseBucketsBase":"/pools/default/b/","terseStreamingBucketsBase":"/pools/default/bs/"},"remoteClusters":{"uri":"/pools/default/remoteClusters","validateURI":"/pools/default/remoteClusters?just_validate=1"},"controllers":{"addNode":{"uri":"/controller/addNode"},"rebalance":{"uri":"/controller/rebalance"},"failOver":{"uri":"/controller/failOver"},"startGracefulFailover":{"uri":"/controller/startGracefulFailover"},"reAddNode":{"uri":"/controller/reAddNode"},"reFailOver":{"uri":"/controller/reFailOver"},"ejectNode":{"uri":"/controller/ejectNode"},"setRecoveryType":{"uri":"/controller/setRecoveryType"},"setAutoCompaction":{"uri":"/controller/setAutoCompaction","validateURI":"/controller/setAutoCompaction?just_validate=1"},"clusterLogsCollection":{"startURI":"/controller/startLogsCollection","cancelURI":"/controller/cancelLogsCollection"},"replication":{"createURI":"/controller/createReplication","validateURI":"/controller/createReplication?just_validate=1"},"setFastWarmup":{"uri":"/controller/setFastWarmup","validateURI":"/controller/setFastWarmup?just_validate=1"}},"rebalanceStatus":"none","rebalanceProgressUri":"/pools/default/rebalanceProgress","stopRebalanceUri":"/controller/stopRebalance","nodeStatusesUri":"/nodeStatuses","maxBucketCount":10,"autoCompactionSettings":{"parallelDBAndViewCompaction":false,"databaseFragmentationThreshold":{"percentage":50,"size":"undefined"},"viewFragmentationThreshold":{"percentage":50,"size":"undefined"}},"fastWarmupSettings":{"fastWarmupEnabled":true,"minMemoryThreshold":10,"minItemsThreshold":10},"tasks":{"uri":"/pools/default/tasks"},"visualSettingsUri":"/internalSettings/visual","counters":{"rebalance_success":4,"rebalance_start":6,"rebalance_stop":2}}` + +// From `/pools/default/buckets` on a real cluster +const bucketsResponse string = 
`[{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.
15.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":
0,"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1
,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3]
,[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}]` + +const bucketStatsResponse string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"hit_ratio":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_cache_miss_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_resident_items_rate":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_avg_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_replica_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_pending_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"avg_disk_update_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_disk_commit_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_bg_wait_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_active_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_replica_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views+indexes_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bytes_read":[118.1818181818182,142.2805247225025,180.8080808080808,197.7800201816347,141.9939577039275,118.5410334346505,142.4242424242424,148.4848484848485,197.3816717019134,202.4291497975709,118.0625630676085,142.4242424242424,179.6165489404642,197.979797979798,142.4242424242424,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,201.816347124117,118.1818181818182,142.4242424242424,148.4848484848485,197.7800201816347,142.4242424242424,118.1818181818182,142.2805247225025,179.7979797979798,197.1830985915493,202.6342451874367,118.1818181818182,142.2805247225025,180.4435483870968,198.3805668016194,142.2805247225025,118.1818181818182,142.2805247225025,148.4848484848485,197.979797979798,202.020202020202,118.0625630676085,118.1818181818182,204.040404040404,197.7800201816347,142.1370967741935,118.4210526315789,118.1818181818182,172.5529767911201,197.5806451612903,202.4291497975709,118.0625630676085,118.1818181818182,172.7272727272727,197.7800201816347,142.4242424242424,118.0625630676085,118.1818181818182,204.040404040404,197.979797979798,201.816347124117],"bytes_written":[36420.20202020202,37762.86579212916,37225.25252525252,50460.14127144299,37686.80765357502,36530.90172239109,37801.0101010101,37111.11111111111,50358.50956696878,60511.13360323886,36383.45105953582,37801.0101010101,37393.54187689203,50511.11111111111,37801.0101010101,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60327.95156407669,36420.20202020202,37801.0101010101,37111.11111111111,50460.14127144299,37801.0101010101,36420.202
02020202,37762.86579212916,37431.31313131313,50307.84708249497,60572.44174265451,36420.20202020202,37762.86579212916,37150.20161290323,50613.36032388664,37762.86579212916,36420.20202020202,37762.86579212916,37111.11111111111,50511.11111111111,60388.88888888889,36383.45105953582,36420.20202020202,38812.12121212122,50460.14127144299,37724.79838709677,36493.92712550607,36420.20202020202,38453.07769929364,50409.27419354839,60511.13360323886,36383.45105953582,36420.20202020202,38491.91919191919,50460.14127144299,37801.0101010101,36383.45105953582,36420.20202020202,38812.12121212122,50511.11111111111,60327.95156407669],"cas_badval":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_get":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_lookup":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_set":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_docs_actual_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341],"couch_docs_data_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_docs_disk_size":[531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373,531373],"couch_spatial_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_actual_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"curr_connections":[14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14],"curr_items":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"curr_items_tot":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"decr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"decr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_write_queue":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_bg_fetched":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_clock_cas_drift_threshold_exceeded":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_read_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_write_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0],"ep_dcp_2i_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_cbas_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_eventing_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_fts_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_other_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_producer_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_backlog_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_xdcr_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_flusher_todo":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_item_commit_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_kv_size":[10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340,10340],"ep_max_size":[8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032,8264876032],"ep_mem_high_wat":[7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627,7025144627],"ep_mem_low_wat":[6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6
198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024,6198657024],"ep_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"ep_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_get_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_value_ejects":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_overhead":[403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824,403824],"ep_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_tmp_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_vb_total":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"evictions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_used":[4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016,4937016],"misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"timestamp":[1615918120012,1615918121003,1615918121993,1615918122984,1615918123977,1615918124964,1615918125954,1615918126944,1615918127937,1615918128925,1615918129916,1615918130906,1615918131897,1615918132887,1615918133877,1615918134867,1615918135858,1615918136848,1615918137838,1615918138829,1615918139819,1615918140809,1615918141799,1615918142790,1615918143780,1615918144770,1615918145761,1615918146751,1615918147745,1615918148732,1615918149722,1615918150713,1615918151705,1615918152693,1615918153684,1615918154674,1615918155665,1615918156655,1615918157645,1615918158635,1615918159626,1615918160616,1615918161606,1615918162597,1615918163589,1615918164577,1615918165567,1615918166558,1615918167550,1615918168538,1615918169529,1615918170519,1615918171509,1615918172500,1615918173490,1615918174481,1615918175471,1615918176461,1615918177451,1615918178442],"vb_active_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_itm_memory":[88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88],"vb_active_meta_data_memory":[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68],"vb_active_num":[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64],"vb_active_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_aborted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_accepted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_committed_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"xdc_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"allocstall":[18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615,18446744073709551615],"cpu_cores_available":[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12],"cpu_irq_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_stolen_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_sys_rate":[4.942965779467681,5.243268776570619,6.823027718550106,4.815073272854153,4.853128991060026,5.068836045056321,4.983108108108108,4.110738255033557,3.201347935973041,3.959561920808762,3.610411418975651,3.459915611814346,3.691275167785235,4.553119730185498,6.470588235294118,4.545454545454546,5.046257359125315,5.976430976430977,5.564924114671164,3.703703703703704,3.529411764705882,3.544303797468354,3.826787512588117,5.118961788031723,7.166947723440135,5.87248322147651,4.289318755256518,5.485232067510548,4.765886287625418,4.672897196261682,4.184100418410042,4.560810810810811,7.02928870292887,6.081081081081081,5.378151260504202,6.239460370994941,8.984047019311502,6.896551724137931,9.636517328825022,9.335576114381833,7.64063811922754,8.684654300168635,6.543624161073826,6.465155331654072,5.961376994122586,3.807106598984772,3.36417157275021,3.700588730025231,3.775167785234899,9.459459459
45946,3.114478114478115,3.451178451178451,4.465037910699242,3.852596314907873,3.462837837837838,5.205709487825357,5.218855218855219,6.532663316582915,5.885057471264368,4.030226700251889],"cpu_user_rate":[15.20912547528517,9.58904109589041,10.76759061833689,8.443824145150035,8.301404853128991,10.95118898623279,9.797297297297296,6.879194630872483,6.823925863521483,6.908171861836562,6.54911838790932,6.835443037974684,7.382550335570469,10.28667790893761,16.97478991596639,11.53198653198653,9.75609756097561,11.11111111111111,12.05733558178752,7.154882154882155,6.890756302521009,6.666666666666667,7.150050352467271,10.23792357606345,12.7318718381113,9.479865771812081,7.905803195962994,8.016877637130802,9.19732441471572,9.600679694137638,7.364016736401673,8.108108108108109,15.31380753138075,13.85135135135135,10.58823529411765,12.64755480607083,18.47187237615449,13.28847771236333,19.8647506339814,21.86711522287637,23.5936188077246,22.17537942664418,12.08053691275168,16.96053736356003,32.49370277078086,8.20642978003384,10.17661900756939,7.653490328006728,10.82214765100671,14.27364864864865,6.986531986531986,7.407407407407407,10.02527379949452,11.55778894472362,8.192567567567568,12.34256926952141,14.05723905723906,28.64321608040201,13.14942528735632,7.388748950461797],"cpu_utilization_rate":[20.15209125475285,14.83230987246103,17.59061833688699,13.25889741800419,13.15453384418902,16.02002503128911,14.78040540540541,10.98993288590604,10.02527379949452,10.86773378264532,10.15952980688497,10.29535864978903,11.0738255033557,14.8397976391231,23.4453781512605,16.07744107744108,14.80235492010092,17.08754208754209,17.62225969645868,10.85858585858586,10.42016806722689,10.21097046413502,10.97683786505539,15.35688536409517,19.89881956155143,15.35234899328859,12.19512195121951,13.50210970464135,13.96321070234114,14.27357689039932,11.54811715481171,12.66891891891892,22.34309623430962,19.93243243243243,15.96638655462185,18.88701517706577,27.45591939546599,20.18502943650126,29.50126796280642,31.2026913372582,31.23425692695214,30.86003372681282,18.6241610738255,23.42569269521411,38.45507976490345,12.01353637901861,13.5407905803196,11.35407905803196,14.59731543624161,23.73310810810811,10.1010101010101,10.85858585858586,14.49031171019377,15.41038525963149,11.65540540540541,17.54827875734677,19.27609427609428,35.17587939698493,19.03448275862069,11.41897565071369],"hibernated_requests":[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7],"hibernated_waked":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_actual_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_actual_used":[10175004672,10181505024,10123186176,10124263424,10120626176,10101411840,10100801536,10100355072,10100891648,10091769856,10088787968,10088095744,10088280064,10099761152,10106314752,10105954304,1009
9724288,10114744320,10116685824,10107191296,10106101760,10106327040,10106327040,10104967168,10126032896,10128973824,10124148736,10123046912,10122588160,10126843904,10127106048,10127884288,10130755584,10139250688,10134233088,10128961536,10158841856,10178539520,10193973248,10193973248,10224226304,10120118272,10121252864,10129801216,10132705280,10133995520,10129596416,10111340544,10106191872,10100752384,10101616640,10103988224,10114789376,10113617920,10114142208,10116382720,10115072000,10113662976,10111049728,10108059648],"mem_free":[7004864512,6998364160,7056683008,7055605760,7059243008,7078457344,7079067648,7079514112,7078977536,7088099328,7091081216,7091773440,7091589120,7080108032,7073554432,7073914880,7080144896,7065124864,7063183360,7072677888,7073767424,7073542144,7073542144,7074902016,7053836288,7050895360,7055720448,7056822272,7057281024,7053025280,7052763136,7051984896,7049113600,7040618496,7045636096,7050907648,7021027328,7001329664,6985895936,6985895936,6955642880,7059750912,7058616320,7050067968,7047163904,7045873664,7050272768,7068528640,7073677312,7079116800,7078252544,7075880960,7065079808,7066251264,7065726976,7063486464,7064797184,7066206208,7068819456,7071809536],"mem_limit":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_total":[17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184,17179869184],"mem_used_sys":[16694517760,16707862528,16608030720,16610041856,16604663808,16553811968,16553463808,16553369600,16553861120,16539238400,16536092672,16535760896,16535707648,16553418752,16559439872,16558895104,16554569728,16580468736,16582680576,16565084160,16564649984,16565272576,16565272576,16562823168,16599863296,16602157056,16597528576,16596774912,16595107840,16593002496,16593485824,16596668416,16598691840,16607469568,16599904256,16590753792,16644947968,16684613632,16714768384,16714768384,16781234176,16573353984,16575979520,16593072128,16603037696,16605077504,16599199744,16581554176,16570187776,16560140288,16561221632,16565153792,16577990656,16577200128,16578031616,16582909952,16569671680,16565702656,16560218112,16554315776],"odp_report_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"rest_requ
ests":[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,8,2,2,2,2,2,2,2,2,3,2,2,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,2,2,2,2,2],"swap_total":[1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824,1073741824],"swap_used":[122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392,122683392]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` +const bucketStatsResponseWithMissing string = `{"op":{"samples":{"couch_total_disk_size":[559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341,559341]},"samplesCount":60,"isPersistent":true,"lastTStamp":1615918178442,"interval":1000},"hot_keys":[{"name":"first-duck","ops":6.003482019571351e-05}]}` -// From `/pools/default/buckets/blastro-df` on a real cluster -const bucketResponse string = `{"blastro-df": 
{"name":"blastro-df","bucketType":"membase","authType":"sasl","saslPassword":"","proxyPort":0,"replicaIndex":false,"uri":"/pools/default/buckets/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","streamingUri":"/pools/default/bucketsStreaming/blastro-df?bucket_uuid=2e6b9dc4c278300ce3a4f27ad540323f","localRandomKeyUri":"/pools/default/buckets/blastro-df/localRandomKey","controllers":{"compactAll":"/pools/default/buckets/blastro-df/controller/compactBucket","compactDB":"/pools/default/buckets/default/controller/compactDatabases","purgeDeletes":"/pools/default/buckets/blastro-df/controller/unsafePurgeBucket","startRecovery":"/pools/default/buckets/blastro-df/controller/startRecovery"},"nodes":[{"couchApiBase":"http://172.16.8.148:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":18.39557399723375,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23791935488},"interestingStats":{"cmd_get":10.98901098901099,"couch_docs_actual_disk_size":79525832077,"couch_docs_data_size":38633186946,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139229304,"curr_items_tot":278470058,"ep_bg_fetched":0,"get_hits":5.994005994005994,"mem_used":36284362960,"ops":1275.724275724276,"vb_replica_curr_items":139240754},"uptime":"343968","memoryTotal":64424656896,"memoryFree":23791935488,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.148","hostname":"172.16.8.148:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.8.127:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.97183098591549,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23533023232},"interestingStats":{"cmd_get":39.96003996003996,"couch_docs_actual_disk_size":63322357663,"couch_docs_data_size":38603481061,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139262616,"curr_items_tot":278508069,"ep_bg_fetched":0.999000999000999,"get_hits":30.96903096903097,"mem_used":36475078736,"ops":1370.629370629371,"vb_replica_curr_items":139245453},"uptime":"339914","memoryTotal":64424656896,"memoryFree":23533023232,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.8.127","hostname":"172.16.8.127:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.15.120:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":23.38028169014084,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23672963072},"interestingStats":{"cmd_get":88.08808808808809,"couch_docs_actual_disk_size":80260594761,"couch_docs_data_size":38632863189,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139251563,"curr_items_tot":278498913,"ep_bg_fetched":0,"get_hits":74.07407407407408,"mem_used":36348663000,"ops":1707.707707707708,"vb_replica_curr_items":139247350},"uptime":"343235","memoryTotal":64424656896,"memoryFree":23672963072,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.15.120","hostname":"172.16.15
.120:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.173:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":22.15988779803646,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23818825728},"interestingStats":{"cmd_get":103.1031031031031,"couch_docs_actual_disk_size":68247785524,"couch_docs_data_size":38747583467,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139245453,"curr_items_tot":279451313,"ep_bg_fetched":1.001001001001001,"get_hits":86.08608608608608,"mem_used":36524715864,"ops":1749.74974974975,"vb_replica_curr_items":140205860},"uptime":"343266","memoryTotal":64424656896,"memoryFree":23818825728,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.173","hostname":"172.16.13.173:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.13.105:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.94444444444444,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23721426944},"interestingStats":{"cmd_get":113.1131131131131,"couch_docs_actual_disk_size":68102832275,"couch_docs_data_size":38747477407,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":139230887,"curr_items_tot":279420530,"ep_bg_fetched":0,"get_hits":106.1061061061061,"mem_used":36524887624,"ops":1799.7997997998,"vb_replica_curr_items":140189643},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23721426944,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.13.105","hostname":"172.16.13.105:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.65:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":60.62176165803109,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23618203648},"interestingStats":{"cmd_get":30.96903096903097,"couch_docs_actual_disk_size":69052175561,"couch_docs_data_size":38755695030,"couch_views_actual_disk_size":0,"couch_views_data_size":0,"curr_items":140210194,"curr_items_tot":279454253,"ep_bg_fetched":0,"get_hits":26.97302697302698,"mem_used":36543072472,"ops":1337.662337662338,"vb_replica_curr_items":139244059},"uptime":"343950","memoryTotal":64424656896,"memoryFree":23618203648,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.65","hostname":"172.16.10.65:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}},{"couchApiBase":"http://172.16.10.187:8092/blastro-df%2B2e6b9dc4c278300ce3a4f27ad540323f","systemStats":{"cpu_utilization_rate":21.83588317107093,"swap_total":0,"swap_used":0,"mem_total":64424656896,"mem_free":23062269952},"interestingStats":{"cmd_get":33.03303303303304,"couch_docs_actual_disk_size":74422029546,"couch_docs_data_size":38758172837,"couch_views_actual_disk_size":0,"couch_views_data_size":0,
"curr_items":140194321,"curr_items_tot":279445526,"ep_bg_fetched":0,"get_hits":21.02102102102102,"mem_used":36527676832,"ops":1088.088088088088,"vb_replica_curr_items":139251205},"uptime":"343971","memoryTotal":64424656896,"memoryFree":23062269952,"mcdMemoryReserved":49152,"mcdMemoryAllocated":49152,"replication":1,"clusterMembership":"active","recoveryType":"none","status":"healthy","otpNode":"ns_1@172.16.10.187","thisNode":true,"hostname":"172.16.10.187:8091","clusterCompatibility":196608,"version":"3.0.1-1444-rel-community","os":"x86_64-unknown-linux-gnu","ports":{"proxy":11211,"direct":11210}}],"stats":{"uri":"/pools/default/buckets/blastro-df/stats","directoryURI":"/pools/default/buckets/blastro-df/statsDirectory","nodeStatsListURI":"/pools/default/buckets/blastro-df/nodes"},"ddocs":{"uri":"/pools/default/buckets/blastro-df/ddocs"},"nodeLocator":"vbucket","fastWarmupSettings":false,"autoCompactionSettings":false,"uuid":"2e6b9dc4c278300ce3a4f27ad540323f","vBucketServerMap":{"hashAlgorithm":"CRC","numReplicas":1,"serverList":["172.16.10.187:11210","172.16.10.65:11210","172.16.13.105:11210","172.16.13.173:11210","172.16.15.120:11210","172.16.8.127:11210","172.16.8.148:11210"],"vBucketMap":[[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,6],[0,6],[0,6],[0,6],[0,6],[1,3],[1,3],[1,3],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[2,3],[2,3],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[2,5],[2,5],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[3,5],[3,5],[3,5],[3,5],[3,5],[3,5],[3,6],[3,6],[3,6],[3,6],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[0,6],[5,3],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[0,3],[0,3],[0,3],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,5],[4,5],[4,5],[4,5],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[1,3],[2,6],[2,6],[3,2],[3,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[2,0],[2,0],[2,0],[2,0],[2,0],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,2],[4,3],[4,3],[4,3],[4,5],[4,5],[4,5],[4,5],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[1,6],[5,4],[5,4],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[6,5],[6,5],[6,5],[6,5],[6,5],[4,0],[4,0],[4,0],[4,0],[4,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[2,0],[0,4],[0,4],[0,4],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[0,5],[4,5],[4,5],[4,5],[4,5],[4,5],[4,6],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[1,4
],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,4],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[4,6],[4,6],[4,6],[4,6],[4,6],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[3,4],[3,4],[3,4],[3,5],[3,5],[3,5],[3,5],[5,0],[5,0],[5,0],[2,0],[2,0],[3,0],[3,0],[3,0],[5,3],[5,3],[5,3],[5,3],[5,3],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[2,4],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,5],[4,5],[1,0],[3,0],[3,1],[3,1],[3,1],[3,1],[5,4],[5,4],[5,4],[5,4],[5,4],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[2,6],[5,6],[5,6],[5,6],[6,2],[6,2],[6,3],[6,3],[6,3],[4,0],[4,0],[4,0],[4,0],[4,0],[4,1],[4,1],[4,1],[5,6],[5,6],[5,6],[5,6],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[3,0],[0,5],[0,5],[0,5],[0,6],[0,6],[0,6],[0,6],[0,6],[0,1],[0,1],[4,6],[4,6],[4,6],[4,6],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[3,1],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,5],[1,6],[2,0],[2,0],[5,2],[5,3],[5,3],[5,3],[5,3],[5,1],[5,1],[5,1],[5,1],[5,1],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[2,5],[4,1],[4,1],[4,1],[5,3],[5,3],[5,3],[5,3],[5,3],[2,0],[5,2],[5,2],[5,2],[5,2],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,0],[1,2],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[5,4],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[3,6],[4,1],[4,1],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,1],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[5,6],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[4,0],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,1],[0,2],[0,2],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[5,0],[0,2],[0,2],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[4,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[2,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[4,1],[4,1],[4,2],[4,2],[4,2],[6,3],[6,3],[6,3],[6,3],[6,3],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[5,3],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[4,6],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[5,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,2],[6,2],[6,2],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[6,0],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[1,2],[2,1],[2,3],[2,3],[1,2],[1,2],[1,2],[1,3],[1,3],[1,3],[1,3],[1,3],[3,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[6,1],[3,1],[3,1],[3,1],[3,1],[4,2],[4,2],[4,2],[4,2],[4,2],[4,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[3,2],[6,3],[6,3],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[6,2],[5,1],[5,1],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[5,2],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[5,2],[6,2],[6,2],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[6,3],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,2],[0,3],[1,3],[1,3],[6,2],[6,2],[0,3],[
0,3],[0,3],[0,3],[0,3],[0,3],[0,3],[1,3],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,4],[6,5],[6,5],[2,3],[2,3],[2,3],[2,3],[2,3],[2,3],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,5],[6,2]]},"replicaNumber":1,"threadsNumber":3,"quota":{"ram":293601280000,"rawRAM":41943040000},"basicStats":{"quotaPercentUsed":68.85424936294555,"opsPerSec":5686.789686789687,"diskFetches":0,"itemCount":943239752,"diskUsed":409178772321,"dataUsed":212179309111,"memUsed":202156957464},"evictionPolicy":"valueOnly","bucketCapabilitiesVer":"","bucketCapabilities":["cbhello","touch","couchapi","cccp","xdcrCheckpointing","nodesExt"]}}`
+const nodeBucketStatsResponse string = `{"hostname":"172.94.77.2:8091","hot_keys":[{"name":"anheuser_busch-michelob_ultra_amber","ops":0.0001942501942501943},{"name":"brouwerij_de_gouden_boom-blanche_de_bruges","ops":0.0001942501942501943},{"name":"cigar_city_brewing","ops":0.0001942501942501943},{"name":"eel_river_brewing-climax_california_classic","ops":0.0001942501942501943},{"name":"kaiserdom_privatbrauerei_bamberg","ops":0.0001942501942501943},{"name":"jack_s_brewing-grid_iron_amber_ale","ops":0.0001942501942501943},{"name":"niagara_falls_brewing-maple_wheat","ops":0.0001942501942501943},{"name":"rahr_sons_brewing_company-blind_salamander_pale_ale","ops":0.0001942501942501943},{"name":"middle_ages_brewing-tripel_crown","ops":0.0001942501942501943},{"name":"gordon_biersch_brewing-maibock","ops":0.0001942501942501943}],"op":{"samples":{"couch_total_disk_size":[20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202,20822202],"couch_docs_fragmentation":[83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83],"couch_views_fragmentation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"hit_ratio":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_cache_miss_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_resident_items_rate":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_avg_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_avg_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_replica_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"vb_pending_resident_items_ratio":[100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100,100],"avg_disk_update_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_disk_commit_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_bg_wait_time":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_active_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"avg_replica_timestamp_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bg_wait_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"bytes_read":[151.3,151.3,151.3,151.3,151.3,151.3,151.3,147.6,147.6,147.6,147.6,147.6,147.6,147.6,147.6,147.6,147.6,151.6,151.6,151.6,151.6,151.6,151.6,151.6,151.6,151.6,151.6,138.9,138.9,138.9,138.9,138.9,138.9,138.9,138.9,138.9,138.9,154.8,154.8,154.8,154.8,154.8,154.8,154.8,154.8,154.8,154.8,139.2,139.2,139.2,139.2,139.2,139.2,139.2,139.2,139.2,139.2,154.1,154.1,154.1],"bytes_written":[36915.2,36915.2,36915.2,36915.2,36915.2,36915.2,36915.2,38742.3,38742.3,38742.3,38742.3,38742.3,38742.3,38742.3,38742.3,38742.3,38742.3,37827.3,37827.3,37827.3,37827.3,37827.3,37827.3,37827.3,37827.3,37827.3,37827.3,36561.2,36561.2,36561.2,36561.2,36561.2,36561.2,36561.2,36561.2,36561.2,36561.2,38749.5,38749.5,38749.5,38749.5,38749.5,38749.5,38749.5,38749.5,38749.5,38749.5,37473.3,37473.3,37473.3,37473.3,37473.3,37473.3,37473.3,37473.3,37473.3,37473.3,37259.6,37259.6,37259.6],"cas_badval":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cas_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_get":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_lookup":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cmd_set":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_docs
_actual_disk_size":[20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061,20549061],"couch_docs_data_size":[2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072,2541072],"couch_docs_disk_size":[14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780,14747780],"couch_spatial_data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_spatial_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"couch_views_actual_disk_size":[273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141],"couch_views_data_size":[273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141],"couch_views_disk_size":[273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141],"couch_views_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"
curr_connections":[67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67],"curr_items":[2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475,2475],"curr_items_tot":[4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897,4897],"decr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"decr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"delete_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_commit_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_update_total":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"disk_write_queue":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_active_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_bg_fetched":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_clock_cas_drift_threshold_exceeded":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_read_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_data_write_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_count":[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5],"ep_dcp_2i_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"e
p_dcp_2i_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_2i_producer_count":[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5],"ep_dcp_2i_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_count":[4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4],"ep_dcp_replica_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_replica_producer_count":[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2],"ep_dcp_replica_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_backoff":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_count":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"ep_dcp_views_items_remaining":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_items_sent":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_dcp_views_producer_count":[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],"ep_dcp_views_total_bytes":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_diskqueue_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_flusher_todo":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_item_commit_failed":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_kv_size":[2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370,2665370],"ep_max_size":[209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,2
09715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200,209715200],"ep_mem_high_wat":[178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920,178257920],"ep_mem_low_wat":[157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400,157286400],"ep_meta_data_memory":[457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082,457082],"ep_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_del_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_get_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_ops_set_ret_meta":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_num_value_ejects":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_overhead":[3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424,3650424],"ep_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_ahead_exceptions":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_replica_hlc_drift_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_tmp_oom_errors":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ep_vb_total":[684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684,684],"get_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"get_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_hits":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"incr_misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"mem_used":[25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010296,25010456,25010456,25010456],"misses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"timestamp":[1630694373240,1630694374241,1630694375242,1630694376243,1630694377244,1630694378245,1630694379246,1630694380247,1630694381248,1630694382249,1630694383250,1630694384251,1630694385252,1630694386253,1630694387254,1630694388255,1630694389256,1630694390257,1630694391258,1630694392259,1630694393260,1630694394261,1630694395262,1630694396263,1630694397264,1630694398265,1630694399266,1630694400267,1630694401268,1630694402269,1630694403270,1630694404271,1630694405272,1630694406273,1630694407274,1630694408275,1630694409276,1630694410277,1630694411278,1630694412279,1630694413280,1630694414281,1630694415282,1630694416283,1630694417284,1630694418285,1630694419
286,1630694420287,1630694421288,1630694422289,1630694423290,1630694424291,1630694425292,1630694426293,1630694427294,1630694428295,1630694429296,1630694430297,1630694431298,1630694432299],"vb_active_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_itm_memory":[1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356,1082356],"vb_active_meta_data_memory":[231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761,231761],"vb_active_num":[342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342],"vb_active_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_aborted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_accepted_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_active_sync_write_committed_count":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_curr_items":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_itm_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_meta_data_memory":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_drain":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_pending_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_curr_items":[2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422,2422],"vb_replica_eject":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_itm_memory":[1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366,1067366],"vb_replica_meta_data_memory":[225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321,225321],"vb_replica_num":[342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342,342],"vb_replica_num_non_resident":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_create":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_ops_update":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_drain":[0,0,0,0,0,0,0,0,0
,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_fill":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_replica_queue_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"vb_total_queue_age":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"xdc_ops":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"spatial/d41d8cd98f00b204e9800998ecf8427e/accesses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"spatial/d41d8cd98f00b204e9800998ecf8427e/data_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"spatial/d41d8cd98f00b204e9800998ecf8427e/disk_size":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"views/5a222b8c920aa5e3a28b51ee7eb609a0/accesses":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"views/5a222b8c920aa5e3a28b51ee7eb609a0/data_size":[273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141],"views/5a222b8c920aa5e3a28b51ee7eb609a0/disk_size":[273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141,273141],"allocstall":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_cores_available":[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8],"cpu_irq_rate":[0.2062139125653011,0.2062139125653011,0.2062139125653011,0.2062139125653011,0.2873150409423934,0.2873150409423934,0.2873150409423934,0.2873150409423934,0.2873150409423934,0.2873150409423934,0.2873150409423934,0.2873150409423934,0.2873150409423934,0.2873150409423934,0.1979078314956177,0.1979078314956177,0.1979078314956177,0.1979078314956177,0.1979078314956177,0.1979078314956177,0.1979078314956177,0.1979078314956177,0.1979078314956177,0.1979078314956177,0.1676445934618609,0.1676445934618609,0.1676445934618609,0.1676445934618609,0.1676445934618609,0.1676445934618609,0.1676445934618609,0.1676445934618609,0.1676445934618609,0.1676445934618609,0.1733853489380147,0.1733853489380147,0.1733853489380147,0.1733853489380147,0.1733853489380147,0.1733853489380147,0.1733853489380147,0.1733853489380147,0.1733853489380147,0.1733853489380147,0.1717721156598912,0.1717721156598912,0.1717721156598912,0.1717721156598912,0.1717721156598912,0.17177211565989
12,0.1717721156598912,0.1717721156598912,0.1717721156598912,0.1717721156598912,0.1727115716753022,0.1727115716753022,0.1727115716753022,0.1727115716753022,0.1727115716753022,0.1727115716753022],"cpu_stolen_rate":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"cpu_sys_rate":[3.8355787737146,3.8355787737146,3.8355787737146,3.8355787737146,5.15730498491596,5.15730498491596,5.15730498491596,5.15730498491596,5.15730498491596,5.15730498491596,5.15730498491596,5.15730498491596,5.15730498491596,5.15730498491596,4.368108566581849,4.368108566581849,4.368108566581849,4.368108566581849,4.368108566581849,4.368108566581849,4.368108566581849,4.368108566581849,4.368108566581849,4.368108566581849,4.121262922604079,4.121262922604079,4.121262922604079,4.121262922604079,4.121262922604079,4.121262922604079,4.121262922604079,4.121262922604079,4.121262922604079,4.121262922604079,3.395463083369455,3.395463083369455,3.395463083369455,3.395463083369455,3.395463083369455,3.395463083369455,3.395463083369455,3.395463083369455,3.395463083369455,3.395463083369455,5.969081019181219,5.969081019181219,5.969081019181219,5.969081019181219,5.969081019181219,5.969081019181219,5.969081019181219,5.969081019181219,5.969081019181219,5.969081019181219,4.404145077720207,4.404145077720207,4.404145077720207,4.404145077720207,4.404145077720207,4.404145077720207],"cpu_user_rate":[15.38355787737146,15.38355787737146,15.38355787737146,15.38355787737146,17.48312024134463,17.48312024134463,17.48312024134463,17.48312024134463,17.48312024134463,17.48312024134463,17.48312024134463,17.48312024134463,17.48312024134463,17.48312024134463,17.14729997172745,17.14729997172745,17.14729997172745,17.14729997172745,17.14729997172745,17.14729997172745,17.14729997172745,17.14729997172745,17.14729997172745,17.14729997172745,20.82984073763621,20.82984073763621,20.82984073763621,20.82984073763621,20.82984073763621,20.82984073763621,20.82984073763621,20.82984073763621,20.82984073763621,20.82984073763621,11.40008669267447,11.40008669267447,11.40008669267447,11.40008669267447,11.40008669267447,11.40008669267447,11.40008669267447,11.40008669267447,11.40008669267447,11.40008669267447,20.82736902376181,20.82736902376181,20.82736902376181,20.82736902376181,20.82736902376181,20.82736902376181,20.82736902376181,20.82736902376181,20.82736902376181,20.82736902376181,15.01151410477835,15.01151410477835,15.01151410477835,15.01151410477835,15.01151410477835,15.01151410477835],"cpu_utilization_rate":[19.42535056365136,19.42535056365136,19.42535056365136,19.42535056365136,22.92774026720299,22.92774026720299,22.92774026720299,22.92774026720299,22.92774026720299,22.92774026720299,22.92774026720299,22.92774026720299,22.92774026720299,22.92774026720299,21.71331636980492,21.71331636980492,21.71331636980492,21.71331636980492,21.71331636980492,21.71331636980492,21.71331636980492,21.71331636980492,21.71331636980492,21.71331636980492,25.11874825370215,25.11874825370215,25.11874825370215,25.11874825370215,25.11874825370215,25.11874825370215,25.11874825370215,25.11874825370215,25.11874825370215,25.11874825370215,14.96893512498194,14.96893512498194,14.96893512498194,14.96893512498194,14.96893512498194,14.96893512498194,14.96893512498194,14.96893512498194,14.96893512498194,14.96893512498194,26.96822215860292,26.96822215860292,26.96822215860292,26.96822215860292,26.96822215860292,26.96822215860292,26.96822215860292,26.96822215860292,26.96822215860292,26.96822215860292,19.58837075417386,19.58837075417386,19.588370754
17386,19.58837075417386,19.58837075417386,19.58837075417386],"mem_actual_free":[4297187328,4297187328,4297187328,4297187328,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4279697408,4279697408,4279697408,4279697408,4279697408,4279697408],"mem_actual_used":[12190720000,12190720000,12190720000,12190720000,12184035328,12184035328,12184035328,12184035328,12184035328,12184035328,12184035328,12184035328,12184035328,12184035328,12195545088,12195545088,12195545088,12195545088,12195545088,12195545088,12195545088,12195545088,12195545088,12195545088,12217913344,12217913344,12217913344,12217913344,12217913344,12217913344,12217913344,12217913344,12217913344,12217913344,12177022976,12177022976,12177022976,12177022976,12177022976,12177022976,12177022976,12177022976,12177022976,12177022976,12234674176,12234674176,12234674176,12234674176,12234674176,12234674176,12234674176,12234674176,12234674176,12234674176,12208209920,12208209920,12208209920,12208209920,12208209920,12208209920],"mem_free":[4297187328,4297187328,4297187328,4297187328,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4303872000,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4292362240,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4269993984,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4310884352,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4253233152,4279697408,4279697408,4279697408,4279697408,4279697408,4279697408],"mem_limit":[16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328],"mem_total":[16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,16487907328,164879
07328,16487907328,16487907328,16487907328,16487907328,16487907328],"mem_used_sys":[16111206400,16111206400,16111206400,16111206400,16107708416,16107708416,16107708416,16107708416,16107708416,16107708416,16107708416,16107708416,16107708416,16107708416,16139010048,16139010048,16139010048,16139010048,16139010048,16139010048,16139010048,16139010048,16139010048,16139010048,16140214272,16140214272,16140214272,16140214272,16140214272,16140214272,16140214272,16140214272,16140214272,16140214272,16101511168,16101511168,16101511168,16101511168,16101511168,16101511168,16101511168,16101511168,16101511168,16101511168,16175067136,16175067136,16175067136,16175067136,16175067136,16175067136,16175067136,16175067136,16175067136,16175067136,16130224128,16130224128,16130224128,16130224128,16130224128,16130224128],"swap_total":[2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552,2147479552],"swap_used":[2146836480,2146836480,2146836480,2146836480,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146832384,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146824192,2146799616,2146799616,2146799616,2146799616,2146799616,2146799616,2146799616,2146799616,2146799616,2146799616,2146791424,2146791424,2146791424,2146791424,2146791424,2146791424]},"samplesCount":60,"isPersistent":true,"lastTStamp":1630694432299,"interval":1000}}` diff --git a/plugins/inputs/couchbase/sample.conf b/plugins/inputs/couchbase/sample.conf new file mode 100644 index 0000000000000..ccbdd52025377 --- /dev/null +++ b/plugins/inputs/couchbase/sample.conf @@ -0,0 +1,31 @@ +# Read per-node and per-bucket metrics from Couchbase +[[inputs.couchbase]] + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## http://couchbase-0.example.com/ + ## http://admin:secret@couchbase-0.example.com:8091/ + ## + ## If no servers are specified, then localhost is used as the host. + ## If no protocol is specified, HTTP is used. + ## If no port is specified, 8091 is used. + servers = ["http://localhost:8091"] + + ## Filter bucket fields to include only here. 
+  # bucket_stats_included = ["quota_percent_used", "ops_per_sec", "disk_fetches", "item_count", "disk_used", "data_used", "mem_used"]
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification (defaults to false)
+  ## If set to false, tls_cert and tls_key are required
+  # insecure_skip_verify = false
+
+  ## Whether to collect cluster-wide bucket statistics
+  ## It is recommended to disable this in favor of node_stats
+  ## to get a better view of the cluster.
+  cluster_bucket_stats = true
+
+  ## Whether to collect bucket stats for each individual node
+  node_bucket_stats = false
diff --git a/plugins/inputs/couchdb/README.md b/plugins/inputs/couchdb/README.md
index 3a7f127dbc3db..e2396524eb248 100644
--- a/plugins/inputs/couchdb/README.md
+++ b/plugins/inputs/couchdb/README.md
@@ -2,9 +2,10 @@
 
 The CouchDB plugin gathers metrics of CouchDB using the [_stats] endpoint.
 
-### Configuration
+## Configuration
 
-```toml
+```toml @sample.conf
+# Read CouchDB Stats from one or more servers
 [[inputs.couchdb]]
   ## Works with CouchDB stats endpoints out of the box
   ## Multiple Hosts from which to read CouchDB stats:
@@ -15,7 +16,7 @@ The CouchDB plugin gathers metrics of CouchDB using the [_stats] endpoint.
   # basic_password = "p@ssw0rd"
 ```
 
-### Measurements & Fields:
+## Metrics
 
 Statistics specific to the internals of CouchDB:
@@ -60,19 +61,21 @@ httpd statistics:
 - httpd_bulk_requests
 - httpd_view_reads
 
-### Tags:
+## Tags
 
 - server (url of the couchdb _stats endpoint)
 
-### Example output:
+## Example Output
 
-**Post Couchdb 2.0**
-```
+### Post Couchdb 2.0
+
+```shell
 couchdb,server=http://couchdb22:5984/_node/_local/_stats couchdb_auth_cache_hits_value=0,httpd_request_methods_delete_value=0,couchdb_auth_cache_misses_value=0,httpd_request_methods_get_value=42,httpd_status_codes_304_value=0,httpd_status_codes_400_value=0,httpd_request_methods_head_value=0,httpd_status_codes_201_value=0,couchdb_database_reads_value=0,httpd_request_methods_copy_value=0,couchdb_request_time_max=0,httpd_status_codes_200_value=42,httpd_status_codes_301_value=0,couchdb_open_os_files_value=2,httpd_request_methods_put_value=0,httpd_request_methods_post_value=0,httpd_status_codes_202_value=0,httpd_status_codes_403_value=0,httpd_status_codes_409_value=0,couchdb_database_writes_value=0,couchdb_request_time_min=0,httpd_status_codes_412_value=0,httpd_status_codes_500_value=0,httpd_status_codes_401_value=0,httpd_status_codes_404_value=0,httpd_status_codes_405_value=0,couchdb_open_databases_value=0 1536707179000000000
 ```
 
-**Pre Couchdb 2.0**
-```
+### Pre Couchdb 2.0
+
+```shell
 couchdb,server=http://couchdb16:5984/_stats couchdb_request_time_sum=96,httpd_status_codes_200_sum=37,httpd_status_codes_200_min=0,httpd_requests_mean=0.005,httpd_requests_min=0,couchdb_request_time_stddev=3.833,couchdb_request_time_min=1,httpd_request_methods_get_stddev=0.073,httpd_request_methods_get_min=0,httpd_status_codes_200_mean=0.005,httpd_status_codes_200_max=1,httpd_requests_sum=37,couchdb_request_time_current=96,httpd_request_methods_get_sum=37,httpd_request_methods_get_mean=0.005,httpd_request_methods_get_max=1,httpd_status_codes_200_stddev=0.073,couchdb_request_time_mean=2.595,couchdb_request_time_max=25,httpd_request_methods_get_current=37,httpd_status_codes_200_current=37,httpd_requests_current=37,httpd_requests_stddev=0.073,httpd_requests_max=1 1536707179000000000
 ```
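Reviewer note (not part of the patch): if you want to sanity-check a CouchDB `_stats` endpoint outside Telegraf while reviewing, a throwaway Go snippet along these lines mirrors the request that `fetchAndInsertData` in the next file issues. The URL and credentials are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// Same endpoint shape as the plugin's `hosts` setting.
	req, err := http.NewRequest("GET", "http://localhost:5984/_node/_local/_stats", nil)
	if err != nil {
		panic(err)
	}
	// Optional, matches basic_username / basic_password in the config.
	req.SetBasicAuth("telegraf", "p@ssw0rd")

	client := &http.Client{Timeout: 4 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, len(body), "bytes of stats JSON")
}
```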
diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go
index 1b542d042dd30..a17523a3a8bc8 100644
--- a/plugins/inputs/couchdb/couchdb.go
+++ b/plugins/inputs/couchdb/couchdb.go
@@ -1,6 +1,8 @@
+//go:generate ../../../tools/readme_config_includer/generator
 package couchdb
 
 import (
+    _ "embed"
     "encoding/json"
     "fmt"
     "net/http"
@@ -11,6 +13,10 @@ import (
     "github.com/influxdata/telegraf/plugins/inputs"
 )
 
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
 type (
     metaData struct {
         Current *float64 `json:"current"`
@@ -88,20 +94,8 @@ type (
     }
 )
 
-func (*CouchDB) Description() string {
-    return "Read CouchDB Stats from one or more servers"
-}
-
 func (*CouchDB) SampleConfig() string {
-    return `
-  ## Works with CouchDB stats endpoints out of the box
-  ## Multiple Hosts from which to read CouchDB stats:
-  hosts = ["http://localhost:8086/_stats"]
-
-  ## Use HTTP Basic Authentication.
-  # basic_username = "telegraf"
-  # basic_password = "p@ssw0rd"
-`
+    return sampleConfig
 }
 
 func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error {
@@ -125,9 +119,9 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
     if c.client == nil {
         c.client = &http.Client{
             Transport: &http.Transport{
-                ResponseHeaderTimeout: time.Duration(3 * time.Second),
+                ResponseHeaderTimeout: 3 * time.Second,
             },
-            Timeout: time.Duration(4 * time.Second),
+            Timeout: 4 * time.Second,
         }
     }
 
@@ -140,19 +134,21 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
         req.SetBasicAuth(c.BasicUsername, c.BasicPassword)
     }
 
-    response, error := c.client.Do(req)
-    if error != nil {
-        return error
+    response, err := c.client.Do(req)
+    if err != nil {
+        return err
     }
     defer response.Body.Close()
 
     if response.StatusCode != 200 {
-        return fmt.Errorf("Failed to get stats from couchdb: HTTP responded %d", response.StatusCode)
+        return fmt.Errorf("failed to get stats from couchdb: HTTP responded %d", response.StatusCode)
     }
 
     stats := Stats{}
     decoder := json.NewDecoder(response.Body)
-    decoder.Decode(&stats)
+    if err := decoder.Decode(&stats); err != nil {
+        return fmt.Errorf("failed to decode stats from couchdb: HTTP body %q", response.Body)
+    }
 
     fields := map[string]interface{}{}
 
@@ -287,9 +283,9 @@ func init() {
         return &CouchDB{
             client: &http.Client{
                 Transport: &http.Transport{
-                    ResponseHeaderTimeout: time.Duration(3 * time.Second),
+                    ResponseHeaderTimeout: 3 * time.Second,
                 },
-                Timeout: time.Duration(4 * time.Second),
+                Timeout: 4 * time.Second,
             },
         }
     })
diff --git a/plugins/inputs/couchdb/sample.conf b/plugins/inputs/couchdb/sample.conf
new file mode 100644
index 0000000000000..7a6f23920fbb7
--- /dev/null
+++ b/plugins/inputs/couchdb/sample.conf
@@ -0,0 +1,9 @@
+# Read CouchDB Stats from one or more servers
+[[inputs.couchdb]]
+  ## Works with CouchDB stats endpoints out of the box
+  ## Multiple Hosts from which to read CouchDB stats:
+  hosts = ["http://localhost:8086/_stats"]
+
+  ## Use HTTP Basic Authentication.
+  # basic_username = "telegraf"
+  # basic_password = "p@ssw0rd"
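Reviewer note (not part of the patch): the `Description()` removal and `//go:embed` change above is the same mechanical migration applied to every plugin touched by this sync, so a condensed sketch of the target pattern may save review time. The `example` plugin below is hypothetical; only the two directives and the blank `embed` import are prescribed by the actual change:

```go
//go:generate ../../../tools/readme_config_includer/generator
package example

import (
	_ "embed" // blank import enables the //go:embed directive below

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// The sample.conf file next to this source file is compiled into the
// binary, so the config printed by telegraf and the README's
// `toml @sample.conf` block can no longer drift apart.
//go:embed sample.conf
var sampleConfig string

type Example struct{}

// SampleConfig returns the embedded file instead of an inline string
// literal; Description() is dropped because the first comment line of
// sample.conf now serves as the description.
func (*Example) SampleConfig() string { return sampleConfig }

func (*Example) Gather(telegraf.Accumulator) error { return nil }

func init() {
	inputs.Add("example", func() telegraf.Input { return &Example{} })
}
```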
diff --git a/plugins/inputs/cpu/README.md b/plugins/inputs/cpu/README.md
index bc86ae898021c..a5c1e64a2ef9e 100644
--- a/plugins/inputs/cpu/README.md
+++ b/plugins/inputs/cpu/README.md
@@ -2,20 +2,24 @@
 
 The `cpu` plugin gathers metrics on the system CPUs.
 
-#### Configuration
-```toml
+## Configuration
+
+```toml @sample.conf
+# Read metrics about cpu usage
 [[inputs.cpu]]
   ## Whether to report per-cpu stats or not
   percpu = true
   ## Whether to report total system cpu stats or not
   totalcpu = true
-  ## If true, collect raw CPU time metrics.
+  ## If true, collect raw CPU time metrics
   collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
+  ## If true, compute and report the sum of all non-idle CPU states
   report_active = false
+  ## If true and the info is available then add core_id and physical_id tags
+  core_tags = false
 ```
 
-### Metrics
+## Metrics
 
 On Linux, consult `man proc` for details on the meanings of these values.
@@ -46,14 +50,15 @@ On Linux, consult `man proc` for details on the meanings of these values.
   - usage_guest (float, percent)
   - usage_guest_nice (float, percent)
 
-### Troubleshooting
+## Troubleshooting
 
 On Linux systems the `/proc/stat` file is used to gather CPU times.
 Percentages are based on the last 2 samples.
+Tags core_id and physical_id are read from `/proc/cpuinfo` on Linux systems.
 
-### Example Output
+## Example Output
 
-```
+```shell
 cpu,cpu=cpu0,host=loaner time_active=202224.15999999992,time_guest=30250.35,time_guest_nice=0,time_idle=1527035.04,time_iowait=1352,time_irq=0,time_nice=169.28,time_softirq=6281.4,time_steal=0,time_system=40097.14,time_user=154324.34 1568760922000000000
 cpu,cpu=cpu0,host=loaner usage_active=31.249999981810106,usage_guest=2.083333333080696,usage_guest_nice=0,usage_idle=68.7500000181899,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666161392,usage_user=25.000000002273737 1568760922000000000
 cpu,cpu=cpu1,host=loaner time_active=201890.02000000002,time_guest=30508.41,time_guest_nice=0,time_idle=264641.18,time_iowait=210.44,time_irq=0,time_nice=181.75,time_softirq=4537.88,time_steal=0,time_system=39480.7,time_user=157479.25 1568760922000000000
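Reviewer note (not part of the patch): the README's one-line remark that "Percentages are based on the last 2 samples" hides the arithmetic that the renamed `Gather` method below performs between two polls. This is a small, hypothetical sketch of that delta computation; the sample values are made up, but the `100 * delta / totalDelta` formula matches the code in the diff:

```go
package main

import "fmt"

// cpuSample holds cumulative counters as read from /proc/stat (in ticks).
type cpuSample struct {
	user, system, idle float64
}

func total(s cpuSample) float64 { return s.user + s.system + s.idle }

func main() {
	prev := cpuSample{user: 100, system: 50, idle: 850} // first poll
	cur := cpuSample{user: 130, system: 60, idle: 1010} // second poll

	totalDelta := total(cur) - total(prev) // 200 ticks elapsed
	if totalDelta < 0 {
		// Counters went backwards (e.g. after a reboot); the plugin
		// returns an error in this case rather than emitting garbage.
		panic("current total CPU time is less than previous total CPU time")
	}

	fmt.Printf("usage_user=%.1f%%\n", 100*(cur.user-prev.user)/totalDelta)       // 15.0%
	fmt.Printf("usage_system=%.1f%%\n", 100*(cur.system-prev.system)/totalDelta) // 5.0%
	fmt.Printf("usage_idle=%.1f%%\n", 100*(cur.idle-prev.idle)/totalDelta)       // 80.0%
}
```

This is also why the first gather after startup emits no `usage_*` fields: there is no previous sample to difference against yet.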
diff --git a/plugins/inputs/cpu/cpu.go b/plugins/inputs/cpu/cpu.go
index e073309e47e3b..b71a4012318d9 100644
--- a/plugins/inputs/cpu/cpu.go
+++ b/plugins/inputs/cpu/cpu.go
@@ -1,23 +1,36 @@
+//go:generate ../../../tools/readme_config_includer/generator
 package cpu
 
 import (
+    _ "embed"
     "fmt"
     "time"
 
+    cpuUtil "github.com/shirou/gopsutil/v3/cpu"
+
     "github.com/influxdata/telegraf"
     "github.com/influxdata/telegraf/plugins/inputs"
     "github.com/influxdata/telegraf/plugins/inputs/system"
-    "github.com/shirou/gopsutil/cpu"
 )
 
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
 type CPUStats struct {
-    ps        system.PS
-    lastStats map[string]cpu.TimesStat
+    ps         system.PS
+    lastStats  map[string]cpuUtil.TimesStat
+    cpuInfo    map[string]cpuUtil.InfoStat
+    coreID     bool
+    physicalID bool
 
     PerCPU         bool `toml:"percpu"`
     TotalCPU       bool `toml:"totalcpu"`
     CollectCPUTime bool `toml:"collect_cpu_time"`
     ReportActive   bool `toml:"report_active"`
+    CoreTags       bool `toml:"core_tags"`
+
+    Log telegraf.Logger `toml:"-"`
 }
 
 func NewCPUStats(ps system.PS) *CPUStats {
@@ -28,27 +41,12 @@ func NewCPUStats(ps system.PS) *CPUStats {
     }
 }
 
-func (_ *CPUStats) Description() string {
-    return "Read metrics about cpu usage"
-}
-
-var sampleConfig = `
-  ## Whether to report per-cpu stats or not
-  percpu = true
-  ## Whether to report total system cpu stats or not
-  totalcpu = true
-  ## If true, collect raw CPU time metrics.
-  collect_cpu_time = false
-  ## If true, compute and report the sum of all non-idle CPU states.
-  report_active = false
-`
-
-func (_ *CPUStats) SampleConfig() string {
+func (*CPUStats) SampleConfig() string {
     return sampleConfig
 }
 
-func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
-    times, err := s.ps.CPUTimes(s.PerCPU, s.TotalCPU)
+func (c *CPUStats) Gather(acc telegraf.Accumulator) error {
+    times, err := c.ps.CPUTimes(c.PerCPU, c.TotalCPU)
     if err != nil {
         return fmt.Errorf("error getting CPU info: %s", err)
     }
@@ -58,11 +56,17 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
         tags := map[string]string{
             "cpu": cts.CPU,
         }
+        if c.coreID {
+            tags["core_id"] = c.cpuInfo[cts.CPU].CoreID
+        }
+        if c.physicalID {
+            tags["physical_id"] = c.cpuInfo[cts.CPU].PhysicalID
+        }
 
-        total := totalCpuTime(cts)
-        active := activeCpuTime(cts)
+        total := totalCPUTime(cts)
+        active := activeCPUTime(cts)
 
-        if s.CollectCPUTime {
+        if c.CollectCPUTime {
             // Add cpu time metrics
             fieldsC := map[string]interface{}{
                 "time_user":       cts.User,
@@ -76,28 +80,28 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
                 "time_guest":      cts.Guest,
                 "time_guest_nice": cts.GuestNice,
             }
-            if s.ReportActive {
-                fieldsC["time_active"] = activeCpuTime(cts)
+            if c.ReportActive {
+                fieldsC["time_active"] = activeCPUTime(cts)
             }
             acc.AddCounter("cpu", fieldsC, tags, now)
         }
 
         // Add in percentage
-        if len(s.lastStats) == 0 {
+        if len(c.lastStats) == 0 {
             // If it's the 1st gather, can't get CPU Usage stats yet
             continue
         }
 
-        lastCts, ok := s.lastStats[cts.CPU]
+        lastCts, ok := c.lastStats[cts.CPU]
         if !ok {
             continue
         }
-        lastTotal := totalCpuTime(lastCts)
-        lastActive := activeCpuTime(lastCts)
+        lastTotal := totalCPUTime(lastCts)
+        lastActive := activeCPUTime(lastCts)
         totalDelta := total - lastTotal
 
         if totalDelta < 0 {
-            err = fmt.Errorf("Error: current total CPU time is less than previous total CPU time")
+            err = fmt.Errorf("current total CPU time is less than previous total CPU time")
             break
         }
 
@@ -117,28 +121,46 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error {
             "usage_guest":      100 * (cts.Guest - lastCts.Guest) / totalDelta,
             "usage_guest_nice": 100 * (cts.GuestNice - lastCts.GuestNice) / totalDelta,
         }
-        if s.ReportActive {
+        if c.ReportActive {
             fieldsG["usage_active"] = 100 * (active - lastActive) / totalDelta
         }
         acc.AddGauge("cpu", fieldsG, tags, now)
     }
 
-    s.lastStats = make(map[string]cpu.TimesStat)
+    c.lastStats = make(map[string]cpuUtil.TimesStat)
     for _, cts := range times {
-        s.lastStats[cts.CPU] = cts
+        c.lastStats[cts.CPU] = cts
     }
 
     return err
 }
 
-func totalCpuTime(t cpu.TimesStat) float64 {
-    total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal +
-        t.Idle
+func (c *CPUStats) Init() error {
+    if c.CoreTags {
+        cpuInfo, err := cpuUtil.Info()
+        if err == nil {
+            c.coreID = cpuInfo[0].CoreID != ""
+            c.physicalID = cpuInfo[0].PhysicalID != ""
+
+            c.cpuInfo = make(map[string]cpuUtil.InfoStat)
+            for _, ci := range cpuInfo {
+                c.cpuInfo[fmt.Sprintf("cpu%d", ci.CPU)] = ci
+            }
+        } else {
+            c.Log.Warnf("Failed to gather info about CPUs: %s", err)
+        }
+    }
+
+    return nil
+}
+
+func totalCPUTime(t cpuUtil.TimesStat) float64 {
+    total := t.User + t.System + t.Nice + t.Iowait + t.Irq + t.Softirq + t.Steal + t.Idle
     return total
 }
 
-func activeCpuTime(t cpu.TimesStat) float64 {
-    active := totalCpuTime(t) - t.Idle
+func activeCPUTime(t cpuUtil.TimesStat) float64 {
+    active := totalCPUTime(t) - t.Idle
     return active
 }
diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go
index bf356ec7b945c..3dc3242a6ed94 100644
--- a/plugins/inputs/cpu/cpu_test.go
+++
b/plugins/inputs/cpu/cpu_test.go @@ -4,11 +4,11 @@ import ( "fmt" "testing" + cpuUtil "github.com/shirou/gopsutil/v3/cpu" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestCPUStats(t *testing.T) { @@ -16,7 +16,7 @@ func TestCPUStats(t *testing.T) { defer mps.AssertExpectations(t) var acc testutil.Accumulator - cts := cpu.TimesStat{ + cts := cpuUtil.TimesStat{ CPU: "cpu0", User: 8.8, System: 8.2, @@ -30,7 +30,7 @@ func TestCPUStats(t *testing.T) { GuestNice: 0.324, } - cts2 := cpu.TimesStat{ + cts2 := cpuUtil.TimesStat{ CPU: "cpu0", User: 24.9, // increased by 16.1 System: 10.9, // increased by 2.7 @@ -44,62 +44,58 @@ func TestCPUStats(t *testing.T) { GuestNice: 2.524, // increased by 2.2 } - mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) + mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil) cs := NewCPUStats(&mps) - cputags := map[string]string{ - "cpu": "cpu0", - } - err := cs.Gather(&acc) require.NoError(t, err) // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_active", 19.9, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 1.3, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.8389, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 0.6, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.11, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.0511, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 3.1, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 8.8, 0) + assertContainsTaggedFloat(t, &acc, "time_system", 8.2, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 80.1, 0) + assertContainsTaggedFloat(t, &acc, "time_active", 19.9, 0.0005) + assertContainsTaggedFloat(t, &acc, "time_nice", 1.3, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 0.8389, 0) + assertContainsTaggedFloat(t, &acc, "time_irq", 0.6, 0) + assertContainsTaggedFloat(t, &acc, "time_softirq", 0.11, 0) + assertContainsTaggedFloat(t, &acc, "time_steal", 0.0511, 0) + assertContainsTaggedFloat(t, &acc, "time_guest", 3.1, 0) + assertContainsTaggedFloat(t, &acc, "time_guest_nice", 0.324, 0) mps2 := system.MockPS{} - mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) + mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil) cs.ps = &mps2 // Should have added cpu percentages too err = cs.Gather(&acc) require.NoError(t, err) - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 24.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 10.9, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 157.9798, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_active", 42.0202, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_nice", 3.5, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 0.929, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_irq", 1.2, 0, cputags) - 
assertContainsTaggedFloat(t, &acc, "cpu", "time_softirq", 0.31, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_steal", 0.2812, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 11.4, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 2.524, 0, cputags) - - assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 7.8, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_system", 2.7, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 77.8798, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_active", 22.1202, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_nice", 0, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 0.0901, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_irq", 0.6, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_softirq", 0.2, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_steal", 0.2301, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest", 8.3, 0.0005, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 24.9, 0) + assertContainsTaggedFloat(t, &acc, "time_system", 10.9, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 157.9798, 0) + assertContainsTaggedFloat(t, &acc, "time_active", 42.0202, 0.0005) + assertContainsTaggedFloat(t, &acc, "time_nice", 3.5, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 0.929, 0) + assertContainsTaggedFloat(t, &acc, "time_irq", 1.2, 0) + assertContainsTaggedFloat(t, &acc, "time_softirq", 0.31, 0) + assertContainsTaggedFloat(t, &acc, "time_steal", 0.2812, 0) + assertContainsTaggedFloat(t, &acc, "time_guest", 11.4, 0) + assertContainsTaggedFloat(t, &acc, "time_guest_nice", 2.524, 0) + + assertContainsTaggedFloat(t, &acc, "usage_user", 7.8, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_system", 2.7, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_idle", 77.8798, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_active", 22.1202, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_nice", 0, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_iowait", 0.0901, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_irq", 0.6, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_softirq", 0.2, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_steal", 0.2301, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_guest", 8.3, 0.0005) + assertContainsTaggedFloat(t, &acc, "usage_guest_nice", 2.2, 0.0005) } // Asserts that a given accumulator contains a measurement of type float64 with @@ -109,24 +105,21 @@ func TestCPUStats(t *testing.T) { // Parameters: // t *testing.T : Testing object to use // acc testutil.Accumulator: Accumulator to examine -// measurement string : Name of the measurement to examine +// field string : Name of field to examine // expectedValue float64 : Value to search for within the measurement // delta float64 : Maximum acceptable distance of an accumulated value // from the expectedValue parameter. Useful when // floating-point arithmetic imprecision makes looking // for an exact match impractical -// tags map[string]string : Tag set the found measurement must have. Set to nil to -// ignore the tag set. 
func assertContainsTaggedFloat( t *testing.T, acc *testutil.Accumulator, - measurement string, field string, expectedValue float64, delta float64, - tags map[string]string, ) { var actualValue float64 + measurement := "cpu" // always cpu for _, pt := range acc.Metrics { if pt.Measurement == measurement { for fieldname, value := range pt.Fields { @@ -138,8 +131,7 @@ func assertContainsTaggedFloat( return } } else { - assert.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", - measurement)) + require.Fail(t, fmt.Sprintf("Measurement \"%s\" does not have type float64", measurement)) } } } @@ -148,7 +140,7 @@ func assertContainsTaggedFloat( msg := fmt.Sprintf( "Could not find measurement \"%s\" with requested tags within %f of %f, Actual: %f", measurement, delta, expectedValue, actualValue) - assert.Fail(t, msg) + require.Fail(t, msg) } // TestCPUCountChange tests that no errors are encountered if the number of @@ -162,7 +154,7 @@ func TestCPUCountIncrease(t *testing.T) { cs := NewCPUStats(&mps) mps.On("CPUTimes").Return( - []cpu.TimesStat{ + []cpuUtil.TimesStat{ { CPU: "cpu0", }, @@ -172,7 +164,7 @@ func TestCPUCountIncrease(t *testing.T) { require.NoError(t, err) mps2.On("CPUTimes").Return( - []cpu.TimesStat{ + []cpuUtil.TimesStat{ { CPU: "cpu0", }, @@ -193,46 +185,42 @@ func TestCPUTimesDecrease(t *testing.T) { defer mps.AssertExpectations(t) var acc testutil.Accumulator - cts := cpu.TimesStat{ + cts := cpuUtil.TimesStat{ CPU: "cpu0", User: 18, Idle: 80, Iowait: 2, } - cts2 := cpu.TimesStat{ + cts2 := cpuUtil.TimesStat{ CPU: "cpu0", User: 38, // increased by 20 Idle: 40, // decreased by 40 Iowait: 1, // decreased by 1 } - cts3 := cpu.TimesStat{ + cts3 := cpuUtil.TimesStat{ CPU: "cpu0", User: 56, // increased by 18 Idle: 120, // increased by 80 Iowait: 3, // increased by 2 } - mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) + mps.On("CPUTimes").Return([]cpuUtil.TimesStat{cts}, nil) cs := NewCPUStats(&mps) - cputags := map[string]string{ - "cpu": "cpu0", - } - err := cs.Gather(&acc) require.NoError(t, err) // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision - assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) - assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags) + assertContainsTaggedFloat(t, &acc, "time_user", 18, 0) + assertContainsTaggedFloat(t, &acc, "time_idle", 80, 0) + assertContainsTaggedFloat(t, &acc, "time_iowait", 2, 0) mps2 := system.MockPS{} - mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) + mps2.On("CPUTimes").Return([]cpuUtil.TimesStat{cts2}, nil) cs.ps = &mps2 // CPU times decreased. 
An error should be raised
@@ -240,17 +228,17 @@ func TestCPUTimesDecrease(t *testing.T) {
     require.Error(t, err)
 
     mps3 := system.MockPS{}
-    mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil)
+    mps3.On("CPUTimes").Return([]cpuUtil.TimesStat{cts3}, nil)
     cs.ps = &mps3
 
     err = cs.Gather(&acc)
     require.NoError(t, err)
 
-    assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 56, 0, cputags)
-    assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 120, 0, cputags)
-    assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 3, 0, cputags)
+    assertContainsTaggedFloat(t, &acc, "time_user", 56, 0)
+    assertContainsTaggedFloat(t, &acc, "time_idle", 120, 0)
+    assertContainsTaggedFloat(t, &acc, "time_iowait", 3, 0)
 
-    assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 18, 0.0005, cputags)
-    assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 80, 0.0005, cputags)
-    assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 2, 0.0005, cputags)
+    assertContainsTaggedFloat(t, &acc, "usage_user", 18, 0.0005)
+    assertContainsTaggedFloat(t, &acc, "usage_idle", 80, 0.0005)
+    assertContainsTaggedFloat(t, &acc, "usage_iowait", 2, 0.0005)
 }
diff --git a/plugins/inputs/cpu/sample.conf b/plugins/inputs/cpu/sample.conf
new file mode 100644
index 0000000000000..b0254b95de8ca
--- /dev/null
+++ b/plugins/inputs/cpu/sample.conf
@@ -0,0 +1,12 @@
+# Read metrics about cpu usage
+[[inputs.cpu]]
+  ## Whether to report per-cpu stats or not
+  percpu = true
+  ## Whether to report total system cpu stats or not
+  totalcpu = true
+  ## If true, collect raw CPU time metrics
+  collect_cpu_time = false
+  ## If true, compute and report the sum of all non-idle CPU states
+  report_active = false
+  ## If true and the info is available then add core_id and physical_id tags
+  core_tags = false
diff --git a/plugins/inputs/csgo/README.md b/plugins/inputs/csgo/README.md
new file mode 100644
index 0000000000000..776ccf2500582
--- /dev/null
+++ b/plugins/inputs/csgo/README.md
@@ -0,0 +1,40 @@
+# Counter-Strike: Global Offensive (CSGO) Input Plugin
+
+The `csgo` plugin gathers metrics from Counter-Strike: Global Offensive servers.
+
+## Configuration
+
+```toml @sample.conf
+# Fetch metrics from a CSGO SRCDS
+[[inputs.csgo]]
+  ## Specify servers using the following format:
+  ##    servers = [
+  ##      ["ip1:port1", "rcon_password1"],
+  ##      ["ip2:port2", "rcon_password2"],
+  ##    ]
+  #
+  ## If no servers are specified, no data will be collected
+  servers = []
+```
+
+## Metrics
+
+The plugin retrieves the output of the `stats` command that is executed via
+rcon.
+
+If no servers are specified, no data will be collected.
+
+- csgo
+  - tags:
+    - host
+  - fields:
+    - cpu (float)
+    - net_in (float)
+    - net_out (float)
+    - uptime_minutes (float)
+    - maps (float)
+    - fps (float)
+    - players (float)
+    - sv_ms (float)
+    - variance_ms (float)
+    - tick_ms (float)
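Reviewer note (not part of the patch): for anyone unfamiliar with SRCDS, the rcon `stats` command returns a small two-row table (a header row, then a single row of values), and that shape is exactly what the parser in the new csgo.go below keys on. A self-contained, hypothetical sketch of the parsing step, with the sample output borrowed from the new csgo_test.go fixture:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Typical `stats` response: header row, then one row of values.
	resp := "CPU NetIn NetOut Uptime Maps FPS Players Svms +-ms ~tick\n" +
		"10.0 1.2 3.4 100 1 120.20 15 5.23 0.01 0.02"

	rows := strings.Split(resp, "\n")
	if len(rows) < 2 {
		panic("bad response")
	}

	// The plugin requires exactly 10 whitespace-separated columns.
	cols := strings.Fields(rows[1])
	if len(cols) != 10 {
		panic("bad response")
	}

	names := []string{"cpu", "net_in", "net_out", "uptime_minutes", "maps",
		"fps", "players", "sv_ms", "variance_ms", "tick_ms"}
	for i, c := range cols {
		v, err := strconv.ParseFloat(c, 64)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s=%g\n", names[i], v)
	}
}
```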
diff --git a/plugins/inputs/csgo/csgo.go b/plugins/inputs/csgo/csgo.go
new file mode 100644
index 0000000000000..1c05f467859e0
--- /dev/null
+++ b/plugins/inputs/csgo/csgo.go
@@ -0,0 +1,184 @@
+//go:generate ../../../tools/readme_config_includer/generator
+package csgo
+
+import (
+    _ "embed"
+    "encoding/json"
+    "errors"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+
+    "github.com/james4k/rcon"
+
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
+type statsData struct {
+    CPU           float64 `json:"cpu"`
+    NetIn         float64 `json:"net_in"`
+    NetOut        float64 `json:"net_out"`
+    UptimeMinutes float64 `json:"uptime_minutes"`
+    Maps          float64 `json:"maps"`
+    FPS           float64 `json:"fps"`
+    Players       float64 `json:"players"`
+    Sim           float64 `json:"sv_ms"`
+    Variance      float64 `json:"variance_ms"`
+    Tick          float64 `json:"tick_ms"`
+}
+
+type CSGO struct {
+    Servers [][]string `toml:"servers"`
+}
+
+func (*CSGO) SampleConfig() string {
+    return sampleConfig
+}
+
+func (s *CSGO) Gather(acc telegraf.Accumulator) error {
+    var wg sync.WaitGroup
+
+    // Loop through each server and collect metrics
+    for _, server := range s.Servers {
+        wg.Add(1)
+        go func(ss []string) {
+            defer wg.Done()
+            acc.AddError(s.gatherServer(acc, ss, requestServer))
+        }(server)
+    }
+
+    wg.Wait()
+    return nil
+}
+
+func init() {
+    inputs.Add("csgo", func() telegraf.Input {
+        return &CSGO{}
+    })
+}
+
+func (s *CSGO) gatherServer(
+    acc telegraf.Accumulator,
+    server []string,
+    request func(string, string) (string, error),
+) error {
+    if len(server) != 2 {
+        return errors.New("incorrect server config")
+    }
+
+    url, rconPw := server[0], server[1]
+    resp, err := request(url, rconPw)
+    if err != nil {
+        return err
+    }
+
+    rows := strings.Split(resp, "\n")
+    if len(rows) < 2 {
+        return errors.New("bad response")
+    }
+
+    fields := strings.Fields(rows[1])
+    if len(fields) != 10 {
+        return errors.New("bad response")
+    }
+
+    cpu, err := strconv.ParseFloat(fields[0], 32)
+    if err != nil {
+        return err
+    }
+    netIn, err := strconv.ParseFloat(fields[1], 64)
+    if err != nil {
+        return err
+    }
+    netOut, err := strconv.ParseFloat(fields[2], 64)
+    if err != nil {
+        return err
+    }
+    uptimeMinutes, err := strconv.ParseFloat(fields[3], 64)
+    if err != nil {
+        return err
+    }
+    maps, err := strconv.ParseFloat(fields[4], 64)
+    if err != nil {
+        return err
+    }
+    fps, err := strconv.ParseFloat(fields[5], 64)
+    if err != nil {
+        return err
+    }
+    players, err := strconv.ParseFloat(fields[6], 64)
+    if err != nil {
+        return err
+    }
+    svms, err := strconv.ParseFloat(fields[7], 64)
+    if err != nil {
+        return err
+    }
+    msVar, err := strconv.ParseFloat(fields[8], 64)
+    if err != nil {
+        return err
+    }
+    tick, err := strconv.ParseFloat(fields[9], 64)
+    if err != nil {
+        return err
+    }
+
+    now := time.Now()
+    stats := statsData{
+        CPU:           cpu,
+        NetIn:         netIn,
+        NetOut:        netOut,
+        UptimeMinutes: uptimeMinutes,
+        Maps:          maps,
+        FPS:           fps,
+        Players:       players,
+        Sim:           svms,
+        Variance:      msVar,
+        Tick:          tick,
+    }
+
+    tags := map[string]string{
+        "host": url,
+    }
+
+    var statsMap map[string]interface{}
+    marshalled, err := json.Marshal(stats)
+    if err != nil {
+        return err
+    }
+    err = json.Unmarshal(marshalled, &statsMap)
+    if err != nil {
+        return err
+    }
+
+    acc.AddGauge("csgo", statsMap, tags, now)
+    return nil
+}
+
+func requestServer(url string, rconPw string) (string, error) {
+    remoteConsole, err := rcon.Dial(url, rconPw)
+    if err != nil {
+        return "", err
+    }
+    defer remoteConsole.Close()
+
+    reqID, err := remoteConsole.Write("stats")
+    if err != nil {
+        return "", err
+    }
+
+    resp, respReqID, err := remoteConsole.Read()
+    if err != nil {
+        return "", err
+    } else if reqID != respReqID {
+        return "", errors.New("response/request mismatch")
+    } else {
+        return resp, nil
+    }
+}
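Reviewer note (not part of the patch): one design choice in csgo.go worth calling out is that instead of building the fields map by hand, the plugin round-trips `statsData` through encoding/json so the `json` struct tags double as metric field names. A minimal, hypothetical illustration of that trick:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type statsData struct {
	CPU     float64 `json:"cpu"`
	Players float64 `json:"players"`
}

func main() {
	stats := statsData{CPU: 10.0, Players: 15}

	// Marshal the struct, then unmarshal into a generic map: the json
	// tags become the field names in one step, with no duplication.
	raw, err := json.Marshal(stats)
	if err != nil {
		panic(err)
	}
	var fields map[string]interface{}
	if err := json.Unmarshal(raw, &fields); err != nil {
		panic(err)
	}

	fmt.Println(fields) // map[cpu:10 players:15]
}
```

The round-trip costs an allocation per gather and coerces every number to float64, which is harmless here because all ten fields are declared as floats anyway.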
+package csgo + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +const testInput = `CPU NetIn NetOut Uptime Maps FPS Players Svms +-ms ~tick +10.0 1.2 3.4 100 1 120.20 15 5.23 0.01 0.02` + +var ( + expectedOutput = statsData{ + 10.0, 1.2, 3.4, 100.0, 1, 120.20, 15, 5.23, 0.01, 0.02, + } +) + +func TestCPUStats(t *testing.T) { + c := NewCSGOStats() + var acc testutil.Accumulator + err := c.gatherServer(&acc, c.Servers[0], requestMock) + if err != nil { + t.Error(err) + } + + if !acc.HasMeasurement("csgo") { + t.Errorf("acc.HasMeasurement: expected csgo") + } + + require.Equal(t, "1.2.3.4:1234", acc.Metrics[0].Tags["host"]) + require.Equal(t, expectedOutput.CPU, acc.Metrics[0].Fields["cpu"]) + require.Equal(t, expectedOutput.NetIn, acc.Metrics[0].Fields["net_in"]) + require.Equal(t, expectedOutput.NetOut, acc.Metrics[0].Fields["net_out"]) + require.Equal(t, expectedOutput.UptimeMinutes, acc.Metrics[0].Fields["uptime_minutes"]) + require.Equal(t, expectedOutput.Maps, acc.Metrics[0].Fields["maps"]) + require.Equal(t, expectedOutput.FPS, acc.Metrics[0].Fields["fps"]) + require.Equal(t, expectedOutput.Players, acc.Metrics[0].Fields["players"]) + require.Equal(t, expectedOutput.Sim, acc.Metrics[0].Fields["sv_ms"]) + require.Equal(t, expectedOutput.Variance, acc.Metrics[0].Fields["variance_ms"]) + require.Equal(t, expectedOutput.Tick, acc.Metrics[0].Fields["tick_ms"]) +} + +func requestMock(_ string, _ string) (string, error) { + return testInput, nil +} + +func NewCSGOStats() *CSGO { + return &CSGO{ + Servers: [][]string{ + {"1.2.3.4:1234", "password"}, + }, + } +} diff --git a/plugins/inputs/csgo/sample.conf b/plugins/inputs/csgo/sample.conf new file mode 100644 index 0000000000000..696c1b8b2179d --- /dev/null +++ b/plugins/inputs/csgo/sample.conf @@ -0,0 +1,10 @@ +# Fetch metrics from a CSGO SRCDS +[[inputs.csgo]] + ## Specify servers using the following format: + ## servers = [ + ## ["ip1:port1", "rcon_password1"], + ## ["ip2:port2", "rcon_password2"], + ## ] + # + ## If no servers are specified, no data will be collected + servers = [] diff --git a/plugins/inputs/dcos/README.md b/plugins/inputs/dcos/README.md index 790590aeaf94b..77e1af37e8a91 100644 --- a/plugins/inputs/dcos/README.md +++ b/plugins/inputs/dcos/README.md @@ -1,8 +1,9 @@ # DC/OS Input Plugin -This input plugin gathers metrics from a DC/OS cluster's [metrics component](https://docs.mesosphere.com/1.10/metrics/). +This input plugin gathers metrics from a DC/OS cluster's [metrics +component](https://docs.mesosphere.com/1.10/metrics/). -**Series Cardinality Warning** +## Series Cardinality Warning Depending on the work load of your DC/OS cluster, this plugin can quickly create a high number of series which, when unchecked, can cause high load on @@ -13,16 +14,15 @@ your database. options to exclude unneeded tags. - Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/). -- Limit series cardinality in your database using the - [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and - [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings. - Consider using the [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/). 
- Monitor your databases [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality). -### Configuration: -```toml +## Configuration + +```toml @sample.conf +# Input plugin for DC/OS metrics [[inputs.dcos]] ## The DC/OS cluster URL. cluster_url = "https://dcos-master-1" @@ -66,28 +66,32 @@ your database. # path = ["/var/lib/mesos/slave/slaves/*"] ``` -#### Enterprise Authentication +### Enterprise Authentication When using Enterprise DC/OS, it is recommended to use a service account to authenticate with the cluster. The plugin requires the following permissions: -``` + +```text dcos:adminrouter:ops:system-metrics full dcos:adminrouter:ops:mesos full ``` -Follow the directions to [create a service account and assign permissions](https://docs.mesosphere.com/1.10/security/service-auth/custom-service-auth/). +Follow the directions to [create a service account and assign permissions][1]. Quick configuration using the Enterprise CLI: -``` + +```text dcos security org service-accounts keypair telegraf-sa-key.pem telegraf-sa-cert.pem dcos security org service-accounts create -p telegraf-sa-cert.pem -d "Telegraf DC/OS input plugin" telegraf dcos security org users grant telegraf dcos:adminrouter:ops:system-metrics full dcos security org users grant telegraf dcos:adminrouter:ops:mesos full ``` -#### Open Source Authentication +[1]: https://docs.mesosphere.com/1.10/security/service-auth/custom-service-auth/ + +### Open Source Authentication The Open Source DC/OS does not provide service accounts. Instead you can use of the following options: @@ -98,7 +102,8 @@ of the following options: Then `token_file` can be set by using the [dcos cli] to login periodically. The cli can login for at most XXX days, you will need to ensure the cli performs a new login before this time expires. -``` + +```shell dcos auth login --username foo --password bar dcos config show core.dcos_acs_token > ~/.dcos/token ``` @@ -108,12 +113,14 @@ cluster secret. This will allow you to set the expiration date manually or even create a never expiring token. However, if the cluster secret or the token is compromised it cannot be revoked and may require a full reinstall of the cluster. For more information on this technique reference -[this blog post](https://medium.com/@richardgirges/authenticating-open-source-dc-os-with-third-party-services-125fa33a5add). +[this blog post][2]. + +[2]: https://medium.com/@richardgirges/authenticating-open-source-dc-os-with-third-party-services-125fa33a5add -### Metrics: +## Metrics -Please consult the [Metrics Reference](https://docs.mesosphere.com/1.10/metrics/reference/) -for details about field interpretation. +Please consult the [Metrics Reference][3] for details about field +interpretation. - dcos_node - tags: @@ -188,9 +195,11 @@ for details about field interpretation. 
- fields: - fields are application specific -### Example Output: +[3]: https://docs.mesosphere.com/1.10/metrics/reference/ -``` +## Example Output + +```shell dcos_node,cluster=enterprise,hostname=192.168.122.18,path=/boot filesystem_capacity_free_bytes=918188032i,filesystem_capacity_total_bytes=1063256064i,filesystem_capacity_used_bytes=145068032i,filesystem_inode_free=523958,filesystem_inode_total=524288,filesystem_inode_used=330 1511859222000000000 dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=dummy0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000 dcos_node,cluster=enterprise,hostname=192.168.122.18,interface=docker0 network_in_bytes=0i,network_in_dropped=0,network_in_errors=0,network_in_packets=0,network_out_bytes=0i,network_out_dropped=0,network_out_errors=0,network_out_packets=0 1511859222000000000 diff --git a/plugins/inputs/dcos/client.go b/plugins/inputs/dcos/client.go index 8f171638a5844..34ab30ea52274 100644 --- a/plugins/inputs/dcos/client.go +++ b/plugins/inputs/dcos/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - jwt "github.com/dgrijalva/jwt-go/v4" + "github.com/golang-jwt/jwt/v4" ) const ( @@ -92,16 +92,15 @@ type AuthToken struct { // ClusterClient is a Client that uses the cluster URL. type ClusterClient struct { - clusterURL *url.URL - httpClient *http.Client - credentials *Credentials - token string - semaphore chan struct{} + clusterURL *url.URL + httpClient *http.Client + token string + semaphore chan struct{} } type claims struct { UID string `json:"uid"` - jwt.StandardClaims + jwt.RegisteredClaims } func (e APIError) Error() string { @@ -157,7 +156,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok return nil, err } - loc := c.url("/acs/api/v1/auth/login") + loc := c.toURL("/acs/api/v1/auth/login") req, err := http.NewRequest("POST", loc, bytes.NewBuffer(octets)) if err != nil { return nil, err @@ -209,7 +208,7 @@ func (c *ClusterClient) Login(ctx context.Context, sa *ServiceAccount) (*AuthTok func (c *ClusterClient) GetSummary(ctx context.Context) (*Summary, error) { summary := &Summary{} - err := c.doGet(ctx, c.url("/mesos/master/state-summary"), summary) + err := c.doGet(ctx, c.toURL("/mesos/master/state-summary"), summary) if err != nil { return nil, err } @@ -221,7 +220,7 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta list := []string{} path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers", node) - err := c.doGet(ctx, c.url(path), &list) + err := c.doGet(ctx, c.toURL(path), &list) if err != nil { return nil, err } @@ -229,16 +228,15 @@ func (c *ClusterClient) GetContainers(ctx context.Context, node string) ([]Conta containers := make([]Container, 0, len(list)) for _, c := range list { containers = append(containers, Container{ID: c}) - } return containers, nil } -func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, error) { +func (c *ClusterClient) getMetrics(ctx context.Context, address string) (*Metrics, error) { metrics := &Metrics{} - err := c.doGet(ctx, url, metrics) + err := c.doGet(ctx, address, metrics) if err != nil { return nil, err } @@ -248,21 +246,21 @@ func (c *ClusterClient) getMetrics(ctx context.Context, url string) (*Metrics, e func (c *ClusterClient) GetNodeMetrics(ctx context.Context, node string) (*Metrics, error) { path := 
fmt.Sprintf("/system/v1/agent/%s/metrics/v0/node", node) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } func (c *ClusterClient) GetContainerMetrics(ctx context.Context, node, container string) (*Metrics, error) { path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s", node, container) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } func (c *ClusterClient) GetAppMetrics(ctx context.Context, node, container string) (*Metrics, error) { path := fmt.Sprintf("/system/v1/agent/%s/metrics/v0/containers/%s/app", node, container) - return c.getMetrics(ctx, c.url(path)) + return c.getMetrics(ctx, c.toURL(path)) } -func createGetRequest(url string, token string) (*http.Request, error) { - req, err := http.NewRequest("GET", url, nil) +func createGetRequest(address string, token string) (*http.Request, error) { + req, err := http.NewRequest("GET", address, nil) if err != nil { return nil, err } @@ -275,8 +273,8 @@ func createGetRequest(url string, token string) (*http.Request, error) { return req, nil } -func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) error { - req, err := createGetRequest(url, c.token) +func (c *ClusterClient) doGet(ctx context.Context, address string, v interface{}) error { + req, err := createGetRequest(address, c.token) if err != nil { return err } @@ -294,6 +292,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er return err } defer func() { + //nolint:errcheck,revive // we cannot do anything if the closing fails resp.Body.Close() <-c.semaphore }() @@ -305,7 +304,7 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er if resp.StatusCode < 200 || resp.StatusCode >= 300 { return &APIError{ - URL: url, + URL: address, StatusCode: resp.StatusCode, Title: resp.Status, } @@ -319,18 +318,18 @@ func (c *ClusterClient) doGet(ctx context.Context, url string, v interface{}) er return err } -func (c *ClusterClient) url(path string) string { - url := *c.clusterURL - url.Path = path - return url.String() +func (c *ClusterClient) toURL(path string) string { + clusterURL := *c.clusterURL + clusterURL.Path = path + return clusterURL.String() } func (c *ClusterClient) createLoginToken(sa *ServiceAccount) (string, error) { token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims{ UID: sa.AccountID, - StandardClaims: jwt.StandardClaims{ + RegisteredClaims: jwt.RegisteredClaims{ // How long we have to login with this token - ExpiresAt: jwt.At(time.Now().Add(5 * time.Minute)), + ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Minute * 5)), }, }) return token.SignedString(sa.PrivateKey) diff --git a/plugins/inputs/dcos/client_test.go b/plugins/inputs/dcos/client_test.go index 0b7772dccb994..70cf9ce7cfccd 100644 --- a/plugins/inputs/dcos/client_test.go +++ b/plugins/inputs/dcos/client_test.go @@ -8,7 +8,7 @@ import ( "net/url" "testing" - jwt "github.com/dgrijalva/jwt-go/v4" + jwt "github.com/golang-jwt/jwt/v4" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -142,7 +142,6 @@ func TestGetSummary(t *testing.T) { require.Equal(t, tt.expectedValue, summary) }) } - } func TestGetNodeMetrics(t *testing.T) { @@ -184,7 +183,6 @@ func TestGetNodeMetrics(t *testing.T) { require.Equal(t, tt.expectedValue, m) }) } - } func TestGetContainerMetrics(t *testing.T) { @@ -226,5 +224,4 @@ func TestGetContainerMetrics(t *testing.T) { require.Equal(t, tt.expectedValue, m) }) } - } diff --git 
a/plugins/inputs/dcos/creds.go b/plugins/inputs/dcos/creds.go index 0178315bb7076..328ce394a4cf6 100644 --- a/plugins/inputs/dcos/creds.go +++ b/plugins/inputs/dcos/creds.go @@ -4,7 +4,7 @@ import ( "context" "crypto/rsa" "fmt" - "io/ioutil" + "os" "strings" "time" "unicode/utf8" @@ -47,13 +47,13 @@ func (c *ServiceAccount) IsExpired() bool { return c.auth.Text != "" || c.auth.Expire.Add(relogDuration).After(time.Now()) } -func (c *TokenCreds) Token(ctx context.Context, client Client) (string, error) { - octets, err := ioutil.ReadFile(c.Path) +func (c *TokenCreds) Token(_ context.Context, _ Client) (string, error) { + octets, err := os.ReadFile(c.Path) if err != nil { - return "", fmt.Errorf("Error reading token file %q: %s", c.Path, err) + return "", fmt.Errorf("error reading token file %q: %s", c.Path, err) } if !utf8.Valid(octets) { - return "", fmt.Errorf("Token file does not contain utf-8 encoded text: %s", c.Path) + return "", fmt.Errorf("token file does not contain utf-8 encoded text: %s", c.Path) } token := strings.TrimSpace(string(octets)) return token, nil @@ -63,7 +63,7 @@ func (c *TokenCreds) IsExpired() bool { return true } -func (c *NullCreds) Token(ctx context.Context, client Client) (string, error) { +func (c *NullCreds) Token(_ context.Context, _ Client) (string, error) { return "", nil } diff --git a/plugins/inputs/dcos/dcos.go b/plugins/inputs/dcos/dcos.go index 1cdd40f1112fc..34014d0c5a5b8 100644 --- a/plugins/inputs/dcos/dcos.go +++ b/plugins/inputs/dcos/dcos.go @@ -1,22 +1,29 @@ +//go:generate ../../../tools/readme_config_includer/generator package dcos import ( "context" - "io/ioutil" + _ "embed" "net/url" + "os" "sort" "strings" "sync" "time" - jwt "github.com/dgrijalva/jwt-go/v4" + "github.com/golang-jwt/jwt/v4" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( defaultMaxConnections = 10 defaultResponseTimeout = 20 * time.Second @@ -56,7 +63,7 @@ type DCOS struct { AppExclude []string MaxConnections int - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client Client @@ -66,57 +73,9 @@ type DCOS struct { nodeFilter filter.Filter containerFilter filter.Filter appFilter filter.Filter - taskNameFilter filter.Filter } -func (d *DCOS) Description() string { - return "Input plugin for DC/OS metrics" -} - -var sampleConfig = ` - ## The DC/OS cluster URL. - cluster_url = "https://dcos-ee-master-1" - - ## The ID of the service account. - service_account_id = "telegraf" - ## The private key file for the service account. - service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" - - ## Path containing login token. If set, will read on every gather. - # token_file = "/home/dcos/.dcos/token" - - ## In all filter options if both include and exclude are empty all items - ## will be collected. Arrays may contain glob patterns. - ## - ## Node IDs to collect metrics from. If a node is excluded, no metrics will - ## be collected for its containers or apps. - # node_include = [] - # node_exclude = [] - ## Container IDs to collect container metrics from. - # container_include = [] - # container_exclude = [] - ## Container IDs to collect app metrics from. 
- # app_include = [] - # app_exclude = [] - - ## Maximum concurrent connections to the cluster. - # max_connections = 10 - ## Maximum time to receive a response from cluster. - # response_timeout = "20s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## If false, skip chain & host verification - # insecure_skip_verify = true - - ## Recommended filtering to reduce series cardinality. - # [inputs.dcos.tagdrop] - # path = ["/var/lib/mesos/slave/slaves/*"] -` - -func (d *DCOS) SampleConfig() string { +func (*DCOS) SampleConfig() string { return sampleConfig } @@ -223,10 +182,10 @@ type point struct { fields map[string]interface{} } -func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point { +func (d *DCOS) createPoints(m *Metrics) []*point { points := make(map[string]*point) for _, dp := range m.Datapoints { - fieldKey := strings.Replace(dp.Name, ".", "_", -1) + fieldKey := strings.ReplaceAll(dp.Name, ".", "_") tags := dp.Tags if tags == nil { @@ -237,9 +196,7 @@ func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point { fieldKey = fieldKey + "_bytes" } - if strings.HasPrefix(fieldKey, "dcos_metrics_module_") { - fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_") - } + fieldKey = strings.TrimPrefix(fieldKey, "dcos_metrics_module_") tagset := make([]string, 0, len(tags)) for k, v := range tags { @@ -288,7 +245,7 @@ func (d *DCOS) createPoints(acc telegraf.Accumulator, m *Metrics) []*point { func (d *DCOS) addMetrics(acc telegraf.Accumulator, cluster, mname string, m *Metrics, tagDimensions []string) { tm := time.Now() - points := d.createPoints(acc, m) + points := d.createPoints(m) for _, p := range points { tags := make(map[string]string) @@ -353,14 +310,14 @@ func (d *DCOS) createClient() (Client, error) { return nil, err } - url, err := url.Parse(d.ClusterURL) + address, err := url.Parse(d.ClusterURL) if err != nil { return nil, err } client := NewClusterClient( - url, - d.ResponseTimeout.Duration, + address, + time.Duration(d.ResponseTimeout), d.MaxConnections, tlsCfg, ) @@ -370,7 +327,7 @@ func (d *DCOS) createClient() (Client, error) { func (d *DCOS) createCredentials() (Credentials, error) { if d.ServiceAccountID != "" && d.ServiceAccountPrivateKey != "" { - bs, err := ioutil.ReadFile(d.ServiceAccountPrivateKey) + bs, err := os.ReadFile(d.ServiceAccountPrivateKey) if err != nil { return nil, err } @@ -422,10 +379,8 @@ func (d *DCOS) createFilters() error { func init() { inputs.Add("dcos", func() telegraf.Input { return &DCOS{ - MaxConnections: defaultMaxConnections, - ResponseTimeout: internal.Duration{ - Duration: defaultResponseTimeout, - }, + MaxConnections: defaultMaxConnections, + ResponseTimeout: config.Duration(defaultResponseTimeout), } }) } diff --git a/plugins/inputs/dcos/dcos_test.go b/plugins/inputs/dcos/dcos_test.go index 3914fa5777714..828fd0af647ab 100644 --- a/plugins/inputs/dcos/dcos_test.go +++ b/plugins/inputs/dcos/dcos_test.go @@ -203,7 +203,6 @@ func TestAddNodeMetrics(t *testing.T) { } }) } - } func TestAddContainerMetrics(t *testing.T) { diff --git a/plugins/inputs/dcos/sample.conf b/plugins/inputs/dcos/sample.conf new file mode 100644 index 0000000000000..40afecfc13a49 --- /dev/null +++ b/plugins/inputs/dcos/sample.conf @@ -0,0 +1,42 @@ +# Input plugin for DC/OS metrics +[[inputs.dcos]] + ## The DC/OS cluster URL. + cluster_url = "https://dcos-master-1" + + ## The ID of the service account. 
+ service_account_id = "telegraf" + ## The private key file for the service account. + service_account_private_key = "/etc/telegraf/telegraf-sa-key.pem" + + ## Path containing login token. If set, will read on every gather. + # token_file = "/home/dcos/.dcos/token" + + ## In all filter options if both include and exclude are empty all items + ## will be collected. Arrays may contain glob patterns. + ## + ## Node IDs to collect metrics from. If a node is excluded, no metrics will + ## be collected for its containers or apps. + # node_include = [] + # node_exclude = [] + ## Container IDs to collect container metrics from. + # container_include = [] + # container_exclude = [] + ## Container IDs to collect app metrics from. + # app_include = [] + # app_exclude = [] + + ## Maximum concurrent connections to the cluster. + # max_connections = 10 + ## Maximum time to receive a response from cluster. + # response_timeout = "20s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## If false, skip chain & host verification + # insecure_skip_verify = true + + ## Recommended filtering to reduce series cardinality. + # [inputs.dcos.tagdrop] + # path = ["/var/lib/mesos/slave/slaves/*"] diff --git a/plugins/inputs/deprecations.go b/plugins/inputs/deprecations.go new file mode 100644 index 0000000000000..9c9a056344633 --- /dev/null +++ b/plugins/inputs/deprecations.go @@ -0,0 +1,56 @@ +package inputs + +import "github.com/influxdata/telegraf" + +// Deprecations lists the deprecated plugins +var Deprecations = map[string]telegraf.DeprecationInfo{ + "cassandra": { + Since: "1.7.0", + Notice: "use 'inputs.jolokia2' with the 'cassandra.conf' example configuration instead", + }, + "cisco_telemetry_gnmi": { + Since: "1.15.0", + Notice: "has been renamed to 'gnmi'", + }, + "http_listener": { + Since: "1.9.0", + Notice: "has been renamed to 'influxdb_listener', use 'inputs.influxdb_listener' or 'inputs.http_listener_v2' instead", + }, + "httpjson": { + Since: "1.6.0", + Notice: "use 'inputs.http' instead", + }, + "io": { + Since: "0.10.0", + RemovalIn: "2.0.0", + Notice: "use 'inputs.diskio' instead", + }, + "jolokia": { + Since: "1.5.0", + Notice: "use 'inputs.jolokia2' instead", + }, + "kafka_consumer_legacy": { + Since: "1.4.0", + Notice: "use 'inputs.kafka_consumer' instead, NOTE: 'kafka_consumer' only supports Kafka v0.8+", + }, + "KNXListener": { + Since: "1.20.1", + Notice: "has been renamed to 'knx_listener'", + }, + "logparser": { + Since: "1.15.0", + Notice: "use 'inputs.tail' with 'grok' data format instead", + }, + "snmp_legacy": { + Since: "1.0.0", + Notice: "use 'inputs.snmp' instead", + }, + "tcp_listener": { + Since: "1.3.0", + Notice: "use 'inputs.socket_listener' instead", + }, + "udp_listener": { + Since: "1.3.0", + Notice: "use 'inputs.socket_listener' instead", + }, +} diff --git a/plugins/inputs/directory_monitor/README.md b/plugins/inputs/directory_monitor/README.md new file mode 100644 index 0000000000000..08d0ead074de9 --- /dev/null +++ b/plugins/inputs/directory_monitor/README.md @@ -0,0 +1,71 @@ +# Directory Monitor Input Plugin + +This plugin monitors a single directory (without looking at sub-directories), +and takes in each file placed in the directory. The plugin will gather all +files in the directory at the configured interval, and parse the ones that +haven't been picked up yet. 
+
+This plugin is intended to read files that are moved or copied to the
+monitored directory, so the files must not be in use by another process, or
+they may fail to be gathered. Be advised that the plugin picks a file up only
+after it has sat in the directory for the configurable
+`directory_duration_threshold`, so files should not be written 'live' to the
+monitored directory. If you absolutely must write files directly, they must be
+guaranteed to finish writing before the `directory_duration_threshold`
+elapses (a write-then-rename sketch is shown below).
+
+## Configuration
+
+```toml @sample.conf
+# Ingests files in a directory and then moves them to a target directory.
+[[inputs.directory_monitor]]
+ ## The directory to monitor and read files from.
+ directory = ""
+ #
+ ## The directory to move finished files to.
+ finished_directory = ""
+ #
+ ## The directory to move files to upon file error.
+ ## If not provided, erroring files will stay in the monitored directory.
+ # error_directory = ""
+ #
+ ## The amount of time a file is allowed to sit in the directory before it is picked up.
+ ## This time can generally be low, but if you choose to have a very large file written to the directory and it's potentially slow,
+ ## set this higher so that the plugin will wait until the file is fully copied to the directory.
+ # directory_duration_threshold = "50ms"
+ #
+ ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+ # files_to_monitor = ["^.*\.csv"]
+ #
+ ## A list of files to ignore, if necessary. Supports regex.
+ # files_to_ignore = [".DS_Store"]
+ #
+ ## Maximum lines of the file to process that have not yet been written by the
+ ## output. For best throughput set to the size of the output's metric_buffer_limit.
+ ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
+ # max_buffered_metrics = 10000
+ #
+ ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+ ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary.
+ # file_queue_size = 100000
+ #
+ ## Name a tag containing the name of the file the data was parsed from. Leave empty
+ ## to disable. Be cautious when file name variation is high; this can increase the cardinality
+ ## significantly. Read more about cardinality here:
+ ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
+ # file_tag = ""
+ #
+ ## Specify if the file can be read completely at once or if it needs to be read line by line (default).
+ ## Possible values: "line-by-line", "at-once"
+ # parse_method = "line-by-line"
+ #
+ ## The data format to be read from the files.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
+```
+
+## Metrics
+
+The format of metrics produced by this plugin depends on the content and data
+format of the file.
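Because the plugin assumes files arrive fully written, a producer can write to a staging path and then rename the file into the monitored directory; on the same filesystem a rename is atomic, so the plugin never observes a half-written file. A minimal sketch of that pattern (the paths here are hypothetical, not plugin defaults):

```go
package main

import (
	"os"
	"path/filepath"
)

func main() {
	staging := "/var/spool/staging"    // hypothetical staging directory
	monitored := "/var/spool/telegraf" // hypothetical monitored directory

	// Write the complete file outside the monitored directory first.
	tmp := filepath.Join(staging, "metrics.csv")
	if err := os.WriteFile(tmp, []byte("thing,color\nsky,blue\n"), 0644); err != nil {
		panic(err)
	}

	// Rename is atomic when both paths live on the same filesystem, so the
	// plugin can never pick the file up in a half-written state.
	if err := os.Rename(tmp, filepath.Join(monitored, "metrics.csv")); err != nil {
		panic(err)
	}
}
```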
diff --git a/plugins/inputs/directory_monitor/directory_monitor.go b/plugins/inputs/directory_monitor/directory_monitor.go new file mode 100644 index 0000000000000..fdfb397ee7cbb --- /dev/null +++ b/plugins/inputs/directory_monitor/directory_monitor.go @@ -0,0 +1,410 @@ +//go:generate ../../../tools/readme_config_includer/generator +package directory_monitor + +import ( + "bufio" + "compress/gzip" + "context" + _ "embed" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "sync" + "time" + + "github.com/djherbis/times" + "golang.org/x/sync/semaphore" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" + "github.com/influxdata/telegraf/selfstat" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +var ( + defaultFilesToMonitor = []string{} + defaultFilesToIgnore = []string{} + defaultMaxBufferedMetrics = 10000 + defaultDirectoryDurationThreshold = config.Duration(0 * time.Millisecond) + defaultFileQueueSize = 100000 + defaultParseMethod = "line-by-line" +) + +type DirectoryMonitor struct { + Directory string `toml:"directory"` + FinishedDirectory string `toml:"finished_directory"` + ErrorDirectory string `toml:"error_directory"` + FileTag string `toml:"file_tag"` + + FilesToMonitor []string `toml:"files_to_monitor"` + FilesToIgnore []string `toml:"files_to_ignore"` + MaxBufferedMetrics int `toml:"max_buffered_metrics"` + DirectoryDurationThreshold config.Duration `toml:"directory_duration_threshold"` + Log telegraf.Logger `toml:"-"` + FileQueueSize int `toml:"file_queue_size"` + ParseMethod string `toml:"parse_method"` + + filesInUse sync.Map + cancel context.CancelFunc + context context.Context + parserFunc parsers.ParserFunc + filesProcessed selfstat.Stat + filesDropped selfstat.Stat + waitGroup *sync.WaitGroup + acc telegraf.TrackingAccumulator + sem *semaphore.Weighted + fileRegexesToMatch []*regexp.Regexp + fileRegexesToIgnore []*regexp.Regexp + filesToProcess chan string +} + +func (*DirectoryMonitor) SampleConfig() string { + return sampleConfig +} + +func (monitor *DirectoryMonitor) Gather(_ telegraf.Accumulator) error { + // Get all files sitting in the directory. + files, err := os.ReadDir(monitor.Directory) + if err != nil { + return fmt.Errorf("unable to monitor the targeted directory: %w", err) + } + + for _, file := range files { + filePath := monitor.Directory + "/" + file.Name() + + // We've been cancelled via Stop(). + if monitor.context.Err() != nil { + //nolint:nilerr // context cancelation is not an error + return nil + } + + stat, err := times.Stat(filePath) + if err != nil { + continue + } + + timeThresholdExceeded := time.Since(stat.AccessTime()) >= time.Duration(monitor.DirectoryDurationThreshold) + + // If file is decaying, process it. + if timeThresholdExceeded { + monitor.processFile(file) + } + } + + return nil +} + +func (monitor *DirectoryMonitor) Start(acc telegraf.Accumulator) error { + // Use tracking to determine when more metrics can be added without overflowing the outputs. + monitor.acc = acc.WithTracking(monitor.MaxBufferedMetrics) + go func() { + for range monitor.acc.Delivered() { + monitor.sem.Release(1) + } + }() + + // Monitor the files channel and read what they receive. 
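+ // Incrementing the WaitGroup before launching the goroutine guarantees that
+ // Stop() can Wait() for Monitor() to finish draining in-flight files.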
+ monitor.waitGroup.Add(1) + go func() { + monitor.Monitor() + monitor.waitGroup.Done() + }() + + return nil +} + +func (monitor *DirectoryMonitor) Stop() { + // Before stopping, wrap up all file-reading routines. + monitor.cancel() + close(monitor.filesToProcess) + monitor.Log.Warnf("Exiting the Directory Monitor plugin. Waiting to quit until all current files are finished.") + monitor.waitGroup.Wait() +} + +func (monitor *DirectoryMonitor) Monitor() { + for filePath := range monitor.filesToProcess { + if monitor.context.Err() != nil { + return + } + + // Prevent goroutines from taking the same file as another. + if _, exists := monitor.filesInUse.LoadOrStore(filePath, true); exists { + continue + } + + monitor.read(filePath) + + // We've finished reading the file and moved it away, delete it from files in use. + monitor.filesInUse.Delete(filePath) + } +} + +func (monitor *DirectoryMonitor) processFile(file os.DirEntry) { + if file.IsDir() { + return + } + + filePath := monitor.Directory + "/" + file.Name() + + // File must be configured to be monitored, if any configuration... + if !monitor.isMonitoredFile(file.Name()) { + return + } + + // ...and should not be configured to be ignored. + if monitor.isIgnoredFile(file.Name()) { + return + } + + select { + case monitor.filesToProcess <- filePath: + default: + } +} + +func (monitor *DirectoryMonitor) read(filePath string) { + // Open, read, and parse the contents of the file. + err := monitor.ingestFile(filePath) + if _, isPathError := err.(*os.PathError); isPathError { + return + } + + // Handle a file read error. We don't halt execution but do document, log, and move the problematic file. + if err != nil { + monitor.Log.Errorf("Error while reading file: '" + filePath + "'. " + err.Error()) + monitor.filesDropped.Incr(1) + if monitor.ErrorDirectory != "" { + monitor.moveFile(filePath, monitor.ErrorDirectory) + } + return + } + + // File is finished, move it to the 'finished' directory. + monitor.moveFile(filePath, monitor.FinishedDirectory) + monitor.filesProcessed.Incr(1) +} + +func (monitor *DirectoryMonitor) ingestFile(filePath string) error { + file, err := os.Open(filePath) + if err != nil { + return err + } + defer file.Close() + + parser, err := monitor.parserFunc() + if err != nil { + return fmt.Errorf("creating parser: %w", err) + } + + // Handle gzipped files. 
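+ // Files ending in .gz are wrapped in a gzip reader; everything else is read
+ // as-is. The underlying file handle is closed by the deferred Close above.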
+ var reader io.Reader + if filepath.Ext(filePath) == ".gz" { + reader, err = gzip.NewReader(file) + if err != nil { + return err + } + } else { + reader = file + } + + return monitor.parseFile(parser, reader, file.Name()) +} + +func (monitor *DirectoryMonitor) parseFile(parser parsers.Parser, reader io.Reader, fileName string) error { + var splitter bufio.SplitFunc + + // Decide on how to split the file + switch monitor.ParseMethod { + case "at-once": + return monitor.parseAtOnce(parser, reader, fileName) + case "line-by-line": + splitter = bufio.ScanLines + default: + return fmt.Errorf("unknown parse method %q", monitor.ParseMethod) + } + + scanner := bufio.NewScanner(reader) + scanner.Split(splitter) + + for scanner.Scan() { + metrics, err := monitor.parseMetrics(parser, scanner.Bytes(), fileName) + if err != nil { + return err + } + + if err := monitor.sendMetrics(metrics); err != nil { + return err + } + } + + return scanner.Err() +} + +func (monitor *DirectoryMonitor) parseAtOnce(parser parsers.Parser, reader io.Reader, fileName string) error { + bytes, err := io.ReadAll(reader) + if err != nil { + return err + } + + metrics, err := monitor.parseMetrics(parser, bytes, fileName) + if err != nil { + return err + } + + return monitor.sendMetrics(metrics) +} + +func (monitor *DirectoryMonitor) parseMetrics(parser parsers.Parser, line []byte, fileName string) (metrics []telegraf.Metric, err error) { + switch parser.(type) { + case *csv.Parser: + metrics, err = parser.Parse(line) + if err != nil { + if errors.Is(err, io.EOF) { + return nil, nil + } + return nil, err + } + default: + metrics, err = parser.Parse(line) + } + + if monitor.FileTag != "" { + for _, m := range metrics { + m.AddTag(monitor.FileTag, filepath.Base(fileName)) + } + } + + return metrics, err +} + +func (monitor *DirectoryMonitor) sendMetrics(metrics []telegraf.Metric) error { + // Report the metrics for the file. + for _, m := range metrics { + // Block until metric can be written. + if err := monitor.sem.Acquire(monitor.context, 1); err != nil { + return err + } + monitor.acc.AddTrackingMetricGroup([]telegraf.Metric{m}) + } + return nil +} + +func (monitor *DirectoryMonitor) moveFile(filePath string, directory string) { + err := os.Rename(filePath, directory+"/"+filepath.Base(filePath)) + + if err != nil { + monitor.Log.Errorf("Error while moving file '" + filePath + "' to another directory. Error: " + err.Error()) + } +} + +func (monitor *DirectoryMonitor) isMonitoredFile(fileName string) bool { + if len(monitor.fileRegexesToMatch) == 0 { + return true + } + + // Only monitor matching files. + for _, regex := range monitor.fileRegexesToMatch { + if regex.MatchString(fileName) { + return true + } + } + + return false +} + +func (monitor *DirectoryMonitor) isIgnoredFile(fileName string) bool { + // Skip files that are set to be ignored. + for _, regex := range monitor.fileRegexesToIgnore { + if regex.MatchString(fileName) { + return true + } + } + + return false +} + +func (monitor *DirectoryMonitor) SetParserFunc(fn parsers.ParserFunc) { + monitor.parserFunc = fn +} + +func (monitor *DirectoryMonitor) Init() error { + if monitor.Directory == "" || monitor.FinishedDirectory == "" { + return errors.New("missing one of the following required config options: directory, finished_directory") + } + + if monitor.FileQueueSize <= 0 { + return errors.New("file queue size needs to be more than 0") + } + + // Finished directory can be created if not exists for convenience. 
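+ // Note that os.Mkdir creates only the leaf directory; a missing parent
+ // directory surfaces as an error here.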
+ if _, err := os.Stat(monitor.FinishedDirectory); os.IsNotExist(err) { + err = os.Mkdir(monitor.FinishedDirectory, 0755) + if err != nil { + return err + } + } + + monitor.filesDropped = selfstat.Register("directory_monitor", "files_dropped", map[string]string{}) + monitor.filesProcessed = selfstat.Register("directory_monitor", "files_processed", map[string]string{}) + + // If an error directory should be used but has not been configured yet, create one ourselves. + if monitor.ErrorDirectory != "" { + if _, err := os.Stat(monitor.ErrorDirectory); os.IsNotExist(err) { + err := os.Mkdir(monitor.ErrorDirectory, 0755) + if err != nil { + return err + } + } + } + + monitor.waitGroup = &sync.WaitGroup{} + monitor.sem = semaphore.NewWeighted(int64(monitor.MaxBufferedMetrics)) + monitor.context, monitor.cancel = context.WithCancel(context.Background()) + monitor.filesToProcess = make(chan string, monitor.FileQueueSize) + + // Establish file matching / exclusion regexes. + for _, matcher := range monitor.FilesToMonitor { + regex, err := regexp.Compile(matcher) + if err != nil { + return err + } + monitor.fileRegexesToMatch = append(monitor.fileRegexesToMatch, regex) + } + + for _, matcher := range monitor.FilesToIgnore { + regex, err := regexp.Compile(matcher) + if err != nil { + return err + } + monitor.fileRegexesToIgnore = append(monitor.fileRegexesToIgnore, regex) + } + + if err := choice.Check(monitor.ParseMethod, []string{"line-by-line", "at-once"}); err != nil { + return fmt.Errorf("config option parse_method: %w", err) + } + + return nil +} + +func init() { + inputs.Add("directory_monitor", func() telegraf.Input { + return &DirectoryMonitor{ + FilesToMonitor: defaultFilesToMonitor, + FilesToIgnore: defaultFilesToIgnore, + MaxBufferedMetrics: defaultMaxBufferedMetrics, + DirectoryDurationThreshold: defaultDirectoryDurationThreshold, + FileQueueSize: defaultFileQueueSize, + ParseMethod: defaultParseMethod, + } + }) +} diff --git a/plugins/inputs/directory_monitor/directory_monitor_test.go b/plugins/inputs/directory_monitor/directory_monitor_test.go new file mode 100644 index 0000000000000..27b4bb37f68cf --- /dev/null +++ b/plugins/inputs/directory_monitor/directory_monitor_test.go @@ -0,0 +1,459 @@ +package directory_monitor + +import ( + "bytes" + "compress/gzip" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" + "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/testutil" +) + +func TestCreator(t *testing.T) { + creator, found := inputs.Inputs["directory_monitor"] + require.True(t, found) + + expected := &DirectoryMonitor{ + FilesToMonitor: defaultFilesToMonitor, + FilesToIgnore: defaultFilesToIgnore, + MaxBufferedMetrics: defaultMaxBufferedMetrics, + DirectoryDurationThreshold: defaultDirectoryDurationThreshold, + FileQueueSize: defaultFileQueueSize, + ParseMethod: defaultParseMethod, + } + + require.Equal(t, expected, creator()) +} + +func TestCSVGZImport(t *testing.T) { + acc := testutil.Accumulator{} + testCsvFile := "test.csv" + testCsvGzFile := "test.csv.gz" + + // Establish process directory and finished directory. + finishedDirectory := t.TempDir() + processDirectory := t.TempDir() + + // Init plugin. 
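+ // The defaults are spelled out because the test constructs the struct
+ // directly instead of going through the factory registered in init().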
+ r := DirectoryMonitor{
+ Directory: processDirectory,
+ FinishedDirectory: finishedDirectory,
+ MaxBufferedMetrics: defaultMaxBufferedMetrics,
+ FileQueueSize: defaultFileQueueSize,
+ ParseMethod: defaultParseMethod,
+ }
+ err := r.Init()
+ require.NoError(t, err)
+
+ r.SetParserFunc(func() (parsers.Parser, error) {
+ parser := csv.Parser{
+ HeaderRowCount: 1,
+ }
+ err := parser.Init()
+ return &parser, err
+ })
+ r.Log = testutil.Logger{}
+
+ // Write the CSV file into the 'process' directory.
+ f, err := os.Create(filepath.Join(processDirectory, testCsvFile))
+ require.NoError(t, err)
+ _, err = f.WriteString("thing,color\nsky,blue\ngrass,green\nclifford,red\n")
+ require.NoError(t, err)
+ err = f.Close()
+ require.NoError(t, err)
+
+ // Write the csv.gz file into the 'process' directory.
+ var b bytes.Buffer
+ w := gzip.NewWriter(&b)
+ _, err = w.Write([]byte("thing,color\nsky,blue\ngrass,green\nclifford,red\n"))
+ require.NoError(t, err)
+ err = w.Close()
+ require.NoError(t, err)
+ err = os.WriteFile(filepath.Join(processDirectory, testCsvGzFile), b.Bytes(), 0666)
+ require.NoError(t, err)
+
+ // Start plugin before adding file.
+ err = r.Start(&acc)
+ require.NoError(t, err)
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(6)
+ r.Stop()
+
+ // Verify that we read both files once.
+ require.Equal(t, len(acc.Metrics), 6)
+
+ // Both files should have been moved to the finished directory, as configured.
+ _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile))
+ require.NoError(t, err)
+
+ _, err = os.Stat(filepath.Join(finishedDirectory, testCsvGzFile))
+ require.NoError(t, err)
+}
+
+func TestMultipleJSONFileImports(t *testing.T) {
+ acc := testutil.Accumulator{}
+ testJSONFile := "test.json"
+
+ // Establish process directory and finished directory.
+ finishedDirectory := t.TempDir()
+ processDirectory := t.TempDir()
+
+ // Init plugin.
+ r := DirectoryMonitor{
+ Directory: processDirectory,
+ FinishedDirectory: finishedDirectory,
+ MaxBufferedMetrics: defaultMaxBufferedMetrics,
+ FileQueueSize: defaultFileQueueSize,
+ ParseMethod: defaultParseMethod,
+ }
+ err := r.Init()
+ require.NoError(t, err)
+
+ r.SetParserFunc(func() (parsers.Parser, error) {
+ p := &json.Parser{NameKey: "Name"}
+ err := p.Init()
+ return p, err
+ })
+
+ // Write a 5-line, line-delimited JSON file into the 'process' directory.
+ f, err := os.Create(filepath.Join(processDirectory, testJSONFile))
+ require.NoError(t, err)
+ _, err = f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}\n{\"Name\": \"event2\",\"Speed\": 500,\"Length\": 1.4}\n{\"Name\": \"event3\",\"Speed\": 200,\"Length\": 10.23}\n{\"Name\": \"event4\",\"Speed\": 80,\"Length\": 250}\n{\"Name\": \"event5\",\"Speed\": 120.77,\"Length\": 25.97}")
+ require.NoError(t, err)
+ err = f.Close()
+ require.NoError(t, err)
+
+ err = r.Start(&acc)
+ r.Log = testutil.Logger{}
+ require.NoError(t, err)
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(5)
+ r.Stop()
+
+ // Verify that we read each JSON line once to a single metric.
+ require.Equal(t, len(acc.Metrics), 5)
+}
+
+func TestFileTag(t *testing.T) {
+ acc := testutil.Accumulator{}
+ testJSONFile := "test.json"
+
+ // Establish process directory and finished directory.
+ finishedDirectory := t.TempDir()
+ processDirectory := t.TempDir()
+
+ // Init plugin.
+ r := DirectoryMonitor{
+ Directory: processDirectory,
+ FinishedDirectory: finishedDirectory,
+ FileTag: "filename",
+ MaxBufferedMetrics: defaultMaxBufferedMetrics,
+ FileQueueSize: defaultFileQueueSize,
+ ParseMethod: defaultParseMethod,
+ }
+ err := r.Init()
+ require.NoError(t, err)
+
+ r.SetParserFunc(func() (parsers.Parser, error) {
+ p := &json.Parser{NameKey: "Name"}
+ err := p.Init()
+ return p, err
+ })
+
+ // Write a 1-line, line-delimited JSON file into the 'process' directory.
+ f, err := os.Create(filepath.Join(processDirectory, testJSONFile))
+ require.NoError(t, err)
+ _, err = f.WriteString("{\"Name\": \"event1\",\"Speed\": 100.1,\"Length\": 20.1}")
+ require.NoError(t, err)
+ err = f.Close()
+ require.NoError(t, err)
+
+ err = r.Start(&acc)
+ r.Log = testutil.Logger{}
+ require.NoError(t, err)
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(1)
+ r.Stop()
+
+ // Verify that we read each JSON line once to a single metric.
+ require.Equal(t, len(acc.Metrics), 1)
+ for _, m := range acc.Metrics {
+ for key, value := range m.Tags {
+ require.Equal(t, r.FileTag, key)
+ require.Equal(t, filepath.Base(testJSONFile), value)
+ }
+ }
+}
+
+func TestCSVNoSkipRows(t *testing.T) {
+ acc := testutil.Accumulator{}
+ testCsvFile := "test.csv"
+
+ // Establish process directory and finished directory.
+ finishedDirectory := t.TempDir()
+ processDirectory := t.TempDir()
+
+ // Init plugin.
+ r := DirectoryMonitor{
+ Directory: processDirectory,
+ FinishedDirectory: finishedDirectory,
+ MaxBufferedMetrics: defaultMaxBufferedMetrics,
+ FileQueueSize: defaultFileQueueSize,
+ ParseMethod: defaultParseMethod,
+ }
+ err := r.Init()
+ require.NoError(t, err)
+
+ r.SetParserFunc(func() (parsers.Parser, error) {
+ parser := csv.Parser{
+ HeaderRowCount: 1,
+ SkipRows: 0,
+ TagColumns: []string{"line1"},
+ }
+ err := parser.Init()
+ return &parser, err
+ })
+ r.Log = testutil.Logger{}
+
+ testCSV := `line1,line2,line3
+hello,80,test_name2`
+
+ expectedFields := map[string]interface{}{
+ "line2": int64(80),
+ "line3": "test_name2",
+ }
+
+ // Write the CSV file into the 'process' directory.
+ f, err := os.Create(filepath.Join(processDirectory, testCsvFile))
+ require.NoError(t, err)
+ _, err = f.WriteString(testCSV)
+ require.NoError(t, err)
+ err = f.Close()
+ require.NoError(t, err)
+
+ // Start plugin before adding file.
+ err = r.Start(&acc)
+ require.NoError(t, err)
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(1)
+ r.Stop()
+
+ // Verify that we read the file once.
+ require.Equal(t, len(acc.Metrics), 1)
+
+ // The file should have been moved to the finished directory, as configured.
+ _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile))
+ require.NoError(t, err)
+ for _, m := range acc.Metrics {
+ for key, value := range m.Tags {
+ require.Equal(t, "line1", key)
+ require.Equal(t, "hello", value)
+ }
+ require.Equal(t, expectedFields, m.Fields)
+ }
+}
+
+func TestCSVSkipRows(t *testing.T) {
+ acc := testutil.Accumulator{}
+ testCsvFile := "test.csv"
+
+ // Establish process directory and finished directory.
+ finishedDirectory := t.TempDir()
+ processDirectory := t.TempDir()
+
+ // Init plugin.
+ r := DirectoryMonitor{
+ Directory: processDirectory,
+ FinishedDirectory: finishedDirectory,
+ MaxBufferedMetrics: defaultMaxBufferedMetrics,
+ FileQueueSize: defaultFileQueueSize,
+ ParseMethod: defaultParseMethod,
+ }
+ err := r.Init()
+ require.NoError(t, err)
+
+ r.SetParserFunc(func() (parsers.Parser, error) {
+ parser := csv.Parser{
+ HeaderRowCount: 1,
+ SkipRows: 2,
+ TagColumns: []string{"line1"},
+ }
+ err := parser.Init()
+ return &parser, err
+ })
+ r.Log = testutil.Logger{}
+
+ testCSV := `garbage nonsense 1
+garbage,nonsense,2
+line1,line2,line3
+hello,80,test_name2`
+
+ expectedFields := map[string]interface{}{
+ "line2": int64(80),
+ "line3": "test_name2",
+ }
+
+ // Write the CSV file into the 'process' directory.
+ f, err := os.Create(filepath.Join(processDirectory, testCsvFile))
+ require.NoError(t, err)
+ _, err = f.WriteString(testCSV)
+ require.NoError(t, err)
+ err = f.Close()
+ require.NoError(t, err)
+
+ // Start plugin before adding file.
+ err = r.Start(&acc)
+ require.NoError(t, err)
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(1)
+ r.Stop()
+
+ // Verify that we read the file once.
+ require.Equal(t, len(acc.Metrics), 1)
+
+ // The file should have been moved to the finished directory, as configured.
+ _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile))
+ require.NoError(t, err)
+ for _, m := range acc.Metrics {
+ for key, value := range m.Tags {
+ require.Equal(t, "line1", key)
+ require.Equal(t, "hello", value)
+ }
+ require.Equal(t, expectedFields, m.Fields)
+ }
+}
+
+func TestCSVMultiHeader(t *testing.T) {
+ acc := testutil.Accumulator{}
+ testCsvFile := "test.csv"
+
+ // Establish process directory and finished directory.
+ finishedDirectory := t.TempDir()
+ processDirectory := t.TempDir()
+
+ // Init plugin.
+ r := DirectoryMonitor{
+ Directory: processDirectory,
+ FinishedDirectory: finishedDirectory,
+ MaxBufferedMetrics: defaultMaxBufferedMetrics,
+ FileQueueSize: defaultFileQueueSize,
+ ParseMethod: defaultParseMethod,
+ }
+ err := r.Init()
+ require.NoError(t, err)
+
+ r.SetParserFunc(func() (parsers.Parser, error) {
+ parser := csv.Parser{
+ HeaderRowCount: 2,
+ TagColumns: []string{"line1"},
+ }
+ err := parser.Init()
+ return &parser, err
+ })
+ r.Log = testutil.Logger{}
+
+ testCSV := `line,line,line
+1,2,3
+hello,80,test_name2`
+
+ expectedFields := map[string]interface{}{
+ "line2": int64(80),
+ "line3": "test_name2",
+ }
+
+ // Write the CSV file into the 'process' directory.
+ f, err := os.Create(filepath.Join(processDirectory, testCsvFile))
+ require.NoError(t, err)
+ _, err = f.WriteString(testCSV)
+ require.NoError(t, err)
+ err = f.Close()
+ require.NoError(t, err)
+
+ // Start plugin before adding file.
+ err = r.Start(&acc)
+ require.NoError(t, err)
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(1)
+ r.Stop()
+
+ // Verify that we read the file once.
+ require.Equal(t, len(acc.Metrics), 1)
+
+ // The file should have been moved to the finished directory, as configured.
+ _, err = os.Stat(filepath.Join(finishedDirectory, testCsvFile))
+ require.NoError(t, err)
+ for _, m := range acc.Metrics {
+ for key, value := range m.Tags {
+ require.Equal(t, "line1", key)
+ require.Equal(t, "hello", value)
+ }
+ require.Equal(t, expectedFields, m.Fields)
+ }
+}
+
+func TestParseCompleteFile(t *testing.T) {
+ acc := testutil.Accumulator{}
+
+ // Establish process directory and finished directory.
+ finishedDirectory := t.TempDir()
+ processDirectory := t.TempDir()
+
+ // Init plugin.
+ r := DirectoryMonitor{
+ Directory: processDirectory,
+ FinishedDirectory: finishedDirectory,
+ MaxBufferedMetrics: defaultMaxBufferedMetrics,
+ FileQueueSize: defaultFileQueueSize,
+ ParseMethod: "at-once",
+ }
+ err := r.Init()
+ require.NoError(t, err)
+ r.Log = testutil.Logger{}
+
+ parserConfig := parsers.Config{
+ DataFormat: "json",
+ JSONNameKey: "name",
+ TagKeys: []string{"tag1"},
+ }
+
+ r.SetParserFunc(func() (parsers.Parser, error) {
+ return parsers.NewParser(&parserConfig)
+ })
+
+ testJSON := `{
+ "name": "test1",
+ "value": 100.1,
+ "tag1": "value1"
+ }`
+
+ // Write the JSON file into the 'process' directory.
+ f, _ := os.CreateTemp(processDirectory, "test.json")
+ _, _ = f.WriteString(testJSON)
+ _ = f.Close()
+
+ err = r.Start(&acc)
+ require.NoError(t, err)
+ err = r.Gather(&acc)
+ require.NoError(t, err)
+ acc.Wait(1)
+ r.Stop()
+
+ require.NoError(t, acc.FirstError())
+ require.Len(t, acc.Metrics, 1)
+ testutil.RequireMetricEqual(t, testutil.TestMetric(100.1), acc.GetTelegrafMetrics()[0], testutil.IgnoreTime())
+}
diff --git a/plugins/inputs/directory_monitor/sample.conf b/plugins/inputs/directory_monitor/sample.conf
new file mode 100644
index 0000000000000..265fab791f971
--- /dev/null
+++ b/plugins/inputs/directory_monitor/sample.conf
@@ -0,0 +1,47 @@
+# Ingests files in a directory and then moves them to a target directory.
+[[inputs.directory_monitor]]
+ ## The directory to monitor and read files from.
+ directory = ""
+ #
+ ## The directory to move finished files to.
+ finished_directory = ""
+ #
+ ## The directory to move files to upon file error.
+ ## If not provided, erroring files will stay in the monitored directory.
+ # error_directory = ""
+ #
+ ## The amount of time a file is allowed to sit in the directory before it is picked up.
+ ## This time can generally be low, but if you choose to have a very large file written to the directory and it's potentially slow,
+ ## set this higher so that the plugin will wait until the file is fully copied to the directory.
+ # directory_duration_threshold = "50ms"
+ #
+ ## A list of the only file names to monitor, if necessary. Supports regex. If left blank, all files are ingested.
+ # files_to_monitor = ["^.*\.csv"]
+ #
+ ## A list of files to ignore, if necessary. Supports regex.
+ # files_to_ignore = [".DS_Store"]
+ #
+ ## Maximum lines of the file to process that have not yet been written by the
+ ## output. For best throughput set to the size of the output's metric_buffer_limit.
+ ## Warning: setting this number higher than the output's metric_buffer_limit can cause dropped metrics.
+ # max_buffered_metrics = 10000
+ #
+ ## The maximum number of file paths to queue up for processing at once, before waiting until files are processed to find more files.
+ ## Lowering this value will result in *slightly* less memory use, with a potential sacrifice in speed efficiency, if absolutely necessary.
+ # file_queue_size = 100000
+ #
+ ## Name a tag containing the name of the file the data was parsed from. Leave empty
+ ## to disable. Be cautious when file name variation is high; this can increase the cardinality
+ ## significantly. Read more about cardinality here:
+ ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality
+ # file_tag = ""
+ #
+ ## Specify if the file can be read completely at once or if it needs to be read line by line (default).
+ ## Possible values: "line-by-line", "at-once"
+ # parse_method = "line-by-line"
+ #
+ ## The data format to be read from the files.
+ ## Each data format has its own unique set of configuration options, read
+ ## more about them here:
+ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+ data_format = "influx"
diff --git a/plugins/inputs/disk/README.md b/plugins/inputs/disk/README.md
index b0a8ac05a6c19..375c20e3b8089 100644
--- a/plugins/inputs/disk/README.md
+++ b/plugins/inputs/disk/README.md
@@ -4,11 +4,12 @@
The disk input plugin gathers metrics about disk usage.
Note that `used_percent` is calculated by doing `used / (used + free)`, _not_
`used / total`, which is how the unix `df` command does it. See
-https://en.wikipedia.org/wiki/Df_(Unix) for more details.
+[wikipedia - df](https://en.wikipedia.org/wiki/Df_(Unix)) for more details.
-### Configuration:
+## Configuration
-```toml
+```toml @sample.conf
+# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.
@@ -16,22 +17,27 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details.
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"]
+
+ ## Ignore mount points by mount options.
+ ## The 'mount' command reports the options of all mounts in parentheses.
+ ## Bind mounts can be ignored with the special 'bind' option.
+ # ignore_mount_opts = []
```
-#### Docker container
+### Docker container
-To monitor the Docker engine host from within a container you will need to
-mount the host's filesystem into the container and set the `HOST_PROC`
-environment variable to the location of the `/proc` filesystem. If desired, you can
-also set the `HOST_MOUNT_PREFIX` environment variable to the prefix containing
-the `/proc` directory, when present this variable is stripped from the
-reported `path` tag.
+To monitor the Docker engine host from within a container you will need to mount
+the host's filesystem into the container and set the `HOST_PROC` environment
+variable to the location of the `/proc` filesystem. If desired, you can also
+set the `HOST_MOUNT_PREFIX` environment variable to the prefix containing the
+`/proc` directory; when present, this variable is stripped from the reported
+`path` tag.
-```
+```shell
docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/proc telegraf
```
-### Metrics:
+## Metrics
- disk
- tags:
@@ -48,25 +54,27 @@ docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/pro
- inodes_total (integer, files)
- inodes_used (integer, files)
-### Troubleshooting
+## Troubleshooting
On Linux, the list of disks is taken from the `/proc/self/mounts` file and a
[statfs] call is made on the second column.
If any expected filesystems are missing ensure that the `telegraf` user can read these files: -``` + +```shell $ sudo -u telegraf cat /proc/self/mounts | grep sda2 /dev/sda2 /home ext4 rw,relatime,data=ordered 0 0 $ sudo -u telegraf stat /home ``` It may be desired to use POSIX ACLs to provide additional access: -``` + +```shell sudo setfacl -R -m u:telegraf:X /var/lib/docker/volumes/ ``` -### Example Output: +## Example Output -``` +```shell disk,fstype=hfs,mode=ro,path=/ free=398407520256i,inodes_free=97267461i,inodes_total=121847806i,inodes_used=24580345i,total=499088621568i,used=100418957312i,used_percent=20.131039916242397 1453832006274071563 disk,fstype=devfs,mode=rw,path=/dev free=0i,inodes_free=0i,inodes_total=628i,inodes_used=628i,total=185856i,used=185856i,used_percent=100 1453832006274137913 disk,fstype=autofs,mode=rw,path=/net free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274157077 diff --git a/plugins/inputs/disk/disk.go b/plugins/inputs/disk/disk.go index b2c7e540038bb..11c13189b9a84 100644 --- a/plugins/inputs/disk/disk.go +++ b/plugins/inputs/disk/disk.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package disk import ( + _ "embed" "fmt" "strings" @@ -9,59 +11,59 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type DiskStats struct { ps system.PS - // Legacy support - Mountpoints []string `toml:"mountpoints"` + LegacyMountPoints []string `toml:"mountpoints" deprecated:"0.10.2;2.0.0;use 'mount_points' instead"` - MountPoints []string `toml:"mount_points"` - IgnoreFS []string `toml:"ignore_fs"` -} + MountPoints []string `toml:"mount_points"` + IgnoreFS []string `toml:"ignore_fs"` + IgnoreMountOpts []string `toml:"ignore_mount_opts"` -func (_ *DiskStats) Description() string { - return "Read metrics about disk usage by mount point" + Log telegraf.Logger `toml:"-"` } -var diskSampleConfig = ` - ## By default stats will be gathered for all mount points. - ## Set mount_points will restrict the stats to only the specified mount points. - # mount_points = ["/"] - - ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] -` - -func (_ *DiskStats) SampleConfig() string { - return diskSampleConfig +func (*DiskStats) SampleConfig() string { + return sampleConfig } -func (s *DiskStats) Gather(acc telegraf.Accumulator) error { +func (ds *DiskStats) Init() error { // Legacy support: - if len(s.Mountpoints) != 0 { - s.MountPoints = s.Mountpoints + if len(ds.LegacyMountPoints) != 0 { + ds.MountPoints = ds.LegacyMountPoints } - disks, partitions, err := s.ps.DiskUsage(s.MountPoints, s.IgnoreFS) + ps := system.NewSystemPS() + ps.Log = ds.Log + ds.ps = ps + + return nil +} + +func (ds *DiskStats) Gather(acc telegraf.Accumulator) error { + disks, partitions, err := ds.ps.DiskUsage(ds.MountPoints, ds.IgnoreMountOpts, ds.IgnoreFS) if err != nil { return fmt.Errorf("error getting disk usage info: %s", err) } - for i, du := range disks { if du.Total == 0 { // Skip dummy filesystem (procfs, cgroupfs, ...) 
continue } - mountOpts := parseOptions(partitions[i].Opts) + mountOpts := MountOptions(partitions[i].Opts) tags := map[string]string{ "path": du.Path, - "device": strings.Replace(partitions[i].Device, "/dev/", "", -1), + "device": strings.ReplaceAll(partitions[i].Device, "/dev/", ""), "fstype": du.Fstype, "mode": mountOpts.Mode(), } - var used_percent float64 + var usedPercent float64 if du.Used+du.Free > 0 { - used_percent = float64(du.Used) / + usedPercent = float64(du.Used) / (float64(du.Used) + float64(du.Free)) * 100 } @@ -69,7 +71,7 @@ func (s *DiskStats) Gather(acc telegraf.Accumulator) error { "total": du.Total, "free": du.Free, "used": du.Used, - "used_percent": used_percent, + "used_percent": usedPercent, "inodes_total": du.InodesTotal, "inodes_free": du.InodesFree, "inodes_used": du.InodesUsed, @@ -101,13 +103,8 @@ func (opts MountOptions) exists(opt string) bool { return false } -func parseOptions(opts string) MountOptions { - return strings.Split(opts, ",") -} - func init() { - ps := system.NewSystemPS() inputs.Add("disk", func() telegraf.Input { - return &DiskStats{ps: ps} + return &DiskStats{} }) } diff --git a/plugins/inputs/disk/disk_test.go b/plugins/inputs/disk/disk_test.go index aeb2ae92bd77f..8ab2d13cb233b 100644 --- a/plugins/inputs/disk/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -1,15 +1,21 @@ package disk import ( + "fmt" "os" + "path/filepath" + "runtime" + "strings" "testing" + "time" - "github.com/influxdata/telegraf/plugins/inputs/system" - "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/disk" - "github.com/stretchr/testify/assert" + diskUtil "github.com/shirou/gopsutil/v3/disk" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs/system" + "github.com/influxdata/telegraf/testutil" ) type MockFileInfo struct { @@ -24,21 +30,27 @@ func TestDiskUsage(t *testing.T) { var acc testutil.Accumulator var err error - psAll := []disk.PartitionStat{ + psAll := []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, }, { Device: "/dev/sdb", Mountpoint: "/home", Fstype: "ext4", - Opts: "rw,noatime,nodiratime,errors=remount-ro", + Opts: []string{"rw", "noatime", "nodiratime", "errors=remount-ro"}, + }, + { + Device: "/dev/sda", + Mountpoint: "/var/rootbind", + Fstype: "ext4", + Opts: []string{"ro", "noatime", "nodiratime", "bind"}, }, } - duAll := []disk.UsageStat{ + duAll := []diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -59,32 +71,49 @@ func TestDiskUsage(t *testing.T) { InodesFree: 468, InodesUsed: 2000, }, + { + Path: "/var/rootbind", + Fstype: "ext4", + Total: 128, + Free: 23, + Used: 100, + InodesTotal: 1234, + InodesFree: 234, + InodesUsed: 1000, + }, } mps.On("Partitions", true).Return(psAll, nil) mps.On("OSGetenv", "HOST_MOUNT_PREFIX").Return("") mps.On("PSDiskUsage", "/").Return(&duAll[0], nil) mps.On("PSDiskUsage", "/home").Return(&duAll[1], nil) + mps.On("PSDiskUsage", "/var/rootbind").Return(&duAll[2], nil) err = (&DiskStats{ps: mps}).Gather(&acc) require.NoError(t, err) numDiskMetrics := acc.NFields() - expectedAllDiskMetrics := 14 - assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) + expectedAllDiskMetrics := 21 + require.Equal(t, expectedAllDiskMetrics, numDiskMetrics) tags1 := map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "fstype": "ext4", "device": "sda", "mode": "ro", } tags2 
:= map[string]string{ - "path": "/home", + "path": fmt.Sprintf("%chome", os.PathSeparator), "fstype": "ext4", "device": "sdb", "mode": "rw", } + tags3 := map[string]string{ + "path": fmt.Sprintf("%cvar%crootbind", os.PathSeparator, os.PathSeparator), + "fstype": "ext4", + "device": "sda", + "mode": "ro", + } fields1 := map[string]interface{}{ "total": uint64(128), @@ -104,47 +133,64 @@ func TestDiskUsage(t *testing.T) { "inodes_used": uint64(2000), "used_percent": float64(81.30081300813008), } + fields3 := map[string]interface{}{ + "total": uint64(128), + "used": uint64(100), + "free": uint64(23), + "inodes_total": uint64(1234), + "inodes_free": uint64(234), + "inodes_used": uint64(1000), + "used_percent": float64(81.30081300813008), + } acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) + acc.AssertContainsTaggedFields(t, "disk", fields3, tags3) - // We expect 6 more DiskMetrics to show up with an explicit match on "/" + // We expect 7 more DiskMetrics to show up with an explicit match on "/" // and /home not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) // We should see all the diskpoints as MountPoints includes both - // / and /home - err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) - assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) + // /, /home, and /var/rootbind + err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home", "/var/rootbind"}}).Gather(&acc) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7*4, acc.NFields()) + + // We should see all the mounts as MountPoints except the bind mount + err = (&DiskStats{ps: &mps, IgnoreMountOpts: []string{"bind"}}).Gather(&acc) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7*6, acc.NFields()) } func TestDiskUsageHostMountPrefix(t *testing.T) { tests := []struct { name string - partitionStats []disk.PartitionStat - usageStats []*disk.UsageStat + partitionStats []diskUtil.PartitionStat + usageStats []*diskUtil.UsageStat hostMountPrefix string expectedTags map[string]string expectedFields map[string]interface{} }{ { name: "no host mount prefix", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/", Total: 42, }, }, expectedTags: map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", @@ -161,15 +207,15 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, { name: "host mount prefix", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/hostfs/var", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/hostfs/var", Total: 42, @@ -177,7 +223,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, hostMountPrefix: "/hostfs", expectedTags: map[string]string{ - "path": "/var", + "path": fmt.Sprintf("%cvar", os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", @@ -194,15 +240,15 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, { name: "host mount 
prefix exact match", - partitionStats: []disk.PartitionStat{ + partitionStats: []diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/hostfs", Fstype: "ext4", - Opts: "ro", + Opts: []string{"ro"}, }, }, - usageStats: []*disk.UsageStat{ + usageStats: []*diskUtil.UsageStat{ { Path: "/hostfs", Total: 42, @@ -210,7 +256,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, hostMountPrefix: "/hostfs", expectedTags: map[string]string{ - "path": "/", + "path": string(os.PathSeparator), "device": "sda", "fstype": "ext4", "mode": "ro", @@ -258,7 +304,7 @@ func TestDiskStats(t *testing.T) { var acc testutil.Accumulator var err error - duAll := []*disk.UsageStat{ + duAll := []*diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -279,8 +325,18 @@ func TestDiskStats(t *testing.T) { InodesFree: 468, InodesUsed: 2000, }, + { + Path: "/var/rootbind", + Fstype: "ext4", + Total: 128, + Free: 23, + Used: 100, + InodesTotal: 1234, + InodesFree: 234, + InodesUsed: 1000, + }, } - duFiltered := []*disk.UsageStat{ + duMountFiltered := []*diskUtil.UsageStat{ { Path: "/", Fstype: "ext4", @@ -292,41 +348,84 @@ func TestDiskStats(t *testing.T) { InodesUsed: 1000, }, } + duOptFiltered := []*diskUtil.UsageStat{ + { + Path: "/", + Fstype: "ext4", + Total: 128, + Free: 23, + Used: 100, + InodesTotal: 1234, + InodesFree: 234, + InodesUsed: 1000, + }, + { + Path: "/home", + Fstype: "ext4", + Total: 256, + Free: 46, + Used: 200, + InodesTotal: 2468, + InodesFree: 468, + InodesUsed: 2000, + }, + } - psAll := []*disk.PartitionStat{ + psAll := []*diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, }, { Device: "/dev/sdb", Mountpoint: "/home", Fstype: "ext4", - Opts: "rw,noatime,nodiratime,errors=remount-ro", + Opts: []string{"rw", "noatime", "nodiratime", "errors=remount-ro"}, + }, + { + Device: "/dev/sda", + Mountpoint: "/var/rootbind", + Fstype: "ext4", + Opts: []string{"ro", "noatime", "nodiratime", "bind"}, }, } - psFiltered := []*disk.PartitionStat{ + psMountFiltered := []*diskUtil.PartitionStat{ + { + Device: "/dev/sda", + Mountpoint: "/", + Fstype: "ext4", + Opts: []string{"ro", "noatime", "nodiratime"}, + }, + } + psOptFiltered := []*diskUtil.PartitionStat{ { Device: "/dev/sda", Mountpoint: "/", Fstype: "ext4", - Opts: "ro,noatime,nodiratime", + Opts: []string{"ro", "noatime", "nodiratime"}, + }, + { + Device: "/dev/sdb", + Mountpoint: "/home", + Fstype: "ext4", + Opts: []string{"rw", "noatime", "nodiratime", "errors=remount-ro"}, }, } - mps.On("DiskUsage", []string(nil), []string(nil)).Return(duAll, psAll, nil) - mps.On("DiskUsage", []string{"/", "/dev"}, []string(nil)).Return(duFiltered, psFiltered, nil) - mps.On("DiskUsage", []string{"/", "/home"}, []string(nil)).Return(duAll, psAll, nil) + mps.On("DiskUsage", []string(nil), []string(nil), []string(nil)).Return(duAll, psAll, nil) + mps.On("DiskUsage", []string{"/", "/dev"}, []string(nil), []string(nil)).Return(duMountFiltered, psMountFiltered, nil) + mps.On("DiskUsage", []string{"/", "/home", "/var/rootbind"}, []string(nil), []string(nil)).Return(duAll, psAll, nil) + mps.On("DiskUsage", []string(nil), []string{"bind"}, []string(nil)).Return(duOptFiltered, psOptFiltered, nil) err = (&DiskStats{ps: &mps}).Gather(&acc) require.NoError(t, err) numDiskMetrics := acc.NFields() - expectedAllDiskMetrics := 14 - assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics) + expectedAllDiskMetrics := 21 + require.Equal(t, expectedAllDiskMetrics, numDiskMetrics) 
tags1 := map[string]string{ "path": "/", @@ -362,13 +461,185 @@ func TestDiskStats(t *testing.T) { acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) - // We expect 6 more DiskMetrics to show up with an explicit match on "/" - // and /home not matching the /dev in MountPoints + // We expect 7 more DiskMetrics to show up with an explicit match on "/" + // and /home and /var/rootbind not matching the /dev in MountPoints err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) - assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7, acc.NFields()) // We should see all the diskpoints as MountPoints includes both - // / and /home - err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) - assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields()) + // /, /home, and /var/rootbind + err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home", "/var/rootbind"}}).Gather(&acc) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7*4, acc.NFields()) + + // We should see all the mounts as MountPoints except the bind mount + err = (&DiskStats{ps: &mps, IgnoreMountOpts: []string{"bind"}}).Gather(&acc) + require.NoError(t, err) + require.Equal(t, expectedAllDiskMetrics+7*6, acc.NFields()) +} + +func TestDiskUsageIssues(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("Skipping due to Linux-only test-cases...") + } + + tests := []struct { + name string + prefix string + du diskUtil.UsageStat + expected []telegraf.Metric + }{ + { + name: "success", + prefix: "", + du: diskUtil.UsageStat{ + Total: 256, + Free: 46, + Used: 200, + InodesTotal: 2468, + InodesFree: 468, + InodesUsed: 2000, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "disk", + map[string]string{ + "device": "tmpfs", + "fstype": "tmpfs", + "mode": "rw", + "path": "/tmp", + }, + map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + testutil.MustMetric( + "disk", + map[string]string{ + "device": "nvme0n1p4", + "fstype": "ext4", + "mode": "rw", + "path": "/", + }, + map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + }, + }, + { + name: "issue 10297", + prefix: "/host", + du: diskUtil.UsageStat{ + Total: 256, + Free: 46, + Used: 200, + InodesTotal: 2468, + InodesFree: 468, + InodesUsed: 2000, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "disk", + map[string]string{ + "device": "sda1", + "fstype": "ext4", + "mode": "rw", + "path": "/", + }, + map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + testutil.MustMetric( + "disk", + map[string]string{ + "device": "sdb", + "fstype": "ext4", + "mode": "rw", + "path": "/mnt/storage", + }, + map[string]interface{}{ + "total": uint64(256), + "used": uint64(200), + "free": uint64(46), + "inodes_total": 
uint64(2468), + "inodes_free": uint64(468), + "inodes_used": uint64(2000), + "used_percent": float64(81.30081300813008), + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup the environment + hostMountPrefix := tt.prefix + hostProcPrefix, err := filepath.Abs(filepath.Join("testdata", strings.ReplaceAll(tt.name, " ", "_"))) + require.NoError(t, err) + + // Get the partitions in the test-case + os.Clearenv() + require.NoError(t, os.Setenv("HOST_PROC", hostProcPrefix)) + partitions, err := diskUtil.Partitions(true) + require.NoError(t, err) + + // Mock the disk usage + mck := &mock.Mock{} + mps := system.MockPSDisk{SystemPS: &system.SystemPS{PSDiskDeps: &system.MockDiskUsage{Mock: mck}}, Mock: mck} + defer mps.AssertExpectations(t) + + mps.On("Partitions", true).Return(partitions, nil) + + for _, partition := range partitions { + mountpoint := partition.Mountpoint + if hostMountPrefix != "" { + mountpoint = filepath.Join(hostMountPrefix, partition.Mountpoint) + } + diskUsage := tt.du + diskUsage.Path = mountpoint + diskUsage.Fstype = partition.Fstype + mps.On("PSDiskUsage", mountpoint).Return(&diskUsage, nil) + } + mps.On("OSGetenv", "HOST_MOUNT_PREFIX").Return(hostMountPrefix) + + // Setup the plugin and run the test + var acc testutil.Accumulator + plugin := &DiskStats{ps: &mps} + require.NoError(t, plugin.Gather(&acc)) + + actual := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime(), testutil.SortMetrics()) + }) + } + os.Clearenv() } diff --git a/plugins/inputs/disk/sample.conf b/plugins/inputs/disk/sample.conf new file mode 100644 index 0000000000000..e3e2e7610793c --- /dev/null +++ b/plugins/inputs/disk/sample.conf @@ -0,0 +1,13 @@ +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default stats will be gathered for all mount points. + ## Set mount_points will restrict the stats to only the specified mount points. + # mount_points = ["/"] + + ## Ignore mount points by filesystem type. + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] + + ## Ignore mount points by mount options. + ## The 'mount' command reports options of all mounts in parentheses. + ## Bind mounts can be ignored with the special 'bind' option. + # ignore_mount_opts = [] diff --git a/plugins/inputs/disk/testdata/issue_10297/1/mountinfo b/plugins/inputs/disk/testdata/issue_10297/1/mountinfo new file mode 100644 index 0000000000000..012aae7ce2f66 --- /dev/null +++ b/plugins/inputs/disk/testdata/issue_10297/1/mountinfo @@ -0,0 +1,2 @@ +31 1 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw,discard,errors=remount-ro +126 31 8:16 / /mnt/storage rw,relatime shared:67 - ext4 /dev/sdb rw,discard diff --git a/plugins/inputs/disk/testdata/success/1/mountinfo b/plugins/inputs/disk/testdata/success/1/mountinfo new file mode 100644 index 0000000000000..70c532242dcf8 --- /dev/null +++ b/plugins/inputs/disk/testdata/success/1/mountinfo @@ -0,0 +1,2 @@ +26 1 259:4 / / rw,relatime shared:1 - ext4 /dev/nvme0n1p4 rw +39 26 0:32 / /tmp rw,nosuid,nodev shared:17 - tmpfs tmpfs rw,size=16427752k,nr_inodes=409600,inode64 diff --git a/plugins/inputs/diskio/README.md b/plugins/inputs/diskio/README.md index 11e68d6961ee0..8f87935a7a236 100644 --- a/plugins/inputs/diskio/README.md +++ b/plugins/inputs/diskio/README.md @@ -2,15 +2,15 @@ The diskio input plugin gathers metrics about disk traffic and timing. 
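The `devices` option in the configuration below accepts glob patterns such as the new `vd*` example; when a glob is present, the plugin compiles the list with telegraf's `filter` package (the `filter.Compile` call is visible in the `diskio.go` hunk further down). A minimal sketch of those matching semantics, with made-up device names:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Compile the same pattern list a user would place in 'devices'.
	// filter.Compile is the helper the plugin itself uses when a
	// pattern contains glob metacharacters (*, ?, [).
	f, err := filter.Compile([]string{"sda", "vd*"})
	if err != nil {
		panic(err)
	}
	// Illustrative device names: only "sda" and the vd* family match.
	for _, dev := range []string{"sda", "sdb", "vda", "vdb1"} {
		fmt.Printf("%-4s matched: %v\n", dev, f.Match(dev))
	}
}
```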
-### Configuration: +## Configuration -```toml +```toml @sample.conf # Read metrics about disk IO by device [[inputs.diskio]] ## By default, telegraf will gather stats for all devices including ## disk partitions. ## Setting devices will restrict the stats to the specified devices. - # devices = ["sda", "sdb"] + # devices = ["sda", "sdb", "vd*"] ## Uncomment the following line if you need disk serial numbers. # skip_serial_number = false # @@ -34,7 +34,7 @@ The diskio input plugin gathers metrics about disk traffic and timing. # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] ``` -#### Docker container +### Docker container To monitor the Docker engine host from within a container you will need to mount the host's filesystem into the container and set the `HOST_PROC` @@ -44,11 +44,11 @@ it is required to use privileged mode to provide access to `/dev`. If you are using the `device_tags` or `name_templates` options, you will need to bind mount `/run/udev` into the container. -``` +```shell docker run --privileged -v /:/hostfs:ro -v /run/udev:/run/udev:ro -e HOST_PROC=/hostfs/proc telegraf ``` -### Metrics: +## Metrics - diskio - tags: @@ -67,21 +67,23 @@ docker run --privileged -v /:/hostfs:ro -v /run/udev:/run/udev:ro -e HOST_PROC=/ - merged_reads (integer, counter) - merged_writes (integer, counter) -On linux these values correspond to the values in -[`/proc/diskstats`](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats) -and -[`/sys/block/<dev>/stat`](https://www.kernel.org/doc/Documentation/block/stat.txt). +On linux these values correspond to the values in [`/proc/diskstats`][1] and +[`/sys/block/<dev>/stat`][2]. + +[1]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats + +[2]: https://www.kernel.org/doc/Documentation/block/stat.txt -#### `reads` & `writes`: +### `reads` & `writes` These values increment when an I/O request completes. -#### `read_bytes` & `write_bytes`: +### `read_bytes` & `write_bytes` These values count the number of bytes read from or written to this block device. -#### `read_time` & `write_time`: +### `read_time` & `write_time` These values count the number of milliseconds that I/O requests have waited on this block device. If there are multiple I/O requests waiting, @@ -89,49 +91,52 @@ these values will increase at a rate greater than 1000/second; for example, if 60 read requests wait for an average of 30 ms, the read_time field will increase by 60*30 = 1800. -#### `io_time`: +### `io_time` This value counts the number of milliseconds during which the device has had I/O requests queued. -#### `weighted_io_time`: +### `weighted_io_time` This value counts the number of milliseconds that I/O requests have waited on this block device. If there are multiple I/O requests waiting, this value will increase as the product of the number of milliseconds times the number of requests waiting (see `read_time` above for an example). -#### `iops_in_progress`: +### `iops_in_progress` This value counts the number of I/O requests that have been issued to the device driver but have not yet completed. It does not include I/O requests that are in the queue but not yet issued to the device driver. -#### `merged_reads` & `merged_writes`: +### `merged_reads` & `merged_writes` Reads and writes which are adjacent to each other may be merged for efficiency. Thus two 4K reads may become one 8K read before it is ultimately handed to the disk, and so it will be counted (and queued) as only one I/O. These fields lets you know how often this was done. 
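All of the fields above are cumulative counters, so dashboards usually derive rates from successive samples; that is exactly what the `non_negative_derivative` calls in the sample queries below express. A small sketch of the same arithmetic in Go, with made-up sample values:

```go
package main

import "fmt"

// utilizationPercent converts two successive io_time readings
// (milliseconds the device spent with I/O queued) into a percent-busy
// figure over the sampling interval, treating a counter reset as zero
// just like InfluxDB's non_negative_derivative.
func utilizationPercent(prevMS, currMS uint64, intervalMS float64) float64 {
	if currMS < prevMS {
		return 0 // counter reset, e.g. after a reboot
	}
	return float64(currMS-prevMS) / intervalMS * 100
}

func main() {
	// 600 ms of busy time over a 60 s window is 1% utilization.
	fmt.Printf("%.2f%%\n", utilizationPercent(1271, 1871, 60000))
}
```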
-### Sample Queries: +## Sample Queries -#### Calculate percent IO utilization per disk and host: -``` +### Calculate percent IO utilization per disk and host + +```sql SELECT non_negative_derivative(last("io_time"),1ms) FROM "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` -#### Calculate average queue depth: -`iops_in_progress` will give you an instantaneous value. This will give you the average between polling intervals. -``` +### Calculate average queue depth + +`iops_in_progress` will give you an instantaneous value. This will give you the +average between polling intervals. + +```sql SELECT non_negative_derivative(last("weighted_io_time"),1ms) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` -### Example Output: +## Example Output -``` +```shell diskio,name=sda1 merged_reads=0i,reads=2353i,writes=10i,write_bytes=2117632i,write_time=49i,io_time=1271i,weighted_io_time=1350i,read_bytes=31350272i,read_time=1303i,iops_in_progress=0i,merged_writes=0i 1578326400000000000 diskio,name=centos/var_log reads=1063077i,writes=591025i,read_bytes=139325491712i,write_bytes=144233131520i,read_time=650221i,write_time=24368817i,io_time=852490i,weighted_io_time=25037394i,iops_in_progress=1i,merged_reads=0i,merged_writes=0i 1578326400000000000 diskio,name=sda write_time=49i,io_time=1317i,weighted_io_time=1404i,reads=2495i,read_time=1357i,write_bytes=2117632i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,writes=10i,read_bytes=38956544i 1578326400000000000 - ``` diff --git a/plugins/inputs/diskio/diskio.go b/plugins/inputs/diskio/diskio.go index 9c1e20ebdc5de..a129b7a09dc28 100644 --- a/plugins/inputs/diskio/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package diskio import ( + _ "embed" "fmt" "regexp" "strings" @@ -11,6 +13,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( varRegex = regexp.MustCompile(`\$(?:\w+|\{\w+\})`) ) @@ -27,96 +33,54 @@ type DiskIO struct { infoCache map[string]diskInfoCache deviceFilter filter.Filter - initialized bool -} - -func (_ *DiskIO) Description() string { - return "Read metrics about disk IO by device" -} - -var diskIOsampleConfig = ` - ## By default, telegraf will gather stats for all devices including - ## disk partitions. - ## Setting devices will restrict the stats to the specified devices. - # devices = ["sda", "sdb", "vd*"] - ## Uncomment the following line if you need disk serial numbers. - # skip_serial_number = false - # - ## On systems which support it, device metadata can be added in the form of - ## tags. - ## Currently only Linux is supported via udev properties. You can view - ## available properties for a device by running: - ## 'udevadm info -q property -n /dev/sda' - ## Note: Most, but not all, udev properties can be accessed this way. Properties - ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. - # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] - # - ## Using the same metadata source as device_tags, you can also customize the - ## name of the device via templates. - ## The 'name_templates' parameter is a list of templates to try and apply to - ## the device. The template may contain variables in the form of '$PROPERTY' or - ## '${PROPERTY}'. 
The first template which does not contain any variables not - ## present for the device is used as the device name tag. - ## The typical use case is for LVM volumes, to get the VG/LV name instead of - ## the near-meaningless DM-0 name. - # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] -` - -func (_ *DiskIO) SampleConfig() string { - return diskIOsampleConfig } // hasMeta reports whether s contains any special glob characters. func hasMeta(s string) bool { - return strings.IndexAny(s, "*?[") >= 0 + return strings.ContainsAny(s, "*?[") +} + +func (*DiskIO) SampleConfig() string { + return sampleConfig } -func (s *DiskIO) init() error { - for _, device := range s.Devices { +func (d *DiskIO) Init() error { + for _, device := range d.Devices { if hasMeta(device) { - filter, err := filter.Compile(s.Devices) + deviceFilter, err := filter.Compile(d.Devices) if err != nil { return fmt.Errorf("error compiling device pattern: %s", err.Error()) } - s.deviceFilter = filter + d.deviceFilter = deviceFilter } } - s.initialized = true return nil } -func (s *DiskIO) Gather(acc telegraf.Accumulator) error { - if !s.initialized { - err := s.init() - if err != nil { - return err - } - } - +func (d *DiskIO) Gather(acc telegraf.Accumulator) error { devices := []string{} - if s.deviceFilter == nil { - devices = s.Devices + if d.deviceFilter == nil { + devices = d.Devices } - diskio, err := s.ps.DiskIO(devices) + diskio, err := d.ps.DiskIO(devices) if err != nil { return fmt.Errorf("error getting disk io info: %s", err.Error()) } for _, io := range diskio { - match := false - if s.deviceFilter != nil && s.deviceFilter.Match(io.Name) { + if d.deviceFilter != nil && d.deviceFilter.Match(io.Name) { match = true } tags := map[string]string{} var devLinks []string - tags["name"], devLinks = s.diskName(io.Name) + tags["name"], devLinks = d.diskName(io.Name) - if s.deviceFilter != nil && !match { + if d.deviceFilter != nil && !match { for _, devLink := range devLinks { - if s.deviceFilter.Match(devLink) { + if d.deviceFilter.Match(devLink) { match = true break } @@ -126,11 +90,11 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { } } - for t, v := range s.diskTags(io.Name) { + for t, v := range d.diskTags(io.Name) { tags[t] = v } - if !s.SkipSerialNumber { + if !d.SkipSerialNumber { if len(io.SerialNumber) != 0 { tags["serial"] = io.SerialNumber } else { @@ -157,23 +121,23 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { return nil } -func (s *DiskIO) diskName(devName string) (string, []string) { - di, err := s.diskInfo(devName) +func (d *DiskIO) diskName(devName string) (string, []string) { + di, err := d.diskInfo(devName) devLinks := strings.Split(di["DEVLINKS"], " ") for i, devLink := range devLinks { devLinks[i] = strings.TrimPrefix(devLink, "/dev/") } - if len(s.NameTemplates) == 0 { + if len(d.NameTemplates) == 0 { return devName, devLinks } if err != nil { - s.Log.Warnf("Error gathering disk info: %s", err) + d.Log.Warnf("Error gathering disk info: %s", err) return devName, devLinks } - for _, nt := range s.NameTemplates { + for _, nt := range d.NameTemplates { miss := false name := varRegex.ReplaceAllStringFunc(nt, func(sub string) string { sub = sub[1:] // strip leading '$' @@ -195,19 +159,19 @@ func (s *DiskIO) diskName(devName string) (string, []string) { return devName, devLinks } -func (s *DiskIO) diskTags(devName string) map[string]string { - if len(s.DeviceTags) == 0 { +func (d *DiskIO) diskTags(devName string) map[string]string { + if len(d.DeviceTags) == 0 { return nil 
} - di, err := s.diskInfo(devName) + di, err := d.diskInfo(devName) if err != nil { - s.Log.Warnf("Error gathering disk info: %s", err) + d.Log.Warnf("Error gathering disk info: %s", err) return nil } tags := map[string]string{} - for _, dt := range s.DeviceTags { + for _, dt := range d.DeviceTags { if v, ok := di[dt]; ok { tags[dt] = v } @@ -221,4 +185,8 @@ func init() { inputs.Add("diskio", func() telegraf.Input { return &DiskIO{ps: ps, SkipSerialNumber: true} }) + // Backwards compatible alias + inputs.Add("io", func() telegraf.Input { + return &DiskIO{ps: ps, SkipSerialNumber: true} + }) } diff --git a/plugins/inputs/diskio/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go index f2499ca17c1c2..c356d49cb7b68 100644 --- a/plugins/inputs/diskio/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -16,9 +16,7 @@ type diskInfoCache struct { values map[string]string } -var udevPath = "/run/udev/data" - -func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { +func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { var err error var stat unix.Stat_t @@ -28,33 +26,51 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { return nil, err } - if s.infoCache == nil { - s.infoCache = map[string]diskInfoCache{} + if d.infoCache == nil { + d.infoCache = map[string]diskInfoCache{} } - ic, ok := s.infoCache[devName] + ic, ok := d.infoCache[devName] if ok && stat.Mtim.Nano() == ic.modifiedAt { return ic.values, nil } - major := unix.Major(uint64(stat.Rdev)) - minor := unix.Minor(uint64(stat.Rdev)) - udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor) - - di := map[string]string{} + var udevDataPath string + if ok && len(ic.udevDataPath) > 0 { + // We can reuse the udev data path from a "previous" entry. 
+ // This allows us to also "poison" it during test scenarios + udevDataPath = ic.udevDataPath + } else { + major := unix.Major(uint64(stat.Rdev)) //nolint:unconvert // Conversion needed for some architectures + minor := unix.Minor(uint64(stat.Rdev)) //nolint:unconvert // Conversion needed for some architectures + udevDataPath = fmt.Sprintf("/run/udev/data/b%d:%d", major, minor) - s.infoCache[devName] = diskInfoCache{ - modifiedAt: stat.Mtim.Nano(), - udevDataPath: udevDataPath, - values: di, + _, err := os.Stat(udevDataPath) + if err != nil { + // This path failed, try the fallback .udev style (non-systemd) + udevDataPath = fmt.Sprintf("/dev/.udev/db/block:%s", devName) + _, err := os.Stat(udevDataPath) + if err != nil { + // Giving up, cannot retrieve disk info + return nil, err + } + } } - + // Final open of the confirmed (or the previously detected/used) udev file f, err := os.Open(udevDataPath) if err != nil { return nil, err } defer f.Close() + di := map[string]string{} + + d.infoCache[devName] = diskInfoCache{ + modifiedAt: stat.Mtim.Nano(), + udevDataPath: udevDataPath, + values: di, + } + scnr := bufio.NewScanner(f) var devlinks bytes.Buffer for scnr.Scan() { @@ -64,9 +80,12 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { } if l[:2] == "S:" { if devlinks.Len() > 0 { + //nolint:errcheck,revive // this will never fail devlinks.WriteString(" ") } + //nolint:errcheck,revive // this will never fail devlinks.WriteString("/dev/") + //nolint:errcheck,revive // this will never fail devlinks.WriteString(l[2:]) continue } diff --git a/plugins/inputs/diskio/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go index 1cb031bdce553..8a76e230cbb98 100644 --- a/plugins/inputs/diskio/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -1,13 +1,12 @@ +//go:build linux // +build linux package diskio import ( - "io/ioutil" "os" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -19,19 +18,32 @@ S:foo/bar/devlink1 `) // setupNullDisk sets up fake udev info as if /dev/null were a disk. 
-func setupNullDisk(t *testing.T) func() error { - td, err := ioutil.TempDir("", ".telegraf.TestDiskInfo") +func setupNullDisk(t *testing.T, s *DiskIO, devName string) func() { + td, err := os.CreateTemp("", ".telegraf.DiskInfoTest") require.NoError(t, err) - origUdevPath := udevPath + if s.infoCache == nil { + s.infoCache = make(map[string]diskInfoCache) + } + ic, ok := s.infoCache[devName] + if !ok { + // No previous calls for the device were done, easy to poison the cache + s.infoCache[devName] = diskInfoCache{ + modifiedAt: 0, + udevDataPath: td.Name(), + values: map[string]string{}, + } + } + origUdevPath := ic.udevDataPath - cleanFunc := func() error { - udevPath = origUdevPath - return os.RemoveAll(td) + cleanFunc := func() { + ic.udevDataPath = origUdevPath + //nolint:errcheck,revive // we cannot do anything if file cannot be removed + os.Remove(td.Name()) } - udevPath = td - err = ioutil.WriteFile(td+"/b1:3", nullDiskInfo, 0644) // 1:3 is the 'null' device + ic.udevDataPath = td.Name() + _, err = td.Write(nullDiskInfo) if err != nil { cleanFunc() t.Fatal(err) @@ -41,34 +53,29 @@ func setupNullDisk(t *testing.T) func() error { } func TestDiskInfo(t *testing.T) { - clean := setupNullDisk(t) - defer clean() - s := &DiskIO{} + clean := setupNullDisk(t, s, "null") + defer clean() di, err := s.diskInfo("null") require.NoError(t, err) - assert.Equal(t, "myval1", di["MY_PARAM_1"]) - assert.Equal(t, "myval2", di["MY_PARAM_2"]) - assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) + require.Equal(t, "myval1", di["MY_PARAM_1"]) + require.Equal(t, "myval2", di["MY_PARAM_2"]) + require.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // test that data is cached - err = clean() - require.NoError(t, err) + clean() di, err = s.diskInfo("null") require.NoError(t, err) - assert.Equal(t, "myval1", di["MY_PARAM_1"]) - assert.Equal(t, "myval2", di["MY_PARAM_2"]) - assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) - + require.Equal(t, "myval1", di["MY_PARAM_1"]) + require.Equal(t, "myval2", di["MY_PARAM_2"]) + require.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // unfortunately we can't adjust mtime on /dev/null to test cache invalidation } // DiskIOStats.diskName isn't a linux specific function, but dependent // functions are a no-op on non-Linux. func TestDiskIOStats_diskName(t *testing.T) { - defer setupNullDisk(t)() - tests := []struct { templates []string expected string @@ -85,22 +92,24 @@ func TestDiskIOStats_diskName(t *testing.T) { } for _, tc := range tests { - s := DiskIO{ - NameTemplates: tc.templates, - } - name, _ := s.diskName("null") - assert.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) + func() { + s := DiskIO{ + NameTemplates: tc.templates, + } + defer setupNullDisk(t, &s, "null")() //nolint:revive // done on purpose, cleaning will be executed properly + name, _ := s.diskName("null") + require.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) + }() } } // DiskIOStats.diskTags isn't a linux specific function, but dependent // functions are a no-op on non-Linux. 
func TestDiskIOStats_diskTags(t *testing.T) { - defer setupNullDisk(t)() - s := &DiskIO{ DeviceTags: []string{"MY_PARAM_2"}, } + defer setupNullDisk(t, s, "null")() //nolint:revive // done on purpose, cleaning will be executed properly dt := s.diskTags("null") - assert.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) + require.Equal(t, map[string]string{"MY_PARAM_2": "myval2"}, dt) } diff --git a/plugins/inputs/diskio/diskio_other.go b/plugins/inputs/diskio/diskio_other.go index 07fb8c3b87faa..458a64c13f7bb 100644 --- a/plugins/inputs/diskio/diskio_other.go +++ b/plugins/inputs/diskio/diskio_other.go @@ -1,9 +1,10 @@ +//go:build !linux // +build !linux package diskio type diskInfoCache struct{} -func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { +func (d *DiskIO) diskInfo(devName string) (map[string]string, error) { return nil, nil } diff --git a/plugins/inputs/diskio/diskio_test.go b/plugins/inputs/diskio/diskio_test.go index 3ad203de09362..c597a41525032 100644 --- a/plugins/inputs/diskio/diskio_test.go +++ b/plugins/inputs/diskio/diskio_test.go @@ -5,7 +5,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/v3/disk" "github.com/stretchr/testify/require" ) @@ -111,6 +111,7 @@ func TestDiskIO(t *testing.T) { ps: &mps, Devices: tt.devices, } + require.NoError(t, diskio.Init()) err := diskio.Gather(&acc) require.Equal(t, tt.err, err) diff --git a/plugins/inputs/diskio/sample.conf b/plugins/inputs/diskio/sample.conf new file mode 100644 index 0000000000000..ff5521d52a725 --- /dev/null +++ b/plugins/inputs/diskio/sample.conf @@ -0,0 +1,27 @@ +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb", "vd*"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + # + ## On systems which support it, device metadata can be added in the form of + ## tags. + ## Currently only Linux is supported via udev properties. You can view + ## available properties for a device by running: + ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. + # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] + # + ## Using the same metadata source as device_tags, you can also customize the + ## name of the device via templates. + ## The 'name_templates' parameter is a list of templates to try and apply to + ## the device. The template may contain variables in the form of '$PROPERTY' or + ## '${PROPERTY}'. The first template which does not contain any variables not + ## present for the device is used as the device name tag. + ## The typical use case is for LVM volumes, to get the VG/LV name instead of + ## the near-meaningless DM-0 name. + # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] diff --git a/plugins/inputs/disque/README.md b/plugins/inputs/disque/README.md index ad05658cc2b14..8e60b182254e4 100644 --- a/plugins/inputs/disque/README.md +++ b/plugins/inputs/disque/README.md @@ -1,12 +1,13 @@ # Disque Input Plugin -[Disque](https://github.com/antirez/disque) is an ongoing experiment to build a distributed, in-memory, message broker. 
+[Disque](https://github.com/antirez/disque) is an ongoing experiment to build a +distributed, in-memory message broker. +## Configuration -### Configuration: - -```toml -[[inputs.disque]] +```toml @sample.conf +# Read metrics from one or many disque servers +[[inputs.disque]] ## An array of URI to gather stats about. Specify an ip or hostname ## with optional port and password. ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. @@ -14,8 +15,7 @@ servers = ["localhost"] ``` -### Metrics - +## Metrics - disque - disque_host diff --git a/plugins/inputs/disque/disque.go b/plugins/inputs/disque/disque.go index 6585ab88eb587..3414ed6ee67af 100644 --- a/plugins/inputs/disque/disque.go +++ b/plugins/inputs/disque/disque.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package disque import ( "bufio" + _ "embed" "errors" "fmt" "net" @@ -15,31 +17,18 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Disque struct { Servers []string - c net.Conn - buf []byte + c net.Conn } -var sampleConfig = ` - ## An array of URI to gather stats about. Specify an ip or hostname - ## with optional port and password. - ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. - ## If no servers are specified, then localhost is used as the host. - servers = ["localhost"] -` - var defaultTimeout = 5 * time.Second -func (r *Disque) SampleConfig() string { - return sampleConfig -} - -func (r *Disque) Description() string { - return "Read metrics from one or many disque servers" -} - var Tracking = map[string]string{ "uptime_in_seconds": "uptime", "connected_clients": "clients", @@ -62,23 +51,26 @@ var Tracking = map[string]string{ var ErrProtocolError = errors.New("disque protocol error") +func (*Disque) SampleConfig() string { + return sampleConfig +} + // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *Disque) Gather(acc telegraf.Accumulator) error { - if len(g.Servers) == 0 { - url := &url.URL{ +func (d *Disque) Gather(acc telegraf.Accumulator) error { + if len(d.Servers) == 0 { + address := &url.URL{ Host: ":7711", } - g.gatherServer(url, acc) - return nil + return d.gatherServer(address, acc) } var wg sync.WaitGroup - for _, serv := range g.Servers { + for _, serv := range d.Servers { u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", serv, err)) continue } else if u.Scheme == "" { // fallback to simple string based address (i.e. 
"10.0.0.1:10000") @@ -87,10 +79,10 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error { u.Path = "" } wg.Add(1) - go func(serv string) { + go func() { defer wg.Done() - acc.AddError(g.gatherServer(u, acc)) - }(serv) + acc.AddError(d.gatherServer(u, acc)) + }() } wg.Wait() @@ -100,9 +92,8 @@ func (g *Disque) Gather(acc telegraf.Accumulator) error { const defaultPort = "7711" -func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { - if g.c == nil { - +func (d *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { + if d.c == nil { _, _, err := net.SplitHostPort(addr.Host) if err != nil { addr.Host = addr.Host + ":" + defaultPort @@ -110,13 +101,15 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { c, err := net.DialTimeout("tcp", addr.Host, defaultTimeout) if err != nil { - return fmt.Errorf("Unable to connect to disque server '%s': %s", addr.Host, err) + return fmt.Errorf("unable to connect to disque server '%s': %s", addr.Host, err) } if addr.User != nil { pwd, set := addr.User.Password() if set && pwd != "" { - c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))) + if _, err := c.Write([]byte(fmt.Sprintf("AUTH %s\r\n", pwd))); err != nil { + return err + } r := bufio.NewReader(c) @@ -130,15 +123,19 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { } } - g.c = c + d.c = c } // Extend connection - g.c.SetDeadline(time.Now().Add(defaultTimeout)) + if err := d.c.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } - g.c.Write([]byte("info\r\n")) + if _, err := d.c.Write([]byte("info\r\n")); err != nil { + return err + } - r := bufio.NewReader(g.c) + r := bufio.NewReader(d.c) line, err := r.ReadString('\n') if err != nil { @@ -176,7 +173,7 @@ func (g *Disque) gatherServer(addr *url.URL, acc telegraf.Accumulator) error { parts := strings.SplitN(line, ":", 2) - name := string(parts[0]) + name := parts[0] metric, ok := Tracking[name] if !ok { diff --git a/plugins/inputs/disque/disque_test.go b/plugins/inputs/disque/disque_test.go index 1e5b764f9c820..4eacbd76c6a1e 100644 --- a/plugins/inputs/disque/disque_test.go +++ b/plugins/inputs/disque/disque_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestDisqueGeneratesMetrics(t *testing.T) { +func TestDisqueGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -38,8 +38,12 @@ func TestDisqueGeneratesMetrics(t *testing.T) { return } - fmt.Fprintf(c, "$%d\n", len(testOutput)) - c.Write([]byte(testOutput)) + if _, err := fmt.Fprintf(c, "$%d\n", len(testOutput)); err != nil { + return + } + if _, err := c.Write([]byte(testOutput)); err != nil { + return + } } }() @@ -76,7 +80,7 @@ func TestDisqueGeneratesMetrics(t *testing.T) { acc.AssertContainsFields(t, "disque", fields) } -func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { +func TestDisqueCanPullStatsFromMultipleServersIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -104,8 +108,12 @@ func TestDisqueCanPullStatsFromMultipleServers(t *testing.T) { return } - fmt.Fprintf(c, "$%d\n", len(testOutput)) - c.Write([]byte(testOutput)) + if _, err := fmt.Fprintf(c, "$%d\n", len(testOutput)); err != nil { + return + } + if _, err := c.Write([]byte(testOutput)); err != nil { + return + } } }() diff --git a/plugins/inputs/disque/sample.conf b/plugins/inputs/disque/sample.conf new file mode 100644 index 
0000000000000..b83ae141d30f2 --- /dev/null +++ b/plugins/inputs/disque/sample.conf @@ -0,0 +1,7 @@ +# Read metrics from one or many disque servers +[[inputs.disque]] + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port and password. + ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. + ## If no servers are specified, then localhost is used as the host. + servers = ["localhost"] diff --git a/plugins/inputs/dmcache/README.md b/plugins/inputs/dmcache/README.md index 536d3f518bcaa..8953b86972326 100644 --- a/plugins/inputs/dmcache/README.md +++ b/plugins/inputs/dmcache/README.md @@ -1,46 +1,50 @@ # DMCache Input Plugin -This plugin provide a native collection for dmsetup based statistics for dm-cache. +This plugin provides a native collection for dmsetup based statistics for +dm-cache. -This plugin requires sudo, that is why you should setup and be sure that the telegraf is able to execute sudo without a password. +This plugin requires sudo, so make sure that the +telegraf user is able to execute sudo without a password. -`sudo /sbin/dmsetup status --target cache` is the full command that telegraf will run for debugging purposes. +`sudo /sbin/dmsetup status --target cache` is the full command that telegraf +will run for debugging purposes. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Provide a native collection for dmsetup based statistics for dm-cache [[inputs.dmcache]] ## Whether to report per-device stats or not per_device = true ``` -### Measurements & Fields: +## Metrics - dmcache - - length - - target - - metadata_blocksize - - metadata_used - - metadata_total - - cache_blocksize - - cache_used - - cache_total - - read_hits - - read_misses - - write_hits - - write_misses - - demotions - - promotions - - dirty - -### Tags: + - length + - target + - metadata_blocksize + - metadata_used + - metadata_total + - cache_blocksize + - cache_used + - cache_total + - read_hits + - read_misses + - write_hits + - write_misses + - demotions + - promotions + - dirty + +## Tags - All measurements have the following tags: - - device + - device -### Example Output: +## Example Output -``` +```shell $ ./telegraf --test --config /etc/telegraf/telegraf.conf --input-filter dmcache * Plugin: inputs.dmcache, Collection 1 > dmcache,device=example cache_blocksize=0i,read_hits=995134034411520i,read_misses=916807089127424i,write_hits=195107267543040i,metadata_used=12861440i,write_misses=563725346013184i,promotions=3265223720960i,dirty=0i,metadata_blocksize=0i,cache_used=1099511627776ii,cache_total=0i,length=0i,metadata_total=1073741824i,demotions=3265223720960i 1491482035000000000 diff --git a/plugins/inputs/dmcache/dmcache.go b/plugins/inputs/dmcache/dmcache.go index 25a398194edf8..1cfab4d263ade 100644 --- a/plugins/inputs/dmcache/dmcache.go +++ b/plugins/inputs/dmcache/dmcache.go @@ -1,28 +1,26 @@ +//go:generate ../../../tools/readme_config_includer/generator package dmcache import ( + _ "embed" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type DMCache struct { PerDevice bool `toml:"per_device"` getCurrentStatus func() ([]string, error) } -var sampleConfig = ` - ## Whether to report per-device stats or not - per_device = true -` - -func (c *DMCache) SampleConfig() string { +func (*DMCache) SampleConfig() string { return sampleConfig } -func (c *DMCache) Description() string { - return "Provide a native collection for dmsetup based statistics for dm-cache" -} - func init() { inputs.Add("dmcache", func() telegraf.Input { return &DMCache{ diff --git a/plugins/inputs/dmcache/dmcache_linux.go b/plugins/inputs/dmcache/dmcache_linux.go index 8e8d7de918560..349cb55a96c43 100644 --- a/plugins/inputs/dmcache/dmcache_linux.go +++ b/plugins/inputs/dmcache/dmcache_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmcache @@ -61,7 +62,7 @@ func (c *DMCache) Gather(acc telegraf.Accumulator) error { func parseDMSetupStatus(line string) (cacheStatus, error) { var err error - parseError := errors.New("Output from dmsetup could not be parsed") + parseError := errors.New("output from dmsetup could not be parsed") status := cacheStatus{} values := strings.Fields(line) if len(values) < 15 { diff --git a/plugins/inputs/dmcache/dmcache_linux_test.go b/plugins/inputs/dmcache/dmcache_linux_test.go index 30e32b1e876a4..93cd1e85e79bb 100644 --- a/plugins/inputs/dmcache/dmcache_linux_test.go +++ b/plugins/inputs/dmcache/dmcache_linux_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package dmcache diff --git a/plugins/inputs/dmcache/dmcache_notlinux.go b/plugins/inputs/dmcache/dmcache_notlinux.go index ee1065638cab7..96aa0c65712ff 100644 --- a/plugins/inputs/dmcache/dmcache_notlinux.go +++ b/plugins/inputs/dmcache/dmcache_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package dmcache diff --git a/plugins/inputs/dmcache/sample.conf b/plugins/inputs/dmcache/sample.conf new file mode 100644 index 0000000000000..d21d2ba7e2888 --- /dev/null +++ b/plugins/inputs/dmcache/sample.conf @@ -0,0 +1,4 @@ +# Provide a native collection for dmsetup based statistics for dm-cache +[[inputs.dmcache]] + ## Whether to report per-device stats or not + per_device = true diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index dc8ddd90373e9..25b3f2577153a 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -1,9 +1,11 @@ # DNS Query Input Plugin -The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) +The DNS plugin gathers DNS query times in milliseconds - like +[Dig](https://en.wikipedia.org/wiki/Dig_\(command\)) -### Configuration: -```toml +## Configuration + +```toml @sample.conf # Query given DNS server and gives statistics [[inputs.dns_query]] ## servers to query @@ -26,7 +28,7 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi # timeout = 2 ``` -### Metrics: +## Metrics - dns_query - tags: @@ -40,8 +42,8 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi - result_code (int, success = 0, timeout = 1, error = 2) - rcode_value (int) +## Rcode Descriptions -### Rcode Descriptions |rcode_value|rcode|Description| |---|-----------|-----------------------------------| |0 | NoError | No Error | @@ -65,9 +67,8 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi |22 | BADTRUNC | Bad Truncation | |23 | BADCOOKIE | Bad/missing Server Cookie | +## Example Output 
-### Example Output: - -``` +```shell dns_query,domain=google.com,rcode=NOERROR,record_type=A,result=success,server=127.0.0.1 rcode_value=0i,result_code=0i,query_time_ms=0.13746 1550020750001000000 ``` diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index c5657277073c2..60e0712c488a7 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package dns_query import ( + _ "embed" "fmt" "net" "strconv" @@ -13,15 +15,19 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type ResultType uint64 const ( - Success ResultType = 0 - Timeout = 1 - Error = 2 + Success ResultType = iota + Timeout + Error ) -type DnsQuery struct { +type DNSQuery struct { // Domains or subdomains to query Domains []string @@ -41,35 +47,11 @@ type DnsQuery struct { Timeout int } -var sampleConfig = ` - ## servers to query - servers = ["8.8.8.8"] - - ## Network is the network protocol name. - # network = "udp" - - ## Domains or subdomains to query. - # domains = ["."] - - ## Query record type. - ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. - # record_type = "A" - - ## Dns server port. - # port = 53 - - ## Query timeout in seconds. - # timeout = 2 -` - -func (d *DnsQuery) SampleConfig() string { +func (*DNSQuery) SampleConfig() string { return sampleConfig } -func (d *DnsQuery) Description() string { - return "Query given DNS server and gives statistics" -} -func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { +func (d *DNSQuery) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup d.setDefaultValues() @@ -84,7 +66,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { "record_type": d.RecordType, } - dnsQueryTime, rcode, err := d.getDnsQueryTime(domain, server) + dnsQueryTime, rcode, err := d.getDNSQueryTime(domain, server) if rcode >= 0 { tags["rcode"] = dns.RcodeToString[rcode] fields["rcode_value"] = rcode @@ -110,7 +92,7 @@ func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { return nil } -func (d *DnsQuery) setDefaultValues() { +func (d *DNSQuery) setDefaultValues() { if d.Network == "" { d.Network = "udp" } @@ -133,7 +115,7 @@ func (d *DnsQuery) setDefaultValues() { } } -func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, error) { +func (d *DNSQuery) getDNSQueryTime(domain string, server string) (float64, int, error) { dnsQueryTime := float64(0) c := new(dns.Client) @@ -159,7 +141,7 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, return dnsQueryTime, r.Rcode, nil } -func (d *DnsQuery) parseRecordType() (uint16, error) { +func (d *DNSQuery) parseRecordType() (uint16, error) { var recordType uint16 var err error @@ -210,6 +192,6 @@ func setResult(result ResultType, fields map[string]interface{}, tags map[string func init() { inputs.Add("dns_query", func() telegraf.Input { - return &DnsQuery{} + return &DNSQuery{} }) } diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index 5a1379764cff0..2e57e2f7b07ba 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -4,11 +4,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/miekg/dns" - 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var servers = []string{"8.8.8.8"} @@ -18,26 +17,26 @@ func TestGathering(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } var acc testutil.Accumulator err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) - assert.NotEqual(t, 0, queryTime) + require.NotEqual(t, 0, queryTime) } func TestGatheringMxRecord(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -45,19 +44,19 @@ func TestGatheringMxRecord(t *testing.T) { dnsConfig.RecordType = "MX" err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) - assert.NotEqual(t, 0, queryTime) + require.NotEqual(t, 0, queryTime) } func TestGatheringRootDomain(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: []string{"."}, RecordType: "MX", @@ -67,11 +66,16 @@ func TestGatheringRootDomain(t *testing.T) { "server": "8.8.8.8", "domain": ".", "record_type": "MX", + "rcode": "NOERROR", + "result": "success", + } + fields := map[string]interface{}{ + "rcode_value": 0, + "result_code": uint64(0), } - fields := map[string]interface{}{} err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) @@ -84,7 +88,7 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -93,11 +97,16 @@ func TestMetricContainsServerAndDomainAndRecordTypeTags(t *testing.T) { "server": "8.8.8.8", "domain": "google.com", "record_type": "NS", + "rcode": "NOERROR", + "result": "success", + } + fields := map[string]interface{}{ + "rcode_value": 0, + "result_code": uint64(0), } - fields := map[string]interface{}{} err := acc.GatherError(dnsConfig.Gather) - assert.NoError(t, err) + require.NoError(t, err) metric, ok := acc.Get("dns_query") require.True(t, ok) queryTime, _ := metric.Fields["query_time_ms"].(float64) @@ -110,7 +119,7 @@ func TestGatheringTimeout(t *testing.T) { if testing.Short() { t.Skip("Skipping network-dependent test in short mode.") } - var dnsConfig = DnsQuery{ + var dnsConfig = DNSQuery{ Servers: servers, Domains: domains, } @@ -124,83 +133,83 @@ func TestGatheringTimeout(t *testing.T) { }() select { case err := <-channel: - assert.NoError(t, err) + require.NoError(t, err) case <-time.After(time.Second * 2): - assert.Fail(t, "DNS query did not timeout") + require.Fail(t, "DNS query did not timeout") } } func TestSettingDefaultValues(t *testing.T) { - dnsConfig := DnsQuery{} + dnsConfig := DNSQuery{} dnsConfig.setDefaultValues() - assert.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"") - 
assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") - assert.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") - assert.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") + require.Equal(t, []string{"."}, dnsConfig.Domains, "Default domain not equal \".\"") + require.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") + require.Equal(t, 53, dnsConfig.Port, "Default port number not equal 53") + require.Equal(t, 2, dnsConfig.Timeout, "Default timeout not equal 2") - dnsConfig = DnsQuery{Domains: []string{"."}} + dnsConfig = DNSQuery{Domains: []string{"."}} dnsConfig.setDefaultValues() - assert.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") + require.Equal(t, "NS", dnsConfig.RecordType, "Default record type not equal 'NS'") } func TestRecordTypeParser(t *testing.T) { - var dnsConfig = DnsQuery{} + var dnsConfig = DNSQuery{} var recordType uint16 dnsConfig.RecordType = "A" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeA, recordType) + require.Equal(t, dns.TypeA, recordType) dnsConfig.RecordType = "AAAA" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeAAAA, recordType) + require.Equal(t, dns.TypeAAAA, recordType) dnsConfig.RecordType = "ANY" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeANY, recordType) + require.Equal(t, dns.TypeANY, recordType) dnsConfig.RecordType = "CNAME" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeCNAME, recordType) + require.Equal(t, dns.TypeCNAME, recordType) dnsConfig.RecordType = "MX" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeMX, recordType) + require.Equal(t, dns.TypeMX, recordType) dnsConfig.RecordType = "NS" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeNS, recordType) + require.Equal(t, dns.TypeNS, recordType) dnsConfig.RecordType = "PTR" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypePTR, recordType) + require.Equal(t, dns.TypePTR, recordType) dnsConfig.RecordType = "SOA" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSOA, recordType) + require.Equal(t, dns.TypeSOA, recordType) dnsConfig.RecordType = "SPF" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSPF, recordType) + require.Equal(t, dns.TypeSPF, recordType) dnsConfig.RecordType = "SRV" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeSRV, recordType) + require.Equal(t, dns.TypeSRV, recordType) dnsConfig.RecordType = "TXT" recordType, _ = dnsConfig.parseRecordType() - assert.Equal(t, dns.TypeTXT, recordType) + require.Equal(t, dns.TypeTXT, recordType) } func TestRecordTypeParserError(t *testing.T) { - var dnsConfig = DnsQuery{} + var dnsConfig = DNSQuery{} var err error dnsConfig.RecordType = "nil" _, err = dnsConfig.parseRecordType() - assert.Error(t, err) + require.Error(t, err) } diff --git a/plugins/inputs/dns_query/sample.conf b/plugins/inputs/dns_query/sample.conf new file mode 100644 index 0000000000000..60ac2cc022986 --- /dev/null +++ b/plugins/inputs/dns_query/sample.conf @@ -0,0 +1,20 @@ +# Query given DNS server and gives statistics +[[inputs.dns_query]] + ## servers to query + servers = ["8.8.8.8"] + + ## Network is the network protocol name. + # network = "udp" + + ## Domains or subdomains to query. + # domains = ["."] + + ## Query record type. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. 
+  # record_type = "A"
+
+  ## DNS server port.
+  # port = 53
+
+  ## Query timeout in seconds.
+  # timeout = 2
diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md
index 95394c94e9c44..63a39577eaeba 100644
--- a/plugins/inputs/docker/README.md
+++ b/plugins/inputs/docker/README.md
@@ -3,12 +3,16 @@
 The docker plugin uses the Docker Engine API to gather metrics on running
 docker containers.
 
-The docker plugin uses the [Official Docker Client](https://github.com/moby/moby/tree/master/client)
-to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/).
+The docker plugin uses the [Official Docker Client][1] to gather stats from the
+[Engine API][2].
 
-### Configuration:
+[1]: https://github.com/moby/moby/tree/master/client
 
-```toml
+[2]: https://docs.docker.com/engine/api/v1.24/
+
+## Configuration
+
+```toml @sample.conf
 # Read metrics about docker containers
 [[inputs.docker]]
   ## Docker Endpoint
@@ -43,13 +47,30 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/)
   ## Timeout for docker list, info, and stats commands
   timeout = "5s"
 
-  ## Whether to report for each container per-device blkio (8:0, 8:1...) and
-  ## network (eth0, eth1, ...) stats or not
+  ## Whether to report for each container per-device blkio (8:0, 8:1...),
+  ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
+  ## Default value is 'true' for backwards compatibility; please set it to 'false' so that the 'perdevice_include' setting
+  ## is honored.
   perdevice = true
 
-  ## Whether to report for each container total blkio and network stats or not
+  ## Specifies for which classes a per-device metric should be issued
+  ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
+  ## Please note that this setting has no effect if 'perdevice' is set to 'true'
+  # perdevice_include = ["cpu"]
+
+  ## Whether to report for each container total blkio and network stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+  ## Default value is 'false' for backwards compatibility; please set it to 'true' so that the 'total_include' setting
+  ## is honored.
   total = false
 
+  ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+  ## Possible values are 'cpu', 'blkio' and 'network'
+  ## Total 'cpu' is reported directly by the Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+  ## Please note that this setting has no effect if 'total' is set to 'false'
+  # total_include = ["cpu", "blkio", "network"]
+
   ## docker labels to include and exclude as tags. Globs accepted.
   ## Note that an empty array for both will include all labels as tags
   docker_label_include = []
@@ -66,23 +87,29 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/)
   # insecure_skip_verify = false
 ```
 
-#### Environment Configuration
+### Environment Configuration
+
+When using the `"ENV"` endpoint, the connection is configured using the [cli
+Docker environment variables][3].
 
-When using the `"ENV"` endpoint, the connection is configured using the
-[cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).
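For readers wondering what the `"ENV"` endpoint above actually does: it defers connection setup to the Docker client's environment-driven constructor, which the client.go hunks further down wire up via `dockerClient.FromEnv`. A self-contained sketch of that construction path (FromEnv reads `DOCKER_HOST`, `DOCKER_TLS_VERIFY`, `DOCKER_CERT_PATH` and `DOCKER_API_VERSION`):

```go
package main

import (
	"context"
	"fmt"

	dockerClient "github.com/docker/docker/client"
)

func main() {
	// FromEnv configures the client from the standard Docker environment
	// variables, which is what endpoint = "ENV" selects in telegraf.
	cli, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv)
	if err != nil {
		fmt.Println("client construction failed:", err)
		return
	}
	defer cli.Close()

	info, err := cli.Info(context.Background())
	if err != nil {
		fmt.Println("no docker daemon reachable:", err)
		return
	}
	fmt.Println("server version:", info.ServerVersion)
}
```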
+[3]: https://godoc.org/github.com/moby/moby/client#NewEnvClient
 
-#### Security
+### Security
 
-Giving telegraf access to the Docker daemon expands the [attack surface](https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface) that could result in an attacker gaining root access to a machine. This is especially relevant if the telegraf configuration can be changed by untrusted users.
+Giving telegraf access to the Docker daemon expands the [attack surface][4] that
+could result in an attacker gaining root access to a machine. This is especially
+relevant if the telegraf configuration can be changed by untrusted users.
 
-#### Docker Daemon Permissions
+[4]: https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface
+
+### Docker Daemon Permissions
 
 Typically, telegraf must be given permission to access the docker daemon unix
 socket when using the default endpoint. This can be done by adding the
 `telegraf` unix user (created when installing a Telegraf package) to the
 `docker` unix group with the following command:
 
-```
+```shell
 sudo usermod -aG docker telegraf
 ```
 
@@ -91,38 +118,57 @@
 within the telegraf container. This can be done in the docker CLI by adding
 the option `-v /var/run/docker.sock:/var/run/docker.sock` or adding the
 following lines to the telegraf container definition in a docker compose file:
 
-```
+```yaml
 volumes:
   - /var/run/docker.sock:/var/run/docker.sock
 ```
 
-#### source tag
+### source tag
 
-Selecting the containers measurements can be tricky if you have many containers with the same name.
-To alleviate this issue you can set the below value to `true`
+Selecting the containers' measurements can be tricky if you have many containers
+with the same name. To alleviate this issue you can set the below value to
+`true`
 
 ```toml
 source_tag = true
 ```
 
-This will cause all measurements to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker.
+This will cause all measurements to have the `source` tag set to the first 12
+characters of the container id. The first 12 characters are the common hostname
+for containers that have no explicit hostname set, as defined by docker.
 
-#### Kubernetes Labels
+### Kubernetes Labels
 
 Kubernetes may add many labels to your containers; if they are not needed you
 may prefer to exclude them:
 
-```
+
+```toml
 docker_label_exclude = ["annotation.kubernetes*"]
 ```
 
-### Metrics:
+### Docker-compose Labels
+
+Docker-compose will add labels to your containers. You can restrict labels
+to selected ones, e.g.
+
+```toml
+  docker_label_include = [
+    "com.docker.compose.config-hash",
+    "com.docker.compose.container-number",
+    "com.docker.compose.oneoff",
+    "com.docker.compose.project",
+    "com.docker.compose.service",
+  ]
+```
+
+## Metrics
 
 - docker
   - tags:
     - unit
     - engine_host
     - server_version
-  + fields:
+  - fields:
     - n_used_file_descriptors
     - n_cpus
     - n_containers
- unit - engine_host - server_version - + fields: + - fields: - available - total - used -The above measurements for the devicemapper storage driver can now be found in the new `docker_devicemapper` measurement +The above measurements for the devicemapper storage driver can now be found in +the new `docker_devicemapper` measurement - docker_devicemapper - tags: - engine_host - server_version - pool_name - + fields: + - fields: - pool_blocksize_bytes - data_space_used_bytes - data_space_total_bytes @@ -175,7 +222,7 @@ The above measurements for the devicemapper storage driver can now be found in t - metadata_space_available_bytes - thin_pool_minimum_free_space_bytes -+ docker_container_mem +- docker_container_mem - tags: - engine_host - server_version @@ -183,7 +230,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_name - container_status - container_version - + fields: + - fields: - total_pgmajfault - cache - mapped_file @@ -228,7 +275,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_status - container_version - cpu - + fields: + - fields: - throttling_periods - throttling_throttled_periods - throttling_throttled_time @@ -239,7 +286,7 @@ The above measurements for the devicemapper storage driver can now be found in t - usage_percent - container_id -+ docker_container_net +- docker_container_net - tags: - engine_host - server_version @@ -248,7 +295,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_status - container_version - network - + fields: + - fields: - rx_dropped - rx_bytes - rx_errors @@ -294,8 +341,8 @@ status if configured. - container_status - container_version - fields: - - health_status (string) - - failing_streak (integer) + - health_status (string) + - failing_streak (integer) - docker_container_status - tags: @@ -323,9 +370,9 @@ status if configured. 
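Before the example output, one note on the `source` tag described earlier: it is a plain truncation of the container ID to Docker's default 12-character hostname. A standalone sketch in the spirit of the plugin's `hostnameFromID` helper, which is exercised by `TestHostnameFromID` further down (the length guard here is an assumption for short IDs):

```go
package main

import "fmt"

// hostnameFromID returns the first 12 characters of a container ID; Docker
// uses the same prefix as the default container hostname.
func hostnameFromID(id string) string {
	if len(id) > 12 {
		return id[:12]
	}
	return id
}

func main() {
	// ID taken from the example output below.
	fmt.Println(hostnameFromID("adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4"))
	// adc4ba959387 -- the value the source tag would carry
}
```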
- tasks_desired - tasks_running -### Example Output: +## Example Output -``` +```shell docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce n_containers=6i,n_containers_paused=0i,n_containers_running=1i,n_containers_stopped=5i,n_cpus=2i,n_goroutines=41i,n_images=2i,n_listener_events=0i,n_used_file_descriptors=27i 1524002041000000000 docker,engine_host=debian-stretch-docker,server_version=17.09.0-ce,unit=bytes memory_total=2101661696i 1524002041000000000 docker_container_mem,container_image=telegraf,container_name=zen_ritchie,container_status=running,container_version=unknown,engine_host=debian-stretch-docker,server_version=17.09.0-ce active_anon=8327168i,active_file=2314240i,cache=27402240i,container_id="adc4ba9593871bf2ab95f3ffde70d1b638b897bb225d21c2c9c84226a10a8cf4",hierarchical_memory_limit=9223372036854771712i,inactive_anon=0i,inactive_file=25088000i,limit=2101661696i,mapped_file=20582400i,max_usage=36646912i,pgfault=4193i,pgmajfault=214i,pgpgin=9243i,pgpgout=520i,rss=8327168i,rss_huge=0i,total_active_anon=8327168i,total_active_file=2314240i,total_cache=27402240i,total_inactive_anon=0i,total_inactive_file=25088000i,total_mapped_file=20582400i,total_pgfault=4193i,total_pgmajfault=214i,total_pgpgin=9243i,total_pgpgout=520i,total_rss=8327168i,total_rss_huge=0i,total_unevictable=0i,total_writeback=0i,unevictable=0i,usage=36528128i,usage_percent=0.4342225020025297,writeback=0i 1524002042000000000 diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go index 3ea24ea742530..5c66b55d7581f 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -7,11 +7,11 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - docker "github.com/docker/docker/client" + dockerClient "github.com/docker/docker/client" ) var ( - version = "1.21" // 1.24 is when server first started returning its version + version = "1.24" // https://docs.docker.com/engine/api/ defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"} ) @@ -23,10 +23,11 @@ type Client interface { ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + Close() error } func NewEnvClient() (Client, error) { - client, err := docker.NewClientWithOpts(docker.FromEnv) + client, err := dockerClient.NewClientWithOpts(dockerClient.FromEnv) if err != nil { return nil, err } @@ -39,11 +40,11 @@ func NewClient(host string, tlsConfig *tls.Config) (Client, error) { } httpClient := &http.Client{Transport: transport} - client, err := docker.NewClientWithOpts( - docker.WithHTTPHeaders(defaultHeaders), - docker.WithHTTPClient(httpClient), - docker.WithVersion(version), - docker.WithHost(host)) + client, err := dockerClient.NewClientWithOpts( + dockerClient.WithHTTPHeaders(defaultHeaders), + dockerClient.WithHTTPClient(httpClient), + dockerClient.WithVersion(version), + dockerClient.WithHost(host)) if err != nil { return nil, err } @@ -52,7 +53,7 @@ func NewClient(host string, tlsConfig *tls.Config) (Client, error) { } type SocketClient struct { - client *docker.Client + client *dockerClient.Client } func (c *SocketClient) Info(ctx context.Context) (types.Info, error) { @@ -76,3 +77,6 @@ func (c *SocketClient) TaskList(ctx context.Context, options types.TaskListOptio func (c *SocketClient) NodeList(ctx context.Context, options types.NodeListOptions) 
([]swarm.Node, error) { return c.client.NodeList(ctx, options) } +func (c *SocketClient) Close() error { + return c.client.Close() +} diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index dafedacafb3f1..4ecc448e2b9a0 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -1,12 +1,13 @@ +//go:generate ../../../tools/readme_config_includer/generator package docker import ( "context" "crypto/tls" + _ "embed" "encoding/json" "fmt" "io" - "net/http" "regexp" "strconv" "strings" @@ -16,27 +17,35 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/docker" + "github.com/influxdata/telegraf/internal/choice" + dockerint "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Docker object type Docker struct { Endpoint string - ContainerNames []string // deprecated in 1.4; use container_name_include + ContainerNames []string `toml:"container_names" deprecated:"1.4.0;use 'container_name_include' instead"` GatherServices bool `toml:"gather_services"` - Timeout internal.Duration - PerDevice bool `toml:"perdevice"` - Total bool `toml:"total"` - TagEnvironment []string `toml:"tag_env"` - LabelInclude []string `toml:"docker_label_include"` - LabelExclude []string `toml:"docker_label_exclude"` + Timeout config.Duration + PerDevice bool `toml:"perdevice" deprecated:"1.18.0;use 'perdevice_include' instead"` + PerDeviceInclude []string `toml:"perdevice_include"` + Total bool `toml:"total" deprecated:"1.18.0;use 'total_include' instead"` + TotalInclude []string `toml:"total_include"` + TagEnvironment []string `toml:"tag_env"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` ContainerInclude []string `toml:"container_name_include"` ContainerExclude []string `toml:"container_name_exclude"` @@ -54,7 +63,6 @@ type Docker struct { newClient func(string, *tls.Config) (Client, error) client Client - httpClient *http.Client engineHost string serverVersion string filtersCreated bool @@ -75,70 +83,47 @@ const ( ) var ( - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) - containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} - now = time.Now + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) + containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} + containerMetricClasses = []string{"cpu", "network", "blkio"} + now = time.Now ) -var sampleConfig = ` - ## Docker Endpoint - ## To use TCP, set endpoint = "tcp://[ip]:[port]" - ## To use environment variables (ie, docker-machine), set endpoint = "ENV" - endpoint = "unix:///var/run/docker.sock" - - ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) - gather_services = false - - ## Only collect metrics for these containers, collect all if empty - container_names = [] - - ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars - source_tag = 
false - - ## Containers to include and exclude. Globs accepted. - ## Note that an empty array for both will include all containers - container_name_include = [] - container_name_exclude = [] - - ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. - ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] - ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] - # container_state_include = [] - # container_state_exclude = [] - - ## Timeout for docker list, info, and stats commands - timeout = "5s" - - ## Whether to report for each container per-device blkio (8:0, 8:1...) and - ## network (eth0, eth1, ...) stats or not - perdevice = true - - ## Whether to report for each container total blkio and network stats or not - total = false - - ## Which environment variables should we use as a tag - ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] - - ## docker labels to include and exclude as tags. Globs accepted. - ## Note that an empty array for both will include all labels as tags - docker_label_include = [] - docker_label_exclude = [] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -// SampleConfig returns the default Docker TOML configuration. -func (d *Docker) SampleConfig() string { return sampleConfig } - -// Description the metrics returned. -func (d *Docker) Description() string { - return "Read metrics about docker containers" +func (*Docker) SampleConfig() string { + return sampleConfig +} + +func (d *Docker) Init() error { + err := choice.CheckSlice(d.PerDeviceInclude, containerMetricClasses) + if err != nil { + return fmt.Errorf("error validating 'perdevice_include' setting : %v", err) + } + + err = choice.CheckSlice(d.TotalInclude, containerMetricClasses) + if err != nil { + return fmt.Errorf("error validating 'total_include' setting : %v", err) + } + + // Temporary logic needed for backwards compatibility until 'perdevice' setting is removed. + if d.PerDevice { + if !choice.Contains("network", d.PerDeviceInclude) { + d.PerDeviceInclude = append(d.PerDeviceInclude, "network") + } + if !choice.Contains("blkio", d.PerDeviceInclude) { + d.PerDeviceInclude = append(d.PerDeviceInclude, "blkio") + } + } + + // Temporary logic needed for backwards compatibility until 'total' setting is removed. + if !d.Total { + if choice.Contains("cpu", d.TotalInclude) { + d.TotalInclude = []string{"cpu"} + } else { + d.TotalInclude = []string{} + } + } + + return nil } // Gather metrics from the docker server. 
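To make the compatibility shim in `Init()` above concrete, here is a self-contained sketch of the same mapping, seeded with the defaults registered in this file's `init()` (perdevice = true, perdevice_include = ["cpu"], total = false, total_include = ["cpu", "blkio", "network"]); the `contains` helper stands in for telegraf's internal `choice.Contains`:

```go
package main

import "fmt"

func contains(s string, list []string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

// initCompat mirrors the temporary logic in Init(): the legacy booleans
// override the new include lists until the deprecated settings are removed.
func initCompat(perDevice bool, perDeviceInclude []string, total bool, totalInclude []string) ([]string, []string) {
	// perdevice = true forces the legacy per-device classes back on.
	if perDevice {
		for _, class := range []string{"network", "blkio"} {
			if !contains(class, perDeviceInclude) {
				perDeviceInclude = append(perDeviceInclude, class)
			}
		}
	}
	// total = false strips total_include down to at most "cpu".
	if !total {
		if contains("cpu", totalInclude) {
			totalInclude = []string{"cpu"}
		} else {
			totalInclude = []string{}
		}
	}
	return perDeviceInclude, totalInclude
}

func main() {
	pd, tot := initCompat(true, []string{"cpu"}, false, []string{"cpu", "blkio", "network"})
	fmt.Println(pd, tot) // [cpu network blkio] [cpu] -- consistent with the TestDocker_Init cases below
}
```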
@@ -151,6 +136,9 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { d.client = c } + // Close any idle connections in the end of gathering + defer d.client.Close() + // Create label filters if not already created if !d.filtersCreated { err := d.createLabelFilters() @@ -197,7 +185,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { opts := types.ContainerListOptions{ Filters: filterArgs, } - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() containers, err := d.client.ContainerList(ctx, opts) @@ -225,7 +213,7 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) @@ -248,7 +236,7 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { } running := map[string]int{} - tasksNoShutdown := map[string]int{} + tasksNoShutdown := map[string]uint64{} activeNodes := make(map[string]struct{}) for _, n := range nodes { @@ -302,7 +290,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { now := time.Now() // Get info from docker daemon - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() info, err := d.client.Info(ctx) @@ -350,7 +338,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { ) for _, rawData := range info.DriverStatus { - name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1)) + name := strings.ToLower(strings.ReplaceAll(rawData[0], " ", "_")) if name == "pool_name" { poolName = rawData[1] continue @@ -434,8 +422,7 @@ func (d *Docker) gatherContainer( var cname string for _, name := range container.Names { trimmedName := strings.TrimPrefix(name, "/") - match := d.containerFilter.Match(trimmedName) - if match { + if !strings.Contains(trimmedName, "/") { cname = trimmedName break } @@ -445,7 +432,11 @@ func (d *Docker) gatherContainer( return nil } - imageName, imageVersion := docker.ParseImage(container.Image) + if !d.containerFilter.Match(cname) { + return nil + } + + imageName, imageVersion := dockerint.ParseImage(container.Image) tags := map[string]string{ "engine_host": d.engineHost, @@ -459,7 +450,7 @@ func (d *Docker) gatherContainer( tags["source"] = hostnameFromID(container.ID) } - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() r, err := d.client.ContainerStats(ctx, container.ID, false) @@ -480,11 +471,6 @@ func (d *Docker) gatherContainer( } daemonOSType := r.OSType - // use common (printed at `docker ps`) name for container - if v.Name != "" { - tags["container_name"] = strings.TrimPrefix(v.Name, "/") - } - // Add labels to tags for k, label := range container.Labels { if d.labelFilter.Match(k) { @@ -502,7 +488,7 @@ func (d *Docker) gatherContainerInspect( daemonOSType string, v *types.StatsJSON, ) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(d.Timeout)) defer cancel() info, err := 
d.client.ContainerInspect(ctx, container.ID) @@ -518,7 +504,7 @@ func (d *Docker) gatherContainerInspect( for _, envvar := range info.Config.Env { for _, configvar := range d.TagEnvironment { dockEnv := strings.SplitN(envvar, "=", 2) - //check for presence of tag in whitelist + // check for presence of tag in whitelist if len(dockEnv) == 2 && len(strings.TrimSpace(dockEnv[1])) != 0 && configvar == dockEnv[0] { tags[dockEnv[0]] = dockEnv[1] } @@ -565,18 +551,16 @@ func (d *Docker) gatherContainerInspect( } } - parseContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType) + d.parseContainerStats(v, acc, tags, container.ID, daemonOSType) return nil } -func parseContainerStats( +func (d *Docker) parseContainerStats( stat *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, id string, - perDevice bool, - total bool, daemonOSType string, ) { tm := stat.Read @@ -645,48 +629,52 @@ func parseContainerStats( acc.AddFields("docker_container_mem", memfields, tags, tm) - cpufields := map[string]interface{}{ - "usage_total": stat.CPUStats.CPUUsage.TotalUsage, - "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, - "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, - "usage_system": stat.CPUStats.SystemUsage, - "throttling_periods": stat.CPUStats.ThrottlingData.Periods, - "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, - "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, - "container_id": id, - } + if choice.Contains("cpu", d.TotalInclude) { + cpufields := map[string]interface{}{ + "usage_total": stat.CPUStats.CPUUsage.TotalUsage, + "usage_in_usermode": stat.CPUStats.CPUUsage.UsageInUsermode, + "usage_in_kernelmode": stat.CPUStats.CPUUsage.UsageInKernelmode, + "usage_system": stat.CPUStats.SystemUsage, + "throttling_periods": stat.CPUStats.ThrottlingData.Periods, + "throttling_throttled_periods": stat.CPUStats.ThrottlingData.ThrottledPeriods, + "throttling_throttled_time": stat.CPUStats.ThrottlingData.ThrottledTime, + "container_id": id, + } + + if daemonOSType != "windows" { + previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage + previousSystem := stat.PreCPUStats.SystemUsage + cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat) + cpufields["usage_percent"] = cpuPercent + } else { + cpuPercent := calculateCPUPercentWindows(stat) + cpufields["usage_percent"] = cpuPercent + } - if daemonOSType != "windows" { - previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage - previousSystem := stat.PreCPUStats.SystemUsage - cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat) - cpufields["usage_percent"] = cpuPercent - } else { - cpuPercent := calculateCPUPercentWindows(stat) - cpufields["usage_percent"] = cpuPercent + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + acc.AddFields("docker_container_cpu", cpufields, cputags, tm) } - cputags := copyTags(tags) - cputags["cpu"] = "cpu-total" - acc.AddFields("docker_container_cpu", cpufields, cputags, tm) - - // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs - // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) - var percpuusage []uint64 - if stat.CPUStats.OnlineCPUs > 0 { - percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs] - } else { - percpuusage = stat.CPUStats.CPUUsage.PercpuUsage - } + if choice.Contains("cpu", d.PerDeviceInclude) && len(stat.CPUStats.CPUUsage.PercpuUsage) > 0 { + // If we have OnlineCPUs 
field, then use it to restrict stats gathering to only Online CPUs + // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) + var percpuusage []uint64 + if stat.CPUStats.OnlineCPUs > 0 { + percpuusage = stat.CPUStats.CPUUsage.PercpuUsage[:stat.CPUStats.OnlineCPUs] + } else { + percpuusage = stat.CPUStats.CPUUsage.PercpuUsage + } - for i, percpu := range percpuusage { - percputags := copyTags(tags) - percputags["cpu"] = fmt.Sprintf("cpu%d", i) - fields := map[string]interface{}{ - "usage_total": percpu, - "container_id": id, + for i, percpu := range percpuusage { + percputags := copyTags(tags) + percputags["cpu"] = fmt.Sprintf("cpu%d", i) + fields := map[string]interface{}{ + "usage_total": percpu, + "container_id": id, + } + acc.AddFields("docker_container_cpu", fields, percputags, tm) } - acc.AddFields("docker_container_cpu", fields, percputags, tm) } totalNetworkStatMap := make(map[string]interface{}) @@ -703,12 +691,12 @@ func parseContainerStats( "container_id": id, } // Create a new network tag dictionary for the "network" tag - if perDevice { + if choice.Contains("network", d.PerDeviceInclude) { nettags := copyTags(tags) nettags["network"] = network acc.AddFields("docker_container_net", netfields, nettags, tm) } - if total { + if choice.Contains("network", d.TotalInclude) { for field, value := range netfields { if field == "container_id" { continue @@ -735,27 +723,18 @@ func parseContainerStats( } // totalNetworkStatMap could be empty if container is running with --net=host. - if total && len(totalNetworkStatMap) != 0 { + if choice.Contains("network", d.TotalInclude) && len(totalNetworkStatMap) != 0 { nettags := copyTags(tags) nettags["network"] = "total" totalNetworkStatMap["container_id"] = id acc.AddFields("docker_container_net", totalNetworkStatMap, nettags, tm) } - gatherBlockIOMetrics(stat, acc, tags, tm, id, perDevice, total) + d.gatherBlockIOMetrics(acc, stat, tags, tm, id) } -func gatherBlockIOMetrics( - stat *types.StatsJSON, - acc telegraf.Accumulator, - tags map[string]string, - tm time.Time, - id string, - perDevice bool, - total bool, -) { - blkioStats := stat.BlkioStats - // Make a map of devices to their block io stats +// Make a map of devices to their block io stats +func getDeviceStatMap(blkioStats types.BlkioStats) map[string]map[string]interface{} { deviceStatMap := make(map[string]map[string]interface{}) for _, metric := range blkioStats.IoServiceBytesRecursive { @@ -813,16 +792,30 @@ func gatherBlockIOMetrics( device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) deviceStatMap[device]["sectors_recursive"] = metric.Value } + return deviceStatMap +} + +func (d *Docker) gatherBlockIOMetrics( + acc telegraf.Accumulator, + stat *types.StatsJSON, + tags map[string]string, + tm time.Time, + id string, +) { + perDeviceBlkio := choice.Contains("blkio", d.PerDeviceInclude) + totalBlkio := choice.Contains("blkio", d.TotalInclude) + blkioStats := stat.BlkioStats + deviceStatMap := getDeviceStatMap(blkioStats) totalStatMap := make(map[string]interface{}) for device, fields := range deviceStatMap { fields["container_id"] = id - if perDevice { + if perDeviceBlkio { iotags := copyTags(tags) iotags["device"] = device acc.AddFields("docker_container_blkio", fields, iotags, tm) } - if total { + if totalBlkio { for field, value := range fields { if field == "container_id" { continue @@ -847,7 +840,7 @@ func gatherBlockIOMetrics( } } } - if total { + if totalBlkio { totalStatMap["container_id"] = id iotags := copyTags(tags) iotags["device"] = "total" 
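The "total" blkio point produced above is a field-wise sum over the per-device map; a reduced standalone sketch of that aggregation (device keys and values are illustrative stand-ins for what `getDeviceStatMap` builds):

```go
package main

import "fmt"

func main() {
	// Per-device blkio fields keyed by "major:minor", as getDeviceStatMap
	// returns them (values made up for the example).
	deviceStatMap := map[string]map[string]interface{}{
		"6:0": {"io_service_bytes_recursive_read": uint64(100)},
		"6:1": {"io_service_bytes_recursive_read": uint64(50)},
	}

	// Sum every field across devices into a synthetic "total" device,
	// mirroring the loop guarded by totalBlkio above.
	totalStatMap := make(map[string]interface{})
	for _, fields := range deviceStatMap {
		for field, value := range fields {
			v := value.(uint64)
			if existing, ok := totalStatMap[field].(uint64); ok {
				totalStatMap[field] = existing + v
			} else {
				totalStatMap[field] = v
			}
		}
	}

	fmt.Println(totalStatMap["io_service_bytes_recursive_read"]) // 150
}
```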
@@ -863,15 +856,6 @@ func copyTags(in map[string]string) map[string]string { return out } -func sliceContains(in string, sl []string) bool { - for _, str := range sl { - if str == in { - return true - } - } - return false -} - // Parses the human-readable size string into the amount it represents. func parseSize(sizeStr string) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) @@ -899,20 +883,20 @@ func (d *Docker) createContainerFilters() error { d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...) } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + containerFilter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) if err != nil { return err } - d.containerFilter = filter + d.containerFilter = containerFilter return nil } func (d *Docker) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) if err != nil { return err } - d.labelFilter = filter + d.labelFilter = labelFilter return nil } @@ -920,11 +904,11 @@ func (d *Docker) createContainerStateFilters() error { if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { d.ContainerStateInclude = []string{"running"} } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) + stateFilter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) if err != nil { return err } - d.stateFilter = filter + d.stateFilter = stateFilter return nil } @@ -944,12 +928,14 @@ func (d *Docker) getNewClient() (Client, error) { func init() { inputs.Add("docker", func() telegraf.Input { return &Docker{ - PerDevice: true, - Timeout: internal.Duration{Duration: time.Second * 5}, - Endpoint: defaultEndpoint, - newEnvClient: NewEnvClient, - newClient: NewClient, - filtersCreated: false, + PerDevice: true, + PerDeviceInclude: []string{"cpu"}, + TotalInclude: []string{"cpu", "blkio", "network"}, + Timeout: config.Duration(time.Second * 5), + Endpoint: defaultEndpoint, + newEnvClient: NewEnvClient, + newClient: NewClient, + filtersCreated: false, } }) } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index d8700217c307d..2241758ac777a 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -3,7 +3,8 @@ package docker import ( "context" "crypto/tls" - "io/ioutil" + "io" + "reflect" "sort" "strings" "testing" @@ -11,9 +12,11 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type MockClient struct { @@ -24,6 +27,7 @@ type MockClient struct { ServiceListF func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) TaskListF func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) NodeListF func(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + CloseF func() error } func (c *MockClient) Info(ctx context.Context) (types.Info, error) { @@ -73,6 +77,10 @@ func (c *MockClient) NodeList( return c.NodeListF(ctx, options) } +func (c *MockClient) Close() error { + return c.CloseF() +} + var baseClient = MockClient{ InfoF: func(context.Context) (types.Info, 
error) { return info, nil @@ -95,9 +103,12 @@ var baseClient = MockClient{ NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { return NodeList, nil }, + CloseF: func() error { + return nil + }, } -func newClient(host string, tlsConfig *tls.Config) (Client, error) { +func newClient(_ string, _ *tls.Config) (Client, error) { return &baseClient, nil } @@ -110,7 +121,12 @@ func TestDockerGatherContainerStats(t *testing.T) { "container_image": "redis/image", } - parseContainerStats(stats, &acc, tags, "123456789", true, true, "linux") + d := &Docker{ + Log: testutil.Logger{}, + PerDeviceInclude: containerMetricClasses, + TotalInclude: containerMetricClasses, + } + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") // test docker_container_net measurement netfields := map[string]interface{}{ @@ -249,6 +265,162 @@ func TestDockerGatherContainerStats(t *testing.T) { acc.AssertDoesNotContainsTaggedFields(t, "docker_container_cpu", cpu3fields, cputags) } +func TestDockerMemoryExcludesCache(t *testing.T) { + var acc testutil.Accumulator + stats := testStats() + + tags := map[string]string{ + "container_name": "redis", + "container_image": "redis/image", + } + + d := &Docker{ + Log: testutil.Logger{}, + } + + delete(stats.MemoryStats.Stats, "cache") + delete(stats.MemoryStats.Stats, "inactive_file") + delete(stats.MemoryStats.Stats, "total_inactive_file") + + // set cgroup v2 cache value + stats.MemoryStats.Stats["inactive_file"] = 9 + + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") + + // test docker_container_mem measurement + memfields := map[string]interface{}{ + "active_anon": uint64(0), + "active_file": uint64(1), + "container_id": "123456789", + "fail_count": uint64(1), + "hierarchical_memory_limit": uint64(0), + "inactive_anon": uint64(0), + "inactive_file": uint64(9), + "limit": uint64(2000), + "mapped_file": uint64(0), + "max_usage": uint64(1001), + "pgfault": uint64(2), + "pgmajfault": uint64(0), + "pgpgin": uint64(0), + "pgpgout": uint64(0), + "rss_huge": uint64(0), + "rss": uint64(0), + "total_active_anon": uint64(0), + "total_active_file": uint64(0), + "total_cache": uint64(0), + "total_inactive_anon": uint64(0), + "total_mapped_file": uint64(0), + "total_pgfault": uint64(0), + "total_pgmajfault": uint64(0), + "total_pgpgin": uint64(4), + "total_pgpgout": uint64(0), + "total_rss_huge": uint64(444), + "total_rss": uint64(44), + "total_unevictable": uint64(0), + "total_writeback": uint64(55), + "unevictable": uint64(0), + "usage_percent": float64(55.1), // 1102 / 2000 + "usage": uint64(1102), + "writeback": uint64(0), + } + + acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags) + acc.ClearMetrics() + + // set cgroup v1 cache value (has priority over cgroups v2) + stats.MemoryStats.Stats["total_inactive_file"] = 7 + + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") + + // test docker_container_mem measurement + memfields = map[string]interface{}{ + "active_anon": uint64(0), + "active_file": uint64(1), + // "cache": uint64(0), + "container_id": "123456789", + "fail_count": uint64(1), + "hierarchical_memory_limit": uint64(0), + "inactive_anon": uint64(0), + "inactive_file": uint64(9), + "limit": uint64(2000), + "mapped_file": uint64(0), + "max_usage": uint64(1001), + "pgfault": uint64(2), + "pgmajfault": uint64(0), + "pgpgin": uint64(0), + "pgpgout": uint64(0), + "rss_huge": uint64(0), + "rss": uint64(0), + "total_active_anon": uint64(0), + "total_active_file": uint64(0), + "total_cache": uint64(0), + 
"total_inactive_anon": uint64(0), + "total_inactive_file": uint64(7), + "total_mapped_file": uint64(0), + "total_pgfault": uint64(0), + "total_pgmajfault": uint64(0), + "total_pgpgin": uint64(4), + "total_pgpgout": uint64(0), + "total_rss_huge": uint64(444), + "total_rss": uint64(44), + "total_unevictable": uint64(0), + "total_writeback": uint64(55), + "unevictable": uint64(0), + "usage_percent": float64(55.2), // 1104 / 2000 + "usage": uint64(1104), + "writeback": uint64(0), + } + + acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags) + acc.ClearMetrics() + + // set Docker 19.03 and older cache value (has priority over cgroups v1 and v2) + stats.MemoryStats.Stats["cache"] = 16 + + d.parseContainerStats(stats, &acc, tags, "123456789", "linux") + + // test docker_container_mem measurement + memfields = map[string]interface{}{ + "active_anon": uint64(0), + "active_file": uint64(1), + "cache": uint64(16), + "container_id": "123456789", + "fail_count": uint64(1), + "hierarchical_memory_limit": uint64(0), + "inactive_anon": uint64(0), + "inactive_file": uint64(9), + "limit": uint64(2000), + "mapped_file": uint64(0), + "max_usage": uint64(1001), + "pgfault": uint64(2), + "pgmajfault": uint64(0), + "pgpgin": uint64(0), + "pgpgout": uint64(0), + "rss_huge": uint64(0), + "rss": uint64(0), + "total_active_anon": uint64(0), + "total_active_file": uint64(0), + "total_cache": uint64(0), + "total_inactive_anon": uint64(0), + "total_inactive_file": uint64(7), + "total_mapped_file": uint64(0), + "total_pgfault": uint64(0), + "total_pgmajfault": uint64(0), + "total_pgpgin": uint64(4), + "total_pgpgout": uint64(0), + "total_rss_huge": uint64(444), + "total_rss": uint64(44), + "total_unevictable": uint64(0), + "total_writeback": uint64(55), + "unevictable": uint64(0), + "usage_percent": float64(54.75), // 1095 / 2000 + "usage": uint64(1095), + "writeback": uint64(0), + } + + acc.AssertContainsTaggedFields(t, "docker_container_mem", memfields, tags) +} + func TestDocker_WindowsMemoryContainerStats(t *testing.T) { var acc testutil.Accumulator @@ -277,6 +449,9 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { return NodeList, nil }, + CloseF: func() error { + return nil + }, }, nil }, } @@ -396,6 +571,8 @@ func TestContainerLabels(t *testing.T) { newClient: newClientFunc, LabelInclude: tt.include, LabelExclude: tt.exclude, + Total: true, + TotalInclude: []string{"cpu"}, } err := d.Gather(&acc) @@ -751,6 +928,9 @@ func TestDockerGatherInfo(t *testing.T) { newClient: newClient, TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5", "ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"}, + PerDeviceInclude: []string{"cpu", "network", "blkio"}, + Total: true, + TotalInclude: []string{""}, } err := acc.GatherError(d.Gather) @@ -904,7 +1084,7 @@ func TestDockerGatherSwarmInfo(t *testing.T) { err := acc.GatherError(d.Gather) require.NoError(t, err) - d.gatherSwarmInfo(&acc) + require.NoError(t, d.gatherSwarmInfo(&acc)) // test docker_container_net measurement acc.AssertContainsTaggedFields(t, @@ -924,7 +1104,7 @@ func TestDockerGatherSwarmInfo(t *testing.T) { "docker_swarm", map[string]interface{}{ "tasks_running": int(1), - "tasks_desired": int(1), + "tasks_desired": uint64(1), }, map[string]string{ "service_id": "qolkls9g5iasdiuihcyz9rn3", @@ -1036,7 +1216,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) 
(types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{"name": "logspout"}`)), + Body: io.NopCloser(strings.NewReader(`{"name": "logspout"}`)), }, nil } return &client, nil @@ -1056,7 +1236,7 @@ func TestContainerName(t *testing.T) { } client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { return types.ContainerStats{ - Body: ioutil.NopCloser(strings.NewReader(`{}`)), + Body: io.NopCloser(strings.NewReader(`{}`)), }, nil } return &client, nil @@ -1115,5 +1295,245 @@ func TestHostnameFromID(t *testing.T) { } }) } +} +func Test_parseContainerStatsPerDeviceAndTotal(t *testing.T) { + type args struct { + stat *types.StatsJSON + tags map[string]string + id string + perDeviceInclude []string + totalInclude []string + daemonOSType string + } + + var ( + testDate = time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + metricCPUTotal = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu-total", + }, + map[string]interface{}{}, + testDate) + + metricCPU0 = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{}, + testDate) + metricCPU1 = testutil.MustMetric( + "docker_container_cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{}, + testDate) + + metricNetworkTotal = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": "total", + }, + map[string]interface{}{}, + testDate) + + metricNetworkEth0 = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": "eth0", + }, + map[string]interface{}{}, + testDate) + + metricNetworkEth1 = testutil.MustMetric( + "docker_container_net", + map[string]string{ + "network": "eth0", + }, + map[string]interface{}{}, + testDate) + metricBlkioTotal = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "total", + }, + map[string]interface{}{}, + testDate) + metricBlkio6_0 = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "6:0", + }, + map[string]interface{}{}, + testDate) + metricBlkio6_1 = testutil.MustMetric( + "docker_container_blkio", + map[string]string{ + "device": "6:1", + }, + map[string]interface{}{}, + testDate) + ) + stats := testStats() + tests := []struct { + name string + args args + expected []telegraf.Metric + }{ + { + name: "Per device and total metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: containerMetricClasses, + totalInclude: containerMetricClasses, + }, + expected: []telegraf.Metric{ + metricCPUTotal, metricCPU0, metricCPU1, + metricNetworkTotal, metricNetworkEth0, metricNetworkEth1, + metricBlkioTotal, metricBlkio6_0, metricBlkio6_1, + }, + }, + { + name: "Per device metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: containerMetricClasses, + totalInclude: []string{}, + }, + expected: []telegraf.Metric{ + metricCPU0, metricCPU1, + metricNetworkEth0, metricNetworkEth1, + metricBlkio6_0, metricBlkio6_1, + }, + }, + { + name: "Total metrics enabled", + args: args{ + stat: stats, + perDeviceInclude: []string{}, + totalInclude: containerMetricClasses, + }, + expected: []telegraf.Metric{metricCPUTotal, metricNetworkTotal, metricBlkioTotal}, + }, + { + name: "Per device and total metrics disabled", + args: args{ + stat: stats, + perDeviceInclude: []string{}, + totalInclude: []string{}, + }, + expected: []telegraf.Metric{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + var acc testutil.Accumulator + d := &Docker{ + Log: testutil.Logger{}, + PerDeviceInclude: tt.args.perDeviceInclude, + TotalInclude: tt.args.totalInclude, + } + d.parseContainerStats(tt.args.stat, &acc, tt.args.tags, tt.args.id, tt.args.daemonOSType) + + actual := FilterMetrics(acc.GetTelegrafMetrics(), func(m telegraf.Metric) bool { + return choice.Contains(m.Name(), + []string{"docker_container_cpu", "docker_container_net", "docker_container_blkio"}) + }) + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.OnlyTags(), testutil.SortMetrics()) + }) + } +} + +func TestDocker_Init(t *testing.T) { + type fields struct { + PerDevice bool + PerDeviceInclude []string + Total bool + TotalInclude []string + } + tests := []struct { + name string + fields fields + wantErr bool + wantPerDeviceInclude []string + wantTotalInclude []string + }{ + { + "Unsupported perdevice_include setting", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"nonExistentClass"}, + Total: false, + TotalInclude: []string{"cpu"}, + }, + true, + []string{}, + []string{}, + }, + { + "Unsupported total_include setting", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"cpu"}, + Total: false, + TotalInclude: []string{"nonExistentClass"}, + }, + true, + []string{}, + []string{}, + }, + { + "PerDevice true adds network and blkio", + fields{ + PerDevice: true, + PerDeviceInclude: []string{"cpu"}, + Total: true, + TotalInclude: []string{"cpu"}, + }, + false, + []string{"cpu", "network", "blkio"}, + []string{"cpu"}, + }, + { + "Total false removes network and blkio", + fields{ + PerDevice: false, + PerDeviceInclude: []string{"cpu"}, + Total: false, + TotalInclude: []string{"cpu", "network", "blkio"}, + }, + false, + []string{"cpu"}, + []string{"cpu"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &Docker{ + Log: testutil.Logger{}, + PerDevice: tt.fields.PerDevice, + PerDeviceInclude: tt.fields.PerDeviceInclude, + Total: tt.fields.Total, + TotalInclude: tt.fields.TotalInclude, + } + err := d.Init() + if (err != nil) != tt.wantErr { + t.Errorf("Init() error = %v, wantErr %v", err, tt.wantErr) + } + + if err == nil { + if !reflect.DeepEqual(d.PerDeviceInclude, tt.wantPerDeviceInclude) { + t.Errorf("Perdevice include: got '%v', want '%v'", d.PerDeviceInclude, tt.wantPerDeviceInclude) + } + + if !reflect.DeepEqual(d.TotalInclude, tt.wantTotalInclude) { + t.Errorf("Total include: got '%v', want '%v'", d.TotalInclude, tt.wantTotalInclude) + } + } + }) + } } diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index bde0bd312c788..826f34f6703d4 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -2,7 +2,7 @@ package docker import ( "fmt" - "io/ioutil" + "io" "strings" "time" @@ -344,7 +344,7 @@ func containerStats(s string) types.ContainerStats { }, "read": "2016-02-24T11:42:27.472459608-05:00" }`, name) - stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } @@ -488,7 +488,7 @@ func containerStatsWindows() types.ContainerStats { }, "name":"/gt_test_iis", }` - stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) + stat.Body = io.NopCloser(strings.NewReader(jsonStat)) return stat } diff --git a/plugins/inputs/docker/sample.conf b/plugins/inputs/docker/sample.conf new file mode 100644 index 0000000000000..d9f1e53339863 --- /dev/null +++ b/plugins/inputs/docker/sample.conf @@ -0,0 
+1,72 @@
+# Read metrics about docker containers
+[[inputs.docker]]
+  ## Docker Endpoint
+  ## To use TCP, set endpoint = "tcp://[ip]:[port]"
+  ## To use environment variables (ie, docker-machine), set endpoint = "ENV"
+  endpoint = "unix:///var/run/docker.sock"
+
+  ## Set to true to collect Swarm metrics (desired_replicas, running_replicas)
+  ## Note: configure this in one of the manager nodes in a Swarm cluster.
+  ## Configuring in multiple Swarm managers results in duplication of metrics.
+  gather_services = false
+
+  ## Only collect metrics for these containers. Values will be appended to
+  ## container_name_include.
+  ## Deprecated (1.4.0), use container_name_include
+  container_names = []
+
+  ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+  source_tag = false
+
+  ## Containers to include and exclude. Collect all if empty. Globs accepted.
+  container_name_include = []
+  container_name_exclude = []
+
+  ## Container states to include and exclude. Globs accepted.
+  ## When empty only containers in the "running" state will be captured.
+  ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+  ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+  # container_state_include = []
+  # container_state_exclude = []
+
+  ## Timeout for docker list, info, and stats commands
+  timeout = "5s"
+
+  ## Whether to report for each container per-device blkio (8:0, 8:1...),
+  ## network (eth0, eth1, ...) and cpu (cpu0, cpu1, ...) stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'perdevice_include'.
+  ## Default value is 'true' for backwards compatibility; please set it to 'false' so that the 'perdevice_include' setting
+  ## is honored.
+  perdevice = true
+
+  ## Specifies for which classes a per-device metric should be issued
+  ## Possible values are 'cpu' (cpu0, cpu1, ...), 'blkio' (8:0, 8:1, ...) and 'network' (eth0, eth1, ...)
+  ## Please note that this setting has no effect if 'perdevice' is set to 'true'
+  # perdevice_include = ["cpu"]
+
+  ## Whether to report for each container total blkio and network stats or not.
+  ## Usage of this setting is discouraged since it will be deprecated in favor of 'total_include'.
+  ## Default value is 'false' for backwards compatibility; please set it to 'true' so that the 'total_include' setting
+  ## is honored.
+  total = false
+
+  ## Specifies for which classes a total metric should be issued. Total is an aggregate of the 'perdevice' values.
+  ## Possible values are 'cpu', 'blkio' and 'network'
+  ## Total 'cpu' is reported directly by the Docker daemon, and 'network' and 'blkio' totals are aggregated by this plugin.
+  ## Please note that this setting has no effect if 'total' is set to 'false'
+  # total_include = ["cpu", "blkio", "network"]
+
+  ## docker labels to include and exclude as tags.
+ ## Note that an empty array for both will include all labels as tags + docker_label_include = [] + docker_label_exclude = [] + + ## Which environment variables should we use as a tag + tag_env = ["JAVA_HOME", "HEAP_SIZE"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/docker/stats_helpers.go b/plugins/inputs/docker/stats_helpers.go index 93ea2f2196baf..e5e21ee783154 100644 --- a/plugins/inputs/docker/stats_helpers.go +++ b/plugins/inputs/docker/stats_helpers.go @@ -1,4 +1,4 @@ -// Helper functions copied from +// Package docker contains few helper functions copied from // https://github.com/docker/cli/blob/master/cli/command/container/stats_helpers.go package docker @@ -40,9 +40,28 @@ func calculateCPUPercentWindows(v *types.StatsJSON) float64 { } // CalculateMemUsageUnixNoCache calculate memory usage of the container. -// Page cache is intentionally excluded to avoid misinterpretation of the output. +// Cache is intentionally excluded to avoid misinterpretation of the output. +// +// On Docker 19.03 and older, the result is `mem.Usage - mem.Stats["cache"]`. +// On new docker with cgroup v1 host, the result is `mem.Usage - mem.Stats["total_inactive_file"]`. +// On new docker with cgroup v2 host, the result is `mem.Usage - mem.Stats["inactive_file"]`. +// +// This definition is designed to be consistent with past values and the latest docker CLI +// * https://github.com/docker/cli/blob/6e2838e18645e06f3e4b6c5143898ccc44063e3b/cli/command/container/stats_helpers.go#L239 func CalculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { - return float64(mem.Usage - mem.Stats["cache"]) + // Docker 19.03 and older + if v, isOldDocker := mem.Stats["cache"]; isOldDocker && v < mem.Usage { + return float64(mem.Usage - v) + } + // cgroup v1 + if v, isCgroup1 := mem.Stats["total_inactive_file"]; isCgroup1 && v < mem.Usage { + return float64(mem.Usage - v) + } + // cgroup v2 + if v := mem.Stats["inactive_file"]; v < mem.Usage { + return float64(mem.Usage - v) + } + return float64(mem.Usage) } func CalculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { diff --git a/plugins/inputs/docker_log/README.md b/plugins/inputs/docker_log/README.md index d2f0dc6144ff9..c65bb3e6d8b96 100644 --- a/plugins/inputs/docker_log/README.md +++ b/plugins/inputs/docker_log/README.md @@ -12,9 +12,10 @@ The docker plugin uses the [Official Docker Client][] to gather logs from the [Official Docker Client]: https://github.com/moby/moby/tree/master/client [Engine API]: https://docs.docker.com/engine/api/v1.24/ -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read logging output from the Docker engine [[inputs.docker_log]] ## Docker Endpoint ## To use TCP, set endpoint = "tcp://[ip]:[port]" @@ -54,25 +55,27 @@ The docker plugin uses the [Official Docker Client][] to gather logs from the # insecure_skip_verify = false ``` -#### Environment Configuration +### Environment Configuration When using the `"ENV"` endpoint, the connection is configured using the [CLI Docker environment variables][env] [env]: https://godoc.org/github.com/moby/moby/client#NewEnvClient -### source tag +## source tag -Selecting the containers can be tricky if you have many containers with the same name. 
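Stepping back to the `CalculateMemUsageUnixNoCache` fallback chain in the stats_helpers.go hunk above: it can be exercised in isolation, and the numbers below are exactly the ones `TestDockerMemoryExcludesCache` asserts (raw usage 1111 against a 2000-byte limit). A self-contained sketch of the same fallback order:

```go
package main

import "fmt"

// usageNoCache reproduces the fallback order from stats_helpers.go:
// Docker 19.03-and-older "cache", then cgroup v1 "total_inactive_file",
// then cgroup v2 "inactive_file"; if none applies, raw usage is returned.
func usageNoCache(usage uint64, stats map[string]uint64) float64 {
	if v, ok := stats["cache"]; ok && v < usage {
		return float64(usage - v)
	}
	if v, ok := stats["total_inactive_file"]; ok && v < usage {
		return float64(usage - v)
	}
	if v := stats["inactive_file"]; v < usage {
		return float64(usage - v)
	}
	return float64(usage)
}

func main() {
	fmt.Println(usageNoCache(1111, map[string]uint64{"inactive_file": 9}))       // 1102 (cgroup v2)
	fmt.Println(usageNoCache(1111, map[string]uint64{"total_inactive_file": 7})) // 1104 (cgroup v1)
	fmt.Println(usageNoCache(1111, map[string]uint64{"cache": 16}))              // 1095 (Docker <= 19.03)
}
```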
-To alleviate this issue you can set the below value to `true`
+Selecting the containers can be tricky if you have many containers with the same
+name. To alleviate this issue, you can set the value below to `true`
 
 ```toml
 source_tag = true
 ```
 
-This will cause all data points to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker.
+This will cause all data points to have the `source` tag set to the first 12
+characters of the container id. The first 12 characters are the common hostname
+for containers that have no explicit hostname set, as defined by docker.
 
-### Metrics
+## Metrics
 
 - docker_log
   - tags:
@@ -85,9 +88,9 @@ This will cause all data points to have the `source` tag be set to the first 12
     - container_id
     - message
 
-### Example Output
+## Example Output
 
-```
+```shell
 docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! [agent] Config: Interval:10s, Quiet:false, Hostname:\"371ee5d3e587\", Flush Interval:10s" 1560913872000000000
 docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Tags enabled: host=371ee5d3e587" 1560913872000000000
 docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded outputs: file" 1560913872000000000
diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go
index 27462ec5a66e7..14441029d17e4 100644
--- a/plugins/inputs/docker_log/docker_log.go
+++ b/plugins/inputs/docker_log/docker_log.go
@@ -1,3 +1,4 @@
+//go:generate ../../../tools/readme_config_includer/generator
 package docker_log
 
 import (
@@ -5,6 +6,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/tls"
+	_ "embed"
 	"fmt"
 	"io"
 	"strings"
@@ -15,60 +17,21 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/pkg/stdcopy"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/filter"
-	"github.com/influxdata/telegraf/internal"
 	"github.com/influxdata/telegraf/internal/docker"
 	tlsint "github.com/influxdata/telegraf/plugins/common/tls"
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
-var sampleConfig = `
-  ## Docker Endpoint
-  ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
-  ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
-  # endpoint = "unix:///var/run/docker.sock"
-
-  ## When true, container logs are read from the beginning; otherwise
-  ## reading begins at the end of the log.
-  # from_beginning = false
-
-  ## Timeout for Docker API calls.
-  # timeout = "5s"
-
-  ## Containers to include and exclude. Globs accepted.
-  ## Note that an empty array for both will include all containers
-  # container_name_include = []
-  # container_name_exclude = []
-
-  ## Container states to include and exclude. Globs accepted.
-  ## When empty only containers in the "running" state will be captured.
-  # container_state_include = []
-  # container_state_exclude = []
-
-  ## docker labels to include and exclude as tags. Globs accepted.
- ## Note that an empty array for both will include all labels as tags - # docker_label_include = [] - # docker_label_exclude = [] - - ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars - source_tag = false - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string const ( defaultEndpoint = "unix:///var/run/docker.sock" - - // Maximum bytes of a log line before it will be split, size is mirroring - // docker code: - // https://github.com/moby/moby/blob/master/daemon/logger/copier.go#L21 - maxLineBytes = 16 * 1024 ) var ( @@ -78,16 +41,16 @@ var ( ) type DockerLogs struct { - Endpoint string `toml:"endpoint"` - FromBeginning bool `toml:"from_beginning"` - Timeout internal.Duration `toml:"timeout"` - LabelInclude []string `toml:"docker_label_include"` - LabelExclude []string `toml:"docker_label_exclude"` - ContainerInclude []string `toml:"container_name_include"` - ContainerExclude []string `toml:"container_name_exclude"` - ContainerStateInclude []string `toml:"container_state_include"` - ContainerStateExclude []string `toml:"container_state_exclude"` - IncludeSourceTag bool `toml:"source_tag"` + Endpoint string `toml:"endpoint"` + FromBeginning bool `toml:"from_beginning"` + Timeout config.Duration `toml:"timeout"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` + ContainerInclude []string `toml:"container_name_include"` + ContainerExclude []string `toml:"container_name_exclude"` + ContainerStateInclude []string `toml:"container_state_include"` + ContainerStateExclude []string `toml:"container_state_exclude"` + IncludeSourceTag bool `toml:"source_tag"` tlsint.ClientConfig @@ -104,11 +67,7 @@ type DockerLogs struct { containerList map[string]context.CancelFunc } -func (d *DockerLogs) Description() string { - return "Read logging output from the Docker engine" -} - -func (d *DockerLogs) SampleConfig() string { +func (*DockerLogs) SampleConfig() string { return sampleConfig } @@ -160,18 +119,16 @@ func (d *DockerLogs) Init() error { return nil } -func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) error { +func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) { d.mu.Lock() defer d.mu.Unlock() d.containerList[containerID] = cancel - return nil } -func (d *DockerLogs) removeFromContainerList(containerID string) error { +func (d *DockerLogs) removeFromContainerList(containerID string) { d.mu.Lock() defer d.mu.Unlock() delete(d.containerList, containerID) - return nil } func (d *DockerLogs) containerInContainerList(containerID string) bool { @@ -181,13 +138,12 @@ func (d *DockerLogs) containerInContainerList(containerID string) bool { return ok } -func (d *DockerLogs) cancelTails() error { +func (d *DockerLogs) cancelTails() { d.mu.Lock() defer d.mu.Unlock() for _, cancel := range d.containerList { cancel() } - return nil } func (d *DockerLogs) matchedContainerName(names []string) string { @@ -207,7 +163,7 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { ctx := context.Background() acc.SetPrecision(time.Nanosecond) - ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, 
time.Duration(d.Timeout)) defer cancel() containers, err := d.client.ContainerList(ctx, d.opts) if err != nil { @@ -243,7 +199,7 @@ func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { } func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (bool, error) { - ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(d.Timeout)) defer cancel() c, err := d.client.ContainerInspect(ctx, container.ID) if err != nil { @@ -308,16 +264,14 @@ func (d *DockerLogs) tailContainerLogs( // multiplexed. if hasTTY { return tailStream(acc, tags, container.ID, logReader, "tty") - } else { - return tailMultiplexed(acc, tags, container.ID, logReader) } + return tailMultiplexed(acc, tags, container.ID, logReader) } func parseLine(line []byte) (time.Time, string, error) { parts := bytes.SplitN(line, []byte(" "), 2) - switch len(parts) { - case 1: + if len(parts) == 1 { parts = append(parts, []byte("")) } @@ -407,8 +361,11 @@ func tailMultiplexed( }() _, err := stdcopy.StdCopy(outWriter, errWriter, src) + //nolint:errcheck,revive // we cannot do anything if the closing fails outWriter.Close() + //nolint:errcheck,revive // we cannot do anything if the closing fails errWriter.Close() + //nolint:errcheck,revive // we cannot do anything if the closing fails src.Close() wg.Wait() return err @@ -427,20 +384,20 @@ func (d *DockerLogs) Stop() { // Following few functions have been inherited from telegraf docker input plugin func (d *DockerLogs) createContainerFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + containerFilter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) if err != nil { return err } - d.containerFilter = filter + d.containerFilter = containerFilter return nil } func (d *DockerLogs) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) if err != nil { return err } - d.labelFilter = filter + d.labelFilter = labelFilter return nil } @@ -448,18 +405,18 @@ func (d *DockerLogs) createContainerStateFilters() error { if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { d.ContainerStateInclude = []string{"running"} } - filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) + stateFilter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) if err != nil { return err } - d.stateFilter = filter + d.stateFilter = stateFilter return nil } func init() { inputs.Add("docker_log", func() telegraf.Input { return &DockerLogs{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), Endpoint: defaultEndpoint, newEnvClient: NewEnvClient, newClient: NewClient, diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go index c8903c9d8ec28..49a73ebe9f1bb 100644 --- a/plugins/inputs/docker_log/docker_log_test.go +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/stdcopy" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -138,8 +138,8 @@ func Test(t *testing.T) { ContainerLogsF: 
func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
 					var buf bytes.Buffer
 					w := stdcopy.NewStdWriter(&buf, stdcopy.Stdout)
-					w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout"))
-					return &Response{Reader: &buf}, nil
+					_, err := w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout"))
+					return &Response{Reader: &buf}, err
 				},
 			},
 			expected: []telegraf.Metric{
@@ -165,7 +165,7 @@ func Test(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			var acc testutil.Accumulator
 			plugin := &DockerLogs{
-				Timeout:          internal.Duration{Duration: time.Second * 5},
+				Timeout:          config.Duration(time.Second * 5),
 				newClient:        func(string, *tls.Config) (Client, error) { return tt.client, nil },
 				containerList:    make(map[string]context.CancelFunc),
 				IncludeSourceTag: true,
diff --git a/plugins/inputs/docker_log/sample.conf b/plugins/inputs/docker_log/sample.conf
new file mode 100644
index 0000000000000..a3bdcd6c377ff
--- /dev/null
+++ b/plugins/inputs/docker_log/sample.conf
@@ -0,0 +1,38 @@
+# Read logging output from the Docker engine
+[[inputs.docker_log]]
+  ## Docker Endpoint
+  ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
+  ##   To use environment variables (i.e., docker-machine), set endpoint = "ENV"
+  # endpoint = "unix:///var/run/docker.sock"
+
+  ## When true, container logs are read from the beginning; otherwise
+  ## reading begins at the end of the log.
+  # from_beginning = false
+
+  ## Timeout for Docker API calls.
+  # timeout = "5s"
+
+  ## Containers to include and exclude. Globs accepted.
+  ## Note that an empty array for both will include all containers
+  # container_name_include = []
+  # container_name_exclude = []
+
+  ## Container states to include and exclude. Globs accepted.
+  ## When empty only containers in the "running" state will be captured.
+  # container_state_include = []
+  # container_state_exclude = []
+
+  ## docker labels to include and exclude as tags. Globs accepted.
+  ## Note that an empty array for both will include all labels as tags
+  # docker_label_include = []
+  # docker_label_exclude = []
+
+  ## Set the source tag for the metrics to the container ID hostname, e.g. the first 12 chars
+  source_tag = false
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md
index 3b6129488dae3..5bb514b625057 100644
--- a/plugins/inputs/dovecot/README.md
+++ b/plugins/inputs/dovecot/README.md
@@ -6,64 +6,66 @@ metrics on configured domains.
 
 When using Dovecot v2.3 you are still able to use this protocol by following
 the [upgrading steps][upgrading].
 
-### Configuration:
+## Configuration
 
-```toml
+```toml @sample.conf
 # Read metrics about dovecot servers
 [[inputs.dovecot]]
   ## specify dovecot servers via an address:port list
   ##  e.g.
   ##    localhost:24242
+  ## or as a UDS socket
+  ##  e.g.
+  ##    /var/run/dovecot/old-stats
   ##
   ## If no servers are specified, then localhost is used as the host.
   servers = ["localhost:24242"]
 
   ## Type is one of "user", "domain", "ip", or "global"
   type = "global"
-  
+
   ## Wildcard matches like "*.com". An empty string "" is same as "*"
   ## If type = "ip" filters should be
   filters = [""]
 ```
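+
+For example, to gather per-domain statistics for all `.com` domains from the
+default server, a configuration along the following lines could be used (the
+filter value is illustrative; it follows the wildcard syntax described above):
+
+```toml
+[[inputs.dovecot]]
+  servers = ["localhost:24242"]
+  type = "domain"
+  filters = ["*.com"]
+```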
 
-### Metrics:
+## Metrics
 
 - dovecot
   - tags:
-      - server (hostname)
-      - type (query type)
-      - ip (ip addr)
-      - user (username)
-      - domain (domain name)
+    - server (hostname)
+    - type (query type)
+    - ip (ip addr)
+    - user (username)
+    - domain (domain name)
   - fields:
-      - reset_timestamp (string)
-      - last_update (string)
-      - num_logins (integer)
-      - num_cmds (integer)
-      - num_connected_sessions (integer)
-      - user_cpu (float)
-      - sys_cpu (float)
-      - clock_time (float)
-      - min_faults (integer)
-      - maj_faults (integer)
-      - vol_cs (integer)
-      - invol_cs (integer)
-      - disk_input (integer)
-      - disk_output (integer)
-      - read_count (integer)
-      - read_bytes (integer)
-      - write_count (integer)
-      - write_bytes (integer)
-      - mail_lookup_path (integer)
-      - mail_lookup_attr (integer)
-      - mail_read_count (integer)
-      - mail_read_bytes (integer)
-      - mail_cache_hits (integer)
+    - reset_timestamp (string)
+    - last_update (string)
+    - num_logins (integer)
+    - num_cmds (integer)
+    - num_connected_sessions (integer)
+    - user_cpu (float)
+    - sys_cpu (float)
+    - clock_time (float)
+    - min_faults (integer)
+    - maj_faults (integer)
+    - vol_cs (integer)
+    - invol_cs (integer)
+    - disk_input (integer)
+    - disk_output (integer)
+    - read_count (integer)
+    - read_bytes (integer)
+    - write_count (integer)
+    - write_bytes (integer)
+    - mail_lookup_path (integer)
+    - mail_lookup_attr (integer)
+    - mail_read_count (integer)
+    - mail_read_bytes (integer)
+    - mail_cache_hits (integer)
+
+## Example Output
 
-### Example Output:
-
-```
+```shell
 dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907
 ```
diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go
index 66282c43423b2..3494ed8434f9a 100644
--- a/plugins/inputs/dovecot/dovecot.go
+++ b/plugins/inputs/dovecot/dovecot.go
@@ -1,7 +1,9 @@
+//go:generate ../../../tools/readme_config_includer/generator
 package dovecot
 
 import (
 	"bytes"
+	_ "embed"
 	"fmt"
 	"io"
 	"net"
@@ -14,47 +16,30 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
 type Dovecot struct {
 	Type    string
 	Filters []string
 	Servers []string
 }
 
-func (d *Dovecot) Description() string {
-	return "Read statistics from one or many dovecot servers"
-}
-
-var sampleConfig = `
-  ## specify dovecot servers via an address:port list
-  ##  e.g.
-  ##    localhost:24242
-  ##
-  ## If no servers are specified, then localhost is used as the host.
-  servers = ["localhost:24242"]
-
-  ## Type is one of "user", "domain", "ip", or "global"
-  type = "global"
-
-  ## Wildcard matches like "*.com".
An empty string "" is same as "*"
-  ## If type = "ip" filters should be
-  filters = [""]
-`
-
 var defaultTimeout = time.Second * time.Duration(5)
 
 var validQuery = map[string]bool{
 	"user": true, "domain": true, "global": true, "ip": true,
 }
 
-func (d *Dovecot) SampleConfig() string { return sampleConfig }
-
-const defaultPort = "24242"
+func (*Dovecot) SampleConfig() string {
+	return sampleConfig
+}
 
 // Reads stats from all configured servers.
 func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
 	if !validQuery[d.Type] {
-		return fmt.Errorf("Error: %s is not a valid query type\n",
-			d.Type)
+		return fmt.Errorf("error: %s is not a valid query type", d.Type)
 	}
 
 	if len(d.Servers) == 0 {
@@ -81,19 +66,29 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
 }
 
 func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {
-	_, _, err := net.SplitHostPort(addr)
-	if err != nil {
-		return fmt.Errorf("%q on url %s", err.Error(), addr)
+	var proto string
+
+	if strings.HasPrefix(addr, "/") {
+		proto = "unix"
+	} else {
+		proto = "tcp"
+
+		_, _, err := net.SplitHostPort(addr)
+		if err != nil {
+			return fmt.Errorf("%q on url %s", err.Error(), addr)
+		}
 	}
 
-	c, err := net.DialTimeout("tcp", addr, defaultTimeout)
+	c, err := net.DialTimeout(proto, addr, defaultTimeout)
 	if err != nil {
-		return fmt.Errorf("enable to connect to dovecot server '%s': %s", addr, err)
+		return fmt.Errorf("unable to connect to dovecot server '%s': %s", addr, err)
 	}
 	defer c.Close()
 
 	// Extend connection
-	c.SetDeadline(time.Now().Add(defaultTimeout))
+	if err := c.SetDeadline(time.Now().Add(defaultTimeout)); err != nil {
+		return fmt.Errorf("setting deadline failed for dovecot server '%s': %s", addr, err)
+	}
 
 	msg := fmt.Sprintf("EXPORT\t%s", qtype)
 	if len(filter) > 0 {
@@ -101,17 +96,30 @@ func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype stri
 	}
 	msg += "\n"
 
-	c.Write([]byte(msg))
+	if _, err := c.Write([]byte(msg)); err != nil {
+		return fmt.Errorf("writing message %q failed for dovecot server '%s': %s", msg, addr, err)
+	}
 
 	var buf bytes.Buffer
-	io.Copy(&buf, c)
+	if _, err := io.Copy(&buf, c); err != nil {
+		// We need to accept the timeout here, as reading from the connection terminates only on EOF
+		// or on a timeout. Since EOF is only sent when a TCP connection is closed,
+		// the only way to get the whole message is to wait for the timeout.
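+		// (Note: a successful io.Copy reports err == nil rather than io.EOF, so only
+		// genuine failures and non-timeout network errors reach the check below.)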
+ if nerr, ok := err.(net.Error); !ok || !nerr.Timeout() { + return fmt.Errorf("copying message failed for dovecot server '%s': %s", addr, err) + } + } - host, _, _ := net.SplitHostPort(addr) + var host string + if strings.HasPrefix(addr, "/") { + host = addr + } else { + host, _, _ = net.SplitHostPort(addr) + } return gatherStats(&buf, acc, host, qtype) } func gatherStats(buf *bytes.Buffer, acc telegraf.Accumulator, host string, qtype string) error { - lines := strings.Split(buf.String(), "\n") head := strings.Split(lines[0], "\t") vals := lines[1:] @@ -170,13 +178,11 @@ func splitSec(tm string) (sec int64, msec int64) { } func timeParser(tm string) time.Time { - sec, msec := splitSec(tm) return time.Unix(sec, msec) } func secParser(tm string) float64 { - sec, msec := splitSec(tm) return float64(sec) + (float64(msec) / 1000000.0) } diff --git a/plugins/inputs/dovecot/dovecot_test.go b/plugins/inputs/dovecot/dovecot_test.go index c801d4f0ca5f7..f9ce76de947d6 100644 --- a/plugins/inputs/dovecot/dovecot_test.go +++ b/plugins/inputs/dovecot/dovecot_test.go @@ -1,7 +1,12 @@ package dovecot import ( + "bufio" "bytes" + "io" + "net" + "net/textproto" + "os" "testing" "time" @@ -9,8 +14,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestDovecot(t *testing.T) { - +func TestDovecotIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -43,11 +47,49 @@ func TestDovecot(t *testing.T) { var acc testutil.Accumulator + // Test type=global server=unix + addr := "/tmp/socket" + wait := make(chan int) + go func() { + defer close(wait) + + la, err := net.ResolveUnixAddr("unix", addr) + require.NoError(t, err) + + l, err := net.ListenUnix("unix", la) + require.NoError(t, err) + defer l.Close() + defer os.Remove(addr) + + wait <- 0 + conn, err := l.Accept() + require.NoError(t, err) + defer conn.Close() + + readertp := textproto.NewReader(bufio.NewReader(conn)) + _, err = readertp.ReadLine() + require.NoError(t, err) + + buf := bytes.NewBufferString(sampleGlobal) + _, err = io.Copy(conn, buf) + require.NoError(t, err) + }() + + // Wait for server to start + <-wait + + d := &Dovecot{Servers: []string{addr}, Type: "global"} + err := d.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{"server": addr, "type": "global"} + acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) + // Test type=global - tags := map[string]string{"server": "dovecot.test", "type": "global"} + tags = map[string]string{"server": "dovecot.test", "type": "global"} buf := bytes.NewBufferString(sampleGlobal) - err := gatherStats(buf, &acc, "dovecot.test", "global") + err = gatherStats(buf, &acc, "dovecot.test", "global") require.NoError(t, err) acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) @@ -63,7 +105,7 @@ func TestDovecot(t *testing.T) { // Test type=ip tags = map[string]string{"server": "dovecot.test", "type": "ip", "ip": "192.168.0.100"} - buf = bytes.NewBufferString(sampleIp) + buf = bytes.NewBufferString(sampleIP) err = gatherStats(buf, &acc, "dovecot.test", "ip") require.NoError(t, err) @@ -103,7 +145,6 @@ func TestDovecot(t *testing.T) { require.NoError(t, err) acc.AssertContainsTaggedFields(t, "dovecot", fields, tags) - } const sampleGlobal = `reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits @@ -112,7 
+153,7 @@ const sampleGlobal = `reset_timestamp	last_update	num_logins	num_cmds	num_connec
 const sampleDomain = `domain	reset_timestamp	last_update	num_logins	num_cmds	num_connected_sessions	user_cpu	sys_cpu	clock_time	min_faults	maj_faults	vol_cs	invol_cs	disk_input	disk_output	read_count	read_bytes	write_count	write_bytes	mail_lookup_path	mail_lookup_attr	mail_read_count	mail_read_bytes	mail_cache_hits
 domain.test	1453969886	1454603963.039864	7503897	52595715	1204	100831175.372000	83849071.112000	4326001931528183.495762	763950011	1112443	4120386897	3685239306	41679480946688	1819070669176832	2368906465	2957928122981169	3545389615	1666822498251286	24396105	302845	20155768	669946617705	1557255080`
 
-const sampleIp = `ip	reset_timestamp	last_update	num_logins	num_cmds	num_connected_sessions	user_cpu	sys_cpu	clock_time	min_faults	maj_faults	vol_cs	invol_cs	disk_input	disk_output	read_count	read_bytes	write_count	write_bytes	mail_lookup_path	mail_lookup_attr	mail_read_count	mail_read_bytes	mail_cache_hits
+const sampleIP = `ip	reset_timestamp	last_update	num_logins	num_cmds	num_connected_sessions	user_cpu	sys_cpu	clock_time	min_faults	maj_faults	vol_cs	invol_cs	disk_input	disk_output	read_count	read_bytes	write_count	write_bytes	mail_lookup_path	mail_lookup_attr	mail_read_count	mail_read_bytes	mail_cache_hits
 192.168.0.100	1453969886	1454603963.039864	7503897	52595715	1204	100831175.372000	83849071.112000	4326001931528183.495762	763950011	1112443	4120386897	3685239306	41679480946688	1819070669176832	2368906465	2957928122981169	3545389615	1666822498251286	24396105	302845	20155768	669946617705	1557255080`
 
 const sampleUser = `user	reset_timestamp	last_update	num_logins	num_cmds	user_cpu	sys_cpu	clock_time	min_faults	maj_faults	vol_cs	invol_cs	disk_input	disk_output	read_count	read_bytes	write_count	write_bytes	mail_lookup_path	mail_lookup_attr	mail_read_count	mail_read_bytes	mail_cache_hits
diff --git a/plugins/inputs/dovecot/sample.conf b/plugins/inputs/dovecot/sample.conf
new file mode 100644
index 0000000000000..efa29d95b1cc3
--- /dev/null
+++ b/plugins/inputs/dovecot/sample.conf
@@ -0,0 +1,18 @@
+# Read metrics about dovecot servers
+[[inputs.dovecot]]
+  ## specify dovecot servers via an address:port list
+  ##  e.g.
+  ##    localhost:24242
+  ## or as a UDS socket
+  ##  e.g.
+  ##    /var/run/dovecot/old-stats
+  ##
+  ## If no servers are specified, then localhost is used as the host.
+  servers = ["localhost:24242"]
+
+  ## Type is one of "user", "domain", "ip", or "global"
+  type = "global"
+
+  ## Wildcard matches like "*.com". An empty string "" is the same as "*"
+  ## If type = "ip", filters should be
+  filters = [""]
diff --git a/plugins/inputs/dpdk/README.md b/plugins/inputs/dpdk/README.md
new file mode 100644
index 0000000000000..3ba785604e2f3
--- /dev/null
+++ b/plugins/inputs/dpdk/README.md
@@ -0,0 +1,251 @@
+# Data Plane Development Kit (DPDK) Input Plugin
+
+The `dpdk` plugin collects metrics exposed by applications built with the [Data
+Plane Development Kit](https://www.dpdk.org/), an extensive set of open
+source libraries designed for accelerating packet processing workloads.
+
+DPDK provides APIs that enable exposing various statistics from the devices
+used by DPDK applications, as well as exposing KPI metrics directly from
+applications. Device statistics include, for example, common statistics
+available across NICs, such as received and sent packets or bytes.
In addition
+to these generic statistics, an extended statistics API is available that allows
+providing more detailed, driver-specific metrics that are not available as
+generic statistics.
+
+[DPDK Release 20.05](https://doc.dpdk.org/guides/rel_notes/release_20_05.html)
+introduced an updated telemetry interface that enables DPDK libraries and
+applications to provide their telemetry. This is referred to as the `v2` version
+of this socket-based telemetry interface. This release enabled, e.g., reading
+driver-specific extended stats (`/ethdev/xstats`) via this new interface.
+
+[DPDK Release 20.11](https://doc.dpdk.org/guides/rel_notes/release_20_11.html)
+introduced reading common statistics (`/ethdev/stats`) via the `v2` interface,
+in addition to the existing `/ethdev/xstats`.
+
+Example usage of the `v2` telemetry interface can be found in the [Telemetry User
+Guide](https://doc.dpdk.org/guides/howto/telemetry.html). A variety of [DPDK
+Sample Applications](https://doc.dpdk.org/guides/sample_app_ug/index.html) is
+also available for users to discover and test the capabilities of DPDK libraries
+and to explore the exposed metrics.
+
+> **DPDK Version Info:** This plugin uses this `v2` interface to read telemetry data from applications built with
+> `DPDK version >= 20.05`. The default configuration includes reading common statistics from `/ethdev/stats`, which is
+> available from `DPDK version >= 20.11`. When using `DPDK 20.05 <= version < DPDK 20.11` it is recommended to disable
+> querying `/ethdev/stats` by setting the corresponding `exclude_commands` configuration option.
+> **NOTE:** Since DPDK will most likely run with root privileges, the socket telemetry interface exposed by DPDK
+> will also require root access. This means that either access permissions have to be adjusted for the socket telemetry
+> interface to allow Telegraf to access it, or Telegraf should run with root privileges.
+> **NOTE:** The DPDK socket must exist for Telegraf to start successfully. Telegraf will attempt
+> to connect to the DPDK socket during the initialization phase.
+
+## Configuration
+
+```toml @sample.conf
+# Reads metrics from DPDK applications using the v2 telemetry interface.
+[[inputs.dpdk]]
+  ## Path to DPDK telemetry socket. This shall point to the v2 version of the DPDK telemetry interface.
+  # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2"
+
+  ## Duration that defines how long the connected socket client will wait for a response before terminating the connection.
+  ## This includes both writing to and reading from the socket. Since it's local socket access
+  ## to a fast packet processing application, the timeout should be sufficient for most users.
+  ## Setting the value to 0 disables the timeout (not recommended).
+  # socket_access_timeout = "200ms"
+
+  ## Enables telemetry data collection for selected device types.
+  ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status).
+  ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats).
+  # device_types = ["ethdev"]
+
+  ## List of custom, application-specific telemetry commands to query.
+  ## The list of available commands depends on the application deployed. Applications can register their own commands
+  ## via the telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands
+  ## E.g. for the
L3 Forwarding with Power Management Sample Application this could be:
+  ## additional_commands = ["/l3fwd-power/stats"]
+  # additional_commands = []
+
+  ## Allows turning off collecting data for individual "ethdev" commands.
+  ## Remove "/ethdev/link_status" from the list to start getting link status metrics.
+  [inputs.dpdk.ethdev]
+    exclude_commands = ["/ethdev/link_status"]
+
+  ## When running multiple instances of the plugin, it's recommended to add a unique tag to each instance to identify
+  ## metrics exposed by an instance of a DPDK application. This is useful when multiple DPDK apps run on a single host.
+  ## [inputs.dpdk.tags]
+  ##   dpdk_instance = "my-fwd-app"
+```
+
+This plugin offers multiple configuration options; please review the examples
+below for additional usage information.
+
+### Example: Minimal Configuration for NIC metrics
+
+This configuration allows getting metrics for all devices reported via the
+`/ethdev/list` command:
+
+* `/ethdev/stats` - basic device statistics (since `DPDK 20.11`)
+* `/ethdev/xstats` - extended device statistics
+* `/ethdev/link_status` - up/down link status
+
+```toml
+[[inputs.dpdk]]
+  device_types = ["ethdev"]
+```
+
+Since this configuration will query `/ethdev/link_status`, it's recommended to
+increase the timeout to `socket_access_timeout = "10s"`.
+
+The [plugin collecting interval](../../../docs/CONFIGURATION.md#input-plugins)
+should be adjusted accordingly (e.g. `interval = "30s"`).
+
+### Example: Excluding NIC link status from being collected
+
+Depending on the underlying implementation, checking link status may take more
+time to complete. This configuration can be used to exclude this telemetry
+command to allow a faster response for metrics.
+
+```toml
+[[inputs.dpdk]]
+  device_types = ["ethdev"]
+
+  [inputs.dpdk.ethdev]
+    exclude_commands = ["/ethdev/link_status"]
+```
+
+A separate plugin instance with higher timeout settings can be used to get
+`/ethdev/link_status` independently. Consult the [Independent NIC link status
+configuration](#example-independent-nic-link-status-configuration) and [Getting
+metrics from multiple DPDK instances running on same
+host](#example-getting-metrics-from-multiple-dpdk-instances-on-same-host)
+examples for further details.
+
+### Example: Independent NIC link status configuration
+
+This configuration allows getting `/ethdev/link_status` using a separate
+configuration with a higher timeout.
+
+```toml
+[[inputs.dpdk]]
+  interval = "30s"
+  socket_access_timeout = "10s"
+  device_types = ["ethdev"]
+
+  [inputs.dpdk.ethdev]
+    exclude_commands = ["/ethdev/stats", "/ethdev/xstats"]
+```
+
+### Example: Getting application-specific metrics
+
+This configuration allows reading custom metrics exposed by applications. The
+example telemetry command was obtained from the [L3 Forwarding with Power
+Management Sample Application][sample-app].
+
+```toml
+[[inputs.dpdk]]
+  device_types = ["ethdev"]
+  additional_commands = ["/l3fwd-power/stats"]
+
+  [inputs.dpdk.ethdev]
+    exclude_commands = ["/ethdev/link_status"]
+```
+
+Command entries specified in `additional_commands` should match the DPDK
+command format:
+
+* Command entry format: either `command` or `command,params` for commands that expect parameters, where a comma (`,`) separates the command from the params.
+* Command entry length (command with params) should be `< 1024` characters.
+* Command length (without params) should be `< 56` characters.
+* Commands have to start with `/`.
+
+Providing invalid commands will prevent the plugin from starting. Additional
+commands allow duplicates, but they will be removed during execution, so each
+command will be executed only once during each metric gathering interval.
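+
+For instance, a hypothetical entry that combines a parameterized command with an
+application-specific one could look like the sketch below, where `,0` is the
+`params` part, i.e. a NIC id as reported by `/ethdev/list`:
+
+```toml
+[[inputs.dpdk]]
+  device_types = ["ethdev"]
+  additional_commands = ["/ethdev/stats,0", "/l3fwd-power/stats"]
+```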
+
+[sample-app]: https://doc.dpdk.org/guides/sample_app_ug/l3_forward_power_man.html
+
+### Example: Getting metrics from multiple DPDK instances on same host
+
+This configuration allows getting metrics from two separate applications
+exposing their telemetry interfaces via separate sockets. For each plugin
+instance, a unique tag `[inputs.dpdk.tags]` allows distinguishing between them.
+
+```toml
+# Instance #1 - L3 Forwarding with Power Management Application
+[[inputs.dpdk]]
+  socket_path = "/var/run/dpdk/rte/l3fwd-power_telemetry.v2"
+  device_types = ["ethdev"]
+  additional_commands = ["/l3fwd-power/stats"]
+
+  [inputs.dpdk.ethdev]
+    exclude_commands = ["/ethdev/link_status"]
+
+  [inputs.dpdk.tags]
+    dpdk_instance = "l3fwd-power"
+
+# Instance #2 - L2 Forwarding with Intel Cache Allocation Technology (CAT) Application
+[[inputs.dpdk]]
+  socket_path = "/var/run/dpdk/rte/l2fwd-cat_telemetry.v2"
+  device_types = ["ethdev"]
+
+  [inputs.dpdk.ethdev]
+    exclude_commands = ["/ethdev/link_status"]
+
+  [inputs.dpdk.tags]
+    dpdk_instance = "l2fwd-cat"
+```
+
+This utilizes Telegraf's standard capability of [adding custom
+tags](../../../docs/CONFIGURATION.md#input-plugins) to an input plugin's
+measurements.
+
+## Metrics
+
+The DPDK socket accepts `command,params` requests and returns metric data in
+JSON format. All metrics from the DPDK socket are flattened using [Telegraf's
+JSON Flattener](../../parsers/json/README.md) and exposed as fields. If a DPDK
+response contains no information (is empty or null), it will be discarded.
+
+> **NOTE:** Since DPDK allows registering custom metrics in its telemetry framework, the JSON response from DPDK
+> may contain various sets of metrics. While metrics from `/ethdev/stats` should be the most stable, `/ethdev/xstats`
+> may contain driver-specific metrics (depending on the DPDK application configuration). Application-specific commands
+> like `/l3fwd-power/stats` can return their own specific set of metrics.
+
+## Example Output
+
+The output consists of the plugin name (`dpdk`) and a set of tags that identify
+the querying hierarchy:
+
+```shell
+dpdk,host=dpdk-host,dpdk_instance=l3fwd-power,command=/ethdev/stats,params=0 [fields] [timestamp]
+```
+
+| Tag | Description |
+|-----|-------------|
+| `host` | hostname of the machine (consult [Telegraf Agent configuration](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#agent) for additional details) |
+| `dpdk_instance` | custom tag from `[inputs.dpdk.tags]` (optional) |
+| `command` | executed command (without params) |
+| `params` | command parameter, e.g. for `/ethdev/stats` it is the id of the NIC as exposed by `/ethdev/list`. For a DPDK app that uses 2 NICs the metrics will output e.g. `params=0`, `params=1`. |
+
+When running the plugin configuration below...
+ +```toml +[[inputs.dpdk]] + device_types = ["ethdev"] + additional_commands = ["/l3fwd-power/stats"] + [inputs.dpdk.tags] + dpdk_instance = "l3fwd-power" +``` + +...expected output for `dpdk` plugin instance running on host named +`host=dpdk-host`: + +```shell +dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 +dpdk,command=/ethdev/stats,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 q_opackets_0=0,q_ipackets_5=0,q_errors_11=0,ierrors=0,q_obytes_5=0,q_obytes_10=0,q_opackets_10=0,q_ipackets_4=0,q_ipackets_7=0,q_ipackets_15=0,q_ibytes_5=0,q_ibytes_6=0,q_ibytes_9=0,obytes=0,q_opackets_1=0,q_opackets_11=0,q_obytes_7=0,q_errors_5=0,q_errors_10=0,q_ibytes_4=0,q_obytes_6=0,q_errors_1=0,q_opackets_5=0,q_errors_3=0,q_errors_12=0,q_ipackets_11=0,q_ipackets_12=0,q_obytes_14=0,q_opackets_15=0,q_obytes_2=0,q_errors_8=0,q_opackets_12=0,q_errors_0=0,q_errors_9=0,q_opackets_14=0,q_ibytes_3=0,q_ibytes_15=0,q_ipackets_13=0,q_ipackets_14=0,q_obytes_3=0,q_errors_13=0,q_opackets_3=0,q_ibytes_0=7092,q_ibytes_2=0,q_ibytes_8=0,q_ipackets_8=0,q_ipackets_10=0,q_obytes_4=0,q_ibytes_10=0,q_ibytes_13=0,q_ibytes_1=0,q_ibytes_12=0,opackets=0,q_obytes_1=0,q_errors_15=0,q_opackets_2=0,oerrors=0,rx_nombuf=0,q_opackets_8=0,q_ibytes_11=0,q_ipackets_3=0,q_obytes_0=0,q_obytes_12=0,q_obytes_11=0,q_obytes_13=0,q_errors_6=0,q_ipackets_1=0,q_ipackets_6=0,q_ipackets_9=0,q_obytes_15=0,q_opackets_7=0,q_ibytes_14=0,ipackets=98,q_ipackets_2=0,q_opackets_6=0,q_ibytes_7=0,imissed=0,q_opackets_4=0,q_opackets_9=0,q_obytes_8=0,q_obytes_9=0,q_errors_4=0,q_errors_14=0,q_opackets_13=0,ibytes=7092,q_ipackets_0=98,q_errors_2=0,q_errors_7=0 1606310780000000000 +dpdk,command=/ethdev/xstats,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 
out_octets_encrypted=0,rx_fcoe_mbuf_allocation_errors=0,tx_q1packets=0,rx_priority0_xoff_packets=0,rx_priority7_xoff_packets=0,rx_errors=0,mac_remote_errors=0,in_pkts_invalid=0,tx_priority3_xoff_packets=0,tx_errors=0,rx_fcoe_bytes=0,rx_flow_control_xon_packets=0,rx_priority4_xoff_packets=0,tx_priority2_xoff_packets=0,rx_illegal_byte_errors=0,rx_xoff_packets=0,rx_management_packets=0,rx_priority7_dropped=0,rx_priority4_dropped=0,in_pkts_unchecked=0,rx_error_bytes=0,rx_size_256_to_511_packets=0,tx_priority4_xoff_packets=0,rx_priority6_xon_packets=0,tx_priority4_xon_to_xoff_packets=0,in_pkts_delayed=0,rx_priority0_mbuf_allocation_errors=0,out_octets_protected=0,tx_priority7_xon_to_xoff_packets=0,tx_priority1_xon_to_xoff_packets=0,rx_fcoe_no_direct_data_placement_ext_buff=0,tx_priority6_xon_to_xoff_packets=0,flow_director_filter_add_errors=0,rx_total_packets=99,rx_crc_errors=0,flow_director_filter_remove_errors=0,rx_missed_errors=0,tx_size_64_packets=0,rx_priority3_dropped=0,flow_director_matched_filters=0,tx_priority2_xon_to_xoff_packets=0,rx_priority1_xon_packets=0,rx_size_65_to_127_packets=99,rx_fragment_errors=0,in_pkts_notusingsa=0,rx_q0bytes=7162,rx_fcoe_dropped=0,rx_priority1_dropped=0,rx_fcoe_packets=0,rx_priority5_xoff_packets=0,out_pkts_protected=0,tx_total_packets=0,rx_priority2_dropped=0,in_pkts_late=0,tx_q1bytes=0,in_pkts_badtag=0,rx_multicast_packets=99,rx_priority6_xoff_packets=0,tx_flow_control_xoff_packets=0,rx_flow_control_xoff_packets=0,rx_priority0_xon_packets=0,in_pkts_untagged=0,tx_fcoe_packets=0,rx_priority7_mbuf_allocation_errors=0,tx_priority0_xon_to_xoff_packets=0,tx_priority5_xon_to_xoff_packets=0,tx_flow_control_xon_packets=0,tx_q0packets=0,tx_xoff_packets=0,rx_size_512_to_1023_packets=0,rx_priority3_xon_packets=0,rx_q0errors=0,rx_oversize_errors=0,tx_priority4_xon_packets=0,tx_priority5_xoff_packets=0,rx_priority5_xon_packets=0,rx_total_missed_packets=0,rx_priority4_mbuf_allocation_errors=0,tx_priority1_xon_packets=0,tx_management_packets=0,rx_priority5_mbuf_allocation_errors=0,rx_fcoe_no_direct_data_placement=0,rx_undersize_errors=0,tx_priority1_xoff_packets=0,rx_q0packets=99,tx_q2packets=0,tx_priority6_xon_packets=0,rx_good_packets=99,tx_priority5_xon_packets=0,tx_size_256_to_511_packets=0,rx_priority6_dropped=0,rx_broadcast_packets=0,tx_size_512_to_1023_packets=0,tx_priority3_xon_to_xoff_packets=0,in_pkts_unknownsci=0,in_octets_validated=0,tx_priority6_xoff_packets=0,tx_priority7_xoff_packets=0,rx_jabber_errors=0,tx_priority7_xon_packets=0,tx_priority0_xon_packets=0,in_pkts_unusedsa=0,tx_priority0_xoff_packets=0,mac_local_errors=33,rx_total_bytes=7162,in_pkts_notvalid=0,rx_length_errors=0,in_octets_decrypted=0,rx_size_128_to_255_packets=0,rx_good_bytes=7162,tx_size_65_to_127_packets=0,rx_mac_short_packet_dropped=0,tx_size_1024_to_max_packets=0,rx_priority2_mbuf_allocation_errors=0,flow_director_added_filters=0,tx_multicast_packets=0,rx_fcoe_crc_errors=0,rx_priority1_xoff_packets=0,flow_director_missed_filters=0,rx_xon_packets=0,tx_size_128_to_255_packets=0,out_pkts_encrypted=0,rx_priority4_xon_packets=0,rx_priority0_dropped=0,rx_size_1024_to_max_packets=0,tx_good_bytes=0,rx_management_dropped=0,rx_mbuf_allocation_errors=0,tx_xon_packets=0,rx_priority3_xoff_packets=0,tx_good_packets=0,tx_fcoe_bytes=0,rx_priority6_mbuf_allocation_errors=0,rx_priority2_xon_packets=0,tx_broadcast_packets=0,tx_q2bytes=0,rx_priority7_xon_packets=0,out_pkts_untagged=0,rx_priority2_xoff_packets=0,rx_priority1_mbuf_allocation_errors=0,tx_q0bytes=0,rx_size_64_packets=0,rx_priority5_dropped
=0,tx_priority2_xon_packets=0,in_pkts_nosci=0,flow_director_removed_filters=0,in_pkts_ok=0,rx_l3_l4_xsum_error=0,rx_priority3_mbuf_allocation_errors=0,tx_priority3_xon_packets=0 1606310780000000000 +dpdk,command=/ethdev/xstats,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 tx_priority5_xoff_packets=0,in_pkts_unknownsci=0,tx_q0packets=0,tx_total_packets=0,rx_crc_errors=0,rx_priority4_xoff_packets=0,rx_priority5_dropped=0,tx_size_65_to_127_packets=0,rx_good_packets=98,tx_priority6_xoff_packets=0,tx_fcoe_bytes=0,out_octets_protected=0,out_pkts_encrypted=0,rx_priority1_xon_packets=0,tx_size_128_to_255_packets=0,rx_flow_control_xoff_packets=0,rx_priority7_xoff_packets=0,tx_priority0_xon_to_xoff_packets=0,rx_broadcast_packets=0,tx_priority1_xon_packets=0,rx_xon_packets=0,rx_fragment_errors=0,tx_flow_control_xoff_packets=0,tx_q0bytes=0,out_pkts_untagged=0,rx_priority4_xon_packets=0,tx_priority5_xon_packets=0,rx_priority1_xoff_packets=0,rx_good_bytes=7092,rx_priority4_mbuf_allocation_errors=0,in_octets_decrypted=0,tx_priority2_xon_to_xoff_packets=0,rx_priority3_dropped=0,tx_multicast_packets=0,mac_local_errors=33,in_pkts_ok=0,rx_illegal_byte_errors=0,rx_xoff_packets=0,rx_q0errors=0,flow_director_added_filters=0,rx_size_256_to_511_packets=0,rx_priority3_xon_packets=0,rx_l3_l4_xsum_error=0,rx_priority6_dropped=0,in_pkts_notvalid=0,rx_size_64_packets=0,tx_management_packets=0,rx_length_errors=0,tx_priority7_xon_to_xoff_packets=0,rx_mbuf_allocation_errors=0,rx_missed_errors=0,rx_priority1_mbuf_allocation_errors=0,rx_fcoe_no_direct_data_placement=0,tx_priority3_xoff_packets=0,in_pkts_delayed=0,tx_errors=0,rx_size_512_to_1023_packets=0,tx_priority4_xon_packets=0,rx_q0bytes=7092,in_pkts_unchecked=0,tx_size_512_to_1023_packets=0,rx_fcoe_packets=0,in_pkts_nosci=0,rx_priority6_mbuf_allocation_errors=0,rx_priority1_dropped=0,tx_q2packets=0,rx_priority7_dropped=0,tx_size_1024_to_max_packets=0,rx_management_packets=0,rx_multicast_packets=98,rx_total_bytes=7092,mac_remote_errors=0,tx_priority3_xon_packets=0,rx_priority2_mbuf_allocation_errors=0,rx_priority5_mbuf_allocation_errors=0,tx_q2bytes=0,rx_size_128_to_255_packets=0,in_pkts_badtag=0,out_pkts_protected=0,rx_management_dropped=0,rx_fcoe_bytes=0,flow_director_removed_filters=0,tx_priority2_xoff_packets=0,rx_fcoe_crc_errors=0,rx_priority0_mbuf_allocation_errors=0,rx_priority0_xon_packets=0,rx_fcoe_dropped=0,tx_priority1_xon_to_xoff_packets=0,rx_size_65_to_127_packets=98,rx_q0packets=98,tx_priority0_xoff_packets=0,rx_priority6_xon_packets=0,rx_total_packets=98,rx_undersize_errors=0,flow_director_missed_filters=0,rx_jabber_errors=0,in_pkts_invalid=0,in_pkts_late=0,rx_priority5_xon_packets=0,tx_priority4_xoff_packets=0,out_octets_encrypted=0,tx_q1packets=0,rx_priority5_xoff_packets=0,rx_priority6_xoff_packets=0,rx_errors=0,in_octets_validated=0,rx_priority3_xoff_packets=0,tx_priority4_xon_to_xoff_packets=0,tx_priority5_xon_to_xoff_packets=0,tx_flow_control_xon_packets=0,rx_priority0_dropped=0,flow_director_filter_add_errors=0,tx_q1bytes=0,tx_priority6_xon_to_xoff_packets=0,flow_director_matched_filters=0,tx_priority2_xon_packets=0,rx_fcoe_mbuf_allocation_errors=0,rx_priority2_xoff_packets=0,tx_priority7_xoff_packets=0,rx_priority0_xoff_packets=0,rx_oversize_errors=0,in_pkts_notusingsa=0,tx_size_64_packets=0,rx_size_1024_to_max_packets=0,tx_priority6_xon_packets=0,rx_priority2_dropped=0,rx_priority4_dropped=0,rx_priority7_mbuf_allocation_errors=0,rx_flow_control_xon_packets=0,tx_good_bytes=0,tx_priority3_xon_to_xoff_packets=0,rx_total_missed_packets=0,rx_error
_bytes=0,tx_priority7_xon_packets=0,rx_mac_short_packet_dropped=0,tx_priority1_xoff_packets=0,tx_good_packets=0,tx_broadcast_packets=0,tx_xon_packets=0,in_pkts_unusedsa=0,rx_priority2_xon_packets=0,in_pkts_untagged=0,tx_fcoe_packets=0,flow_director_filter_remove_errors=0,rx_priority3_mbuf_allocation_errors=0,tx_priority0_xon_packets=0,rx_priority7_xon_packets=0,rx_fcoe_no_direct_data_placement_ext_buff=0,tx_xoff_packets=0,tx_size_256_to_511_packets=0 1606310780000000000 +dpdk,command=/ethdev/link_status,dpdk_instance=l3fwd-power,host=dpdk-host,params=0 status="UP",speed=10000,duplex="full-duplex" 1606310780000000000 +dpdk,command=/ethdev/link_status,dpdk_instance=l3fwd-power,host=dpdk-host,params=1 status="UP",speed=10000,duplex="full-duplex" 1606310780000000000 +dpdk,command=/l3fwd-power/stats,dpdk_instance=l3fwd-power,host=dpdk-host empty_poll=49506395979901,full_poll=0,busy_percent=0 1606310780000000000 +``` diff --git a/plugins/inputs/dpdk/dpdk.go b/plugins/inputs/dpdk/dpdk.go new file mode 100644 index 0000000000000..f6e7b4ecd7b9c --- /dev/null +++ b/plugins/inputs/dpdk/dpdk.go @@ -0,0 +1,233 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux +// +build linux + +package dpdk + +import ( + _ "embed" + "encoding/json" + "fmt" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +const ( + defaultPathToSocket = "/var/run/dpdk/rte/dpdk_telemetry.v2" + defaultAccessTimeout = config.Duration(200 * time.Millisecond) + maxCommandLength = 56 + maxCommandLengthWithParams = 1024 + pluginName = "dpdk" + ethdevListCommand = "/ethdev/list" + rawdevListCommand = "/rawdev/list" +) + +type dpdk struct { + SocketPath string `toml:"socket_path"` + AccessTimeout config.Duration `toml:"socket_access_timeout"` + DeviceTypes []string `toml:"device_types"` + EthdevConfig ethdevConfig `toml:"ethdev"` + AdditionalCommands []string `toml:"additional_commands"` + Log telegraf.Logger `toml:"-"` + + connector *dpdkConnector + rawdevCommands []string + ethdevCommands []string + ethdevExcludedCommandsFilter filter.Filter +} + +type ethdevConfig struct { + EthdevExcludeCommands []string `toml:"exclude_commands"` +} + +func init() { + inputs.Add(pluginName, func() telegraf.Input { + dpdk := &dpdk{ + // Setting it here (rather than in `Init()`) to distinguish between "zero" value, + // default value and don't having value in config at all. 
+			AccessTimeout: defaultAccessTimeout,
+		}
+		return dpdk
+	})
+}
+
+func (*dpdk) SampleConfig() string {
+	return sampleConfig
+}
+
+// Performs validation of all parameters from configuration
+func (dpdk *dpdk) Init() error {
+	if dpdk.SocketPath == "" {
+		dpdk.SocketPath = defaultPathToSocket
+		dpdk.Log.Debugf("using default '%v' path for socket_path", defaultPathToSocket)
+	}
+
+	if dpdk.DeviceTypes == nil {
+		dpdk.DeviceTypes = []string{"ethdev"}
+	}
+
+	var err error
+	if err = isSocket(dpdk.SocketPath); err != nil {
+		return err
+	}
+
+	dpdk.rawdevCommands = []string{"/rawdev/xstats"}
+	dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats", "/ethdev/link_status"}
+
+	if err = dpdk.validateCommands(); err != nil {
+		return err
+	}
+
+	if dpdk.AccessTimeout < 0 {
+		return fmt.Errorf("socket_access_timeout should be a positive number or equal to 0 (to disable timeouts)")
+	}
+
+	if len(dpdk.AdditionalCommands) == 0 && len(dpdk.DeviceTypes) == 0 {
+		return fmt.Errorf("plugin was configured with nothing to read")
+	}
+
+	dpdk.ethdevExcludedCommandsFilter, err = filter.Compile(dpdk.EthdevConfig.EthdevExcludeCommands)
+	if err != nil {
+		return fmt.Errorf("error occurred during filter preparation for ethdev excluded commands - %v", err)
+	}
+
+	dpdk.connector = newDpdkConnector(dpdk.SocketPath, dpdk.AccessTimeout)
+	initMessage, err := dpdk.connector.connect()
+	if initMessage != nil {
+		dpdk.Log.Debugf("Successfully connected to %v running as process with PID %v with len %v",
+			initMessage.Version, initMessage.Pid, initMessage.MaxOutputLen)
+	}
+	return err
+}
+
+// Checks that user-supplied commands are unique and match the DPDK command format
+func (dpdk *dpdk) validateCommands() error {
+	dpdk.AdditionalCommands = uniqueValues(dpdk.AdditionalCommands)
+
+	for _, commandWithParams := range dpdk.AdditionalCommands {
+		if len(commandWithParams) == 0 {
+			return fmt.Errorf("got empty command")
+		}
+
+		if commandWithParams[0] != '/' {
+			return fmt.Errorf("'%v' command should start with '/'", commandWithParams)
+		}
+
+		if commandWithoutParams := stripParams(commandWithParams); len(commandWithoutParams) >= maxCommandLength {
+			return fmt.Errorf("'%v' command is too long. It shall be less than %v characters", commandWithoutParams, maxCommandLength)
+		}
+
+		if len(commandWithParams) >= maxCommandLengthWithParams {
+			return fmt.Errorf("command with parameters '%v' shall be less than %v characters", commandWithParams, maxCommandLengthWithParams)
+		}
+	}
+
+	return nil
+}
+
+// Gathers all unique commands and processes each command sequentially.
+// Parallel processing could be achieved by running several instances of this plugin with different settings.
+func (dpdk *dpdk) Gather(acc telegraf.Accumulator) error {
+	// This needs to be done during every `Gather(...)`, because DPDK can be restarted between consecutive
+	// `Gather(...)` cycles, which may cause it to expose a different set of metrics.
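+	// Re-reading the device lists here also means that devices appearing after a
+	// DPDK restart are picked up automatically on the next gathering cycle.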
+	commands := dpdk.gatherCommands(acc)
+
+	for _, command := range commands {
+		dpdk.processCommand(acc, command)
+	}
+
+	return nil
+}
+
+// Gathers all unique commands
+func (dpdk *dpdk) gatherCommands(acc telegraf.Accumulator) []string {
+	var commands []string
+	if choice.Contains("ethdev", dpdk.DeviceTypes) {
+		ethdevCommands := removeSubset(dpdk.ethdevCommands, dpdk.ethdevExcludedCommandsFilter)
+		ethdevCommands, err := dpdk.appendCommandsWithParamsFromList(ethdevListCommand, ethdevCommands)
+		if err != nil {
+			acc.AddError(fmt.Errorf("error occurred during fetching of %v params - %v", ethdevListCommand, err))
+		}
+
+		commands = append(commands, ethdevCommands...)
+	}
+
+	if choice.Contains("rawdev", dpdk.DeviceTypes) {
+		rawdevCommands, err := dpdk.appendCommandsWithParamsFromList(rawdevListCommand, dpdk.rawdevCommands)
+		if err != nil {
+			acc.AddError(fmt.Errorf("error occurred during fetching of %v params - %v", rawdevListCommand, err))
+		}
+
+		commands = append(commands, rawdevCommands...)
+	}
+
+	commands = append(commands, dpdk.AdditionalCommands...)
+	return uniqueValues(commands)
+}
+
+// Fetches all identifiers of devices and then creates all possible combinations of commands for each device
+func (dpdk *dpdk) appendCommandsWithParamsFromList(listCommand string, commands []string) ([]string, error) {
+	response, err := dpdk.connector.getCommandResponse(listCommand)
+	if err != nil {
+		return nil, err
+	}
+
+	params, err := jsonToArray(response, listCommand)
+	if err != nil {
+		return nil, err
+	}
+
+	result := make([]string, 0, len(commands)*len(params))
+	for _, command := range commands {
+		for _, param := range params {
+			result = append(result, commandWithParams(command, param))
+		}
+	}
+
+	return result, nil
+}
+
+// Executes command, parses response and creates/writes metric from response
+func (dpdk *dpdk) processCommand(acc telegraf.Accumulator, commandWithParams string) {
+	buf, err := dpdk.connector.getCommandResponse(commandWithParams)
+	if err != nil {
+		acc.AddError(err)
+		return
+	}
+
+	var parsedResponse map[string]interface{}
+	err = json.Unmarshal(buf, &parsedResponse)
+	if err != nil {
+		acc.AddError(fmt.Errorf("failed to unmarshal json response from %v command - %v", commandWithParams, err))
+		return
+	}
+
+	command := stripParams(commandWithParams)
+	value := parsedResponse[command]
+	if isEmpty(value) {
+		acc.AddError(fmt.Errorf("got empty json on '%v' command", commandWithParams))
+		return
+	}
+
+	jf := jsonparser.JSONFlattener{}
+	err = jf.FullFlattenJSON("", value, true, true)
+	if err != nil {
+		acc.AddError(fmt.Errorf("failed to flatten response - %v", err))
+		return
+	}
+
+	acc.AddFields(pluginName, jf.Fields, map[string]string{
+		"command": command,
+		"params":  getParams(commandWithParams),
+	})
+}
diff --git a/plugins/inputs/dpdk/dpdk_connector.go b/plugins/inputs/dpdk/dpdk_connector.go
new file mode 100644
index 0000000000000..9cd9c81c4362b
--- /dev/null
+++ b/plugins/inputs/dpdk/dpdk_connector.go
@@ -0,0 +1,163 @@
+//go:build linux
+// +build linux
+
+package dpdk
+
+import (
+	"encoding/json"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/influxdata/telegraf/config"
+)
+
+const maxInitMessageLength = 1024
+
+type initMessage struct {
+	Version      string `json:"version"`
+	Pid          int    `json:"pid"`
+	MaxOutputLen uint32 `json:"max_output_len"`
+}
+
+type dpdkConnector struct {
+	pathToSocket  string
+	maxOutputLen  uint32
+	messageShowed bool
+	accessTimeout time.Duration
+	connection    net.Conn
+}
+
+func newDpdkConnector(pathToSocket string, accessTimeout
config.Duration) *dpdkConnector {
+	return &dpdkConnector{
+		pathToSocket:  pathToSocket,
+		messageShowed: false,
+		accessTimeout: time.Duration(accessTimeout),
+	}
+}
+
+// Connects to the socket.
+// Since DPDK uses a local unix socket, this instantly returns either an error or a connection, so there's no need to set a timeout for it.
+func (conn *dpdkConnector) connect() (*initMessage, error) {
+	connection, err := net.Dial("unixpacket", conn.pathToSocket)
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect to the socket - %v", err)
+	}
+
+	conn.connection = connection
+	result, err := conn.readMaxOutputLen()
+	if err != nil {
+		if closeErr := conn.tryClose(); closeErr != nil {
+			return nil, fmt.Errorf("%v and failed to close connection - %v", err, closeErr)
+		}
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Executes the command using the provided connection and returns the response.
+// If an error (such as a timeout) occurs, the connection is discarded and recreated,
+// because otherwise the behaviour of the connection is undefined (e.g. it could return the result of a timed out command instead of the latest one).
+func (conn *dpdkConnector) getCommandResponse(fullCommand string) ([]byte, error) {
+	connection, err := conn.getConnection()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get connection to execute %v command - %v", fullCommand, err)
+	}
+
+	err = conn.setTimeout()
+	if err != nil {
+		return nil, fmt.Errorf("failed to set timeout for %v command - %v", fullCommand, err)
+	}
+
+	_, err = connection.Write([]byte(fullCommand))
+	if err != nil {
+		if closeErr := conn.tryClose(); closeErr != nil {
+			return nil, fmt.Errorf("failed to send '%v' command - %v and failed to close connection - %v",
+				fullCommand, err, closeErr)
+		}
+		return nil, fmt.Errorf("failed to send '%v' command - %v", fullCommand, err)
+	}
+
+	buf := make([]byte, conn.maxOutputLen)
+	messageLength, err := connection.Read(buf)
+	if err != nil {
+		if closeErr := conn.tryClose(); closeErr != nil {
+			return nil, fmt.Errorf("failed to read response of '%v' command - %v and failed to close connection - %v",
+				fullCommand, err, closeErr)
+		}
+		return nil, fmt.Errorf("failed to read response of '%v' command - %v", fullCommand, err)
+	}
+
+	if messageLength == 0 {
+		return nil, fmt.Errorf("got empty response during execution of '%v' command", fullCommand)
+	}
+	return buf[:messageLength], nil
+}
+
+func (conn *dpdkConnector) tryClose() error {
+	if conn.connection == nil {
+		return nil
+	}
+
+	err := conn.connection.Close()
+	conn.connection = nil
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (conn *dpdkConnector) setTimeout() error {
+	if conn.connection == nil {
+		return fmt.Errorf("connection had not been established before")
+	}
+
+	if conn.accessTimeout == 0 {
+		return conn.connection.SetDeadline(time.Time{})
+	}
+	return conn.connection.SetDeadline(time.Now().Add(conn.accessTimeout))
+}
+
+// Returns the connection; if the connection has not been created yet, this function tries to create it.
+func (conn *dpdkConnector) getConnection() (net.Conn, error) {
+	if conn.connection == nil {
+		_, err := conn.connect()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return conn.connection, nil
+}
+
+// Reads the InitMessage for the connection. It should be read for each connection, otherwise the InitMessage is returned as the response to the first command.
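+// Only the first successful read on a connector returns a non-nil *initMessage and
+// records maxOutputLen; subsequent calls return (nil, nil).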
+func (conn *dpdkConnector) readMaxOutputLen() (*initMessage, error) { + buf := make([]byte, maxInitMessageLength) + err := conn.setTimeout() + if err != nil { + return nil, fmt.Errorf("failed to set timeout - %v", err) + } + + messageLength, err := conn.connection.Read(buf) + if err != nil { + return nil, fmt.Errorf("failed to read InitMessage - %v", err) + } + + var initMessage initMessage + err = json.Unmarshal(buf[:messageLength], &initMessage) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal response - %v", err) + } + + if initMessage.MaxOutputLen == 0 { + return nil, fmt.Errorf("failed to read maxOutputLen information") + } + + if !conn.messageShowed { + conn.maxOutputLen = initMessage.MaxOutputLen + conn.messageShowed = true + return &initMessage, nil + } + + return nil, nil +} diff --git a/plugins/inputs/dpdk/dpdk_connector_test.go b/plugins/inputs/dpdk/dpdk_connector_test.go new file mode 100644 index 0000000000000..f5580417c3c67 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_connector_test.go @@ -0,0 +1,183 @@ +//go:build linux +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/dpdk/mocks" +) + +func Test_readMaxOutputLen(t *testing.T) { + t.Run("should return error if timeout occurred", func(t *testing.T) { + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Return(0, fmt.Errorf("timeout")) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err := connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "timeout") + }) + + t.Run("should pass and set maxOutputLen if provided with valid InitMessage", func(t *testing.T) { + maxOutputLen := uint32(4567) + initMessage := initMessage{ + Version: "DPDK test version", + Pid: 1234, + MaxOutputLen: maxOutputLen, + } + message, err := json.Marshal(initMessage) + require.NoError(t, err) + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err = connector.readMaxOutputLen() + + require.NoError(t, err) + require.Equal(t, maxOutputLen, connector.maxOutputLen) + }) + + t.Run("should fail if received invalid json", func(t *testing.T) { + message := `{notAJson}` + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err := connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "looking for beginning of object key string") + }) + + t.Run("should fail if received maxOutputLen equals to 0", func(t *testing.T) { + message, err := json.Marshal(initMessage{ + Version: "test", + Pid: 1, + MaxOutputLen: 0, + }) + require.NoError(t, err) + conn := &mocks.Conn{} + conn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, message) + }).Return(len(message), nil) + conn.On("SetDeadline", mock.Anything).Return(nil) + connector := dpdkConnector{connection: conn} + + _, err = connector.readMaxOutputLen() + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to read 
maxOutputLen information") + }) +} + +func Test_connect(t *testing.T) { + t.Run("should pass if PathToSocket points to socket", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + connector: newDpdkConnector(pathToSocket, 0), + } + go simulateSocketResponse(socket, t) + + _, err := dpdk.connector.connect() + + require.NoError(t, err) + }) +} + +func Test_getCommandResponse(t *testing.T) { + command := "/" + response := "myResponseString" + + t.Run("should return proper buffer size and value if no error occurred", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, response, nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.NoError(t, err) + require.Equal(t, len(response), len(buf)) + require.Equal(t, response, string(buf)) + }) + + t.Run("should return error if failed to get connection handler", func(t *testing.T) { + _, dpdk, _ := prepareEnvironment() + dpdk.connector.connection = nil + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to get connection to execute / command") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if failed to set timeout duration", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("SetDeadline", mock.Anything).Return(fmt.Errorf("deadline error")) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "deadline error") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if timeout occurred during Write operation", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("Write", mock.Anything).Return(0, fmt.Errorf("write timeout")) + mockConn.On("SetDeadline", mock.Anything).Return(nil) + mockConn.On("Close").Return(nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "write timeout") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if timeout occurred during Read operation", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, "", fmt.Errorf("read timeout")) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Contains(t, err.Error(), "read timeout") + require.Equal(t, 0, len(buf)) + }) + + t.Run("should return error if got empty response", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, "", nil) + + buf, err := dpdk.connector.getCommandResponse(command) + + require.Error(t, err) + require.Equal(t, 0, len(buf)) + require.Contains(t, err.Error(), "got empty response during execution of") + }) +} diff --git a/plugins/inputs/dpdk/dpdk_notlinux.go b/plugins/inputs/dpdk/dpdk_notlinux.go new file mode 100644 index 0000000000000..1831b1212ae78 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux +// +build !linux + +package dpdk diff --git a/plugins/inputs/dpdk/dpdk_test.go b/plugins/inputs/dpdk/dpdk_test.go new file mode 100644 index 0000000000000..ea000f5be155a --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_test.go @@ -0,0 +1,388 @@ +//go:build 
linux +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "net" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/inputs/dpdk/mocks" + "github.com/influxdata/telegraf/testutil" +) + +func Test_Init(t *testing.T) { + t.Run("when SocketPath field isn't set then it should be set to default value", func(t *testing.T) { + _, dpdk, _ := prepareEnvironment() + dpdk.SocketPath = "" + require.Equal(t, "", dpdk.SocketPath) + + _ = dpdk.Init() + + require.Equal(t, defaultPathToSocket, dpdk.SocketPath) + }) + + t.Run("when commands are in invalid format (doesn't start with '/') then error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + AdditionalCommands: []string{"invalid"}, + } + + err := dpdk.Init() + + require.Error(t, err) + require.Contains(t, err.Error(), "command should start with '/'") + }) + + t.Run("when all values are valid, then no error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + DeviceTypes: []string{"ethdev"}, + Log: testutil.Logger{}, + } + go simulateSocketResponse(socket, t) + + err := dpdk.Init() + + require.NoError(t, err) + }) + + t.Run("when device_types and additional_commands are empty, then error should be returned", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + dpdk := dpdk{ + SocketPath: pathToSocket, + DeviceTypes: []string{}, + AdditionalCommands: []string{}, + Log: testutil.Logger{}, + } + + err := dpdk.Init() + + require.Error(t, err) + require.Contains(t, err.Error(), "plugin was configured with nothing to read") + }) +} + +func Test_validateCommands(t *testing.T) { + t.Run("when validating commands in correct format then no error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{"/test", "/help"}, + } + + err := dpdk.validateCommands() + + require.NoError(t, err) + }) + + t.Run("when validating command that doesn't begin with slash then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "commandWithoutSlash", + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "command should start with '/'") + }) + + t.Run("when validating long command (without parameters) then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/" + strings.Repeat("a", maxCommandLength), + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "command is too long") + }) + + t.Run("when validating long command (with params) then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/," + strings.Repeat("a", maxCommandLengthWithParams), + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, err.Error(), "shall be less than 1024 characters") + }) + + t.Run("when validating empty command then error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "", + }, + } + + err := dpdk.validateCommands() + + require.Error(t, err) + require.Contains(t, 
err.Error(), "got empty command") + }) + + t.Run("when validating commands with duplicates then duplicates should be removed and no error should be returned", func(t *testing.T) { + dpdk := dpdk{ + AdditionalCommands: []string{ + "/test", "/test", + }, + } + require.Equal(t, 2, len(dpdk.AdditionalCommands)) + + err := dpdk.validateCommands() + + require.Equal(t, 1, len(dpdk.AdditionalCommands)) + require.NoError(t, err) + }) +} + +func prepareEnvironment() (*mocks.Conn, dpdk, *testutil.Accumulator) { + mockConnection := &mocks.Conn{} + dpdk := dpdk{ + connector: &dpdkConnector{ + connection: mockConnection, + maxOutputLen: 1024, + accessTimeout: 2 * time.Second, + }, + Log: testutil.Logger{}, + } + mockAcc := &testutil.Accumulator{} + return mockConnection, dpdk, mockAcc +} + +func Test_processCommand(t *testing.T) { + t.Run("should pass if received valid response", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/": ["/", "/eal/app_params", "/eal/params", "/ethdev/link_status"]}` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("if received a non-JSON object then should return error", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `notAJson` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "invalid character") + }) + + t.Run("if failed to get command response then accumulator should contain error", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + mockConn.On("Write", mock.Anything).Return(0, fmt.Errorf("deadline exceeded")) + mockConn.On("SetDeadline", mock.Anything).Return(nil) + mockConn.On("Close").Return(nil) + + dpdk.processCommand(mockAcc, "/") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "deadline exceeded") + }) + + t.Run("if response contains nil or empty value then error should be returned in accumulator", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/test": null}` + simulateResponse(mockConn, response, nil) + + dpdk.processCommand(mockAcc, "/test,param") + + require.Equal(t, 1, len(mockAcc.Errors)) + require.Contains(t, mockAcc.Errors[0].Error(), "got empty json on") + }) +} + +func Test_appendCommandsWithParams(t *testing.T) { + t.Run("when got valid data, then valid commands with params should be created", func(t *testing.T) { + mockConn, dpdk, _ := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := `{"/testendpoint": [1,123]}` + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/action1,1", "/action1,123", "/action2,1", "/action2,123"} + + result, err := dpdk.appendCommandsWithParamsFromList("/testendpoint", []string{"/action1", "/action2"}) + + require.NoError(t, err) + require.Equal(t, 4, len(result)) + require.ElementsMatch(t, result, expectedCommands) + }) +} + +func Test_getCommandsAndParamsCombinations(t *testing.T) { + t.Run("when 2 ethdev commands are enabled, then 2*numberOfIds new commands should be appended", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := fmt.Sprintf(`{"%s": [1, 
123]}`, ethdevListCommand) + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/ethdev/stats,1", "/ethdev/stats,123", "/ethdev/xstats,1", "/ethdev/xstats,123"} + + dpdk.DeviceTypes = []string{"ethdev"} + dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"} + dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{}) + dpdk.AdditionalCommands = []string{} + commands := dpdk.gatherCommands(mockAcc) + + require.ElementsMatch(t, commands, expectedCommands) + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("when 1 rawdev command is enabled, then 2*numberOfIds new commands should be appended", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := fmt.Sprintf(`{"%s": [1, 123]}`, rawdevListCommand) + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/rawdev/xstats,1", "/rawdev/xstats,123"} + + dpdk.DeviceTypes = []string{"rawdev"} + dpdk.rawdevCommands = []string{"/rawdev/xstats"} + dpdk.AdditionalCommands = []string{} + commands := dpdk.gatherCommands(mockAcc) + + require.ElementsMatch(t, commands, expectedCommands) + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("when 2 ethdev commands are enabled but one command is disabled, then numberOfIds new commands should be appended", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + response := fmt.Sprintf(`{"%s": [1, 123]}`, ethdevListCommand) + simulateResponse(mockConn, response, nil) + expectedCommands := []string{"/ethdev/stats,1", "/ethdev/stats,123"} + + dpdk.DeviceTypes = []string{"ethdev"} + dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"} + dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{"/ethdev/xstats"}) + dpdk.AdditionalCommands = []string{} + commands := dpdk.gatherCommands(mockAcc) + + require.ElementsMatch(t, commands, expectedCommands) + require.Equal(t, 0, len(mockAcc.Errors)) + }) + + t.Run("when ethdev commands are enabled but params fetching command returns error then error should be logged in accumulator", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + simulateResponse(mockConn, `{notAJson}`, fmt.Errorf("some error")) + + dpdk.DeviceTypes = []string{"ethdev"} + dpdk.ethdevCommands = []string{"/ethdev/stats", "/ethdev/xstats"} + dpdk.ethdevExcludedCommandsFilter, _ = filter.Compile([]string{}) + dpdk.AdditionalCommands = []string{} + commands := dpdk.gatherCommands(mockAcc) + + require.Equal(t, 0, len(commands)) + require.Equal(t, 1, len(mockAcc.Errors)) + }) +} + +func Test_Gather(t *testing.T) { + t.Run("When parsing a plain json without nested object, then its key should be equal to \"\"", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + dpdk.AdditionalCommands = []string{"/endpoint1"} + simulateResponse(mockConn, `{"/endpoint1":"myvalue"}`, nil) + + err := dpdk.Gather(mockAcc) + + require.NoError(t, err) + require.Equal(t, 0, len(mockAcc.Errors)) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "dpdk", + map[string]string{ + "command": "/endpoint1", + "params": "", + }, + map[string]interface{}{ + "": "myvalue", + }, + time.Unix(0, 0), + ), + } + + actual := mockAcc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) + }) + + t.Run("When parsing a list of value in nested object then list should be 
flattened", func(t *testing.T) { + mockConn, dpdk, mockAcc := prepareEnvironment() + defer mockConn.AssertExpectations(t) + dpdk.AdditionalCommands = []string{"/endpoint1"} + simulateResponse(mockConn, `{"/endpoint1":{"myvalue":[0,1,123]}}`, nil) + + err := dpdk.Gather(mockAcc) + require.NoError(t, err) + require.Equal(t, 0, len(mockAcc.Errors)) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "dpdk", + map[string]string{ + "command": "/endpoint1", + "params": "", + }, + map[string]interface{}{ + "myvalue_0": float64(0), + "myvalue_1": float64(1), + "myvalue_2": float64(123), + }, + time.Unix(0, 0), + ), + } + + actual := mockAcc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) + }) +} + +func simulateResponse(mockConn *mocks.Conn, response string, readErr error) { + mockConn.On("Write", mock.Anything).Return(0, nil) + mockConn.On("Read", mock.Anything).Run(func(arg mock.Arguments) { + elem := arg.Get(0).([]byte) + copy(elem, response) + }).Return(len(response), readErr) + mockConn.On("SetDeadline", mock.Anything).Return(nil) + + if readErr != nil { + mockConn.On("Close").Return(nil) + } +} + +func simulateSocketResponse(socket net.Listener, t *testing.T) { + conn, err := socket.Accept() + require.NoError(t, err) + + initMessage, err := json.Marshal(initMessage{MaxOutputLen: 1}) + require.NoError(t, err) + + _, err = conn.Write(initMessage) + require.NoError(t, err) +} diff --git a/plugins/inputs/dpdk/dpdk_utils.go b/plugins/inputs/dpdk/dpdk_utils.go new file mode 100644 index 0000000000000..b7049d8365597 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_utils.go @@ -0,0 +1,117 @@ +//go:build linux +// +build linux + +package dpdk + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strconv" + "strings" + + "github.com/influxdata/telegraf/filter" +) + +func commandWithParams(command string, params string) string { + if params != "" { + return command + "," + params + } + return command +} + +func stripParams(command string) string { + index := strings.IndexRune(command, ',') + if index == -1 { + return command + } + return command[:index] +} + +// Since DPDK is an open-source project, developers can use their own format of params +// so it could "/command,1,3,5,123" or "/command,userId=1, count=1234". 
+// To avoid issues with different formats of params, all params are returned as single string +func getParams(command string) string { + index := strings.IndexRune(command, ',') + if index == -1 { + return "" + } + return command[index+1:] +} + +// Checks if provided path points to socket +func isSocket(path string) error { + pathInfo, err := os.Lstat(path) + if os.IsNotExist(err) { + return fmt.Errorf("provided path does not exist: '%v'", path) + } + + if err != nil { + return fmt.Errorf("cannot get system information of '%v' file: %v", path, err) + } + + if pathInfo.Mode()&os.ModeSocket != os.ModeSocket { + return fmt.Errorf("provided path does not point to a socket file: '%v'", path) + } + + return nil +} + +// Converts JSON array containing devices identifiers from DPDK response to string slice +func jsonToArray(input []byte, command string) ([]string, error) { + if len(input) == 0 { + return nil, fmt.Errorf("got empty object instead of json") + } + + var rawMessage map[string]json.RawMessage + err := json.Unmarshal(input, &rawMessage) + if err != nil { + return nil, err + } + + var intArray []int64 + var stringArray []string + err = json.Unmarshal(rawMessage[command], &intArray) + if err != nil { + return nil, fmt.Errorf("failed to unmarshall json response - %v", err) + } + + for _, value := range intArray { + stringArray = append(stringArray, strconv.FormatInt(value, 10)) + } + + return stringArray, nil +} + +func removeSubset(elements []string, excludedFilter filter.Filter) []string { + if excludedFilter == nil { + return elements + } + + var result []string + for _, element := range elements { + if !excludedFilter.Match(element) { + result = append(result, element) + } + } + + return result +} + +func uniqueValues(values []string) []string { + in := make(map[string]bool) + result := make([]string, 0, len(values)) + + for _, value := range values { + if !in[value] { + in[value] = true + result = append(result, value) + } + } + return result +} + +func isEmpty(value interface{}) bool { + return value == nil || (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) +} diff --git a/plugins/inputs/dpdk/dpdk_utils_test.go b/plugins/inputs/dpdk/dpdk_utils_test.go new file mode 100644 index 0000000000000..87e8a6c8248c3 --- /dev/null +++ b/plugins/inputs/dpdk/dpdk_utils_test.go @@ -0,0 +1,138 @@ +//go:build linux +// +build linux + +package dpdk + +import ( + "fmt" + "net" + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_isSocket(t *testing.T) { + t.Run("when path points to non-existing file then error should be returned", func(t *testing.T) { + err := isSocket("/tmp/file-that-doesnt-exists") + + require.Error(t, err) + require.Contains(t, err.Error(), "provided path does not exist") + }) + + t.Run("should pass if path points to socket", func(t *testing.T) { + pathToSocket, socket := createSocketForTest(t) + defer socket.Close() + + err := isSocket(pathToSocket) + + require.NoError(t, err) + }) + + t.Run("if path points to regular file instead of socket then error should be returned", func(t *testing.T) { + pathToFile := "/tmp/dpdk-text-file.txt" + _, err := os.Create(pathToFile) + require.NoError(t, err) + defer os.Remove(pathToFile) + + err = isSocket(pathToFile) + + require.Error(t, err) + require.Contains(t, err.Error(), "provided path does not point to a socket file") + }) +} + +func Test_stripParams(t *testing.T) { + command := "/mycommand" + params := "myParams" + t.Run("when passed string without params then passed 
string should be returned", func(t *testing.T) { + strippedCommand := stripParams(command) + + require.Equal(t, command, strippedCommand) + }) + + t.Run("when passed string with params then string without params should be returned", func(t *testing.T) { + strippedCommand := stripParams(commandWithParams(command, params)) + + require.Equal(t, command, strippedCommand) + }) +} + +func Test_commandWithParams(t *testing.T) { + command := "/mycommand" + params := "myParams" + t.Run("when passed string with params then command with comma should be returned", func(t *testing.T) { + commandWithParams := commandWithParams(command, params) + + require.Equal(t, command+","+params, commandWithParams) + }) + + t.Run("when passed command with no params then command should be returned", func(t *testing.T) { + commandWithParams := commandWithParams(command, "") + + require.Equal(t, command, commandWithParams) + }) +} + +func Test_getParams(t *testing.T) { + command := "/mycommand" + params := "myParams" + t.Run("when passed string with params then command with comma should be returned", func(t *testing.T) { + commandParams := getParams(commandWithParams(command, params)) + + require.Equal(t, params, commandParams) + }) + + t.Run("when passed command with no params then empty string (representing empty params) should be returned", func(t *testing.T) { + commandParams := getParams(commandWithParams(command, "")) + + require.Equal(t, "", commandParams) + }) +} + +func Test_jsonToArray(t *testing.T) { + key := "/ethdev/list" + t.Run("when got numeric array then string array should be returned", func(t *testing.T) { + firstValue := int64(0) + secondValue := int64(1) + jsonString := fmt.Sprintf(`{"%s": [%d, %d]}`, key, firstValue, secondValue) + + arr, err := jsonToArray([]byte(jsonString), key) + + require.NoError(t, err) + require.Equal(t, strconv.FormatInt(firstValue, 10), arr[0]) + require.Equal(t, strconv.FormatInt(secondValue, 10), arr[1]) + }) + + t.Run("if non-json string is supplied as input then error should be returned", func(t *testing.T) { + _, err := jsonToArray([]byte("{notAJson}"), key) + + require.Error(t, err) + }) + + t.Run("when empty string is supplied as input then error should be returned", func(t *testing.T) { + jsonString := "" + + _, err := jsonToArray([]byte(jsonString), key) + + require.Error(t, err) + require.Contains(t, err.Error(), "got empty object instead of json") + }) + + t.Run("when valid json with json-object is supplied as input then error should be returned", func(t *testing.T) { + jsonString := fmt.Sprintf(`{"%s": {"testKey": "testValue"}}`, key) + + _, err := jsonToArray([]byte(jsonString), key) + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to unmarshall json response") + }) +} + +func createSocketForTest(t *testing.T) (string, net.Listener) { + pathToSocket := "/tmp/dpdk-test-socket" + socket, err := net.Listen("unixpacket", pathToSocket) + require.NoError(t, err) + return pathToSocket, socket +} diff --git a/plugins/inputs/dpdk/mocks/conn.go b/plugins/inputs/dpdk/mocks/conn.go new file mode 100644 index 0000000000000..58961039dce86 --- /dev/null +++ b/plugins/inputs/dpdk/mocks/conn.go @@ -0,0 +1,146 @@ +// Code generated by mockery v0.0.0-dev. DO NOT EDIT. 
+ +package mocks + +import ( + net "net" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Conn is an autogenerated mock type for the Conn type +type Conn struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *Conn) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// LocalAddr provides a mock function with given fields: +func (_m *Conn) LocalAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// Read provides a mock function with given fields: b +func (_m *Conn) Read(b []byte) (int, error) { + ret := _m.Called(b) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(b) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(b) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoteAddr provides a mock function with given fields: +func (_m *Conn) RemoteAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// SetDeadline provides a mock function with given fields: t +func (_m *Conn) SetDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetReadDeadline provides a mock function with given fields: t +func (_m *Conn) SetReadDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetWriteDeadline provides a mock function with given fields: t +func (_m *Conn) SetWriteDeadline(t time.Time) error { + ret := _m.Called(t) + + var r0 error + if rf, ok := ret.Get(0).(func(time.Time) error); ok { + r0 = rf(t) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Write provides a mock function with given fields: b +func (_m *Conn) Write(b []byte) (int, error) { + ret := _m.Called(b) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(b) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(b) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/plugins/inputs/dpdk/sample.conf b/plugins/inputs/dpdk/sample.conf new file mode 100644 index 0000000000000..1391ce7a791c0 --- /dev/null +++ b/plugins/inputs/dpdk/sample.conf @@ -0,0 +1,32 @@ +# Reads metrics from DPDK applications using v2 telemetry interface. +[[inputs.dpdk]] + ## Path to DPDK telemetry socket. This shall point to v2 version of DPDK telemetry interface. + # socket_path = "/var/run/dpdk/rte/dpdk_telemetry.v2" + + ## Duration that defines how long the connected socket client will wait for a response before terminating connection. + ## This includes both writing to and reading from socket. Since it's local socket access + ## to a fast packet processing application, the timeout should be sufficient for most users. + ## Setting the value to 0 disables the timeout (not recommended) + # socket_access_timeout = "200ms" + + ## Enables telemetry data collection for selected device types. 
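+  ## Supported values are "ethdev" and "rawdev":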
+ ## Adding "ethdev" enables collection of telemetry from DPDK NICs (stats, xstats, link_status). + ## Adding "rawdev" enables collection of telemetry from DPDK Raw Devices (xstats). + # device_types = ["ethdev"] + + ## List of custom, application-specific telemetry commands to query + ## The list of available commands depend on the application deployed. Applications can register their own commands + ## via telemetry library API http://doc.dpdk.org/guides/prog_guide/telemetry_lib.html#registering-commands + ## For e.g. L3 Forwarding with Power Management Sample Application this could be: + ## additional_commands = ["/l3fwd-power/stats"] + # additional_commands = [] + + ## Allows turning off collecting data for individual "ethdev" commands. + ## Remove "/ethdev/link_status" from list to start getting link status metrics. + [inputs.dpdk.ethdev] + exclude_commands = ["/ethdev/link_status"] + + ## When running multiple instances of the plugin it's recommended to add a unique tag to each instance to identify + ## metrics exposed by an instance of DPDK application. This is useful when multiple DPDK apps run on a single host. + ## [inputs.dpdk.tags] + ## dpdk_instance = "my-fwd-app" diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md index 9e3188eec30bf..0e652b147f586 100644 --- a/plugins/inputs/ecs/README.md +++ b/plugins/inputs/ecs/README.md @@ -1,22 +1,21 @@ # Amazon ECS Input Plugin -Amazon ECS, Fargate compatible, input plugin which uses the Amazon ECS metadata and -stats [v2][task-metadata-endpoint-v2] or [v3][task-metadata-endpoint-v3] API endpoints -to gather stats on running containers in a Task. +Amazon ECS, Fargate compatible, input plugin which uses the Amazon ECS metadata +and stats [v2][task-metadata-endpoint-v2] or [v3][task-metadata-endpoint-v3] API +endpoints to gather stats on running containers in a Task. The telegraf container must be run in the same Task as the workload it is inspecting. -This is similar to (and reuses a few pieces of) the [Docker][docker-input] -input plugin, with some ECS specific modifications for AWS metadata and stats -formats. +This is similar to (and reuses a few pieces of) the [Docker][docker-input] input +plugin, with some ECS specific modifications for AWS metadata and stats formats. The amazon-ecs-agent (though it _is_ a container running on the host) is not present in the metadata/stats endpoints. -### Configuration +## Configuration -```toml +```toml @sample.conf # Read metrics about ECS containers [[inputs.ecs]] ## ECS metadata url. @@ -45,7 +44,7 @@ present in the metadata/stats endpoints. # timeout = "5s" ``` -### Configuration (enforce v2 metadata) +## Configuration (enforce v2 metadata) ```toml # Read metrics about ECS containers @@ -76,7 +75,7 @@ present in the metadata/stats endpoints. # timeout = "5s" ``` -### Metrics +## Metrics - ecs_task - tags: @@ -87,13 +86,12 @@ present in the metadata/stats endpoints. - id - name - fields: - - revision (string) - desired_status (string) - known_status (string) - limit_cpu (float) - limit_mem (float) -+ ecs_container_mem +- ecs_container_mem - tags: - cluster - task_arn @@ -159,7 +157,7 @@ present in the metadata/stats endpoints. - usage_percent - usage_total -+ ecs_container_net +- ecs_container_net - tags: - cluster - task_arn @@ -201,7 +199,7 @@ present in the metadata/stats endpoints. 
- io_serviced_recursive_total - io_serviced_recursive_write -+ ecs_container_meta +- ecs_container_meta - tags: - cluster - task_arn @@ -222,11 +220,10 @@ present in the metadata/stats endpoints. - started_at - type +## Example -### Example Output - -``` -ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a revision="2",desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 +```shell +ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000 ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu0,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_total=26426156i 1542642001000000000 @@ -243,4 +240,4 @@ ecs_container_meta,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs [docker-input]: 
/plugins/inputs/docker/README.md [task-metadata-endpoint-v2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html -[task-metadata-endpoint-v3] https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html +[task-metadata-endpoint-v3]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v3.html diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go index d7ce10cb2a2e0..b5521c5ea3f3a 100644 --- a/plugins/inputs/ecs/client.go +++ b/plugins/inputs/ecs/client.go @@ -3,7 +3,6 @@ package ecs import ( "fmt" "io" - "io/ioutil" "net/http" "net/url" "time" @@ -113,7 +112,7 @@ func (c *EcsClient) Task() (*Task, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body) } @@ -137,7 +136,7 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body) } @@ -152,7 +151,6 @@ func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { // PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned. // If either errors, a single error is returned. func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) { - var task *Task var stats map[string]types.StatsJSON var err error diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go index 333aec80c2709..14b32c6851db7 100644 --- a/plugins/inputs/ecs/client_test.go +++ b/plugins/inputs/ecs/client_test.go @@ -3,14 +3,14 @@ package ecs import ( "bytes" "errors" - "io/ioutil" + "io" "net/http" "net/url" "os" "testing" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type pollMock struct { @@ -27,7 +27,6 @@ func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) { } func TestEcsClient_PollSync(t *testing.T) { - tests := []struct { name string mock *pollMock @@ -81,8 +80,8 @@ func TestEcsClient_PollSync(t *testing.T) { t.Errorf("EcsClient.PollSync() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want) - assert.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1) + require.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want) + require.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1) }) } } @@ -109,7 +108,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -130,7 +129,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: 
io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -142,7 +141,7 @@ func TestEcsClient_Task(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -161,7 +160,7 @@ func TestEcsClient_Task(t *testing.T) { t.Errorf("EcsClient.Task() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want) + require.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want) }) } } @@ -180,7 +179,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(rc), + Body: io.NopCloser(rc), }, nil }, }, @@ -202,7 +201,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -215,7 +214,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { do: func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusInternalServerError, - Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + Body: io.NopCloser(bytes.NewReader([]byte("foo"))), }, nil }, }, @@ -235,7 +234,7 @@ func TestEcsClient_ContainerStats(t *testing.T) { t.Errorf("EcsClient.ContainerStats() error = %v, wantErr %v", err, tt.wantErr) return } - assert.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want) + require.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want) }) } } @@ -269,10 +268,10 @@ func TestResolveTaskURL(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { baseURL, err := url.Parse(tt.base) - assert.NoError(t, err) + require.NoError(t, err) act := resolveTaskURL(baseURL, tt.ver) - assert.Equal(t, tt.exp, act) + require.Equal(t, tt.exp, act) }) } } @@ -306,10 +305,10 @@ func TestResolveStatsURL(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { baseURL, err := url.Parse(tt.base) - assert.NoError(t, err) + require.NoError(t, err) act := resolveStatsURL(baseURL, tt.ver) - assert.Equal(t, tt.exp, act) + require.Equal(t, tt.exp, act) }) } } diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go index 5fa53d4fd58bc..7c93f18f4bd17 100644 --- a/plugins/inputs/ecs/ecs.go +++ b/plugins/inputs/ecs/ecs.go @@ -1,20 +1,26 @@ +//go:generate ../../../tools/readme_config_includer/generator package ecs import ( + _ "embed" "os" "strings" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Ecs config object type Ecs struct { EndpointURL string `toml:"endpoint_url"` - Timeout internal.Duration + Timeout config.Duration ContainerNameInclude []string `toml:"container_name_include"` ContainerNameExclude []string `toml:"container_name_exclude"` @@ -45,40 +51,7 @@ const ( v2Endpoint = "http://169.254.170.2" ) -var sampleConfig = ` - ## ECS metadata url. 
- ## Metadata v2 API is used if set explicitly. Otherwise, - ## v3 metadata endpoint API is used if available. - # endpoint_url = "" - - ## Containers to include and exclude. Globs accepted. - ## Note that an empty array for both will include all containers - # container_name_include = [] - # container_name_exclude = [] - - ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "RUNNING" state will be captured. - ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", - ## "RESOURCES_PROVISIONED", "STOPPED". - # container_status_include = [] - # container_status_exclude = [] - - ## ecs labels to include and exclude as tags. Globs accepted. - ## Note that an empty array for both will include all labels as tags - ecs_label_include = [ "com.amazonaws.ecs.*" ] - ecs_label_exclude = [] - - ## Timeout for queries. - # timeout = "5s" -` - -// Description describes ECS plugin -func (ecs *Ecs) Description() string { - return "Read metrics about docker containers from Fargate/ECS v2, v3 meta endpoints." -} - -// SampleConfig returns the ECS example config -func (ecs *Ecs) SampleConfig() string { +func (*Ecs) SampleConfig() string { return sampleConfig } @@ -114,7 +87,7 @@ func initSetup(ecs *Ecs) error { if ecs.client == nil { resolveEndpoint(ecs) - c, err := ecs.newClient(ecs.Timeout.Duration, ecs.EndpointURL, ecs.metadataVersion) + c, err := ecs.newClient(time.Duration(ecs.Timeout), ecs.EndpointURL, ecs.metadataVersion) if err != nil { return err } @@ -166,14 +139,13 @@ func resolveEndpoint(ecs *Ecs) { func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumulator) { taskFields := map[string]interface{}{ - "revision": task.Revision, "desired_status": task.DesiredStatus, "known_status": task.KnownStatus, "limit_cpu": task.Limits["CPU"], "limit_mem": task.Limits["Memory"], } - acc.AddFields("ecs_task", taskFields, tags, task.PullStoppedAt) + acc.AddFields("ecs_task", taskFields, tags) } func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegraf.Accumulator) { @@ -221,20 +193,20 @@ func mergeTags(a map[string]string, b map[string]string) map[string]string { } func (ecs *Ecs) createContainerNameFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude) + containerNameFilter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude) if err != nil { return err } - ecs.containerNameFilter = filter + ecs.containerNameFilter = containerNameFilter return nil } func (ecs *Ecs) createLabelFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude) + labelFilter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude) if err != nil { return err } - ecs.labelFilter = filter + ecs.labelFilter = labelFilter return nil } @@ -251,11 +223,11 @@ func (ecs *Ecs) createContainerStatusFilters() error { ecs.ContainerStatusExclude[i] = strings.ToUpper(exclude) } - filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) + statusFilter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) if err != nil { return err } - ecs.statusFilter = filter + ecs.statusFilter = statusFilter return nil } @@ -263,7 +235,7 @@ func init() { inputs.Add("ecs", func() telegraf.Input { return &Ecs{ EndpointURL: "", - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), 
newClient: NewClient, filtersCreated: false, } diff --git a/plugins/inputs/ecs/ecs_test.go b/plugins/inputs/ecs/ecs_test.go index 5d64fef01efad..5a837d1ae4517 100644 --- a/plugins/inputs/ecs/ecs_test.go +++ b/plugins/inputs/ecs/ecs_test.go @@ -6,7 +6,7 @@ import ( "time" "github.com/docker/docker/api/types" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // codified golden objects for tests @@ -800,10 +800,10 @@ func TestResolveEndpoint(t *testing.T) { { name: "Endpoint is not set, ECS_CONTAINER_METADATA_URI is set => use v3 metadata", preF: func() { - os.Setenv("ECS_CONTAINER_METADATA_URI", "v3-endpoint.local") + require.NoError(t, os.Setenv("ECS_CONTAINER_METADATA_URI", "v3-endpoint.local")) }, afterF: func() { - os.Unsetenv("ECS_CONTAINER_METADATA_URI") + require.NoError(t, os.Unsetenv("ECS_CONTAINER_METADATA_URI")) }, given: Ecs{ EndpointURL: "", @@ -825,7 +825,7 @@ func TestResolveEndpoint(t *testing.T) { act := tt.given resolveEndpoint(&act) - assert.Equal(t, tt.exp, act) + require.Equal(t, tt.exp, act) }) } } diff --git a/plugins/inputs/ecs/sample.conf b/plugins/inputs/ecs/sample.conf new file mode 100644 index 0000000000000..004a41eb691d1 --- /dev/null +++ b/plugins/inputs/ecs/sample.conf @@ -0,0 +1,26 @@ +# Read metrics about ECS containers +[[inputs.ecs]] + ## ECS metadata url. + ## Metadata v2 API is used if set explicitly. Otherwise, + ## v3 metadata endpoint API is used if available. + # endpoint_url = "" + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + # container_name_include = [] + # container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "RUNNING" state will be captured. + ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", + ## "RESOURCES_PROVISIONED", "STOPPED". + # container_status_include = [] + # container_status_exclude = [] + + ## ecs labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags + ecs_label_include = [ "com.amazonaws.ecs.*" ] + ecs_label_exclude = [] + + ## Timeout for queries. + # timeout = "5s" diff --git a/plugins/inputs/ecs/stats.go b/plugins/inputs/ecs/stats.go index d2a8ee5d34cfd..13d9aa3bc5326 100644 --- a/plugins/inputs/ecs/stats.go +++ b/plugins/inputs/ecs/stats.go @@ -284,7 +284,6 @@ func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags m } else { totalStatMap[field] = uintV } - } } diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 54285c3b9e8e3..ca551fe4f3232 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -1,28 +1,36 @@ # Elasticsearch Input Plugin The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain -[Node Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) -and optionally -[Cluster-Health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) -metrics. +[Node Stats][1] and optionally [Cluster-Health][2] metrics. 
 In addition, the following optional queries are only made by the master node:
- [Cluster Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html)
- [Indices Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html)
- [Shard Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html)
+ [Cluster Stats][3] [Indices Stats][4] [Shard Stats][5]
 
 Specific Elasticsearch endpoints that are queried:
 
-- Node: either /_nodes/stats or /_nodes/_local/stats depending on 'local' configuration setting
-- Cluster Heath: /_cluster/health?level=indices
-- Cluster Stats: /_cluster/stats
-- Indices Stats: /_all/_stats
-- Shard Stats: /_all/_stats?level=shards
-Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level.
-
-### Configuration
-
-```toml
+- Node: either /_nodes/stats or /_nodes/_local/stats depending on 'local'
+  configuration setting
+- Cluster Health: /_cluster/health?level=indices
+- Cluster Stats: /_cluster/stats
+- Indices Stats: /_all/_stats
+- Shard Stats: /_all/_stats?level=shards
+
+Note that specific statistics information can change between Elasticsearch
+versions. In general, this plugin attempts to stay as version-generic as
+possible by tagging high-level categories only and using a generic json parser
+to make unique field names of whatever statistics names are provided at the
+mid-low level.
+
+[1]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html
+[2]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
+[3]: https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html
+[4]: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html
+[5]: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html
+
+## Configuration
+
+```toml @sample.conf
+# Read stats from one or more Elasticsearch servers or clusters
 [[inputs.elasticsearch]]
   ## specify a list of one or more Elasticsearch servers
   ## you can add username and password to your url to use basic authentication:
@@ -53,6 +61,7 @@ Note that specific statistics information can change between Elasticsearch versi
   cluster_stats_only_from_master = true
 
   ## Indices to collect; can be one or more indices names or _all
+  ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
   indices_include = ["_all"]
 
   ## One of "shards", "cluster", "indices"
@@ -74,9 +83,15 @@ Note that specific statistics information can change between Elasticsearch versi
   # tls_key = "/etc/telegraf/key.pem"
   ## Use TLS but skip chain & host verification
   # insecure_skip_verify = false
+
+  ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
+  ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices that match it, and
+  ## sort them by the date or number after the wildcard. Metrics are then gathered for only the 'num_most_recent_indices' most
+  ## recent indices.
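+  ## For example (illustrative values): with indices_include = ["telegraf-*"] and num_most_recent_indices = 2,
+  ## only the two most recent date-stamped telegraf-* indices are gathered.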
+ # num_most_recent_indices = 0 ``` -### Metrics +## Metrics Emitted when `cluster_health = true`: @@ -164,7 +179,7 @@ Emitted when `cluster_stats = true`: - shards_total (float) - store_size_in_bytes (float) -+ elasticsearch_clusterstats_nodes +- elasticsearch_clusterstats_nodes - tags: - cluster_name - node_name @@ -225,7 +240,7 @@ Emitted when the appropriate `node_stats` options are set. - tx_count (float) - tx_size_in_bytes (float) -+ elasticsearch_breakers +- elasticsearch_breakers - tags: - cluster_name - node_attribute_ml.enabled @@ -286,7 +301,7 @@ Emitted when the appropriate `node_stats` options are set. - total_free_in_bytes (float) - total_total_in_bytes (float) -+ elasticsearch_http +- elasticsearch_http - tags: - cluster_name - node_attribute_ml.enabled @@ -397,7 +412,7 @@ Emitted when the appropriate `node_stats` options are set. - warmer_total (float) - warmer_total_time_in_millis (float) -+ elasticsearch_jvm +- elasticsearch_jvm - tags: - cluster_name - node_attribute_ml.enabled @@ -475,7 +490,7 @@ Emitted when the appropriate `node_stats` options are set. - swap_used_in_bytes (float) - timestamp (float) -+ elasticsearch_process +- elasticsearch_process - tags: - cluster_name - node_attribute_ml.enabled diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index b6dfd2a81b11f..00aae60f2db72 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -1,9 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package elasticsearch import ( + _ "embed" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "sort" @@ -12,12 +14,17 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // mask for masking username/password from error messages var mask = regexp.MustCompile(`https?:\/\/\S+:\S+@`) @@ -85,78 +92,29 @@ type indexStat struct { Shards map[string][]interface{} `json:"shards"` } -const sampleConfig = ` - ## specify a list of one or more Elasticsearch servers - # you can add username and password to your url to use basic authentication: - # servers = ["http://user:pass@localhost:9200"] - servers = ["http://localhost:9200"] - - ## Timeout for HTTP requests to the elastic search server(s) - http_timeout = "5s" - - ## When local is true (the default), the node will read only its own stats. - ## Set local to false when you want to read the node stats from all nodes - ## of the cluster. - local = true - - ## Set cluster_health to true when you want to also obtain cluster health stats - cluster_health = false - - ## Adjust cluster_health_level when you want to also obtain detailed health stats - ## The options are - ## - indices (default) - ## - cluster - # cluster_health_level = "indices" - - ## Set cluster_stats to true when you want to also obtain cluster stats. - cluster_stats = false - - ## Only gather cluster_stats from the master node. 
To work this require local = true - cluster_stats_only_from_master = true - - ## Indices to collect; can be one or more indices names or _all - indices_include = ["_all"] - - ## One of "shards", "cluster", "indices" - indices_level = "shards" - - ## node_stats is a list of sub-stats that you want to have gathered. Valid options - ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", - ## "breaker". Per default, all stats are gathered. - # node_stats = ["jvm", "http"] - - ## HTTP Basic Authentication username and password. - # username = "" - # password = "" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - // Elasticsearch is a plugin to read stats from one or many Elasticsearch // servers. type Elasticsearch struct { - Local bool `toml:"local"` - Servers []string `toml:"servers"` - HTTPTimeout internal.Duration `toml:"http_timeout"` - ClusterHealth bool `toml:"cluster_health"` - ClusterHealthLevel string `toml:"cluster_health_level"` - ClusterStats bool `toml:"cluster_stats"` - ClusterStatsOnlyFromMaster bool `toml:"cluster_stats_only_from_master"` - IndicesInclude []string `toml:"indices_include"` - IndicesLevel string `toml:"indices_level"` - NodeStats []string `toml:"node_stats"` - Username string `toml:"username"` - Password string `toml:"password"` + Local bool `toml:"local"` + Servers []string `toml:"servers"` + HTTPTimeout config.Duration `toml:"http_timeout"` + ClusterHealth bool `toml:"cluster_health"` + ClusterHealthLevel string `toml:"cluster_health_level"` + ClusterStats bool `toml:"cluster_stats"` + ClusterStatsOnlyFromMaster bool `toml:"cluster_stats_only_from_master"` + IndicesInclude []string `toml:"indices_include"` + IndicesLevel string `toml:"indices_level"` + NodeStats []string `toml:"node_stats"` + Username string `toml:"username"` + Password string `toml:"password"` + NumMostRecentIndices int `toml:"num_most_recent_indices"` + tls.ClientConfig client *http.Client serverInfo map[string]serverInfo serverInfoMutex sync.Mutex + indexMatchers map[string]filter.Filter } type serverInfo struct { nodeID string @@ -170,7 +128,7 @@ func (i serverInfo) isMaster() bool { // NewElasticsearch return a new instance of Elasticsearch func NewElasticsearch() *Elasticsearch { return &Elasticsearch{ - HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPTimeout: config.Duration(time.Second * 5), ClusterStatsOnlyFromMaster: true, ClusterHealthLevel: "indices", } @@ -204,14 +162,21 @@ func mapShardStatusToCode(s string) int { return 0 } -// SampleConfig returns sample configuration for this plugin. -func (e *Elasticsearch) SampleConfig() string { +func (*Elasticsearch) SampleConfig() string { return sampleConfig } -// Description returns the plugin description. -func (e *Elasticsearch) Description() string { - return "Read stats from one or more Elasticsearch servers or clusters" +// Init the plugin. +func (e *Elasticsearch) Init() error { + // Compile the configured indexes to match for sorting. 
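+	// Illustrative note (index names borrowed from this plugin's test data):
+	// with IndicesInclude = ["twitter_*"], the compiled glob matcher groups
+	// "twitter_2020_08_01" and "twitter_2020_08_02" into one "twitter_*"
+	// bucket, which gatherIndividualIndicesStats can then sort so that only
+	// the NumMostRecentIndices most recent indices are gathered.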
+	indexMatchers, err := e.compileIndexMatchers()
+	if err != nil {
+		return err
+	}
+
+	e.indexMatchers = indexMatchers
+
+	return nil
 }
 
 // Gather reads the stats from Elasticsearch and writes it to the
@@ -254,7 +219,6 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error {
 			e.serverInfoMutex.Lock()
 			e.serverInfo[s] = info
 			e.serverInfoMutex.Unlock()
-
 		}(serv, acc)
 	}
 	wgC.Wait()
@@ -318,12 +282,12 @@ func (e *Elasticsearch) createHTTPClient() (*http.Client, error) {
 		return nil, err
 	}
 	tr := &http.Transport{
-		ResponseHeaderTimeout: e.HTTPTimeout.Duration,
+		ResponseHeaderTimeout: time.Duration(e.HTTPTimeout),
 		TLSClientConfig:       tlsCfg,
 	}
 	client := &http.Client{
 		Transport: tr,
-		Timeout:   e.HTTPTimeout.Duration,
+		Timeout:   time.Duration(e.HTTPTimeout),
 	}
 
 	return client, nil
@@ -527,66 +491,131 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator)
 		acc.AddFields("elasticsearch_indices_stats_"+m, jsonParser.Fields, map[string]string{"index_name": "_all"}, now)
 	}
 
-	// Individual Indices stats
-	for id, index := range indicesStats.Indices {
-		indexTag := map[string]string{"index_name": id}
-		stats := map[string]interface{}{
-			"primaries": index.Primaries,
-			"total":     index.Total,
+	// Gather stats for each index.
+	err := e.gatherIndividualIndicesStats(indicesStats.Indices, now, acc)
+
+	return err
+}
+
+// gatherIndividualIndicesStats gathers stats for each individual index; when 'num_most_recent_indices' is set, matching indices are sorted and only the most recent ones are gathered.
+func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexStat, now time.Time, acc telegraf.Accumulator) error {
+	// Sort indices into buckets based on their configured prefix, if any matches.
+	categorizedIndexNames := e.categorizeIndices(indices)
+	for _, matchingIndices := range categorizedIndexNames {
+		// Establish how many indices of each category to use. The user can configure gathering only the latest 'X' indices.
+		indicesCount := len(matchingIndices)
+		indicesToTrackCount := indicesCount
+
+		// Sort the indices if configured to do so.
+		if e.NumMostRecentIndices > 0 {
+			if e.NumMostRecentIndices < indicesToTrackCount {
+				indicesToTrackCount = e.NumMostRecentIndices
+			}
+			sort.Strings(matchingIndices)
 		}
-		for m, s := range stats {
-			f := jsonparser.JSONFlattener{}
-			// parse Json, getting strings and bools
-			err := f.FullFlattenJSON("", s, true, true)
+
+		// Gather only the number of indices that have been configured, in descending order (most recent, if date-stamped).
+		for i := indicesCount - 1; i >= indicesCount-indicesToTrackCount; i-- {
+			indexName := matchingIndices[i]
+
+			err := e.gatherSingleIndexStats(indexName, indices[indexName], now, acc)
 			if err != nil {
 				return err
 			}
-			acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now)
 		}
+	}
 
-		if e.IndicesLevel == "shards" {
-			for shardNumber, shards := range index.Shards {
-				for _, shard := range shards {
+	return nil
+}
 
-					// Get Shard Stats
-					flattened := jsonparser.JSONFlattener{}
-					err := flattened.FullFlattenJSON("", shard, true, true)
-					if err != nil {
-						return err
-					}
+func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) map[string][]string {
+	categorizedIndexNames := map[string][]string{}
 
-					// determine shard tag and primary/replica designation
-					shardType := "replica"
-					if flattened.Fields["routing_primary"] == true {
-						shardType = "primary"
-					}
-					delete(flattened.Fields, "routing_primary")
+	// If all indices are configured to be gathered, bucket them all together.
+ if len(e.IndicesInclude) == 0 || e.IndicesInclude[0] == "_all" { + for indexName := range indices { + categorizedIndexNames["_all"] = append(categorizedIndexNames["_all"], indexName) + } - routingState, ok := flattened.Fields["routing_state"].(string) - if ok { - flattened.Fields["routing_state"] = mapShardStatusToCode(routingState) - } + return categorizedIndexNames + } - routingNode, _ := flattened.Fields["routing_node"].(string) - shardTags := map[string]string{ - "index_name": id, - "node_id": routingNode, - "shard_name": string(shardNumber), - "type": shardType, - } + // Bucket each returned index with its associated configured index (if any match). + for indexName := range indices { + match := indexName + for name, matcher := range e.indexMatchers { + // If a configured index matches one of the returned indexes, mark it as a match. + if matcher.Match(match) { + match = name + break + } + } - for key, field := range flattened.Fields { - switch field.(type) { - case string, bool: - delete(flattened.Fields, key) - } - } + // Bucket all matching indices together for sorting. + categorizedIndexNames[match] = append(categorizedIndexNames[match], indexName) + } + + return categorizedIndexNames +} + +func (e *Elasticsearch) gatherSingleIndexStats(name string, index indexStat, now time.Time, acc telegraf.Accumulator) error { + indexTag := map[string]string{"index_name": name} + stats := map[string]interface{}{ + "primaries": index.Primaries, + "total": index.Total, + } + for m, s := range stats { + f := jsonparser.JSONFlattener{} + // parse Json, getting strings and bools + err := f.FullFlattenJSON("", s, true, true) + if err != nil { + return err + } + acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now) + } - acc.AddFields("elasticsearch_indices_stats_shards", - flattened.Fields, - shardTags, - now) + if e.IndicesLevel == "shards" { + for shardNumber, shards := range index.Shards { + for _, shard := range shards { + // Get Shard Stats + flattened := jsonparser.JSONFlattener{} + err := flattened.FullFlattenJSON("", shard, true, true) + if err != nil { + return err } + + // determine shard tag and primary/replica designation + shardType := "replica" + routingPrimary, _ := flattened.Fields["routing_primary"].(bool) + if routingPrimary { + shardType = "primary" + } + delete(flattened.Fields, "routing_primary") + + routingState, ok := flattened.Fields["routing_state"].(string) + if ok { + flattened.Fields["routing_state"] = mapShardStatusToCode(routingState) + } + + routingNode, _ := flattened.Fields["routing_node"].(string) + shardTags := map[string]string{ + "index_name": name, + "node_id": routingNode, + "shard_name": shardNumber, + "type": shardType, + } + + for key, field := range flattened.Fields { + switch field.(type) { + case string, bool: + delete(flattened.Fields, key) + } + } + + acc.AddFields("elasticsearch_indices_stats_shards", + flattened.Fields, + shardTags, + now) } } } @@ -615,7 +644,7 @@ func (e *Elasticsearch) getCatMaster(url string) (string, error) { // future calls. return "", fmt.Errorf("elasticsearch: Unable to retrieve master node information. 
API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) } - response, err := ioutil.ReadAll(r.Body) + response, err := io.ReadAll(r.Body) if err != nil { return "", err @@ -649,11 +678,24 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error { r.StatusCode, http.StatusOK) } - if err = json.NewDecoder(r.Body).Decode(v); err != nil { - return err + return json.NewDecoder(r.Body).Decode(v) +} + +func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) { + indexMatchers := map[string]filter.Filter{} + var err error + + // Compile each configured index into a glob matcher. + for _, configuredIndex := range e.IndicesInclude { + if _, exists := indexMatchers[configuredIndex]; !exists { + indexMatchers[configuredIndex], err = filter.Compile([]string{configuredIndex}) + if err != nil { + return nil, err + } + } } - return nil + return indexMatchers, nil } func init() { diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index ad91c898a1a5c..1ed61e731ce1f 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -1,17 +1,14 @@ package elasticsearch import ( - "io/ioutil" + "io" "net/http" "strings" "testing" - "github.com/influxdata/telegraf/testutil" - - "fmt" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func defaultTags() map[string]string { @@ -33,9 +30,9 @@ type transportMock struct { body string } -func newTransportMock(statusCode int, body string) http.RoundTripper { +func newTransportMock(body string) http.RoundTripper { return &transportMock{ - statusCode: statusCode, + statusCode: http.StatusOK, body: body, } } @@ -47,19 +44,11 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } -func (t *transportMock) CancelRequest(_ *http.Request) { -} - -func checkIsMaster(es *Elasticsearch, server string, expected bool, t *testing.T) { - if es.serverInfo[server].isMaster() != expected { - msg := fmt.Sprintf("IsMaster set incorrectly") - assert.Fail(t, msg) - } -} +func (t *transportMock) CancelRequest(_ *http.Request) {} func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { tags := defaultTags() @@ -77,16 +66,13 @@ func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { func TestGather(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.client.Transport = newTransportMock(nodeStatsResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := acc.GatherError(es.Gather); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, acc.GatherError(es.Gather)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -94,16 +80,13 @@ func TestGatherIndividualStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} es.NodeStats = []string{"jvm", "process"} - es.client.Transport = 
newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess) + es.client.Transport = newTransportMock(nodeStatsResponseJVMProcess) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := acc.GatherError(es.Gather); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, acc.GatherError(es.Gather)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") tags := defaultTags() acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags) @@ -120,16 +103,13 @@ func TestGatherIndividualStats(t *testing.T) { func TestGatherNodeStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.client.Transport = newTransportMock(nodeStatsResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], false, t) + require.NoError(t, es.gatherNodeStats("junk", &acc)) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -138,14 +118,13 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.client.Transport = newTransportMock(clusterHealthResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -165,14 +144,13 @@ func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "cluster" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.client.Transport = newTransportMock(clusterHealthResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -192,14 +170,13 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true es.ClusterHealthLevel = "indices" - es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices) + es.client.Transport = newTransportMock(clusterHealthResponseWithIndices) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, 
es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, @@ -223,31 +200,25 @@ func TestGatherClusterStatsMaster(t *testing.T) { info := serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster - es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult) + es.client.Transport = newTransportMock(IsMasterResult) masterID, err := es.getCatMaster("junk") require.NoError(t, err) info.masterID = masterID es.serverInfo["http://example.com:9200"] = info - IsMasterResultTokens := strings.Split(string(IsMasterResult), " ") - if masterID != IsMasterResultTokens[0] { - msg := fmt.Sprintf("catmaster is incorrect") - assert.Fail(t, msg) - } + isMasterResultTokens := strings.Split(IsMasterResult, " ") + require.Equal(t, masterID, isMasterResultTokens[0], "catmaster is incorrect") // now get node status, which determines whether we're master var acc testutil.Accumulator es.Local = true - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } - - checkIsMaster(es, es.Servers[0], true, t) + es.client.Transport = newTransportMock(nodeStatsResponse) + require.NoError(t, es.gatherNodeStats("junk", &acc)) + require.True(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) // now test the clusterstats method - es.client.Transport = newTransportMock(http.StatusOK, clusterStatsResponse) + es.client.Transport = newTransportMock(clusterStatsResponse) require.NoError(t, es.gatherClusterStats("junk", &acc)) tags := map[string]string{ @@ -269,26 +240,21 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { es.serverInfo["http://example.com:9200"] = serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster - es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult) + es.client.Transport = newTransportMock(IsNotMasterResult) masterID, err := es.getCatMaster("junk") require.NoError(t, err) - IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ") - if masterID != IsNotMasterResultTokens[0] { - msg := fmt.Sprintf("catmaster is incorrect") - assert.Fail(t, msg) - } + isNotMasterResultTokens := strings.Split(IsNotMasterResult, " ") + require.Equal(t, masterID, isNotMasterResultTokens[0], "catmaster is incorrect") // now get node status, which determines whether we're master var acc testutil.Accumulator es.Local = true - es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) - if err := es.gatherNodeStats("junk", &acc); err != nil { - t.Fatal(err) - } + es.client.Transport = newTransportMock(nodeStatsResponse) + require.NoError(t, es.gatherNodeStats("junk", &acc)) // ensure flag is clear so Cluster Stats would not be done - checkIsMaster(es, es.Servers[0], false, t) + require.False(t, es.serverInfo[es.Servers[0]].isMaster(), "IsMaster set incorrectly") checkNodeStatsResult(t, &acc) } @@ -296,32 +262,69 @@ func TestGatherClusterIndicesStats(t *testing.T) { es := newElasticsearchWithClient() es.IndicesInclude = []string{"_all"} es.Servers = []string{"http://example.com:9200"} - es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesResponse) + es.client.Transport = newTransportMock(clusterIndicesResponse) es.serverInfo = make(map[string]serverInfo) es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator - if err := 
es.gatherIndicesStats("junk", &acc); err != nil { - t.Fatal(err) - } + require.NoError(t, es.gatherIndicesStats("junk", &acc)) acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", clusterIndicesExpected, map[string]string{"index_name": "twitter"}) } +func TestGatherDateStampedIndicesStats(t *testing.T) { + es := newElasticsearchWithClient() + es.IndicesInclude = []string{"twitter*", "influx*", "penguins"} + es.NumMostRecentIndices = 2 + es.Servers = []string{"http://example.com:9200"} + es.client.Transport = newTransportMock(dateStampedIndicesResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() + require.NoError(t, es.Init()) + + var acc testutil.Accumulator + require.NoError(t, es.gatherIndicesStats(es.Servers[0]+"/"+strings.Join(es.IndicesInclude, ",")+"/_stats", &acc)) + + // includes 2 most recent indices for "twitter", only expect the most recent two. + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_08_02"}) + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_08_01"}) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter_2020_07_31"}) + + // includes 2 most recent indices for "influx", only expect the most recent two. + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2021.01.02"}) + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2021.01.01"}) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "influx2020.12.31"}) + + // not configured to sort the 'penguins' index, but ensure it is also included. 
+	acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
+		clusterIndicesExpected,
+		map[string]string{"index_name": "penguins"})
+}
+
 func TestGatherClusterIndiceShardsStats(t *testing.T) {
 	es := newElasticsearchWithClient()
 	es.IndicesLevel = "shards"
 	es.Servers = []string{"http://example.com:9200"}
-	es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesShardsResponse)
+	es.client.Transport = newTransportMock(clusterIndicesShardsResponse)
 	es.serverInfo = make(map[string]serverInfo)
 	es.serverInfo["http://example.com:9200"] = defaultServerInfo()
 
 	var acc testutil.Accumulator
-	if err := es.gatherIndicesStats("junk", &acc); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, es.gatherIndicesStats("junk", &acc))
 
 	acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries",
 		clusterIndicesExpected,
diff --git a/plugins/inputs/elasticsearch/sample.conf b/plugins/inputs/elasticsearch/sample.conf
new file mode 100644
index 0000000000000..d65ff1b1bda58
--- /dev/null
+++ b/plugins/inputs/elasticsearch/sample.conf
@@ -0,0 +1,59 @@
+# Read stats from one or more Elasticsearch servers or clusters
+[[inputs.elasticsearch]]
+  ## specify a list of one or more Elasticsearch servers
+  ## you can add username and password to your url to use basic authentication:
+  ## servers = ["http://user:pass@localhost:9200"]
+  servers = ["http://localhost:9200"]
+
+  ## Timeout for HTTP requests to the Elasticsearch server(s)
+  http_timeout = "5s"
+
+  ## When local is true (the default), the node will read only its own stats.
+  ## Set local to false when you want to read the node stats from all nodes
+  ## of the cluster.
+  local = true
+
+  ## Set cluster_health to true when you want to obtain cluster health stats
+  cluster_health = false
+
+  ## Adjust cluster_health_level when you want to obtain detailed health stats
+  ## The options are
+  ##  - indices (default)
+  ##  - cluster
+  # cluster_health_level = "indices"
+
+  ## Set cluster_stats to true when you want to obtain cluster stats.
+  cluster_stats = false
+
+  ## Only gather cluster_stats from the master node. To work this requires local = true
+  cluster_stats_only_from_master = true
+
+  ## Indices to collect; can be one or more indices names or _all
+  ## Use of wildcards is allowed. Use a wildcard at the end to retrieve index names that end with a changing value, like a date.
+  indices_include = ["_all"]
+
+  ## One of "shards", "cluster", "indices"
+  ## Currently only "shards" is implemented
+  indices_level = "shards"
+
+  ## node_stats is a list of sub-stats that you want to have gathered. Valid options
+  ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http",
+  ## "breaker". Per default, all stats are gathered.
+  # node_stats = ["jvm", "http"]
+
+  ## HTTP Basic Authentication username and password.
+  # username = ""
+  # password = ""
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+
+  ## Sets the number of most recent indices to return for indices that are configured with a date-stamped suffix.
+  ## Each 'indices_include' entry ending with a wildcard (*) or glob matching pattern will group together all indices
+  ## that match it, and sort them by the date or number after the wildcard. Metrics are then gathered for only the
+  ## 'num_most_recent_indices' most recent indices.
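+  ## For example (hypothetical settings, using the date-stamped names from
+  ## this plugin's test data): 'indices_include = ["twitter_*"]' combined
+  ## with 'num_most_recent_indices = 2' would gather stats for only the two
+  ## most recent 'twitter_*' indices, e.g. 'twitter_2020_08_01' and
+  ## 'twitter_2020_08_02'.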
+ # num_most_recent_indices = 0 diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index a04fe1521e999..1006e4848bb65 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -2089,6 +2089,2008 @@ const clusterIndicesResponse = ` } }` +const dateStampedIndicesResponse = ` +{ + "_shards": { + "total": 9, + "successful": 6, + "failed": 0 + }, + "_all": { + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + 
"missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "indices": { + "twitter_2020_08_02": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, 
+ "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + 
"uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "twitter_2020_08_01": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + 
"query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "twitter_2020_07_31": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + 
}, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + 
"evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "influx2021.01.02": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + 
"scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "influx2020.12.31": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 
0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + 
"throttle_time_in_millis": 0 + } + } + }, + "influx2021.01.01": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + 
"suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "penguins": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + 
}, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + } + } +}` + var clusterIndicesExpected = map[string]interface{}{ "completion_size_in_bytes": float64(0), 
"docs_count": float64(999), diff --git a/plugins/inputs/elasticsearch_query/README.md b/plugins/inputs/elasticsearch_query/README.md new file mode 100755 index 0000000000000..dfd5ccdfb130d --- /dev/null +++ b/plugins/inputs/elasticsearch_query/README.md @@ -0,0 +1,185 @@ +# Elasticsearch Query Input Plugin + +This [elasticsearch](https://www.elastic.co/) query plugin queries endpoints to +obtain metrics from data stored in an Elasticsearch cluster. + +The following is supported: + +- return number of hits for a search query +- calculate the avg/max/min/sum for a numeric field, filtered by a query, + aggregated per tag +- count number of terms for a particular field + +## Elasticsearch Support + +This plugins is tested against Elasticsearch 5.x and 6.x releases. Currently it +is known to break on 7.x or greater versions. + +## Configuration + +```toml @sample.conf +# Derive metrics from aggregating Elasticsearch query results +[[inputs.elasticsearch_query]] + ## The full HTTP endpoint URL for your Elasticsearch instance + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + urls = [ "http://node1.es.example.com:9200" ] # required. + + ## Elasticsearch client timeout, defaults to "5s". + # timeout = "5s" + + ## Set to true to ask Elasticsearch a list of all cluster nodes, + ## thus it is not necessary to list all nodes in the urls config option + # enable_sniffer = false + + ## Set the interval to check if the Elasticsearch nodes are available + ## This option is only used if enable_sniffer is also set (0s to disable it) + # health_check_interval = "10s" + + ## HTTP basic authentication details (eg. when using x-pack) + # username = "telegraf" + # password = "mypassword" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + [[inputs.elasticsearch_query.aggregation]] + ## measurement name for the results of the aggregation query + measurement_name = "measurement" + + ## Elasticsearch indexes to query (accept wildcards). + index = "index-*" + + ## The date/time field in the Elasticsearch index (mandatory). + date_field = "@timestamp" + + ## If the field used for the date/time field in Elasticsearch is also using + ## a custom date/time format it may be required to provide the format to + ## correctly parse the field. + ## + ## If using one of the built in elasticsearch formats this is not required. + # date_field_custom_format = "" + + ## Time window to query (eg. "1m" to query documents from last minute). + ## Normally should be set to same as collection interval + query_period = "1m" + + ## Lucene query to filter results + # filter_query = "*" + + ## Fields to aggregate values (must be numeric fields) + # metric_fields = ["metric"] + + ## Aggregation function to use on the metric fields + ## Must be set if 'metric_fields' is set + ## Valid values are: avg, sum, min, max, sum + # metric_function = "avg" + + ## Fields to be used as tags + ## Must be text, non-analyzed fields. 
+    # tags = ["field.keyword", "field2.keyword"]
+
+    ## Set to true to not ignore documents when the tag(s) above are missing
+    # include_missing_tag = false
+
+    ## String value of the tag when the tag does not exist
+    ## Used when include_missing_tag is true
+    # missing_tag_value = "null"
+```
+
+## Examples
+
+Please note that the `[[inputs.elasticsearch_query]]` section is still required
+for all of the examples below.
+
+### Search the average response time, per URI and per response status code
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "http_logs"
+  index = "my-index-*"
+  filter_query = "*"
+  metric_fields = ["response_time"]
+  metric_function = "avg"
+  tags = ["URI.keyword", "response.keyword"]
+  include_missing_tag = true
+  missing_tag_value = "null"
+  date_field = "@timestamp"
+  query_period = "1m"
+```
+
+### Search the maximum response time per method and per URI
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "http_logs"
+  index = "my-index-*"
+  filter_query = "*"
+  metric_fields = ["response_time"]
+  metric_function = "max"
+  tags = ["method.keyword","URI.keyword"]
+  include_missing_tag = false
+  missing_tag_value = "null"
+  date_field = "@timestamp"
+  query_period = "1m"
+```
+
+### Search number of documents matching a filter query in all indices
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "http_logs"
+  index = "*"
+  filter_query = "product_1 AND HEAD"
+  query_period = "1m"
+  date_field = "@timestamp"
+```
+
+### Search number of documents matching a filter query, returning per response status code
+
+```toml
+[[inputs.elasticsearch_query.aggregation]]
+  measurement_name = "http_logs"
+  index = "*"
+  filter_query = "downloads"
+  tags = ["response.keyword"]
+  include_missing_tag = false
+  date_field = "@timestamp"
+  query_period = "1m"
+```
+
+### Required parameters
+
+- `measurement_name`: The target measurement where the results of the
+  aggregation query will be stored.
+- `index`: The index name to query on Elasticsearch.
+- `query_period`: The time window to query (eg. "1m" to query documents from
+  the last minute). Normally this should be set to the same value as the
+  collection interval.
+- `date_field`: The date/time field in the Elasticsearch index.
+
+### Optional parameters
+
+- `date_field_custom_format`: Not needed if using one of the built-in date/time
+  formats of Elasticsearch, but may be required if using a custom date/time
+  format. The format syntax uses the [Joda date format][joda].
+- `filter_query`: Lucene query to filter the results (default: "\*")
+- `metric_fields`: The list of fields to perform metric aggregation (these must
+  be indexed as numeric fields)
+- `metric_function`: The single-value metric aggregation function to be
+  performed on the `metric_fields` defined. Currently supported aggregations
+  are "avg", "min", "max", "sum" (see the [aggregation docs][agg]).
+- `tags`: The list of fields to be used as tags (these must be indexed as
+  non-analyzed fields). A "terms aggregation" will be done per tag defined
+- `include_missing_tag`: Set to true to not ignore documents where the tag(s)
+  specified above do not exist. (If false, documents without the specified tag
+  field will be ignored in `doc_count` and in the metric aggregation)
+- `missing_tag_value`: The value of the tag that will be set for documents in
+  which the tag field does not exist. Only used when `include_missing_tag` is
+  set to `true`.
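+
+### Example Output
+
+Metric fields in the output are named `<field>_<function>`, with dots in the
+Elasticsearch field name replaced by underscores. When tag fields are
+configured, each bucket also gets a `doc_count` field and one tag per
+configured tag field, named the same way. A hypothetical line-protocol sample
+for the first example above (values are illustrative only):
+
+```text
+http_logs,URI_keyword=/downloads/product_1,response_keyword=200 response_time_avg=182.4,doc_count=263i 1555083555000000000
+```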
+ +[joda]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/search-aggregations-bucket-daterange-aggregation.html#date-format-pattern +[agg]: https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-metrics.html diff --git a/plugins/inputs/elasticsearch_query/aggregation_parser.go b/plugins/inputs/elasticsearch_query/aggregation_parser.go new file mode 100644 index 0000000000000..c4dff05ee6fee --- /dev/null +++ b/plugins/inputs/elasticsearch_query/aggregation_parser.go @@ -0,0 +1,153 @@ +package elasticsearch_query + +import ( + "fmt" + + "github.com/influxdata/telegraf" + elastic5 "gopkg.in/olivere/elastic.v5" +) + +type resultMetric struct { + name string + fields map[string]interface{} + tags map[string]string +} + +func parseSimpleResult(acc telegraf.Accumulator, measurement string, searchResult *elastic5.SearchResult) { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["doc_count"] = searchResult.Hits.TotalHits + + acc.AddFields(measurement, fields, tags) +} + +func parseAggregationResult(acc telegraf.Accumulator, aggregationQueryList []aggregationQueryData, searchResult *elastic5.SearchResult) error { + measurements := map[string]map[string]string{} + + // organize the aggregation query data by measurement + for _, aggregationQuery := range aggregationQueryList { + if measurements[aggregationQuery.measurement] == nil { + measurements[aggregationQuery.measurement] = map[string]string{ + aggregationQuery.name: aggregationQuery.function, + } + } else { + t := measurements[aggregationQuery.measurement] + t[aggregationQuery.name] = aggregationQuery.function + measurements[aggregationQuery.measurement] = t + } + } + + // recurse over query aggregation results per measurement + for measurement, aggNameFunction := range measurements { + var m resultMetric + + m.fields = make(map[string]interface{}) + m.tags = make(map[string]string) + m.name = measurement + + _, err := recurseResponse(acc, aggNameFunction, searchResult.Aggregations, m) + if err != nil { + return err + } + } + return nil +} + +func recurseResponse(acc telegraf.Accumulator, aggNameFunction map[string]string, bucketResponse elastic5.Aggregations, m resultMetric) (resultMetric, error) { + var err error + + aggNames := getAggNames(bucketResponse) + if len(aggNames) == 0 { + // we've reached a single bucket or response without aggregation, nothing here + return m, nil + } + + // metrics aggregations response can contain multiple field values, so we iterate over them + for _, aggName := range aggNames { + aggFunction, found := aggNameFunction[aggName] + if !found { + return m, fmt.Errorf("child aggregation function '%s' not found %v", aggName, aggNameFunction) + } + + resp := getResponseAggregation(aggFunction, aggName, bucketResponse) + if resp == nil { + return m, fmt.Errorf("child aggregation '%s' not found", aggName) + } + + switch resp := resp.(type) { + case *elastic5.AggregationBucketKeyItems: + // we've found a terms aggregation, iterate over the buckets and try to retrieve the inner aggregation values + for _, bucket := range resp.Buckets { + var s string + var ok bool + m.fields["doc_count"] = bucket.DocCount + if s, ok = bucket.Key.(string); !ok { + return m, fmt.Errorf("bucket key is not a string (%s, %s)", aggName, aggFunction) + } + m.tags[aggName] = s + + // we need to recurse down through the buckets, as it may contain another terms aggregation + m, err = recurseResponse(acc, aggNameFunction, bucket.Aggregations, m) + if err != nil { + return 
m, err + } + + // if there are fields present after finishing the bucket, it is a complete metric + // store it and clean the fields to start a new metric + if len(m.fields) > 0 { + acc.AddFields(m.name, m.fields, m.tags) + m.fields = make(map[string]interface{}) + } + + // after finishing the bucket, remove its tag from the tags map + delete(m.tags, aggName) + } + + case *elastic5.AggregationValueMetric: + if resp.Value != nil { + m.fields[aggName] = *resp.Value + } else { + m.fields[aggName] = float64(0) + } + + default: + return m, fmt.Errorf("aggregation type %T not supported", resp) + } + } + + // if there are fields here it comes from a metrics aggregation without a parent terms aggregation + if len(m.fields) > 0 { + acc.AddFields(m.name, m.fields, m.tags) + m.fields = make(map[string]interface{}) + } + return m, nil +} + +func getResponseAggregation(function string, aggName string, aggs elastic5.Aggregations) (agg interface{}) { + switch function { + case "avg": + agg, _ = aggs.Avg(aggName) + case "sum": + agg, _ = aggs.Sum(aggName) + case "min": + agg, _ = aggs.Min(aggName) + case "max": + agg, _ = aggs.Max(aggName) + case "terms": + agg, _ = aggs.Terms(aggName) + } + + return agg +} + +// getAggNames returns the aggregation names from a response aggregation +func getAggNames(agg elastic5.Aggregations) (aggs []string) { + for k := range agg { + if (k != "key") && (k != "doc_count") { + aggs = append(aggs, k) + } + } + + return aggs +} diff --git a/plugins/inputs/elasticsearch_query/aggregation_query.go b/plugins/inputs/elasticsearch_query/aggregation_query.go new file mode 100644 index 0000000000000..ab97277f4e119 --- /dev/null +++ b/plugins/inputs/elasticsearch_query/aggregation_query.go @@ -0,0 +1,217 @@ +package elasticsearch_query + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + elastic5 "gopkg.in/olivere/elastic.v5" +) + +type aggKey struct { + measurement string + name string + function string + field string +} + +type aggregationQueryData struct { + aggKey + isParent bool + aggregation elastic5.Aggregation +} + +func (e *ElasticsearchQuery) runAggregationQuery(ctx context.Context, aggregation esAggregation) (*elastic5.SearchResult, error) { + now := time.Now().UTC() + from := now.Add(time.Duration(-aggregation.QueryPeriod)) + filterQuery := aggregation.FilterQuery + if filterQuery == "" { + filterQuery = "*" + } + + query := elastic5.NewBoolQuery() + query = query.Filter(elastic5.NewQueryStringQuery(filterQuery)) + query = query.Filter(elastic5.NewRangeQuery(aggregation.DateField).From(from).To(now).Format(aggregation.DateFieldFormat)) + + src, err := query.Source() + if err != nil { + return nil, fmt.Errorf("failed to get query source - %v", err) + } + data, err := json.Marshal(src) + if err != nil { + return nil, fmt.Errorf("failed to marshal query - %v", err) + } + e.Log.Debugf("{\"query\": %s}", string(data)) + + search := e.esClient.Search().Index(aggregation.Index).Query(query).Size(0) + + // add only parent elastic.Aggregations to the search request; all the rest are subaggregations of these + for _, v := range aggregation.aggregationQueryList { + if v.isParent && v.aggregation != nil { + search.Aggregation(v.aggKey.name, v.aggregation) + } + } + + searchResult, err := search.Do(ctx) + if err != nil && searchResult != nil { + return searchResult, fmt.Errorf("%s - %s", searchResult.Error.Type, searchResult.Error.Reason) + } + + return searchResult, err +} + +// getMetricFields function returns a map of fields and field types on
Elasticsearch that matches field.MetricFields +func (e *ElasticsearchQuery) getMetricFields(ctx context.Context, aggregation esAggregation) (map[string]string, error) { + mapMetricFields := make(map[string]string) + + for _, metricField := range aggregation.MetricFields { + resp, err := e.esClient.GetFieldMapping().Index(aggregation.Index).Field(metricField).Do(ctx) + if err != nil { + return mapMetricFields, fmt.Errorf("error retrieving field mappings for %s: %s", aggregation.Index, err.Error()) + } + + for _, index := range resp { + var ok bool + var mappings interface{} + if mappings, ok = index.(map[string]interface{})["mappings"]; !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", index) + } + + var types map[string]interface{} + if types, ok = mappings.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", mappings) + } + + var fields map[string]interface{} + for _, _type := range types { + if fields, ok = _type.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _type) + } + + var field map[string]interface{} + for _, _field := range fields { + if field, ok = _field.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _field) + } + + fullname := field["full_name"] + mapping := field["mapping"] + + var fname string + if fname, ok = fullname.(string); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected string, got %T)", fullname) + } + + var fieldTypes map[string]interface{} + if fieldTypes, ok = mapping.(map[string]interface{}); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", mapping) + } + + var fieldType interface{} + for _, _fieldType := range fieldTypes { + if fieldType, ok = _fieldType.(map[string]interface{})["type"]; !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected map[string]interface{}, got %T)", _fieldType) + } + + var ftype string + if ftype, ok = fieldType.(string); !ok { + return nil, fmt.Errorf("assertion error, wrong type (expected string, got %T)", fieldType) + } + mapMetricFields[fname] = ftype + } + } + } + } + } + + return mapMetricFields, nil +} + +func (aggregation *esAggregation) buildAggregationQuery() error { + // create one aggregation per metric field found & function defined for numeric fields + for k, v := range aggregation.mapMetricFields { + switch v { + case "long": + case "float": + case "integer": + case "short": + case "double": + case "scaled_float": + default: + continue + } + + agg, err := getFunctionAggregation(aggregation.MetricFunction, k) + if err != nil { + return err + } + + aggregationQuery := aggregationQueryData{ + aggKey: aggKey{ + measurement: aggregation.MeasurementName, + function: aggregation.MetricFunction, + field: k, + name: strings.ReplaceAll(k, ".", "_") + "_" + aggregation.MetricFunction, + }, + isParent: true, + aggregation: agg, + } + + aggregation.aggregationQueryList = append(aggregation.aggregationQueryList, aggregationQuery) + } + + // create a terms aggregation per tag + for _, term := range aggregation.Tags { + agg := elastic5.NewTermsAggregation() + if aggregation.IncludeMissingTag && aggregation.MissingTagValue != "" { + agg.Missing(aggregation.MissingTagValue) + } + + agg.Field(term).Size(1000) + + // add each previous parent aggregations as 
subaggregations of this terms aggregation + for key, aggMap := range aggregation.aggregationQueryList { + if aggMap.isParent { + agg.Field(term).SubAggregation(aggMap.name, aggMap.aggregation).Size(1000) + // update subaggregation map with parent information + aggregation.aggregationQueryList[key].isParent = false + } + } + + aggregationQuery := aggregationQueryData{ + aggKey: aggKey{ + measurement: aggregation.MeasurementName, + function: "terms", + field: term, + name: strings.ReplaceAll(term, ".", "_"), + }, + isParent: true, + aggregation: agg, + } + + aggregation.aggregationQueryList = append(aggregation.aggregationQueryList, aggregationQuery) + } + + return nil +} + +func getFunctionAggregation(function string, aggfield string) (elastic5.Aggregation, error) { + var agg elastic5.Aggregation + + switch function { + case "avg": + agg = elastic5.NewAvgAggregation().Field(aggfield) + case "sum": + agg = elastic5.NewSumAggregation().Field(aggfield) + case "min": + agg = elastic5.NewMinAggregation().Field(aggfield) + case "max": + agg = elastic5.NewMaxAggregation().Field(aggfield) + default: + return nil, fmt.Errorf("aggregation function '%s' not supported", function) + } + + return agg, nil +} diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query.go b/plugins/inputs/elasticsearch_query/elasticsearch_query.go new file mode 100644 index 0000000000000..b6bbcf36c3fca --- /dev/null +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query.go @@ -0,0 +1,250 @@ +//go:generate ../../../tools/readme_config_includer/generator +package elasticsearch_query + +import ( + "context" + _ "embed" + "fmt" + "net/http" + "strconv" + "strings" + "sync" + "time" + + elastic5 "gopkg.in/olivere/elastic.v5" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +// ElasticsearchQuery struct +type ElasticsearchQuery struct { + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + EnableSniffer bool `toml:"enable_sniffer"` + Timeout config.Duration `toml:"timeout"` + HealthCheckInterval config.Duration `toml:"health_check_interval"` + Aggregations []esAggregation `toml:"aggregation"` + + Log telegraf.Logger `toml:"-"` + + tls.ClientConfig + httpclient *http.Client + esClient *elastic5.Client +} + +// esAggregation struct +type esAggregation struct { + Index string `toml:"index"` + MeasurementName string `toml:"measurement_name"` + DateField string `toml:"date_field"` + DateFieldFormat string `toml:"date_field_custom_format"` + QueryPeriod config.Duration `toml:"query_period"` + FilterQuery string `toml:"filter_query"` + MetricFields []string `toml:"metric_fields"` + MetricFunction string `toml:"metric_function"` + Tags []string `toml:"tags"` + IncludeMissingTag bool `toml:"include_missing_tag"` + MissingTagValue string `toml:"missing_tag_value"` + mapMetricFields map[string]string + aggregationQueryList []aggregationQueryData +} + +func (*ElasticsearchQuery) SampleConfig() string { + return sampleConfig +} + +// Init the plugin. +func (e *ElasticsearchQuery) Init() error { + if e.URLs == nil { + return fmt.Errorf("elasticsearch urls is not defined") + } + + err := e.connectToES() + if err != nil { + e.Log.Errorf("E! 
error connecting to elasticsearch: %s", err) + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) + defer cancel() + + for i, agg := range e.Aggregations { + if agg.MeasurementName == "" { + return fmt.Errorf("field 'measurement_name' is not set") + } + if agg.DateField == "" { + return fmt.Errorf("field 'date_field' is not set") + } + err = e.initAggregation(ctx, agg, i) + if err != nil { + e.Log.Errorf("%s", err) + return nil + } + } + return nil +} + +func (e *ElasticsearchQuery) initAggregation(ctx context.Context, agg esAggregation, i int) (err error) { + // retrieve field mapping and build queries only once + agg.mapMetricFields, err = e.getMetricFields(ctx, agg) + if err != nil { + return fmt.Errorf("not possible to retrieve fields: %v", err.Error()) + } + + for _, metricField := range agg.MetricFields { + if _, ok := agg.mapMetricFields[metricField]; !ok { + return fmt.Errorf("metric field '%s' not found on index '%s'", metricField, agg.Index) + } + } + + err = agg.buildAggregationQuery() + if err != nil { + return err + } + + e.Aggregations[i] = agg + return nil +} + +func (e *ElasticsearchQuery) connectToES() error { + var clientOptions []elastic5.ClientOptionFunc + + if e.esClient != nil { + if e.esClient.IsRunning() { + return nil + } + } + + if e.httpclient == nil { + httpclient, err := e.createHTTPClient() + if err != nil { + return err + } + e.httpclient = httpclient + } + + clientOptions = append(clientOptions, + elastic5.SetHttpClient(e.httpclient), + elastic5.SetSniff(e.EnableSniffer), + elastic5.SetURL(e.URLs...), + elastic5.SetHealthcheckInterval(time.Duration(e.HealthCheckInterval)), + ) + + if e.Username != "" { + clientOptions = append(clientOptions, elastic5.SetBasicAuth(e.Username, e.Password)) + } + + if time.Duration(e.HealthCheckInterval) == 0 { + clientOptions = append(clientOptions, elastic5.SetHealthcheck(false)) + } + + client, err := elastic5.NewClient(clientOptions...) + if err != nil { + return err + } + + // check for ES version on first node + esVersion, err := client.ElasticsearchVersion(e.URLs[0]) + if err != nil { + return fmt.Errorf("elasticsearch version check failed: %s", err) + } + + esVersionSplit := strings.Split(esVersion, ".") + + // quit if ES version is not supported + if len(esVersionSplit) == 0 { + return fmt.Errorf("elasticsearch version check failed") + } + + i, err := strconv.Atoi(esVersionSplit[0]) + if err != nil || i < 5 || i > 6 { + return fmt.Errorf("elasticsearch version %s not supported (currently supported versions are 5.x and 6.x)", esVersion) + } + + e.esClient = client + return nil +} + +// Gather writes the results of the queries from Elasticsearch to the Accumulator. 
+func (e *ElasticsearchQuery) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + err := e.connectToES() + if err != nil { + return err + } + + for i, agg := range e.Aggregations { + wg.Add(1) + go func(agg esAggregation, i int) { + defer wg.Done() + err := e.esAggregationQuery(acc, agg, i) + if err != nil { + acc.AddError(fmt.Errorf("elasticsearch query aggregation %s: %s ", agg.MeasurementName, err.Error())) + } + }(agg, i) + } + + wg.Wait() + return nil +} + +func (e *ElasticsearchQuery) createHTTPClient() (*http.Client, error) { + tlsCfg, err := e.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + tr := &http.Transport{ + ResponseHeaderTimeout: time.Duration(e.Timeout), + TLSClientConfig: tlsCfg, + } + httpclient := &http.Client{ + Transport: tr, + Timeout: time.Duration(e.Timeout), + } + + return httpclient, nil +} + +func (e *ElasticsearchQuery) esAggregationQuery(acc telegraf.Accumulator, aggregation esAggregation, i int) error { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(e.Timeout)) + defer cancel() + + // try to init the aggregation query if it is not done already + if aggregation.aggregationQueryList == nil { + err := e.initAggregation(ctx, aggregation, i) + if err != nil { + return err + } + aggregation = e.Aggregations[i] + } + + searchResult, err := e.runAggregationQuery(ctx, aggregation) + if err != nil { + return err + } + + if searchResult.Aggregations == nil { + parseSimpleResult(acc, aggregation.MeasurementName, searchResult) + return nil + } + + return parseAggregationResult(acc, aggregation.aggregationQueryList, searchResult) +} + +func init() { + inputs.Add("elasticsearch_query", func() telegraf.Input { + return &ElasticsearchQuery{ + Timeout: config.Duration(time.Second * 5), + HealthCheckInterval: config.Duration(time.Second * 10), + } + }) +} diff --git a/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go new file mode 100644 index 0000000000000..d18c13afdee05 --- /dev/null +++ b/plugins/inputs/elasticsearch_query/elasticsearch_query_test.go @@ -0,0 +1,760 @@ +package elasticsearch_query + +import ( + "bufio" + "context" + "fmt" + "os" + "strconv" + "strings" + "testing" + "time" + + "github.com/docker/go-connections/nat" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + elastic5 "gopkg.in/olivere/elastic.v5" +) + +const ( + servicePort = "9200" + testindex = "test-elasticsearch" +) + +type esAggregationQueryTest struct { + queryName string + testAggregationQueryInput esAggregation + testAggregationQueryData []aggregationQueryData + expectedMetrics []telegraf.Metric + wantBuildQueryErr bool + wantGetMetricFieldsErr bool + wantQueryResErr bool +} + +var queryPeriod = config.Duration(time.Second * 600) + +var testEsAggregationData = []esAggregationQueryTest{ + { + "query 1", + esAggregation{ + Index: testindex, + MeasurementName: "measurement1", + MetricFields: []string{"size"}, + FilterQuery: "product_1", + MetricFunction: "avg", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement1", name: "size_avg", function: "avg", field: 
"size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement1", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement1", + map[string]string{"URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"size_avg": float64(202.30038022813687), "doc_count": int64(263)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 2", + esAggregation{ + Index: testindex, + MeasurementName: "measurement2", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "max", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement2", name: "size_max", function: "max", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement2", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement2", + map[string]string{"URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"size_max": float64(3301), "doc_count": int64(263)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement2", + map[string]string{"URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"size_max": float64(3318), "doc_count": int64(237)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 3", + esAggregation{ + Index: testindex, + MeasurementName: "measurement3", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "sum", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"response.keyword"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement3", name: "size_sum", function: "sum", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement3", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "200"}, + map[string]interface{}{"size_sum": float64(22790), "doc_count": int64(22)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "304"}, + map[string]interface{}{"size_sum": float64(0), "doc_count": int64(219)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement3", + map[string]string{"response_keyword": "404"}, + map[string]interface{}{"size_sum": float64(86932), "doc_count": int64(259)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 4", + esAggregation{ + Index: testindex, + MeasurementName: "measurement4", + MetricFields: []string{"size", "response_time"}, + FilterQuery: "downloads", + MetricFunction: "min", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + IncludeMissingTag: true, + MissingTagValue: "missing", + Tags: []string{"response.keyword", "URI.keyword", "method.keyword"}, + mapMetricFields: map[string]string{"size": "long", "response_time": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: 
"measurement4", name: "size_min", function: "min", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "response_time_min", function: "min", field: "response_time"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement4", name: "method_keyword", function: "terms", field: "method.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "404", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(318), "response_time_min": float64(126), "doc_count": int64(146)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "304", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(71), "doc_count": int64(113)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(490), "response_time_min": float64(1514), "doc_count": int64(3)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "404", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(318), "response_time_min": float64(237), "doc_count": int64(113)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "304", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(134), "doc_count": int64(106)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2", "method_keyword": "GET"}, + map[string]interface{}{"size_min": float64(490), "response_time_min": float64(2), "doc_count": int64(13)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1", "method_keyword": "HEAD"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(8479), "doc_count": int64(1)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement4", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2", "method_keyword": "HEAD"}, + map[string]interface{}{"size_min": float64(0), "response_time_min": float64(1059), "doc_count": int64(5)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 5", + esAggregation{ + Index: testindex, + MeasurementName: "measurement5", + FilterQuery: "product_2", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword"}, + 
mapMetricFields: map[string]string{}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement5", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement5", + map[string]string{"URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"doc_count": int64(237)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 6", + esAggregation{ + Index: testindex, + MeasurementName: "measurement6", + FilterQuery: "response: 200", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{"URI.keyword", "response.keyword"}, + mapMetricFields: map[string]string{}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement6", name: "URI_keyword", function: "terms", field: "URI.keyword"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement6", name: "response_keyword", function: "terms", field: "response.keyword"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement6", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_1"}, + map[string]interface{}{"doc_count": int64(4)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + testutil.MustMetric( + "measurement6", + map[string]string{"response_keyword": "200", "URI_keyword": "/downloads/product_2"}, + map[string]interface{}{"doc_count": int64(18)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 7 - simple query", + esAggregation{ + Index: testindex, + MeasurementName: "measurement7", + FilterQuery: "response: 200", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + []telegraf.Metric{ + testutil.MustMetric( + "measurement7", + map[string]string{}, + map[string]interface{}{"doc_count": int64(22)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 8", + esAggregation{ + Index: testindex, + MeasurementName: "measurement8", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "max", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement8", name: "size_max", function: "max", field: "size"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement8", + map[string]string{}, + map[string]interface{}{"size_max": float64(3318)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 9 - invalid function", + esAggregation{ + Index: testindex, + MeasurementName: "measurement9", + MetricFields: []string{"size"}, + FilterQuery: "downloads", + MetricFunction: "average", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + nil, + nil, + true, + false, + true, + }, + { + "query 10 - non-existing metric field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement10", + MetricFields: []string{"none"}, + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, + { + "query 11 - non-existing index field", + 
esAggregation{ + Index: "notanindex", + MeasurementName: "measurement11", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, + { + "query 12 - non-existing timestamp field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement12", + MetricFields: []string{"size"}, + MetricFunction: "avg", + DateField: "@notatimestamp", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement12", name: "size_avg", function: "avg", field: "size"}, + isParent: true, + }, + }, + []telegraf.Metric{ + testutil.MustMetric( + "measurement12", + map[string]string{}, + map[string]interface{}{"size_avg": float64(0)}, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + false, + false, + false, + }, + { + "query 13 - non-existing tag field", + esAggregation{ + Index: testindex, + MeasurementName: "measurement13", + MetricFields: []string{"size"}, + MetricFunction: "avg", + DateField: "@timestamp", + QueryPeriod: queryPeriod, + IncludeMissingTag: false, + Tags: []string{"nothere"}, + mapMetricFields: map[string]string{"size": "long"}, + }, + []aggregationQueryData{ + { + aggKey: aggKey{measurement: "measurement13", name: "size_avg", function: "avg", field: "size"}, + isParent: false, + }, + { + aggKey: aggKey{measurement: "measurement13", name: "nothere", function: "terms", field: "nothere"}, + isParent: true, + }, + }, + nil, + false, + false, + false, + }, + { + "query 14 - non-existing custom date/time format", + esAggregation{ + Index: testindex, + MeasurementName: "measurement14", + DateField: "@timestamp", + DateFieldFormat: "yyyy", + QueryPeriod: queryPeriod, + Tags: []string{}, + mapMetricFields: map[string]string{}, + }, + nil, + nil, + false, + false, + true, + }, +} + +func setupIntegrationTest(t *testing.T) (*testutil.Container, error) { + type nginxlog struct { + IPaddress string `json:"IP"` + Timestamp time.Time `json:"@timestamp"` + Method string `json:"method"` + URI string `json:"URI"` + Httpversion string `json:"http_version"` + Response string `json:"response"` + Size float64 `json:"size"` + ResponseTime float64 `json:"response_time"` + } + + container := testutil.Container{ + Image: "elasticsearch:6.8.23", + ExposedPorts: []string{servicePort}, + Env: map[string]string{ + "discovery.type": "single-node", + }, + WaitingFor: wait.ForAll( + wait.ForLog("] mode [basic] - valid"), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + + url := fmt.Sprintf( + "http://%s:%s", container.Address, container.Ports[servicePort], + ) + e := &ElasticsearchQuery{ + URLs: []string{url}, + Timeout: config.Duration(time.Second * 30), + Log: testutil.Logger{}, + } + + err = e.connectToES() + if err != nil { + return &container, err + } + + bulkRequest := e.esClient.Bulk() + + // populate elasticsearch with nginx_logs test data file + file, err := os.Open("testdata/nginx_logs") + if err != nil { + return &container, err + } + + defer file.Close() + + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + parts := strings.Split(scanner.Text(), " ") + size, _ := strconv.Atoi(parts[9]) + responseTime, _ := strconv.Atoi(parts[len(parts)-1]) + + logline := nginxlog{ + IPaddress: parts[0], + Timestamp: time.Now().UTC(), + Method: strings.ReplaceAll(parts[5], `"`, ""), + 
URI: parts[6], + Httpversion: strings.ReplaceAll(parts[7], `"`, ""), + Response: parts[8], + Size: float64(size), + ResponseTime: float64(responseTime), + } + + bulkRequest.Add(elastic5.NewBulkIndexRequest(). + Index(testindex). + Type("testquery_data"). + Doc(logline)) + } + if scanner.Err() != nil { + return &container, scanner.Err() + } + + _, err = bulkRequest.Do(context.Background()) + if err != nil { + return &container, err + } + + // force elastic to refresh indexes to get new batch data + ctx := context.Background() + _, err = e.esClient.Refresh().Do(ctx) + if err != nil { + return &container, err + } + + return &container, nil +} + +func TestElasticsearchQueryIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + container, err := setupIntegrationTest(t) + require.NoError(t, err) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + var acc testutil.Accumulator + e := &ElasticsearchQuery{ + URLs: []string{ + fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]), + }, + Timeout: config.Duration(time.Second * 30), + Log: testutil.Logger{}, + } + + err = e.connectToES() + require.NoError(t, err) + + var aggs []esAggregation + var aggsErr []esAggregation + + for _, agg := range testEsAggregationData { + if !agg.wantQueryResErr { + aggs = append(aggs, agg.testAggregationQueryInput) + } + } + e.Aggregations = aggs + + require.NoError(t, e.Init()) + require.NoError(t, e.Gather(&acc)) + + if len(acc.Errors) > 0 { + t.Errorf("%s", acc.Errors) + } + + var expectedMetrics []telegraf.Metric + for _, result := range testEsAggregationData { + expectedMetrics = append(expectedMetrics, result.expectedMetrics...) + } + testutil.RequireMetricsEqual(t, expectedMetrics, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) + + // aggregations that should return an error + for _, agg := range testEsAggregationData { + if agg.wantQueryResErr { + aggsErr = append(aggsErr, agg.testAggregationQueryInput) + } + } + e.Aggregations = aggsErr + require.NoError(t, e.Init()) + require.NoError(t, e.Gather(&acc)) + + if len(acc.Errors) != len(aggsErr) { + t.Errorf("expecting %v query result errors, got %v: %s", len(aggsErr), len(acc.Errors), acc.Errors) + } +} + +func TestElasticsearchQueryIntegration_getMetricFields(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + container, err := setupIntegrationTest(t) + require.NoError(t, err) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + type args struct { + ctx context.Context + aggregation esAggregation + } + + e := &ElasticsearchQuery{ + URLs: []string{ + fmt.Sprintf("http://%s:%s", container.Address, container.Ports[servicePort]), + }, + Timeout: config.Duration(time.Second * 30), + Log: testutil.Logger{}, + } + + err = e.connectToES() + require.NoError(t, err) + + type test struct { + name string + e *ElasticsearchQuery + args args + want map[string]string + wantErr bool + } + + var tests []test + + for _, d := range testEsAggregationData { + tests = append(tests, test{ + "getMetricFields " + d.queryName, + e, + args{context.Background(), d.testAggregationQueryInput}, + d.testAggregationQueryInput.mapMetricFields, + d.wantGetMetricFieldsErr, + }) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.e.getMetricFields(tt.args.ctx, tt.args.aggregation) + if (err != nil) != tt.wantErr { +
t.Errorf("ElasticsearchQuery.buildAggregationQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !cmp.Equal(got, tt.want) { + t.Errorf("ElasticsearchQuery.getMetricFields() = error = %s", cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestElasticsearchQuery_buildAggregationQuery(t *testing.T) { + type test struct { + name string + aggregation esAggregation + want []aggregationQueryData + wantErr bool + } + var tests []test + + for _, d := range testEsAggregationData { + tests = append(tests, test{ + "build " + d.queryName, + d.testAggregationQueryInput, + d.testAggregationQueryData, + d.wantBuildQueryErr, + }) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.aggregation.buildAggregationQuery() + if (err != nil) != tt.wantErr { + t.Errorf("ElasticsearchQuery.buildAggregationQuery() error = %v, wantErr %v", err, tt.wantErr) + return + } + + opts := []cmp.Option{ + cmp.AllowUnexported(aggKey{}, aggregationQueryData{}), + cmpopts.IgnoreFields(aggregationQueryData{}, "aggregation"), + cmpopts.SortSlices(func(x, y aggregationQueryData) bool { return x.aggKey.name > y.aggKey.name }), + } + + if !cmp.Equal(tt.aggregation.aggregationQueryList, tt.want, opts...) { + t.Errorf("ElasticsearchQuery.buildAggregationQuery(): %s error = %s ", tt.name, cmp.Diff(tt.aggregation.aggregationQueryList, tt.want, opts...)) + } + }) + } +} diff --git a/plugins/inputs/elasticsearch_query/sample.conf b/plugins/inputs/elasticsearch_query/sample.conf new file mode 100644 index 0000000000000..11637774a3401 --- /dev/null +++ b/plugins/inputs/elasticsearch_query/sample.conf @@ -0,0 +1,71 @@ +# Derive metrics from aggregating Elasticsearch query results +[[inputs.elasticsearch_query]] + ## The full HTTP endpoint URL for your Elasticsearch instance + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + urls = [ "http://node1.es.example.com:9200" ] # required. + + ## Elasticsearch client timeout, defaults to "5s". + # timeout = "5s" + + ## Set to true to ask Elasticsearch a list of all cluster nodes, + ## thus it is not necessary to list all nodes in the urls config option + # enable_sniffer = false + + ## Set the interval to check if the Elasticsearch nodes are available + ## This option is only used if enable_sniffer is also set (0s to disable it) + # health_check_interval = "10s" + + ## HTTP basic authentication details (eg. when using x-pack) + # username = "telegraf" + # password = "mypassword" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + [[inputs.elasticsearch_query.aggregation]] + ## measurement name for the results of the aggregation query + measurement_name = "measurement" + + ## Elasticsearch indexes to query (accept wildcards). + index = "index-*" + + ## The date/time field in the Elasticsearch index (mandatory). + date_field = "@timestamp" + + ## If the field used for the date/time field in Elasticsearch is also using + ## a custom date/time format it may be required to provide the format to + ## correctly parse the field. + ## + ## If using one of the built in elasticsearch formats this is not required. + # date_field_custom_format = "" + + ## Time window to query (eg. "1m" to query documents from last minute). 
diff --git a/plugins/inputs/elasticsearch_query/sample.conf b/plugins/inputs/elasticsearch_query/sample.conf new file mode 100644 index 0000000000000..11637774a3401 --- /dev/null +++ b/plugins/inputs/elasticsearch_query/sample.conf @@ -0,0 +1,71 @@ +# Derive metrics from aggregating Elasticsearch query results +[[inputs.elasticsearch_query]] + ## The full HTTP endpoint URL for your Elasticsearch instance + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be queried each interval. + urls = [ "http://node1.es.example.com:9200" ] # required. + + ## Elasticsearch client timeout, defaults to "5s". + # timeout = "5s" + + ## Set to true to ask Elasticsearch for a list of all cluster nodes, + ## thus it is not necessary to list all nodes in the urls config option + # enable_sniffer = false + + ## Set the interval to check if the Elasticsearch nodes are available + ## This option is only used if enable_sniffer is also set (0s to disable it) + # health_check_interval = "10s" + + ## HTTP basic authentication details (e.g. when using x-pack) + # username = "telegraf" + # password = "mypassword" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + [[inputs.elasticsearch_query.aggregation]] + ## measurement name for the results of the aggregation query + measurement_name = "measurement" + + ## Elasticsearch indexes to query (accepts wildcards). + index = "index-*" + + ## The date/time field in the Elasticsearch index (mandatory). + date_field = "@timestamp" + + ## If the field used for the date/time field in Elasticsearch is also using + ## a custom date/time format it may be required to provide the format to + ## correctly parse the field. + ## + ## If using one of the built in elasticsearch formats this is not required. + # date_field_custom_format = "" + + ## Time window to query (e.g. "1m" to query documents from the last minute). + ## Normally this should be set to the same value as the collection interval + query_period = "1m" + + ## Lucene query to filter results + # filter_query = "*" + + ## Fields to aggregate (must be numeric fields) + # metric_fields = ["metric"] + + ## Aggregation function to use on the metric fields + ## Must be set if 'metric_fields' is set + ## Valid values are: avg, sum, min, max + # metric_function = "avg" + + ## Fields to be used as tags + ## Must be text, non-analyzed fields. Metric aggregations are performed per tag + # tags = ["field.keyword", "field2.keyword"] + + ## Set to true to not ignore documents when the tag(s) above are missing + # include_missing_tag = false + + ## String value of the tag when the tag does not exist + ## Used when include_missing_tag is true + # missing_tag_value = "null"
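Note: as a concrete illustration of the sample above, a hypothetical aggregation over the nginx testdata shipped with this patch could look like the snippet below. The response_time and response field names are assumptions drawn from the integration test's log-line struct, not settings confirmed by the patch, and the URL and index name are placeholders:

[[inputs.elasticsearch_query]]
  urls = [ "http://localhost:9200" ]

  [[inputs.elasticsearch_query.aggregation]]
    measurement_name = "nginx_avg_response"
    index = "testindex-*"
    date_field = "@timestamp"
    query_period = "1m"
    metric_fields = ["response_time"]
    metric_function = "avg"
    tags = ["response.keyword"]

Under those assumptions this would emit one nginx_avg_response metric per HTTP response code each interval, averaging response_time over the last minute.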
http" 22809 +188.138.60.101 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8807 +93.180.71.3 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30172 +46.4.66.76 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 1973 +62.75.198.179 - - [17/May/2015:08:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10182 +80.91.33.133 - - [17/May/2015:08:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14307 +173.203.139.108 - - [17/May/2015:08:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10828 +210.245.80.75 - - [17/May/2015:08:05:32 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 21956 +46.4.83.163 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5726 +91.234.194.89 - - [17/May/2015:08:05:18 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10841 +31.22.86.126 - - [17/May/2015:08:05:24 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18132 +217.168.17.5 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 200 3301 "-" "-" 10094 +80.91.33.133 - - [17/May/2015:08:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12355 +173.203.139.108 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27325 +80.91.33.133 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14101 +5.83.131.103 - - [17/May/2015:08:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20175 +80.91.33.133 - - [17/May/2015:08:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21384 +200.6.73.40 - - [17/May/2015:08:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6570 +80.91.33.133 - - [17/May/2015:08:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26145 +93.180.71.3 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 32705 +62.75.198.179 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18865 +50.57.209.92 - - [17/May/2015:08:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21639 +188.138.60.101 - - [17/May/2015:08:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31242 +46.4.66.76 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 5910 +50.57.209.92 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22900 +91.239.186.133 - - [17/May/2015:08:05:04 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23919 +173.203.139.108 - - [17/May/2015:08:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 
(0.9.7.9)" 25169 +80.91.33.133 - - [17/May/2015:08:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24395 +93.190.71.150 - - [17/May/2015:08:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 25750 +91.234.194.89 - - [17/May/2015:08:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26673 +46.4.83.163 - - [17/May/2015:08:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32509 +173.203.139.108 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32714 +54.187.216.43 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 5016 +50.57.209.92 - - [17/May/2015:08:05:59 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14449 +80.91.33.133 - - [17/May/2015:08:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13183 +173.203.139.108 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 7791 +5.83.131.103 - - [17/May/2015:08:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 586 +173.203.139.108 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5036 +80.91.33.133 - - [17/May/2015:08:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20358 +50.57.209.92 - - [17/May/2015:08:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2106 +80.91.33.133 - - [17/May/2015:08:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9757 +37.26.93.214 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 200 3318 "-" "Go 1.1 package http" 6222 +23.23.226.37 - - [17/May/2015:08:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 200 2578 "-" "urlgrabber/3.9.1 yum/3.4.3" 9523 +93.180.71.3 - - [17/May/2015:08:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 7228 +173.203.139.108 - - [17/May/2015:08:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31464 +62.75.198.179 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22462 +31.22.86.126 - - [17/May/2015:08:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29906 +50.57.209.92 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16217 +91.239.186.133 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18335 +46.4.66.76 - - [17/May/2015:08:05:00 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 27375 +200.6.73.40 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32073 +173.203.139.108 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31071 +93.190.71.150 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1200 
+91.234.194.89 - - [17/May/2015:08:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13143 +173.203.139.108 - - [17/May/2015:08:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16138 +80.91.33.133 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21432 +217.168.17.5 - - [17/May/2015:08:05:27 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 1419 +46.4.83.163 - - [17/May/2015:08:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28449 +80.91.33.133 - - [17/May/2015:08:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25906 +50.57.209.92 - - [17/May/2015:08:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27099 +173.203.139.108 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32238 +188.138.60.101 - - [17/May/2015:08:05:04 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 237 +80.91.33.133 - - [17/May/2015:08:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7103 +134.119.20.172 - - [17/May/2015:08:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5423 +173.203.139.108 - - [17/May/2015:08:05:29 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6373 +80.91.33.133 - - [17/May/2015:08:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22230 +91.121.161.213 - - [17/May/2015:08:05:14 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14196 +80.91.33.133 - - [17/May/2015:08:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17820 +80.91.33.133 - - [17/May/2015:08:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9097 +37.26.93.214 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 200 490 "-" "Go 1.1 package http" 27632 +5.83.131.103 - - [17/May/2015:08:05:57 +0000] "GET /downloads/product_1 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14609 +50.57.209.92 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21926 +173.203.139.108 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4915 +54.64.16.235 - - [17/May/2015:08:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 12816 +93.180.71.3 - - [17/May/2015:08:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30742 +202.143.95.26 - - [17/May/2015:08:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24544 +202.143.95.26 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25819 +202.143.95.26 - - [17/May/2015:08:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26831 +80.91.33.133 - - [17/May/2015:08:05:14 +0000] "GET 
/downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 1344 +91.239.186.133 - - [17/May/2015:08:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4987 +173.203.139.108 - - [17/May/2015:08:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 404 328 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13419 +80.91.33.133 - - [17/May/2015:08:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12879 +87.233.156.242 - - [17/May/2015:08:05:37 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20611 +62.75.198.179 - - [17/May/2015:08:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1387 +50.57.209.92 - - [17/May/2015:08:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31286 +80.91.33.133 - - [17/May/2015:08:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15247 +93.190.71.150 - - [17/May/2015:08:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 134 +46.4.66.76 - - [17/May/2015:08:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 23909 +80.91.33.133 - - [17/May/2015:08:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15771 +91.234.194.89 - - [17/May/2015:08:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4641 +217.168.17.5 - - [17/May/2015:08:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 6382 +46.4.83.163 - - [17/May/2015:08:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14599 +50.57.209.92 - - [17/May/2015:08:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8263 +200.6.73.40 - - [17/May/2015:08:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23514 +91.121.161.213 - - [17/May/2015:08:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29473 +80.91.33.133 - - [17/May/2015:08:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26659 +188.138.60.101 - - [17/May/2015:08:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5147 +144.76.151.58 - - [17/May/2015:08:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21698 +134.119.20.172 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21077 +80.91.33.133 - - [17/May/2015:09:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 7173 +80.91.33.133 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1878 +5.83.131.103 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24451 +93.180.71.3 - - [17/May/2015:09:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 30170 +80.91.33.133 - - [17/May/2015:09:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13156 
+50.57.209.92 - - [17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 306 +5.83.131.103 - - [17/May/2015:09:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 345 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24862 +62.75.167.106 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10227 +37.26.93.214 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Go 1.1 package http" 28504 +93.64.134.186 - - [17/May/2015:09:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27681 +87.233.156.242 - - [17/May/2015:09:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 1502 +80.91.33.133 - - [17/May/2015:09:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18177 +80.91.33.133 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7934 +54.193.30.212 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 2 +62.75.198.179 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23920 +91.239.186.133 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9333 +83.161.14.106 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19640 +80.91.33.133 - - [17/May/2015:09:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11061 +80.91.33.133 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 24501 +93.190.71.150 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15895 +50.57.209.92 - - [17/May/2015:09:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20558 +80.91.33.133 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 2338 +80.91.33.133 - - [17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12192 +217.168.17.5 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 9824 +80.91.33.133 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2246 +54.191.136.177 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 7239 +80.91.33.133 - - [17/May/2015:09:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 21154 +91.234.194.89 - - [17/May/2015:09:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2966 +80.91.33.133 - - [17/May/2015:09:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 10715 +80.91.33.133 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 14856 +46.4.83.163 - - [17/May/2015:09:05:12 +0000] "GET /downloads/product_2 
HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17717 +91.121.161.213 - - [17/May/2015:09:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 346 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9951 +188.138.60.101 - - [17/May/2015:09:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 25787 +144.76.151.58 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4930 +195.154.77.170 - - [17/May/2015:09:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21921 +50.57.209.92 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29773 +31.22.86.126 - - [17/May/2015:09:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7593 +54.64.16.235 - - [17/May/2015:09:05:51 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 26867 +202.143.95.26 - - [17/May/2015:09:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31361 +202.143.95.26 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13167 +87.233.156.242 - - [17/May/2015:09:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22554 +62.75.167.106 - - [17/May/2015:09:05:37 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29795 +152.90.220.17 - - [17/May/2015:09:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18753 +80.91.33.133 - - [17/May/2015:09:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 27083 +93.180.71.3 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 28187 +80.91.33.133 - - [17/May/2015:09:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25595 +5.83.131.103 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 26070 +5.83.131.103 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27724 +200.6.73.40 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8086 +46.4.88.134 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 4853 +50.57.209.92 - - [17/May/2015:09:05:34 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9464 +93.64.134.186 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 12194 +80.91.33.133 - - [17/May/2015:09:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 26621 +62.75.198.180 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29857 +80.91.33.133 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 20514 +80.91.33.133 - - [17/May/2015:09:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 
(0.8.16~exp12ubuntu10.16)" 5526 +62.75.198.179 - - [17/May/2015:09:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14143 +80.91.33.133 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 20873 +91.239.186.133 - - [17/May/2015:09:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23230 +80.91.33.133 - - [17/May/2015:09:05:25 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25246 +83.161.14.106 - - [17/May/2015:09:05:45 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19052 +80.91.33.133 - - [17/May/2015:09:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12362 +195.154.77.170 - - [17/May/2015:09:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10153 +93.190.71.150 - - [17/May/2015:09:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22418 +80.91.33.133 - - [17/May/2015:09:05:43 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 6565 +80.91.33.133 - - [17/May/2015:09:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9883 +144.76.160.62 - - [17/May/2015:09:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 2564 +91.121.161.213 - - [17/May/2015:09:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17140 +46.4.83.163 - - [17/May/2015:09:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22794 +91.234.194.89 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17718 +50.57.209.92 - - [17/May/2015:09:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5434 +188.138.60.101 - - [17/May/2015:09:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 573 +210.245.80.75 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 28482 +144.76.151.58 - - [17/May/2015:09:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31161 +80.91.33.133 - - [17/May/2015:09:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24151 +144.76.117.56 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 6185 +80.91.33.133 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 6276 +31.22.86.126 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 27127 +80.91.33.133 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 9549 +62.75.167.106 - - [17/May/2015:09:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21397 +87.233.156.242 - - [17/May/2015:09:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 10781 +152.90.220.18 - - 
[17/May/2015:09:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19773 +93.180.71.3 - - [17/May/2015:09:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 11889 +80.91.33.133 - - [17/May/2015:09:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14111 +31.22.86.126 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 319 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17787 +50.57.209.92 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18330 +5.83.131.103 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8993 +46.4.88.134 - - [17/May/2015:09:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17460 +80.91.33.133 - - [17/May/2015:09:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 32412 +80.91.33.133 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12639 +62.75.198.180 - - [17/May/2015:09:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32511 +80.91.33.133 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29012 +80.91.33.133 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9767 +5.83.131.103 - - [17/May/2015:09:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 12212 +5.83.131.103 - - [17/May/2015:09:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2440 +5.83.131.103 - - [17/May/2015:09:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8157 +195.154.77.170 - - [17/May/2015:09:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16242 +202.143.95.26 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22261 +93.64.134.186 - - [17/May/2015:09:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15048 +85.214.47.178 - - [17/May/2015:09:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27105 +83.161.14.106 - - [17/May/2015:09:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 32234 +80.70.214.71 - - [17/May/2015:09:05:20 +0000] "HEAD /downloads/product_1 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 8479 +87.233.156.242 - - [17/May/2015:09:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20831 +54.64.16.235 - - [17/May/2015:09:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 18289 +50.57.209.92 - - [17/May/2015:09:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9858 +91.239.186.133 - - [17/May/2015:09:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20442 +91.121.161.213 - - [17/May/2015:09:05:09 
+0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9004 +200.6.73.40 - - [17/May/2015:09:05:30 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13221 +62.75.198.179 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 954 +93.190.71.150 - - [17/May/2015:09:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26398 +80.91.33.133 - - [17/May/2015:09:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22775 +80.91.33.133 - - [17/May/2015:09:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13886 +80.91.33.133 - - [17/May/2015:09:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19340 +144.76.160.62 - - [17/May/2015:09:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17157 +80.91.33.133 - - [17/May/2015:09:05:59 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9971 +217.168.17.5 - - [17/May/2015:09:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 26268 +80.91.33.133 - - [17/May/2015:09:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 5983 +80.91.33.133 - - [17/May/2015:09:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15296 +144.76.117.56 - - [17/May/2015:09:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 13922 +144.76.151.58 - - [17/May/2015:09:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10692 +80.91.33.133 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 22550 +62.75.167.106 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20757 +80.91.33.133 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25956 +37.187.238.39 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 16674 +80.70.214.71 - - [17/May/2015:10:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 327 "-" "Wget/1.13.4 (linux-gnu)" 15327 +91.234.194.89 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21807 +80.91.33.133 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 20469 +188.138.60.101 - - [17/May/2015:10:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10122 +80.91.33.133 - - [17/May/2015:10:05:01 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1971 +80.91.33.133 - - [17/May/2015:10:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7263 +93.180.71.3 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 953 +46.4.88.134 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_1 
HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 23703 +80.91.33.133 - - [17/May/2015:10:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 126 +62.210.138.59 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 19171 +31.22.86.126 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 335 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31107 +80.91.33.133 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 8252 +54.86.157.236 - - [17/May/2015:10:05:24 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 25651 +195.154.233.202 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 318 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 3446 +54.86.157.236 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 20770 +80.91.33.133 - - [17/May/2015:10:05:14 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27979 +94.23.21.169 - - [17/May/2015:10:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28723 +54.86.157.236 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 13439 +195.154.77.170 - - [17/May/2015:10:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22432 +54.86.157.236 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 1572 +85.214.47.178 - - [17/May/2015:10:05:57 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27196 +5.83.131.103 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9637 +5.83.131.103 - - [17/May/2015:10:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 18830 +5.83.131.103 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 844 +5.83.131.103 - - [17/May/2015:10:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20882 +80.91.33.133 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1325 +80.91.33.133 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11125 +84.53.65.28 - - [17/May/2015:10:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10771 +80.91.33.133 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24891 +54.86.157.236 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23541 +217.168.17.5 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.10.3)" 22323 +91.121.161.213 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29114 +80.70.214.71 - - [17/May/2015:10:05:33 
+0000] "GET /downloads/product_1 HTTP/1.1" 404 329 "-" "Wget/1.13.4 (linux-gnu)" 13629 +144.76.160.62 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32440 +54.86.157.236 - - [17/May/2015:10:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 20402 +93.64.134.186 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5113 +93.190.71.150 - - [17/May/2015:10:05:41 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 31729 +87.233.156.242 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 28958 +80.91.33.133 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15630 +91.239.186.133 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 7488 +62.75.198.179 - - [17/May/2015:10:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9316 +144.76.117.56 - - [17/May/2015:10:05:46 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9965 +178.32.54.253 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2881 +37.187.238.39 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17544 +83.161.14.106 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11419 +54.86.157.236 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 16406 +91.194.188.90 - - [17/May/2015:10:05:51 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 28324 +83.161.14.106 - - [17/May/2015:10:05:13 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 1893 +80.91.33.133 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14697 +93.180.71.3 - - [17/May/2015:10:05:34 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 16168 +62.210.138.59 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 663 +46.4.88.134 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 27962 +202.143.95.26 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18539 +202.143.95.26 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 13495 +202.143.95.26 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3192 +62.75.198.180 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4349 +144.76.137.134 - - [17/May/2015:10:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1395 +80.91.33.133 - - [17/May/2015:10:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 
"-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12898 +54.86.157.236 - - [17/May/2015:10:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 26930 +80.70.214.71 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 326 "-" "Wget/1.13.4 (linux-gnu)" 16662 +91.234.194.89 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9445 +188.138.60.101 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18804 +80.91.33.133 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 22429 +195.154.233.202 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 8456 +94.23.21.169 - - [17/May/2015:10:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32187 +144.76.151.58 - - [17/May/2015:10:05:10 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29276 +80.91.33.133 - - [17/May/2015:10:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 9700 +62.75.167.106 - - [17/May/2015:10:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10078 +80.91.33.133 - - [17/May/2015:10:05:41 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7600 +50.57.209.92 - - [17/May/2015:10:05:16 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8540 +202.143.95.26 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24400 +200.6.73.40 - - [17/May/2015:10:05:38 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29363 +195.154.77.170 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17025 +54.187.216.43 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 27997 +80.91.33.133 - - [17/May/2015:10:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 1806 +80.91.33.133 - - [17/May/2015:10:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 28234 +54.86.157.236 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 19286 +202.143.95.26 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 325 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19522 +202.143.95.26 - - [17/May/2015:10:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 23841 +54.86.157.236 - - [17/May/2015:10:05:02 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 31135 +80.91.33.133 - - [17/May/2015:10:05:50 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 21510 +80.91.33.133 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26977 +80.91.33.133 - - [17/May/2015:10:05:55 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 
"-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 1078 +80.91.33.133 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7473 +84.53.65.28 - - [17/May/2015:10:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28347 +92.50.100.22 - - [17/May/2015:10:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8699 +85.214.47.178 - - [17/May/2015:10:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2078 +80.91.33.133 - - [17/May/2015:10:05:08 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 7013 +54.86.157.236 - - [17/May/2015:10:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 29440 +5.83.131.103 - - [17/May/2015:10:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 24206 +37.187.238.39 - - [17/May/2015:10:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 5674 +80.91.33.133 - - [17/May/2015:10:05:04 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 15781 +195.210.47.239 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 1462 +80.91.33.133 - - [17/May/2015:10:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9446 +54.64.16.235 - - [17/May/2015:10:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23687 +178.32.54.253 - - [17/May/2015:10:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17314 +144.92.16.161 - - [17/May/2015:10:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 4021 +54.86.157.236 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 13168 +87.233.156.242 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 8142 +31.22.86.126 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 332 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 28923 +80.91.33.133 - - [17/May/2015:10:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 17021 +91.121.161.213 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 711 +80.91.33.133 - - [17/May/2015:10:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15815 +50.57.209.92 - - [17/May/2015:10:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 12290 +91.239.186.133 - - [17/May/2015:10:05:15 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9172 +144.76.117.56 - - [17/May/2015:10:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 27106 +144.76.160.62 - - [17/May/2015:10:05:47 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 2607 +62.210.138.59 - - [17/May/2015:10:05:45 +0000] "GET /downloads/product_2 
HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26922 +54.86.157.236 - - [17/May/2015:10:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 2045 +62.75.198.179 - - [17/May/2015:10:05:14 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14090 +93.190.71.150 - - [17/May/2015:10:05:07 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2233 +144.76.117.56 - - [17/May/2015:10:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14988 +94.23.21.169 - - [17/May/2015:10:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11645 +91.194.188.90 - - [17/May/2015:10:05:05 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 28064 +93.64.134.186 - - [17/May/2015:10:05:51 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16583 +54.86.157.236 - - [17/May/2015:10:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 23208 +80.70.214.71 - - [17/May/2015:10:05:23 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 1059 +93.180.71.3 - - [17/May/2015:10:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 16367 +195.154.233.202 - - [17/May/2015:10:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26788 +193.192.58.163 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6753 +144.76.137.134 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18307 +54.86.157.236 - - [17/May/2015:11:05:22 +0000] "GET /downloads/product_1 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 10520 +83.161.14.106 - - [17/May/2015:11:05:09 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 5640 +144.76.151.58 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9992 +144.92.16.161 - - [17/May/2015:11:05:06 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 3262 +195.154.77.170 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 17687 +62.75.198.180 - - [17/May/2015:11:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18911 +91.234.194.89 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22038 +80.91.33.133 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 2238 +188.138.60.101 - - [17/May/2015:11:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10581 +62.75.167.106 - - [17/May/2015:11:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14869 +46.4.88.134 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 6669 +80.91.33.133 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12780 +80.91.33.133 - - 
[17/May/2015:11:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24133 +84.53.65.28 - - [17/May/2015:11:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 14350 +152.90.220.17 - - [17/May/2015:11:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23513 +80.91.33.133 - - [17/May/2015:11:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31695 +80.91.33.133 - - [17/May/2015:11:05:21 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 12243 +178.32.54.253 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2641 +54.72.39.202 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 200 951 "-" "urlgrabber/3.9.1 yum/3.4.3" 27639 +91.120.61.154 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21180 +37.187.238.39 - - [17/May/2015:11:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 30661 +85.214.47.178 - - [17/May/2015:11:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 20380 +80.91.33.133 - - [17/May/2015:11:05:47 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11957 +5.83.131.103 - - [17/May/2015:11:05:10 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 19230 +200.6.73.40 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4087 +5.83.131.103 - - [17/May/2015:11:05:45 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 16383 +91.121.161.213 - - [17/May/2015:11:05:08 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11487 +91.239.186.133 - - [17/May/2015:11:05:40 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 11774 +50.57.209.92 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28472 +80.91.33.133 - - [17/May/2015:11:05:18 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 24011 +144.92.16.161 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 26633 +87.233.156.242 - - [17/May/2015:11:05:33 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 16170 +94.23.21.169 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 15992 +5.83.131.103 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20999 +80.91.33.133 - - [17/May/2015:11:05:40 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 23097 +202.143.95.26 - - [17/May/2015:11:05:30 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3282 +202.143.95.26 - - [17/May/2015:11:05:44 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 4869 +80.91.33.133 - - [17/May/2015:11:05:28 +0000] "GET 
/downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 9310 +80.91.33.133 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 23547 +80.91.33.133 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 5516 +80.91.33.133 - - [17/May/2015:11:05:13 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 26601 +62.210.138.59 - - [17/May/2015:11:05:23 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 26830 +144.76.160.62 - - [17/May/2015:11:05:06 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 15405 +93.190.71.150 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16982 +80.91.33.133 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 6019 +202.143.95.26 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3822 +193.192.58.163 - - [17/May/2015:11:05:54 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13461 +195.154.233.202 - - [17/May/2015:11:05:46 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32439 +80.70.214.71 - - [17/May/2015:11:05:59 +0000] "HEAD /downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 31402 +62.75.198.179 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 452 +80.91.33.133 - - [17/May/2015:11:05:51 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25508 +144.92.16.161 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 29252 +195.154.77.170 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19649 +50.57.209.92 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 24457 +144.76.117.56 - - [17/May/2015:11:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 10519 +80.91.33.133 - - [17/May/2015:11:05:36 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 6815 +144.76.137.134 - - [17/May/2015:11:05:07 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 798 +188.138.60.101 - - [17/May/2015:11:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19441 +54.172.198.124 - - [17/May/2015:11:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 200 2582 "-" "urlgrabber/3.9.1 yum/3.4.3" 17903 +37.187.238.39 - - [17/May/2015:11:05:27 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 3443 +178.32.54.253 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9634 +62.75.198.180 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5417 +62.75.167.106 - - [17/May/2015:11:05:26 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian 
APT-HTTP/1.3 (0.9.7.9)" 1055 +195.210.47.239 - - [17/May/2015:11:05:36 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 4218 +91.234.194.89 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23355 +31.22.86.126 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 29547 +91.194.188.90 - - [17/May/2015:11:05:42 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Wget/1.13.4 (linux-gnu)" 26988 +92.50.100.22 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 13600 +144.76.151.58 - - [17/May/2015:11:05:45 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 18988 +93.64.134.186 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2281 +85.214.47.178 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 16054 +94.23.21.169 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 21647 +80.91.33.133 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 31277 +80.91.33.133 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 19500 +91.121.161.213 - - [17/May/2015:11:05:03 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 29579 +83.161.14.106 - - [17/May/2015:11:05:52 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 1080 +54.64.16.235 - - [17/May/2015:11:05:43 +0000] "GET /downloads/product_2 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.20.1)" 15057 +84.53.65.28 - - [17/May/2015:11:05:31 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5805 +80.91.33.133 - - [17/May/2015:11:05:09 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 32764 +50.57.209.92 - - [17/May/2015:11:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 334 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 28248 +91.239.186.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 32046 +144.92.16.161 - - [17/May/2015:11:05:30 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 31342 +62.210.138.59 - - [17/May/2015:11:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22861 +210.245.80.75 - - [17/May/2015:11:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 32649 +80.91.33.133 - - [17/May/2015:11:05:12 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 11268 +83.161.14.106 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8233 +87.233.156.242 - - [17/May/2015:11:05:02 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 10052 +5.83.131.103 - - [17/May/2015:11:05:49 +0000] "GET /downloads/product_1 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 20084 +80.91.33.133 
- - [17/May/2015:11:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 9007 +91.120.61.154 - - [17/May/2015:11:05:48 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8410 +195.154.233.202 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 20582 +80.91.33.133 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 8327 +193.192.58.163 - - [17/May/2015:11:05:58 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4041 +93.190.71.150 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 26973 +144.76.160.62 - - [17/May/2015:11:05:20 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 24342 +50.57.209.92 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 27744 +62.75.198.179 - - [17/May/2015:11:05:19 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2455 +193.192.59.41 - - [17/May/2015:11:05:55 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 19596 +195.154.77.170 - - [17/May/2015:11:05:35 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23424 +80.91.33.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 4171 +200.6.73.40 - - [17/May/2015:11:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8274 +188.138.60.101 - - [17/May/2015:11:05:56 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2949 +80.91.33.133 - - [17/May/2015:11:05:53 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 5641 +80.91.33.133 - - [17/May/2015:11:05:42 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 28746 +80.91.33.133 - - [17/May/2015:11:05:17 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 18396 +80.91.33.133 - - [17/May/2015:11:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 17638 +80.91.33.133 - - [17/May/2015:11:05:23 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.17)" 7865 +144.76.137.134 - - [17/May/2015:11:05:57 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 4280 +80.70.214.71 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Wget/1.13.4 (linux-gnu)" 32436 +144.76.117.56 - - [17/May/2015:11:05:28 +0000] "GET /downloads/product_1 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 30048 +94.23.21.169 - - [17/May/2015:11:05:21 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6186 +198.61.216.151 - - [17/May/2015:11:05:16 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 21567 +80.91.33.133 - - [17/May/2015:11:05:11 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 674 +91.194.188.90 - - [17/May/2015:11:05:32 +0000] "HEAD 
/downloads/product_2 HTTP/1.1" 200 0 "-" "Wget/1.13.4 (linux-gnu)" 5354 +62.75.198.180 - - [17/May/2015:11:05:39 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 5345 +80.91.33.133 - - [17/May/2015:11:05:52 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 2326 +31.22.86.126 - - [17/May/2015:12:05:15 +0000] "GET /downloads/product_1 HTTP/1.1" 404 331 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 3114 +84.53.65.28 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 337 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 9036 +144.92.16.161 - - [17/May/2015:12:05:32 +0000] "GET /downloads/product_1 HTTP/1.1" 404 324 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" 9410 +50.57.209.92 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 2039 +5.83.131.103 - - [17/May/2015:12:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 14852 +5.83.131.103 - - [17/May/2015:12:05:27 +0000] "GET /downloads/product_1 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 71 +62.75.167.106 - - [17/May/2015:12:05:01 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 6439 +178.32.54.253 - - [17/May/2015:12:05:26 +0000] "GET /downloads/product_1 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8721 +91.121.161.213 - - [17/May/2015:12:05:00 +0000] "GET /downloads/product_2 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 1795 +91.234.194.89 - - [17/May/2015:12:05:11 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 8556 +37.187.238.39 - - [17/May/2015:12:05:29 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 17627 +91.239.186.133 - - [17/May/2015:12:05:38 +0000] "GET /downloads/product_2 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 10970 +87.233.156.242 - - [17/May/2015:12:05:34 +0000] "GET /downloads/product_2 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 409 +202.143.95.26 - - [17/May/2015:12:05:22 +0000] "GET /downloads/product_2 HTTP/1.1" 404 338 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 10283 +144.76.151.58 - - [17/May/2015:12:05:25 +0000] "GET /downloads/product_2 HTTP/1.1" 404 333 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 22461 +62.210.138.59 - - [17/May/2015:12:05:12 +0000] "GET /downloads/product_2 HTTP/1.1" 404 340 "-" "Debian APT-HTTP/1.3 (1.0.1ubuntu2)" 22736 +80.91.33.133 - - [17/May/2015:12:05:05 +0000] "GET /downloads/product_1 HTTP/1.1" 404 336 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 21014 +83.161.14.106 - - [17/May/2015:12:05:48 +0000] "GET /downloads/product_2 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 18047 +80.91.33.133 - - [17/May/2015:12:05:31 +0000] "GET /downloads/product_1 HTTP/1.1" 404 341 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 25206 +5.83.131.103 - - [17/May/2015:12:05:21 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 15330 +80.91.33.133 - - [17/May/2015:12:05:54 +0000] "GET /downloads/product_1 HTTP/1.1" 404 339 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.16)" 8763 +198.61.216.151 - - [17/May/2015:12:05:59 +0000] "GET /downloads/product_2 HTTP/1.1" 304 0 "-" "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.22)" 11132 +195.154.77.170 - - [17/May/2015:12:05:05 +0000] "GET /downloads/product_2 HTTP/1.1" 404 
338 "-" "Debian APT-HTTP/1.3 (0.9.7.9)" 23768 diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md index 3f397cdfbe36f..e92fcf24ef53b 100644 --- a/plugins/inputs/ethtool/README.md +++ b/plugins/inputs/ethtool/README.md @@ -1,10 +1,11 @@ # Ethtool Input Plugin -The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver +The ethtool input plugin pulls ethernet device stats. Fields pulled will depend +on the network device and driver. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Returns ethtool statistics for given interfaces [[inputs.ethtool]] ## List of interfaces to pull metrics for @@ -12,22 +13,31 @@ The ethtool input plugin pulls ethernet device stats. Fields pulled will depend ## List of interfaces to ignore when pulling metrics. # interface_exclude = ["eth1"] + + ## Some drivers declare statistics with extra whitespace, different spacing, + ## and mix cases. This list, when enabled, can be used to clean the keys. + ## Here are the current possible normalizations: + ## * snakecase: converts fooBarBaz to foo_bar_baz + ## * trim: removes leading and trailing whitespace + ## * lower: changes all capitalized letters to lowercase + ## * underscore: replaces spaces with underscores + # normalize_keys = ["snakecase", "trim", "lower", "underscore"] ``` -Interfaces can be included or ignored using +Interfaces can be included or ignored using: - `interface_include` - `interface_exclude` -Note that loopback interfaces will be automatically ignored +Note that loopback interfaces will be automatically ignored. -### Metrics: +## Metrics -Metrics are dependant on the network device and driver +Metrics are dependent on the network device and driver. -### Example Output: +## Example Output -``` -ethtool,driver=igb,host=test01,interface=mgmt0 
tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 -ethtool,driver=igb,host=test02,interface=mgmt0 
rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 +```shell +ethtool,driver=igb,host=test01,interface=mgmt0 
tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,interface_up=1i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 +ethtool,driver=igb,host=test02,interface=mgmt0 
rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,interface_up=0i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 ``` diff --git a/plugins/inputs/ethtool/ethtool.go b/plugins/inputs/ethtool/ethtool.go index 3f8f8e15618a2..a6db2d6f3e18b 100644 --- a/plugins/inputs/ethtool/ethtool.go +++ b/plugins/inputs/ethtool/ethtool.go @@ -1,11 +1,16 @@ package ethtool import ( + _ "embed" "net" "github.com/influxdata/telegraf" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type Command interface { Init() error DriverName(intf string) (string, error) @@ -20,31 +25,22 @@ type Ethtool struct { // This is the list of interface names to ignore InterfaceExclude []string `toml:"interface_exclude"` + // Normalization on the key names + NormalizeKeys []string `toml:"normalize_keys"` + Log telegraf.Logger `toml:"-"` // the ethtool command command Command } -const ( - pluginName = "ethtool" - tagInterface = "interface" - tagDriverName = "driver" - - sampleConfig = ` - ## List of interfaces to pull metrics for - # interface_include = ["eth0"] - - ## List of interfaces to ignore when pulling metrics. - # interface_exclude = ["eth1"] -` -) - -func (e *Ethtool) SampleConfig() string { +func (*Ethtool) SampleConfig() string { return sampleConfig } -// Description returns a one-sentence description on the Input -func (e *Ethtool) Description() string { - return "Returns ethtool statistics for given interfaces" -} +const ( + pluginName = "ethtool" + tagInterface = "interface" + tagDriverName = "driver" + fieldInterfaceUp = "interface_up" +) diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go index b8c9312cbe309..78962b7ae7330 100644 --- a/plugins/inputs/ethtool/ethtool_linux.go +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -1,24 +1,28 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux // +build linux package ethtool import ( "net" + "regexp" + "strings" "sync" + "github.com/pkg/errors" + ethtoolLib "github.com/safchain/ethtool" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/pkg/errors" - "github.com/safchain/ethtool" ) type CommandEthtool struct { - ethtool *ethtool.Ethtool + ethtool *ethtoolLib.Ethtool } func (e *Ethtool) Gather(acc telegraf.Accumulator) error { - // Get the list of interfaces interfaces, err := e.command.Interfaces() if err != nil { @@ -35,7 +39,6 @@ func (e *Ethtool) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, iface := range interfaces { - // Check this isn't a loop back and that its matched by the filter if (iface.Flags&net.FlagLoopback == 0) && interfaceFilter.Match(iface.Name) { wg.Add(1) @@ -59,7 +62,6 @@ func (e *Ethtool) Init() error { // Gather the stats for the interface. func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulator) { - tags := make(map[string]string) tags[tagInterface] = iface.Name @@ -80,24 +82,69 @@ func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulat return } + fields[fieldInterfaceUp] = e.interfaceUp(iface) for k, v := range stats { - fields[k] = v + fields[e.normalizeKey(k)] = v } acc.AddFields(pluginName, fields, tags) } +// normalize key string; order matters to avoid replacing whitespace with +// underscores, then trying to trim those same underscores. Likewise with +// camelcase before trying to lower case things. 
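// For example, with normalize_keys = ["snakecase", "trim", "lower", "underscore"],
// a driver key such as " PortRX " comes out as "port_rx": the snakecase step
// trims and converts the camel case first, after which trim, lower and
// underscore are no-ops (see the TestNormalizedKeys cases in ethtool_test.go
// below for more combinations).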
+func (e *Ethtool) normalizeKey(key string) string { + // must trim whitespace or this will have a leading _ + if inStringSlice(e.NormalizeKeys, "snakecase") { + key = camelCase2SnakeCase(strings.TrimSpace(key)) + } + // must occur before underscore, otherwise nothing to trim + if inStringSlice(e.NormalizeKeys, "trim") { + key = strings.TrimSpace(key) + } + if inStringSlice(e.NormalizeKeys, "lower") { + key = strings.ToLower(key) + } + if inStringSlice(e.NormalizeKeys, "underscore") { + key = strings.ReplaceAll(key, " ", "_") + } + + return key +} + +func camelCase2SnakeCase(value string) string { + matchFirstCap := regexp.MustCompile("(.)([A-Z][a-z]+)") + matchAllCap := regexp.MustCompile("([a-z0-9])([A-Z])") + + snake := matchFirstCap.ReplaceAllString(value, "${1}_${2}") + snake = matchAllCap.ReplaceAllString(snake, "${1}_${2}") + return strings.ToLower(snake) +} + +func inStringSlice(slice []string, value string) bool { + for _, item := range slice { + if item == value { + return true + } + } + + return false +} + +func (e *Ethtool) interfaceUp(iface net.Interface) bool { + return (iface.Flags & net.FlagUp) != 0 +} + func NewCommandEthtool() *CommandEthtool { return &CommandEthtool{} } func (c *CommandEthtool) Init() error { - if c.ethtool != nil { return nil } - e, err := ethtool.NewEthtool() + e, err := ethtoolLib.NewEthtool() if err == nil { c.ethtool = e } @@ -114,7 +161,6 @@ func (c *CommandEthtool) Stats(intf string) (map[string]uint64, error) { } func (c *CommandEthtool) Interfaces() ([]net.Interface, error) { - // Get the list of interfaces interfaces, err := net.Interfaces() if err != nil { @@ -125,7 +171,6 @@ func (c *CommandEthtool) Interfaces() ([]net.Interface, error) { } func init() { - inputs.Add(pluginName, func() telegraf.Input { return &Ethtool{ InterfaceInclude: []string{}, diff --git a/plugins/inputs/ethtool/ethtool_notlinux.go b/plugins/inputs/ethtool/ethtool_notlinux.go index b022e0a46bb72..ce149ecd6e69c 100644 --- a/plugins/inputs/ethtool/ethtool_notlinux.go +++ b/plugins/inputs/ethtool/ethtool_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package ethtool diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go index d281644a51ed0..e348427d05366 100644 --- a/plugins/inputs/ethtool/ethtool_test.go +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ethtool @@ -6,19 +7,21 @@ import ( "net" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/pkg/errors" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var command *Ethtool var interfaceMap map[string]*InterfaceMock type InterfaceMock struct { - Name string - DriverName string - Stat map[string]uint64 - LoopBack bool + Name string + DriverName string + Stat map[string]uint64 + LoopBack bool + InterfaceUp bool } type CommandEthtoolMock struct { @@ -30,23 +33,25 @@ func (c *CommandEthtoolMock) Init() error { return nil } -func (c *CommandEthtoolMock) DriverName(intf string) (driverName string, err error) { +func (c *CommandEthtoolMock) DriverName(intf string) (string, error) { i := c.InterfaceMap[intf] if i != nil { - driverName = i.DriverName - return + return i.DriverName, nil } - return driverName, errors.New("interface not found") + return "", errors.New("interface not found") } func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { interfaceNames := make([]net.Interface, 0) for k, v := range c.InterfaceMap { 
- - // Whether to set the flag to loopback - flag := net.FlagUp + var flag net.Flags + // When interface is up + if v.InterfaceUp { + flag |= net.FlagUp + } + // For loopback interface if v.LoopBack { - flag = net.FlagLoopback + flag |= net.FlagLoopback } // Create a dummy interface @@ -62,20 +67,19 @@ func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { return interfaceNames, nil } -func (c *CommandEthtoolMock) Stats(intf string) (stat map[string]uint64, err error) { +func (c *CommandEthtoolMock) Stats(intf string) (map[string]uint64, error) { i := c.InterfaceMap[intf] if i != nil { - stat = i.Stat - return + return i.Stat, nil } - return stat, errors.New("interface not found") + return nil, errors.New("interface not found") } func setup() { - interfaceMap = make(map[string]*InterfaceMock) eth1Stat := map[string]uint64{ + "interface_up": 1, "port_rx_1024_to_15xx": 25167245, "port_rx_128_to_255": 1573526387, "port_rx_15xx_to_jumbo": 137819058, @@ -173,10 +177,11 @@ func setup() { "tx_tso_fallbacks": 0, "tx_tso_long_headers": 0, } - eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false} + eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false, true} interfaceMap[eth1.Name] = eth1 eth2Stat := map[string]uint64{ + "interface_up": 0, "port_rx_1024_to_15xx": 11529312, "port_rx_128_to_255": 1868952037, "port_rx_15xx_to_jumbo": 130339387, @@ -274,14 +279,14 @@ func setup() { "tx_tso_fallbacks": 0, "tx_tso_long_headers": 0, } - eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false} + eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false, false} interfaceMap[eth2.Name] = eth2 // dummy loopback including dummy stat to ensure that the ignore feature is working lo0Stat := map[string]uint64{ "dummy": 0, } - lo0 := &InterfaceMock{"lo0", "", lo0Stat, true} + lo0 := &InterfaceMock{"lo0", "", lo0Stat, true, true} interfaceMap[lo0.Name] = lo0 c := &CommandEthtoolMock{interfaceMap} @@ -301,13 +306,12 @@ func toStringMapInterface(in map[string]uint64) map[string]interface{} { } func TestGather(t *testing.T) { - setup() var acc testutil.Accumulator err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 2) + require.NoError(t, err) + require.Len(t, acc.Metrics, 2) expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) expectedTagsEth1 := map[string]string{ @@ -324,15 +328,14 @@ func TestGather(t *testing.T) { } func TestGatherIncludeInterfaces(t *testing.T) { - setup() var acc testutil.Accumulator command.InterfaceInclude = append(command.InterfaceInclude, "eth1") err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 1) + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) // Should contain eth1 expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) @@ -352,15 +355,14 @@ func TestGatherIncludeInterfaces(t *testing.T) { } func TestGatherIgnoreInterfaces(t *testing.T) { - setup() var acc testutil.Accumulator command.InterfaceExclude = append(command.InterfaceExclude, "eth1") err := command.Gather(&acc) - assert.NoError(t, err) - assert.Len(t, acc.Metrics, 1) + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) // Should not contain eth1 expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) @@ -377,5 +379,120 @@ func TestGatherIgnoreInterfaces(t *testing.T) { "driver": "driver1", } acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) +} +type TestCase struct { + normalization []string + stats map[string]uint64 + expectedFields 
map[string]uint64 +} + +func TestNormalizedKeys(t *testing.T) { + cases := []TestCase{ + { + normalization: []string{"underscore"}, + stats: map[string]uint64{ + "port rx": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "_Port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower"}, + stats: map[string]uint64{ + "Port rx": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "_port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower", "trim"}, + stats: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"underscore", "lower", "snakecase", "trim"}, + stats: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{"snakecase"}, + stats: map[string]uint64{ + " PortRX ": 1, + " PortTX": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + "port_rx": 1, + "port_tx": 0, + "interface_up": 0, + }, + }, + { + normalization: []string{}, + stats: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + expectedFields: map[string]uint64{ + " Port RX ": 1, + " Port_tx": 0, + "interface_up": 0, + }, + }, + } + for _, c := range cases { + eth0 := &InterfaceMock{"eth0", "e1000e", c.stats, false, true} + expectedTags := map[string]string{ + "interface": eth0.Name, + "driver": eth0.DriverName, + } + + interfaceMap = make(map[string]*InterfaceMock) + interfaceMap[eth0.Name] = eth0 + + cmd := &CommandEthtoolMock{interfaceMap} + command = &Ethtool{ + InterfaceInclude: []string{}, + InterfaceExclude: []string{}, + NormalizeKeys: c.normalization, + command: cmd, + } + + var acc testutil.Accumulator + err := command.Gather(&acc) + + require.NoError(t, err) + require.Len(t, acc.Metrics, 1) + + acc.AssertContainsFields(t, pluginName, toStringMapInterface(c.expectedFields)) + acc.AssertContainsTaggedFields(t, pluginName, toStringMapInterface(c.expectedFields), expectedTags) + } } diff --git a/plugins/inputs/ethtool/sample.conf b/plugins/inputs/ethtool/sample.conf new file mode 100644 index 0000000000000..54913bca1cc7b --- /dev/null +++ b/plugins/inputs/ethtool/sample.conf @@ -0,0 +1,16 @@ +# Returns ethtool statistics for given interfaces +[[inputs.ethtool]] + ## List of interfaces to pull metrics for + # interface_include = ["eth0"] + + ## List of interfaces to ignore when pulling metrics. + # interface_exclude = ["eth1"] + + ## Some drivers declare statistics with extra whitespace, different spacing, + ## and mix cases. This list, when enabled, can be used to clean the keys. 
+ ## Here are the current possible normalizations: + ## * snakecase: converts fooBarBaz to foo_bar_baz + ## * trim: removes leading and trailing whitespace + ## * lower: changes all capitalized letters to lowercase + ## * underscore: replaces spaces with underscores + # normalize_keys = ["snakecase", "trim", "lower", "underscore"] diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md index 06c43cf318d39..8cbd3370121de 100644 --- a/plugins/inputs/eventhub_consumer/README.md +++ b/plugins/inputs/eventhub_consumer/README.md @@ -2,24 +2,26 @@ This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. -### IoT Hub Setup +## IoT Hub Setup The main focus for development of this plugin is Azure IoT hub: -1. Create an Azure IoT Hub by following any of the guides provided here: https://docs.microsoft.com/en-us/azure/iot-hub/ -2. Create a device, for example a [simulated Raspberry Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started) -3. The connection string needed for the plugin is located under *Shared access policies*, both the *iothubowner* and *service* policies should work +1. Create an Azure IoT Hub by following any of the guides provided here: [Azure + IoT Hub](https://docs.microsoft.com/en-us/azure/iot-hub/) +2. Create a device, for example a [simulated Raspberry + Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started) +3. The connection string needed for the plugin is located under *Shared access + policies*, both the *iothubowner* and *service* policies should work -### Configuration +## Configuration -```toml +```toml @sample.conf +# Azure Event Hubs service input plugin [[inputs.eventhub_consumer]] ## The default behavior is to create a new Event Hub client from environment variables. ## This requires one of the following sets of environment variables to be set: ## ## 1) Expected Environment Variables: - ## - "EVENTHUB_NAMESPACE" - ## - "EVENTHUB_NAME" ## - "EVENTHUB_CONNECTION_STRING" ## ## 2) Expected Environment Variables: @@ -28,8 +30,17 @@ The main focus for development of this plugin is Azure IoT hub: ## - "EVENTHUB_KEY_NAME" ## - "EVENTHUB_KEY_VALUE" + ## 3) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "AZURE_TENANT_ID" + ## - "AZURE_CLIENT_ID" + ## - "AZURE_CLIENT_SECRET" + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. ## This can either be the associated environment variable or hard coded directly. + ## If this option is uncommented, environment variables will be ignored. + ## Connection string should contain EventHubName (EntityPath) # connection_string = "" ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister @@ -91,7 +102,7 @@ The main focus for development of this plugin is Azure IoT hub: data_format = "influx" ``` -#### Environment Variables +### Environment Variables [Full documentation of the available environment variables][envvar]. 
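The connection-string precedence described above (an uncommented `connection_string` makes the plugin ignore the environment variables) corresponds to the client construction in the plugin's `Init()`, shown in the `eventhub_consumer.go` diff below. As a minimal standalone sketch of that selection logic, assuming the `github.com/Azure/azure-event-hubs-go/v3` client the plugin uses (the `newHub` helper is illustrative, not part of the plugin):

```go
package main

import (
	"context"
	"log"

	eventhub "github.com/Azure/azure-event-hubs-go/v3"
)

// newHub mirrors the plugin's client selection: an explicit connection
// string wins and the environment variables are ignored; otherwise the
// client is constructed from the EVENTHUB_*/AZURE_* variables above.
func newHub(connectionString string) (*eventhub.Hub, error) {
	if connectionString != "" {
		// The connection string must contain the EventHubName (EntityPath).
		return eventhub.NewHubFromConnectionString(connectionString)
	}
	return eventhub.NewHubFromEnvironment()
}

func main() {
	// Empty string: fall back to one of the environment-variable sets.
	hub, err := newHub("")
	if err != nil {
		log.Fatalf("creating event hub client: %v", err)
	}
	defer hub.Close(context.Background())
}
```
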
diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go index 17092de3217eb..a96a8c06bbf53 100644 --- a/plugins/inputs/eventhub_consumer/eventhub_consumer.go +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -1,19 +1,26 @@ -package eventhub +//go:generate ../../../tools/readme_config_includer/generator +package eventhub_consumer import ( "context" + _ "embed" "fmt" "sync" "time" - eventhub "github.com/Azure/azure-event-hubs-go/v3" + eventhubClient "github.com/Azure/azure-event-hubs-go/v3" "github.com/Azure/azure-event-hubs-go/v3/persist" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( defaultMaxUndeliveredMessages = 1000 ) @@ -54,7 +61,7 @@ type EventHub struct { Log telegraf.Logger `toml:"-"` // Azure - hub *eventhub.Hub + hub *eventhubClient.Hub cancel context.CancelFunc wg sync.WaitGroup @@ -62,90 +69,8 @@ type EventHub struct { in chan []telegraf.Metric } -// SampleConfig is provided here func (*EventHub) SampleConfig() string { - return ` - ## The default behavior is to create a new Event Hub client from environment variables. - ## This requires one of the following sets of environment variables to be set: - ## - ## 1) Expected Environment Variables: - ## - "EVENTHUB_NAMESPACE" - ## - "EVENTHUB_NAME" - ## - "EVENTHUB_CONNECTION_STRING" - ## - ## 2) Expected Environment Variables: - ## - "EVENTHUB_NAMESPACE" - ## - "EVENTHUB_NAME" - ## - "EVENTHUB_KEY_NAME" - ## - "EVENTHUB_KEY_VALUE" - - ## Uncommenting the option below will create an Event Hub client based solely on the connection string. - ## This can either be the associated environment variable or hard coded directly. - # connection_string = "" - - ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister - # persistence_dir = "" - - ## Change the default consumer group - # consumer_group = "" - - ## By default the event hub receives all messages present on the broker, alternative modes can be set below. - ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). - ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). - # from_timestamp = - # latest = true - - ## Set a custom prefetch count for the receiver(s) - # prefetch_count = 1000 - - ## Add an epoch to the receiver(s) - # epoch = 0 - - ## Change to set a custom user agent, "telegraf" is used by default - # user_agent = "telegraf" - - ## To consume from a specific partition, set the partition_ids option. - ## An empty array will result in receiving from all partitions. - # partition_ids = ["0","1"] - - ## Max undelivered messages - # max_undelivered_messages = 1000 - - ## Set either option below to true to use a system property as timestamp. - ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. - ## It is recommended to use this setting when the data itself has no timestamp. - # enqueued_time_as_ts = true - # iot_hub_enqueued_time_as_ts = true - - ## Tags or fields to create from keys present in the application property bag. - ## These could for example be set by message enrichments in Azure IoT Hub. 
- # application_property_tags = [] - # application_property_fields = [] - - ## Tag or field name to use for metadata - ## By default all metadata is disabled - # sequence_number_field = "SequenceNumber" - # enqueued_time_field = "EnqueuedTime" - # offset_field = "Offset" - # partition_id_tag = "PartitionID" - # partition_key_tag = "PartitionKey" - # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" - # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" - # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" - # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" - # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" - ` -} - -// Description of the plugin -func (*EventHub) Description() string { - return "Azure Event Hubs service input plugin" + return sampleConfig } // SetParser sets the parser @@ -165,7 +90,7 @@ func (e *EventHub) Init() (err error) { } // Set hub options - hubOpts := []eventhub.HubOption{} + hubOpts := []eventhubClient.HubOption{} if e.PersistenceDir != "" { persister, err := persist.NewFilePersister(e.PersistenceDir) @@ -173,20 +98,20 @@ func (e *EventHub) Init() (err error) { return err } - hubOpts = append(hubOpts, eventhub.HubWithOffsetPersistence(persister)) + hubOpts = append(hubOpts, eventhubClient.HubWithOffsetPersistence(persister)) } if e.UserAgent != "" { - hubOpts = append(hubOpts, eventhub.HubWithUserAgent(e.UserAgent)) + hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(e.UserAgent)) } else { - hubOpts = append(hubOpts, eventhub.HubWithUserAgent(internal.ProductToken())) + hubOpts = append(hubOpts, eventhubClient.HubWithUserAgent(internal.ProductToken())) } // Create event hub connection if e.ConnectionString != "" { - e.hub, err = eventhub.NewHubFromConnectionString(e.ConnectionString, hubOpts...) + e.hub, err = eventhubClient.NewHubFromConnectionString(e.ConnectionString, hubOpts...) } else { - e.hub, err = eventhub.NewHubFromEnvironment(hubOpts...) + e.hub, err = eventhubClient.NewHubFromEnvironment(hubOpts...) } return err @@ -207,11 +132,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { }() // Configure receiver options - receiveOpts, err := e.configureReceiver() - if err != nil { - return err - } - + receiveOpts := e.configureReceiver() partitions := e.PartitionIDs if len(e.PartitionIDs) == 0 { @@ -224,7 +145,7 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { } for _, partitionID := range partitions { - _, err = e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...) + _, err := e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...) 
if err != nil { return fmt.Errorf("creating receiver for partition %q: %v", partitionID, err) } @@ -233,34 +154,34 @@ func (e *EventHub) Start(acc telegraf.Accumulator) error { return nil } -func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) { - receiveOpts := []eventhub.ReceiveOption{} +func (e *EventHub) configureReceiver() []eventhubClient.ReceiveOption { + receiveOpts := []eventhubClient.ReceiveOption{} if e.ConsumerGroup != "" { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithConsumerGroup(e.ConsumerGroup)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithConsumerGroup(e.ConsumerGroup)) } if !e.FromTimestamp.IsZero() { - receiveOpts = append(receiveOpts, eventhub.ReceiveFromTimestamp(e.FromTimestamp)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveFromTimestamp(e.FromTimestamp)) } else if e.Latest { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithLatestOffset()) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithLatestOffset()) } if e.PrefetchCount != 0 { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithPrefetchCount(e.PrefetchCount)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithPrefetchCount(e.PrefetchCount)) } if e.Epoch != 0 { - receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch)) + receiveOpts = append(receiveOpts, eventhubClient.ReceiveWithEpoch(e.Epoch)) } - return receiveOpts, nil + return receiveOpts } // OnMessage handles an Event. When this function returns without error the // Event is immediately accepted and the offset is updated. If an error is // returned the Event is marked for redelivery. -func (e *EventHub) onMessage(ctx context.Context, event *eventhub.Event) error { +func (e *EventHub) onMessage(ctx context.Context, event *eventhubClient.Event) error { metrics, err := e.createMetrics(event) if err != nil { return err @@ -342,7 +263,7 @@ func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric { } // CreateMetrics returns the Metrics from the Event. -func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { +func (e *EventHub) createMetrics(event *eventhubClient.Event) ([]telegraf.Metric, error) { metrics, err := e.parser.Parse(event.Data) if err != nil { return nil, err diff --git a/plugins/inputs/eventhub_consumer/sample.conf b/plugins/inputs/eventhub_consumer/sample.conf new file mode 100644 index 0000000000000..924d6b73e21fc --- /dev/null +++ b/plugins/inputs/eventhub_consumer/sample.conf @@ -0,0 +1,84 @@ +# Azure Event Hubs service input plugin +[[inputs.eventhub_consumer]] + ## The default behavior is to create a new Event Hub client from environment variables. + ## This requires one of the following sets of environment variables to be set: + ## + ## 1) Expected Environment Variables: + ## - "EVENTHUB_CONNECTION_STRING" + ## + ## 2) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_KEY_NAME" + ## - "EVENTHUB_KEY_VALUE" + + ## 3) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "AZURE_TENANT_ID" + ## - "AZURE_CLIENT_ID" + ## - "AZURE_CLIENT_SECRET" + + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. + ## This can either be the associated environment variable or hard coded directly. + ## If this option is uncommented, environment variables will be ignored. 
+  ## Connection string should contain EventHubName (EntityPath)
+  # connection_string = ""
+
+  ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister
+  # persistence_dir = ""
+
+  ## Change the default consumer group
+  # consumer_group = ""
+
+  ## By default the event hub receives all messages present on the broker, alternative modes can be set below.
+  ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339).
+  ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run).
+  # from_timestamp =
+  # latest = true
+
+  ## Set a custom prefetch count for the receiver(s)
+  # prefetch_count = 1000
+
+  ## Add an epoch to the receiver(s)
+  # epoch = 0
+
+  ## Change to set a custom user agent, "telegraf" is used by default
+  # user_agent = "telegraf"
+
+  ## To consume from a specific partition, set the partition_ids option.
+  ## An empty array will result in receiving from all partitions.
+  # partition_ids = ["0","1"]
+
+  ## Max undelivered messages
+  # max_undelivered_messages = 1000
+
+  ## Set either option below to true to use a system property as timestamp.
+  ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime.
+  ## It is recommended to use this setting when the data itself has no timestamp.
+  # enqueued_time_as_ts = true
+  # iot_hub_enqueued_time_as_ts = true
+
+  ## Tags or fields to create from keys present in the application property bag.
+  ## These could for example be set by message enrichments in Azure IoT Hub.
+  # application_property_tags = []
+  # application_property_fields = []
+
+  ## Tag or field name to use for metadata
+  ## By default all metadata is disabled
+  # sequence_number_field = "SequenceNumber"
+  # enqueued_time_field = "EnqueuedTime"
+  # offset_field = "Offset"
+  # partition_id_tag = "PartitionID"
+  # partition_key_tag = "PartitionKey"
+  # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID"
+  # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID"
+  # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod"
+  # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID"
+  # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime"
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "influx"
diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/example/README.md
similarity index 73%
rename from plugins/inputs/EXAMPLE_README.md
rename to plugins/inputs/example/README.md
index 6b86615b0e6a8..7b2f037ba2289 100644
--- a/plugins/inputs/EXAMPLE_README.md
+++ b/plugins/inputs/example/README.md
@@ -4,32 +4,32 @@ The `example` plugin gathers metrics about example things. This description
 explains at a high level what the plugin does and provides links to where
 additional information can be found.
 
-Telegraf minimum version: Telegraf x.x
-Plugin minimum tested version: x.x
+Telegraf minimum version: Telegraf x.x Plugin minimum tested version: x.x
 
-### Configuration
+## Configuration
 
-This section contains the default TOML to configure the plugin. You can
-generate it using `telegraf --usage <plugin-name>`.
-
-```toml
+```toml @sample.conf
+# This is an example plugin
 [[inputs.example]]
   example_option = "example_value"
 ```
 
-#### example_option
+Running `telegraf --usage <plugin-name>` also gives the sample TOML
+configuration.
+
+### example_option
 
-A more in depth description of an option can be provided here, but only do so
-if the option cannot be fully described in the sample config.
+A more in-depth description of an option can be provided here, but only do so if
+the option cannot be fully described in the sample config.
 
-### Metrics
+## Metrics
 
-Here you should add an optional description and links to where the user can
-get more information about the measurements.
+Here you should add an optional description and links to where the user can get
+more information about the measurements.
 
-If the output is determined dynamically based on the input source, or there
-are more metrics than can reasonably be listed, describe how the input is
-mapped to the output.
+If the output is determined dynamically based on the input source, or there are
+more metrics than can reasonably be listed, describe how the input is mapped to
+the output.
 
 - measurement1
   - tags:
@@ -39,7 +39,7 @@ mapped to the output.
     - field1 (type, unit)
     - field2 (float, percent)
 
-+ measurement2
+- measurement2
   - tags:
     - tag3
   - fields:
@@ -49,29 +49,30 @@ mapped to the output.
     - field6 (float)
     - field7 (boolean)
 
-### Sample Queries
+## Sample Queries
 
 This section can contain some useful InfluxDB queries that can be used to get
 started with the plugin or to generate dashboards. For each query listed,
 describe at a high level what data is returned.
 
 Get the max, mean, and min for the measurement in the last hour:
-```
+
+```sql
 SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag
 ```
 
-### Troubleshooting
+## Troubleshooting
 
 This optional section can provide basic troubleshooting steps that a user can
 perform.
 
-### Example Output
+## Example
 
 This section shows example output in Line Protocol format. You can often use
 `telegraf --input-filter <plugin-name> --test` or use the `file` output to get
 this information.
 
-```
+```shell
 measurement1,tag1=foo,tag2=bar field1=1i,field2=2.1 1453831884664956455
 measurement2,tag1=foo,tag2=bar,tag3=baz field3=1i 1453831884664956455
 ```
diff --git a/plugins/inputs/example/example.go b/plugins/inputs/example/example.go
new file mode 100644
index 0000000000000..83e0a51d70242
--- /dev/null
+++ b/plugins/inputs/example/example.go
@@ -0,0 +1,115 @@
+//go:generate ../../../tools/readme_config_includer/generator
+package example
+
+import (
+	_ "embed"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/config"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
+// Example struct should be named the same as the Plugin
+type Example struct {
+	// Example for a mandatory option to set a tag
+	DeviceName string `toml:"device_name"`
+
+	// Config options are converted to the correct type automatically
+	NumberFields int64 `toml:"number_fields"`
+
+	// We can also use booleans and have diverging names between user-configuration options and struct members
+	EnableRandomVariable bool `toml:"enable_random"`
+
+	// Example of passing a duration option allowing the format of e.g. "100ms", "5m" or "1h"
+	Timeout config.Duration `toml:"timeout"`
+
+	// Telegraf logging facility
+	// The exact name is important to allow automatic initialization by telegraf.
+	Log telegraf.Logger `toml:"-"`
+
+	// This is a non-exported internal state.
+ count int64 +} + +func (*Example) SampleConfig() string { + return sampleConfig +} + +// Init can be implemented to do one-time processing stuff like initializing variables +func (m *Example) Init() error { + // Check your options according to your requirements + if m.DeviceName == "" { + return fmt.Errorf("device name cannot be empty") + } + + // Set your defaults. + // Please note: In golang all fields are initialzed to their nil value, so you should not + // set these fields if the nil value is what you want (e.g. for booleans). + if m.NumberFields < 1 { + m.Log.Debugf("Setting number of fields to default from invalid value %d", m.NumberFields) + m.NumberFields = 2 + } + + // Initialze your internal states + m.count = 1 + + return nil +} + +// Gather defines what data the plugin will gather. +func (m *Example) Gather(acc telegraf.Accumulator) error { + // Imagine some completely arbitrary error occuring here + if m.NumberFields > 10 { + return fmt.Errorf("too many fields") + } + + // For illustration we gather three metrics in one go + for run := 0; run < 3; run++ { + // Imagine an error occurs here but you want to keep the other + // metrics, then you cannot simply return, as this would drop + // all later metrics. Simply accumulate errors in this case + // and ignore the metric. + if m.EnableRandomVariable && m.DeviceName == "flappy" && run > 1 { + acc.AddError(fmt.Errorf("too many runs for random values")) + continue + } + + // Construct the fields + fields := map[string]interface{}{"count": m.count} + for i := int64(1); i < m.NumberFields; i++ { + name := fmt.Sprintf("field%d", i) + value := 0.0 + if m.EnableRandomVariable { + value = rand.Float64() + } + fields[name] = value + } + + // Construct the tags + tags := map[string]string{"device": m.DeviceName} + + // Add the metric with the current timestamp + acc.AddFields("example", fields, tags) + + m.count++ + } + + return nil +} + +// Register the plugin +func init() { + inputs.Add("example", func() telegraf.Input { + return &Example{ + // Set the default timeout here to distinguish it from the user setting it to zero + Timeout: config.Duration(100 * time.Millisecond), + } + }) +} diff --git a/plugins/inputs/example/example_test.go b/plugins/inputs/example/example_test.go new file mode 100644 index 0000000000000..1c3b4b0a5e66e --- /dev/null +++ b/plugins/inputs/example/example_test.go @@ -0,0 +1,439 @@ +package example + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +// This file should contain a set of unit-tests to cover your plugin. This will ease +// spotting bugs and mistakes when later modifying or extending the functionality. +// To do so, please write one 'TestXYZ' function per 'case' e.g. default init, +// things that should fail or expected values from a mockup. + +func TestInitDefault(t *testing.T) { + // This test should succeed with the default initialization. + + // Use whatever you use in the init() function plus the mandatory options. + // ATTENTION: Always initialze the "Log" as you will get SIGSEGV otherwise. 
+	plugin := &Example{
+		DeviceName: "test",
+		Timeout:    config.Duration(100 * time.Millisecond),
+		Log:        testutil.Logger{},
+	}
+
+	// Test the initialization succeeds
+	require.NoError(t, plugin.Init())
+
+	// Also test that default values are set correctly
+	require.Equal(t, config.Duration(100*time.Millisecond), plugin.Timeout)
+	require.Equal(t, "test", plugin.DeviceName)
+	require.Equal(t, int64(2), plugin.NumberFields)
+}
+
+func TestInitFail(t *testing.T) {
+	// You should also test for your safety nets to work, i.e. you get errors for
+	// invalid configuration-option values. So check your error paths in Init()
+	// and check if you reach them
+
+	// We set up a table-test here to specify "setting" - "expected error" values.
+	// Even though it seems overkill here for the example plugin, we reuse this structure
+	// later for checking the metrics
+	tests := []struct {
+		name     string
+		plugin   *Example
+		expected string
+	}{
+		{
+			name:     "all empty",
+			plugin:   &Example{},
+			expected: "device name cannot be empty",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Always initialize the logger to avoid SIGSEGV. This is done automatically by
+			// telegraf during normal operation.
+			tt.plugin.Log = testutil.Logger{}
+			err := tt.plugin.Init()
+			require.Error(t, err)
+			require.EqualError(t, err, tt.expected)
+		})
+	}
+}
+
+func TestFixedValue(t *testing.T) {
+	// You can organize the test e.g. by operation mode (like we do here random vs. fixed), by features or
+	// by different metrics gathered. Please choose the partitioning most suited for your plugin
+
+	// We again set up a table-test here to specify "setting" - "expected output metric" pairs.
+	tests := []struct {
+		name     string
+		plugin   *Example
+		expected []telegraf.Metric
+	}{
+		{
+			name: "count only",
+			plugin: &Example{
+				DeviceName:   "test",
+				NumberFields: 1,
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count": 1,
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count": 2,
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count": 3,
+					},
+					time.Unix(0, 0),
+				),
+			},
+		},
+		{
+			name: "default settings",
+			plugin: &Example{
+				DeviceName: "test",
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count":  1,
+						"field1": float64(0),
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count":  2,
+						"field1": float64(0),
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count":  3,
+						"field1": float64(0),
+					},
+					time.Unix(0, 0),
+				),
+			},
+		},
+		{
+			name: "more fields",
+			plugin: &Example{
+				DeviceName:   "test",
+				NumberFields: 4,
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count":  1,
+						"field1": float64(0),
+						"field2": float64(0),
+						"field3": float64(0),
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count":  2,
+						"field1": float64(0),
+						"field2": float64(0),
+						"field3": float64(0),
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "test",
+					},
+					map[string]interface{}{
+						"count":  3,
+						"field1": float64(0),
+						"field2": float64(0),
+						"field3": float64(0),
+					},
+					time.Unix(0, 0),
+				),
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var acc testutil.Accumulator
+
+			tt.plugin.Log = testutil.Logger{}
+			require.NoError(t, tt.plugin.Init())
+
+			// Call gather and check no error occurs. In case you use acc.AddError() somewhere
+			// in your code, it is not sufficient to only check the return value of Gather().
+			require.NoError(t, tt.plugin.Gather(&acc))
+			require.Len(t, acc.Errors, 0, "found errors accumulated by acc.AddError()")
+
+			// Wait for the expected number of metrics to avoid flaky tests due to
+			// race conditions.
+			acc.Wait(len(tt.expected))
+
+			// Compare the metrics in a convenient way. Here we ignore
+			// the metric time during comparison as we cannot inject the time
+			// during test. For more comparison options check testutil package.
+			testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
+		})
+	}
+}
+
+func TestRandomValue(t *testing.T) {
+	// Sometimes, you cannot know the exact outcome of the gather cycle, e.g. if the gathering involves random data.
+	// However, you should check the result nevertheless, applying as many conditions as you can.
+
+	// We again set up a table-test here to specify "setting" - "expected output metric" pairs.
+	tests := []struct {
+		name     string
+		plugin   *Example
+		template telegraf.Metric
+	}{
+		{
+			name: "count only",
+			plugin: &Example{
+				DeviceName:           "test",
+				NumberFields:         1,
+				EnableRandomVariable: true,
+			},
+			template: testutil.MustMetric(
+				"example",
+				map[string]string{
+					"device": "test",
+				},
+				map[string]interface{}{
+					"count": 1,
+				},
+				time.Unix(0, 0),
+			),
+		},
+		{
+			name: "default settings",
+			plugin: &Example{
+				DeviceName:           "test",
+				EnableRandomVariable: true,
+			},
+			template: testutil.MustMetric(
+				"example",
+				map[string]string{
+					"device": "test",
+				},
+				map[string]interface{}{
+					"count":  1,
+					"field1": float64(0),
+				},
+				time.Unix(0, 0),
+			),
+		},
+		{
+			name: "more fields",
+			plugin: &Example{
+				DeviceName:           "test",
+				NumberFields:         4,
+				EnableRandomVariable: true,
+			},
+			template: testutil.MustMetric(
+				"example",
+				map[string]string{
+					"device": "test",
+				},
+				map[string]interface{}{
+					"count":  1,
+					"field1": float64(0),
+					"field2": float64(0),
+					"field3": float64(0),
+				},
+				time.Unix(0, 0),
+			),
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var acc testutil.Accumulator
+
+			tt.plugin.Log = testutil.Logger{}
+			require.NoError(t, tt.plugin.Init())
+
+			// Call gather and check no error occurs. In case you use acc.AddError() somewhere
+			// in your code, it is not sufficient to only check the return value of Gather().
+			require.NoError(t, tt.plugin.Gather(&acc))
+			require.Len(t, acc.Errors, 0, "found errors accumulated by acc.AddError()")
+
+			// Wait for the expected number of metrics to avoid flaky tests due to
+			// race conditions.
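+			// acc.Wait(n) blocks until the accumulator has received at least
+			// n metrics, so the gathered values are guaranteed to be present
+			// before the per-metric checks below run.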
+			acc.Wait(3)
+
+			// Compare all aspects of the metric that are known to you
+			for i, m := range acc.GetTelegrafMetrics() {
+				require.Equal(t, m.Name(), tt.template.Name())
+				require.Equal(t, m.Tags(), tt.template.Tags())
+
+				// Check if all expected fields are there
+				fields := m.Fields()
+				for k := range tt.template.Fields() {
+					if k == "count" {
+						require.Equal(t, fields["count"], int64(i+1))
+						continue
+					}
+					_, found := fields[k]
+					require.Truef(t, found, "field %q not found", k)
+				}
+			}
+		})
+	}
+}
+
+func TestGatherFail(t *testing.T) {
+	// You should also test for error conditions in your Gather() method. Try to cover all error paths.
+
+	// We again set up a table-test here to specify "setting" - "expected error" pairs.
+	tests := []struct {
+		name     string
+		plugin   *Example
+		expected string
+	}{
+		{
+			name: "too many fields",
+			plugin: &Example{
+				DeviceName:   "test",
+				NumberFields: 11,
+			},
+			expected: "too many fields",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var acc testutil.Accumulator
+
+			tt.plugin.Log = testutil.Logger{}
+			require.NoError(t, tt.plugin.Init())
+
+			err := tt.plugin.Gather(&acc)
+			require.Error(t, err)
+			require.EqualError(t, err, tt.expected)
+		})
+	}
+}
+
+func TestRandomValueFailPartial(t *testing.T) {
+	// You should also test for error conditions in your Gather() with partial output. This is required when
+	// using acc.AddError() as Gather() might succeed (return nil) but there are some metrics missing.
+
+	// We again set up a table-test here to specify "setting" - "expected output metric" and "errors".
+	tests := []struct {
+		name        string
+		plugin      *Example
+		expected    []telegraf.Metric
+		expectedErr string
+	}{
+		{
+			name: "flappy gather",
+			plugin: &Example{
+				DeviceName:           "flappy",
+				NumberFields:         1,
+				EnableRandomVariable: true,
+			},
+			expected: []telegraf.Metric{
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "flappy",
+					},
+					map[string]interface{}{
+						"count": 1,
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					"example",
+					map[string]string{
+						"device": "flappy",
+					},
+					map[string]interface{}{
+						"count": 2,
+					},
+					time.Unix(0, 0),
+				),
+			},
+			expectedErr: "too many runs for random values",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var acc testutil.Accumulator
+
+			tt.plugin.Log = testutil.Logger{}
+			require.NoError(t, tt.plugin.Init())
+
+			// Call gather and check no error occurs. However, we expect an error accumulated by acc.AddError()
+			require.NoError(t, tt.plugin.Gather(&acc))
+
+			// Wait for the expected number of metrics to avoid flaky tests due to
+			// race conditions.
+			acc.Wait(len(tt.expected))
+
+			// Check the accumulated errors
+			require.Len(t, acc.Errors, 1)
+			require.EqualError(t, acc.Errors[0], tt.expectedErr)
+
+			// Compare the expected partial metrics.
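+			// testutil.IgnoreTime() excludes timestamps from the comparison;
+			// the plugin stamps metrics with the wall clock at gather time, so
+			// exact times cannot be predicted in a test.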
+ testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/example/sample.conf b/plugins/inputs/example/sample.conf new file mode 100644 index 0000000000000..4224180f2aad1 --- /dev/null +++ b/plugins/inputs/example/sample.conf @@ -0,0 +1,3 @@ +# This is an example plugin +[[inputs.example]] + example_option = "example_value" diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 4e3d7245422d2..b01455b73576f 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -1,13 +1,15 @@ # Exec Input Plugin -The `exec` plugin executes all the `commands` in parallel on every interval and parses metrics from -their output in any one of the accepted [Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +The `exec` plugin executes all the `commands` in parallel on every interval and +parses metrics from their output in any one of the accepted [Input Data +Formats](../../../docs/DATA_FORMATS_INPUT.md). This plugin can be used to poll for custom metrics from any source. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Read metrics from one or more commands that can output to stdout [[inputs.exec]] ## Commands array commands = [ @@ -16,6 +18,12 @@ This plugin can be used to poll for custom metrics from any source. "/tmp/collect_*.sh" ] + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. "KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + ## Timeout for each command to complete. timeout = "5s" @@ -32,15 +40,19 @@ This plugin can be used to poll for custom metrics from any source. Glob patterns in the `command` option are matched on every run, so adding new scripts that match the pattern will cause them to be picked up immediately. -### Example: +## Example + +This script produces static values, since no timestamp is specified the values +are at the current time. -This script produces static values, since no timestamp is specified the values are at the current time. ```sh #!/bin/sh echo 'example,tag1=a,tag2=b i=42i,j=43i,k=44i' ``` -It can be paired with the following configuration and will be run at the `interval` of the agent. +It can be paired with the following configuration and will be run at the +`interval` of the agent. + ```toml [[inputs.exec]] commands = ["sh /tmp/test.sh"] @@ -48,18 +60,19 @@ It can be paired with the following configuration and will be run at the `interv data_format = "influx" ``` -### Common Issues: +## Common Issues -#### My script works when I run it by hand, but not when Telegraf is running as a service. +### My script works when I run it by hand, but not when Telegraf is running as a service -This may be related to the Telegraf service running as a different user. The +This may be related to the Telegraf service running as a different user. The official packages run Telegraf as the `telegraf` user and group on Linux systems. -#### With a PowerShell on Windows, the output of the script appears to be truncated. 
+### With a PowerShell on Windows, the output of the script appears to be truncated You may need to set a variable in your script to increase the number of columns available for output: -``` + +```shell $host.UI.RawUI.BufferSize = new-object System.Management.Automation.Host.Size(1024,50) ``` diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index cb4420b0f246f..5fbca698cd5cd 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -1,50 +1,40 @@ +//go:generate ../../../tools/readme_config_includer/generator package exec import ( "bytes" + _ "embed" "fmt" - "os/exec" + "io" + "os" + osExec "os/exec" "path/filepath" "runtime" "strings" "sync" "time" + "github.com/kballard/go-shellquote" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/nagios" - "github.com/kballard/go-shellquote" ) -const sampleConfig = ` - ## Commands array - commands = [ - "/tmp/test.sh", - "/usr/bin/mycollector --foo=bar", - "/tmp/collect_*.sh" - ] - - ## Timeout for each command to complete. - timeout = "5s" - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` - -const MaxStderrBytes = 512 +const MaxStderrBytes int = 512 type Exec struct { - Commands []string - Command string - Timeout internal.Duration + Commands []string `toml:"commands"` + Command string `toml:"command"` + Environment []string `toml:"environment"` + Timeout config.Duration `toml:"timeout"` parser parsers.Parser @@ -55,26 +45,31 @@ type Exec struct { func NewExec() *Exec { return &Exec{ runner: CommandRunner{}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } type Runner interface { - Run(string, time.Duration) ([]byte, []byte, error) + Run(string, []string, time.Duration) ([]byte, []byte, error) } type CommandRunner struct{} func (c CommandRunner) Run( command string, + environments []string, timeout time.Duration, ) ([]byte, []byte, error) { - split_cmd, err := shellquote.Split(command) - if err != nil || len(split_cmd) == 0 { + splitCmd, err := shellquote.Split(command) + if err != nil || len(splitCmd) == 0 { return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err) } - cmd := exec.Command(split_cmd[0], split_cmd[1:]...) + cmd := osExec.Command(splitCmd[0], splitCmd[1:]...) + + if len(environments) > 0 { + cmd.Env = append(os.Environ(), environments...) 
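+		// The child process sees the full parent environment plus the
+		// user-supplied pairs appended after it; for duplicate keys the
+		// appended (user-supplied) entry generally takes precedence.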
+ } var ( out bytes.Buffer @@ -85,16 +80,16 @@ func (c CommandRunner) Run( runErr := internal.RunTimeout(cmd, timeout) - out = removeCarriageReturns(out) - if stderr.Len() > 0 { - stderr = removeCarriageReturns(stderr) - stderr = truncate(stderr) + out = removeWindowsCarriageReturns(out) + if stderr.Len() > 0 && !telegraf.Debug { + stderr = removeWindowsCarriageReturns(stderr) + stderr = c.truncate(stderr) } return out.Bytes(), stderr.Bytes(), runErr } -func truncate(buf bytes.Buffer) bytes.Buffer { +func (c CommandRunner) truncate(buf bytes.Buffer) bytes.Buffer { // Limit the number of bytes. didTruncate := false if buf.Len() > MaxStderrBytes { @@ -109,44 +104,42 @@ func truncate(buf bytes.Buffer) bytes.Buffer { buf.Truncate(i) } if didTruncate { + //nolint:errcheck,revive // Will always return nil or panic buf.WriteString("...") } return buf } -// removeCarriageReturns removes all carriage returns from the input if the +// removeWindowsCarriageReturns removes all carriage returns from the input if the // OS is Windows. It does not return any errors. -func removeCarriageReturns(b bytes.Buffer) bytes.Buffer { +func removeWindowsCarriageReturns(b bytes.Buffer) bytes.Buffer { if runtime.GOOS == "windows" { var buf bytes.Buffer for { - byt, er := b.ReadBytes(0x0D) - end := len(byt) - if nil == er { - end -= 1 - } - if nil != byt { - buf.Write(byt[:end]) - } else { - break + byt, err := b.ReadBytes(0x0D) + byt = bytes.TrimRight(byt, "\x0d") + if len(byt) > 0 { + _, _ = buf.Write(byt) } - if nil != er { - break + if err == io.EOF { + return buf } } - b = buf } return b +} +func (*Exec) SampleConfig() string { + return sampleConfig } func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) { defer wg.Done() _, isNagios := e.parser.(*nagios.NagiosParser) - out, errbuf, runErr := e.runner.Run(command, e.Timeout.Duration) + out, errBuf, runErr := e.runner.Run(command, e.Environment, time.Duration(e.Timeout)) if !isNagios && runErr != nil { - err := fmt.Errorf("exec: %s for command '%s': %s", runErr, command, string(errbuf)) + err := fmt.Errorf("exec: %s for command '%s': %s", runErr, command, string(errBuf)) acc.AddError(err) return } @@ -158,10 +151,7 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync } if isNagios { - metrics, err = nagios.TryAddState(runErr, metrics) - if err != nil { - e.Log.Errorf("Failed to add nagios state: %s", err) - } + metrics = nagios.AddState(runErr, errBuf, metrics) } for _, m := range metrics { @@ -169,14 +159,6 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync } } -func (e *Exec) SampleConfig() string { - return sampleConfig -} - -func (e *Exec) Description() string { - return "Read metrics from one or more commands that can output to stdout" -} - func (e *Exec) SetParser(parser parsers.Parser) { e.parser = parser } diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index d0fcc71f668e5..2fa77d3927c6e 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package exec import ( @@ -7,16 +13,14 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" -) -// Midnight 
9/22/2015 -const baseTimeSeconds = 1442905200 + "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/plugins/parsers/value" + "github.com/influxdata/telegraf/testutil" +) -const validJson = ` +const validJSON = ` { "status": "green", "num_processes": 82, @@ -30,25 +34,11 @@ const validJson = ` "users": [0, 1, 2, 3] }` -const malformedJson = ` +const malformedJSON = ` { "status": "green", ` -const lineProtocol = "cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1\n" -const lineProtocolEmpty = "" -const lineProtocolShort = "ab" - -const lineProtocolMulti = ` -cpu,cpu=cpu0,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu1,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu2,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu3,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu4,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu5,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu6,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - type CarriageReturnTest struct { input []byte output []byte @@ -86,18 +76,16 @@ func newRunnerMock(out []byte, errout []byte, err error) Runner { } } -func (r runnerMock) Run(command string, _ time.Duration) ([]byte, []byte, error) { +func (r runnerMock) Run(_ string, _ []string, _ time.Duration) ([]byte, []byte, error) { return r.out, r.errout, r.err } func TestExec(t *testing.T) { - parser, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "exec", - }) + parser := &json.Parser{MetricName: "exec"} + require.NoError(t, parser.Init()) e := &Exec{ Log: testutil.Logger{}, - runner: newRunnerMock([]byte(validJson), nil, nil), + runner: newRunnerMock([]byte(validJSON), nil, nil), Commands: []string{"testcommand arg1"}, parser: parser, } @@ -105,7 +93,7 @@ func TestExec(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(e.Gather) require.NoError(t, err) - assert.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") + require.Equal(t, acc.NFields(), 8, "non-numeric measurements should be ignored") fields := map[string]interface{}{ "num_processes": float64(82), @@ -121,27 +109,23 @@ func TestExec(t *testing.T) { } func TestExecMalformed(t *testing.T) { - parser, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "exec", - }) + parser := &json.Parser{MetricName: "exec"} + require.NoError(t, parser.Init()) e := &Exec{ Log: testutil.Logger{}, - runner: newRunnerMock([]byte(malformedJson), nil, nil), + runner: newRunnerMock([]byte(malformedJSON), nil, nil), Commands: []string{"badcommand arg1"}, parser: parser, } var acc testutil.Accumulator require.Error(t, acc.GatherError(e.Gather)) - assert.Equal(t, acc.NFields(), 0, "No new points should have been added") + require.Equal(t, acc.NFields(), 0, "No new points should have been added") } func TestCommandError(t *testing.T) { - parser, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "exec", - }) + parser := &json.Parser{MetricName: "exec"} + require.NoError(t, parser.Init()) e := &Exec{ Log: testutil.Logger{}, runner: newRunnerMock(nil, nil, fmt.Errorf("exit status code 1")), @@ -151,18 +135,22 @@ func TestCommandError(t *testing.T) { var acc testutil.Accumulator require.Error(t, acc.GatherError(e.Gather)) - assert.Equal(t, acc.NFields(), 0, "No new points should have been added") + require.Equal(t, acc.NFields(), 0, "No new points should have been added") } func 
TestExecCommandWithGlob(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser := value.Parser{ + MetricName: "metric", + DataType: "string", + } + require.NoError(t, parser.Init()) + e := NewExec() e.Commands = []string{"/bin/ech* metric_value"} - e.SetParser(parser) + e.SetParser(&parser) var acc testutil.Accumulator - err := acc.GatherError(e.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(e.Gather)) fields := map[string]interface{}{ "value": "metric_value", @@ -171,14 +159,18 @@ func TestExecCommandWithGlob(t *testing.T) { } func TestExecCommandWithoutGlob(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser := value.Parser{ + MetricName: "metric", + DataType: "string", + } + require.NoError(t, parser.Init()) + e := NewExec() e.Commands = []string{"/bin/echo metric_value"} - e.SetParser(parser) + e.SetParser(&parser) var acc testutil.Accumulator - err := acc.GatherError(e.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(e.Gather)) fields := map[string]interface{}{ "value": "metric_value", @@ -187,14 +179,37 @@ func TestExecCommandWithoutGlob(t *testing.T) { } func TestExecCommandWithoutGlobAndPath(t *testing.T) { - parser, _ := parsers.NewValueParser("metric", "string", nil) + parser := value.Parser{ + MetricName: "metric", + DataType: "string", + } + require.NoError(t, parser.Init()) e := NewExec() e.Commands = []string{"echo metric_value"} - e.SetParser(parser) + e.SetParser(&parser) var acc testutil.Accumulator - err := acc.GatherError(e.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(e.Gather)) + + fields := map[string]interface{}{ + "value": "metric_value", + } + acc.AssertContainsFields(t, "metric", fields) +} + +func TestExecCommandWithEnv(t *testing.T) { + parser := value.Parser{ + MetricName: "metric", + DataType: "string", + } + require.NoError(t, parser.Init()) + e := NewExec() + e.Commands = []string{"/bin/sh -c 'echo ${METRIC_NAME}'"} + e.Environment = []string{"METRIC_NAME=metric_value"} + e.SetParser(&parser) + + var acc testutil.Accumulator + require.NoError(t, acc.GatherError(e.Gather)) fields := map[string]interface{}{ "value": "metric_value", @@ -212,12 +227,14 @@ func TestTruncate(t *testing.T) { name: "should not truncate", bufF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world") + _, err := b.WriteString("hello world") + require.NoError(t, err) return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world") + _, err := b.WriteString("hello world") + require.NoError(t, err) return &b }, }, @@ -225,12 +242,14 @@ func TestTruncate(t *testing.T) { name: "should truncate up to the new line", bufF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world\nand all the people") + _, err := b.WriteString("hello world\nand all the people") + require.NoError(t, err) return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer - b.WriteString("hello world...") + _, err := b.WriteString("hello world...") + require.NoError(t, err) return &b }, }, @@ -239,24 +258,26 @@ func TestTruncate(t *testing.T) { bufF: func() *bytes.Buffer { var b bytes.Buffer for i := 0; i < 2*MaxStderrBytes; i++ { - b.WriteByte('b') + require.NoError(t, b.WriteByte('b')) } return &b }, expF: func() *bytes.Buffer { var b bytes.Buffer for i := 0; i < MaxStderrBytes; i++ { - b.WriteByte('b') + require.NoError(t, b.WriteByte('b')) } - b.WriteString("...") + _, err := b.WriteString("...") + require.NoError(t, 
err) return &b }, }, } + c := CommandRunner{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - res := truncate(*tt.bufF()) + res := c.truncate(*tt.bufF()) require.Equal(t, tt.expF().Bytes(), res.Bytes()) }) } @@ -267,15 +288,15 @@ func TestRemoveCarriageReturns(t *testing.T) { // Test that all carriage returns are removed for _, test := range crTests { b := bytes.NewBuffer(test.input) - out := removeCarriageReturns(*b) - assert.True(t, bytes.Equal(test.output, out.Bytes())) + out := removeWindowsCarriageReturns(*b) + require.True(t, bytes.Equal(test.output, out.Bytes())) } } else { // Test that the buffer is returned unaltered for _, test := range crTests { b := bytes.NewBuffer(test.input) - out := removeCarriageReturns(*b) - assert.True(t, bytes.Equal(test.input, out.Bytes())) + out := removeWindowsCarriageReturns(*b) + require.True(t, bytes.Equal(test.input, out.Bytes())) } } } diff --git a/plugins/inputs/exec/sample.conf b/plugins/inputs/exec/sample.conf new file mode 100644 index 0000000000000..7261d14b887b5 --- /dev/null +++ b/plugins/inputs/exec/sample.conf @@ -0,0 +1,26 @@ +# Read metrics from one or more commands that can output to stdout +[[inputs.exec]] + ## Commands array + commands = [ + "/tmp/test.sh", + "/usr/bin/mycollector --foo=bar", + "/tmp/collect_*.sh" + ] + + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. "KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + + ## Timeout for each command to complete. + timeout = "5s" + + ## measurement name suffix (for separating different commands) + name_suffix = "_mycollector" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md index aa37e7cd7696a..c0a4c50455012 100644 --- a/plugins/inputs/execd/README.md +++ b/plugins/inputs/execd/README.md @@ -1,10 +1,10 @@ # Execd Input Plugin -The `execd` plugin runs an external program as a long-running daemon. -The programs must output metrics in any one of the accepted -[Input Data Formats][] on the process's STDOUT, and is expected to -stay running. If you'd instead like the process to collect metrics and then exit, -check out the [inputs.exec][] plugin. +The `execd` plugin runs an external program as a long-running daemon. The +programs must output metrics in any one of the accepted [Input Data Formats][] +on the process's STDOUT, and is expected to stay running. If you'd instead like +the process to collect metrics and then exit, check out the [inputs.exec][] +plugin. The `signal` can be configured to send a signal the running daemon on each collection interval. This is used for when you want to have Telegraf notify the @@ -13,14 +13,21 @@ new line to the process's STDIN. STDERR from the process will be relayed to Telegraf as errors in the logs. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Run executable as long-running input plugin [[inputs.execd]] ## One program to run as daemon. ## NOTE: process and each argument should each be their own string command = ["telegraf-smartctl", "-d", "/dev/sda"] + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. 
"KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + ## Define how the process is signaled on each collection interval. ## Valid values are: ## "none" : Do not signal anything. (Recommended for service inputs) @@ -41,9 +48,9 @@ STDERR from the process will be relayed to Telegraf as errors in the logs. data_format = "influx" ``` -### Example +## Example -##### Daemon written in bash using STDIN signaling +### Daemon written in bash using STDIN signaling ```bash #!/bin/bash @@ -62,7 +69,7 @@ done signal = "STDIN" ``` -##### Go daemon using SIGHUP +### Go daemon using SIGHUP ```go package main @@ -96,7 +103,7 @@ func main() { signal = "SIGHUP" ``` -##### Ruby daemon running standalone +### Ruby daemon running standalone ```ruby #!/usr/bin/env ruby @@ -118,5 +125,5 @@ end signal = "none" ``` -[Input Data Formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -[inputs.exec]: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/exec/README.md +[Input Data Formats]: ../../../docs/DATA_FORMATS_INPUT.md +[inputs.exec]: ../exec/README.md diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go index 228c38db50f76..582837219d202 100644 --- a/plugins/inputs/execd/execd.go +++ b/plugins/inputs/execd/execd.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package execd import ( "bufio" + _ "embed" "errors" "fmt" "io" @@ -14,34 +16,16 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/parsers/prometheus" ) -const sampleConfig = ` - ## Program to run as daemon - command = ["telegraf-smartctl", "-d", "/dev/sda"] - - ## Define how the process is signaled on each collection interval. - ## Valid values are: - ## "none" : Do not signal anything. - ## The process must output metrics by itself. - ## "STDIN" : Send a newline on STDIN. - ## "SIGHUP" : Send a HUP signal. Not available on Windows. - ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. - ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. - signal = "none" - - ## Delay before the process is restarted after an unexpected termination - restart_delay = "10s" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string type Execd struct { Command []string `toml:"command"` + Environment []string `toml:"environment"` Signal string `toml:"signal"` RestartDelay config.Duration `toml:"restart_delay"` Log telegraf.Logger `toml:"-"` @@ -51,14 +35,10 @@ type Execd struct { parser parsers.Parser } -func (e *Execd) SampleConfig() string { +func (*Execd) SampleConfig() string { return sampleConfig } -func (e *Execd) Description() string { - return "Run executable as long-running input plugin" -} - func (e *Execd) SetParser(parser parsers.Parser) { e.parser = parser } @@ -66,7 +46,7 @@ func (e *Execd) SetParser(parser parsers.Parser) { func (e *Execd) Start(acc telegraf.Accumulator) error { e.acc = acc var err error - e.process, err = process.New(e.Command) + e.process, err = process.New(e.Command, e.Environment) if err != nil { return fmt.Errorf("error creating new process: %w", err) } @@ -100,10 +80,17 @@ func (e *Execd) cmdReadOut(out io.Reader) { return } + _, isPrometheus := e.parser.(*prometheus.Parser) + scanner := bufio.NewScanner(out) for scanner.Scan() { - metrics, err := e.parser.Parse(scanner.Bytes()) + data := scanner.Bytes() + if isPrometheus { + data = append(data, []byte("\n")...) + } + + metrics, err := e.parser.Parse(data) if err != nil { e.acc.AddError(fmt.Errorf("parse error: %w", err)) } diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go index 4d8789a8d3215..a90b1a92dddf5 100644 --- a/plugins/inputs/execd/execd_posix.go +++ b/plugins/inputs/execd/execd_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package execd @@ -12,7 +13,7 @@ import ( "github.com/influxdata/telegraf" ) -func (e *Execd) Gather(acc telegraf.Accumulator) error { +func (e *Execd) Gather(_ telegraf.Accumulator) error { if e.process == nil || e.process.Cmd == nil { return nil } @@ -23,17 +24,19 @@ func (e *Execd) Gather(acc telegraf.Accumulator) error { } switch e.Signal { case "SIGHUP": - osProcess.Signal(syscall.SIGHUP) + return osProcess.Signal(syscall.SIGHUP) case "SIGUSR1": - osProcess.Signal(syscall.SIGUSR1) + return osProcess.Signal(syscall.SIGUSR1) case "SIGUSR2": - osProcess.Signal(syscall.SIGUSR2) + return osProcess.Signal(syscall.SIGUSR2) case "STDIN": if osStdin, ok := e.process.Stdin.(*os.File); ok { - osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)) + if err := osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)); err != nil { + return fmt.Errorf("setting write deadline failed: %s", err) + } } if _, err := io.WriteString(e.process.Stdin, "\n"); err != nil { - return fmt.Errorf("Error writing to stdin: %s", err) + return fmt.Errorf("writing to stdin failed: %s", err) } case "none": default: diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go index a7be617da3a48..729db3785f03e 100644 --- a/plugins/inputs/execd/execd_test.go +++ b/plugins/inputs/execd/execd_test.go @@ -1,5 +1,3 @@ -// +build !windows - package execd import ( @@ -11,23 +9,23 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/agent" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/models" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/serializers" - - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" ) func 
TestSettingConfigWorks(t *testing.T) { cfg := ` [[inputs.execd]] command = ["a", "b", "c"] + environment = ["d=e", "f=1"] restart_delay = "1m" signal = "SIGHUP" ` @@ -38,6 +36,7 @@ func TestSettingConfigWorks(t *testing.T) { inp, ok := conf.Inputs[0].Input.(*Execd) require.True(t, ok) require.EqualValues(t, []string{"a", "b", "c"}, inp.Command) + require.EqualValues(t, []string{"d=e", "f=1"}, inp.Environment) require.EqualValues(t, 1*time.Minute, inp.RestartDelay) require.EqualValues(t, "SIGHUP", inp.Signal) } @@ -51,6 +50,7 @@ func TestExternalInputWorks(t *testing.T) { e := &Execd{ Command: []string{exe, "-counter"}, + Environment: []string{"PLUGINS_INPUTS_EXECD_MODE=application", "METRIC_NAME=counter"}, RestartDelay: config.Duration(5 * time.Second), parser: influxParser, Signal: "STDIN", @@ -142,8 +142,8 @@ func (tm *TestMetricMaker) LogName() string { return tm.Name() } -func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { - return metric +func (tm *TestMetricMaker) MakeMetric(aMetric telegraf.Metric) telegraf.Metric { + return aMetric } func (tm *TestMetricMaker) Log() telegraf.Logger { @@ -155,25 +155,30 @@ var counter = flag.Bool("counter", false, func TestMain(m *testing.M) { flag.Parse() - if *counter { - runCounterProgram() + runMode := os.Getenv("PLUGINS_INPUTS_EXECD_MODE") + if *counter && runMode == "application" { + if err := runCounterProgram(); err != nil { + os.Exit(1) + } os.Exit(0) } code := m.Run() os.Exit(code) } -func runCounterProgram() { +func runCounterProgram() error { + envMetricName := os.Getenv("METRIC_NAME") i := 0 serializer, err := serializers.NewInfluxSerializer() if err != nil { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintln(os.Stderr, "ERR InfluxSerializer failed to load") - os.Exit(1) + return err } scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { - metric, _ := metric.New("counter", + m := metric.New(envMetricName, map[string]string{}, map[string]interface{}{ "count": i, @@ -182,12 +187,15 @@ func runCounterProgram() { ) i++ - b, err := serializer.Serialize(metric) + b, err := serializer.Serialize(m) if err != nil { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprintf(os.Stderr, "ERR %v\n", err) - os.Exit(1) + return err + } + if _, err := fmt.Fprint(os.Stdout, string(b)); err != nil { + return err } - fmt.Fprint(os.Stdout, string(b)) } - + return nil } diff --git a/plugins/inputs/execd/execd_windows.go b/plugins/inputs/execd/execd_windows.go index 15e6798f2389b..9b1f22204bdc4 100644 --- a/plugins/inputs/execd/execd_windows.go +++ b/plugins/inputs/execd/execd_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package execd diff --git a/plugins/inputs/execd/sample.conf b/plugins/inputs/execd/sample.conf new file mode 100644 index 0000000000000..48c2e3e38a586 --- /dev/null +++ b/plugins/inputs/execd/sample.conf @@ -0,0 +1,30 @@ +# Run executable as long-running input plugin +[[inputs.execd]] + ## One program to run as daemon. + ## NOTE: process and each argument should each be their own string + command = ["telegraf-smartctl", "-d", "/dev/sda"] + + ## Environment variables + ## Array of "key=value" pairs to pass as environment variables + ## e.g. "KEY=value", "USERNAME=John Doe", + ## "LD_LIBRARY_PATH=/opt/custom/lib64:/usr/local/libs" + # environment = [] + + ## Define how the process is signaled on each collection interval. + ## Valid values are: + ## "none" : Do not signal anything. (Recommended for service inputs) + ## The process must output metrics by itself. 
+ ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) + ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended) + ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. + ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. + signal = "none" + + ## Delay before the process is restarted after an unexpected termination + restart_delay = "10s" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go index 2ea0b839b3e2f..cfb54e3ae0708 100644 --- a/plugins/inputs/execd/shim/goshim.go +++ b/plugins/inputs/execd/shim/goshim.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "os/signal" "strings" @@ -26,7 +25,6 @@ import ( type empty struct{} var ( - forever = 100 * 365 * 24 * time.Hour envVarEscaper = strings.NewReplacer( `"`, `\"`, `\`, `\\`, @@ -58,8 +56,7 @@ var ( // New creates a new shim interface func New() *Shim { - fmt.Fprintf(os.Stderr, "%s is deprecated; please change your import to %s\n", - oldpkg, newpkg) + _, _ = fmt.Fprintf(os.Stderr, "%s is deprecated; please change your import to %s\n", oldpkg, newpkg) return &Shim{ stdin: os.Stdin, stdout: os.Stdout, @@ -156,7 +153,9 @@ loop: return fmt.Errorf("failed to serialize metric: %s", err) } // Write this to stdout - fmt.Fprint(s.stdout, string(b)) + if _, err := fmt.Fprint(s.stdout, string(b)); err != nil { + return fmt.Errorf("failed to write %q to stdout: %s", string(b), err) + } } } @@ -233,11 +232,17 @@ func (s *Shim) startGathering(ctx context.Context, input telegraf.Input, acc tel return case <-gatherPromptCh: if err := input.Gather(acc); err != nil { - fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err) + if _, perr := fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err); perr != nil { + acc.AddError(err) + acc.AddError(perr) + } } case <-t.C: if err := input.Gather(acc); err != nil { - fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err) + if _, perr := fmt.Fprintf(s.stderr, "failed to gather metrics: %s", err); perr != nil { + acc.AddError(err) + acc.AddError(perr) + } } } } @@ -268,7 +273,7 @@ func LoadConfig(filePath *string) ([]telegraf.Input, error) { return DefaultImportedPlugins() } - b, err := ioutil.ReadFile(*filePath) + b, err := os.ReadFile(*filePath) if err != nil { return nil, err } diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go index 4e4a04f141b65..c1a3d0ea24d84 100644 --- a/plugins/inputs/execd/shim/goshim_posix.go +++ b/plugins/inputs/execd/shim/goshim_posix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shim @@ -14,10 +15,7 @@ func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt ch signal.Notify(collectMetricsPrompt, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2) go func() { - select { - case <-ctx.Done(): - // context done. 
stop to signals to avoid pushing messages to a closed channel - signal.Stop(collectMetricsPrompt) - } + <-ctx.Done() + signal.Stop(collectMetricsPrompt) }() } diff --git a/plugins/inputs/execd/shim/goshim_windows.go b/plugins/inputs/execd/shim/goshim_windows.go index 317f8a2f3d4cb..90adfeff6f6c9 100644 --- a/plugins/inputs/execd/shim/goshim_windows.go +++ b/plugins/inputs/execd/shim/goshim_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package shim diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go index 873ef89bf655f..36e0afcd83167 100644 --- a/plugins/inputs/execd/shim/shim_posix_test.go +++ b/plugins/inputs/execd/shim/shim_posix_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shim @@ -7,7 +8,6 @@ import ( "context" "io" "os" - "runtime" "syscall" "testing" "time" @@ -16,10 +16,6 @@ import ( ) func TestShimUSR1SignalingWorks(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip() - return - } stdinReader, stdinWriter := io.Pipe() stdoutReader, stdoutWriter := io.Pipe() @@ -42,7 +38,7 @@ func TestShimUSR1SignalingWorks(t *testing.T) { return // test is done default: // test isn't done, keep going. - process.Signal(syscall.SIGUSR1) + require.NoError(t, process.Signal(syscall.SIGUSR1)) time.Sleep(200 * time.Millisecond) } } @@ -56,7 +52,7 @@ func TestShimUSR1SignalingWorks(t *testing.T) { require.NoError(t, err) require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) - stdinWriter.Close() + require.NoError(t, stdinWriter.Close()) readUntilEmpty(r) <-exited diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go index dbc3462211222..1059bc2b7f2db 100644 --- a/plugins/inputs/execd/shim/shim_test.go +++ b/plugins/inputs/execd/shim/shim_test.go @@ -36,7 +36,8 @@ func TestShimStdinSignalingWorks(t *testing.T) { metricProcessed, exited := runInputPlugin(t, 40*time.Second, stdinReader, stdoutWriter, nil) - stdinWriter.Write([]byte("\n")) + _, err := stdinWriter.Write([]byte("\n")) + require.NoError(t, err) <-metricProcessed @@ -45,7 +46,7 @@ func TestShimStdinSignalingWorks(t *testing.T) { require.NoError(t, err) require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) - stdinWriter.Close() + require.NoError(t, stdinWriter.Close()) readUntilEmpty(r) @@ -71,7 +72,7 @@ func runInputPlugin(t *testing.T, interval time.Duration, stdin io.Reader, stdou shim.stderr = stderr } - shim.AddInput(inp) + require.NoError(t, shim.AddInput(inp)) go func() { err := shim.Run(interval) require.NoError(t, err) @@ -104,7 +105,7 @@ func (i *testInput) Gather(acc telegraf.Accumulator) error { return nil } -func (i *testInput) Start(acc telegraf.Accumulator) error { +func (i *testInput) Start(_ telegraf.Accumulator) error { return nil } @@ -112,18 +113,18 @@ func (i *testInput) Stop() { } func TestLoadConfig(t *testing.T) { - os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") - os.Setenv("SECRET_VALUE", `test"\test`) + require.NoError(t, os.Setenv("SECRET_TOKEN", "xxxxxxxxxx")) + require.NoError(t, os.Setenv("SECRET_VALUE", `test"\test`)) inputs.Add("test", func() telegraf.Input { return &serviceInput{} }) c := "./testdata/plugin.conf" - inputs, err := LoadConfig(&c) + loadedInputs, err := LoadConfig(&c) require.NoError(t, err) - inp := inputs[0].(*serviceInput) + inp := loadedInputs[0].(*serviceInput) require.Equal(t, "awesome name", inp.ServiceName) require.Equal(t, "xxxxxxxxxx", inp.SecretToken) @@ -156,7 +157,7 @@ func (i *serviceInput) Gather(acc 
telegraf.Accumulator) error { return nil } -func (i *serviceInput) Start(acc telegraf.Accumulator) error { +func (i *serviceInput) Start(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/fail2ban/README.md b/plugins/inputs/fail2ban/README.md index 1762bbaf209cb..88f0f56e2b3f2 100644 --- a/plugins/inputs/fail2ban/README.md +++ b/plugins/inputs/fail2ban/README.md @@ -3,22 +3,22 @@ The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org). -This plugin runs the `fail2ban-client` command which generally requires root access. -Acquiring the required permissions can be done using several methods: +This plugin runs the `fail2ban-client` command which generally requires root +access. Acquiring the required permissions can be done using several methods: - [Use sudo](#using-sudo) run fail2ban-client. - Run telegraf as root. (not recommended) -### Configuration +## Configuration -```toml +```toml @sample.conf # Read metrics from fail2ban. [[inputs.fail2ban]] ## Use sudo to run fail2ban-client use_sudo = false ``` -### Using sudo +## Using sudo Make sure to set `use_sudo = true` in your configuration file. @@ -26,20 +26,21 @@ You will also need to update your sudoers file. It is recommended to modify a file in the `/etc/sudoers.d` directory using `visudo`: ```bash -$ sudo visudo -f /etc/sudoers.d/telegraf +sudo visudo -f /etc/sudoers.d/telegraf ``` Add the following lines to the file, these commands allow the `telegraf` user to call `fail2ban-client` without needing to provide a password and disables logging of the call in the auth.log. Consult `man 8 visudo` and `man 5 sudoers` for details. -``` + +```text Cmnd_Alias FAIL2BAN = /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status * telegraf ALL=(root) NOEXEC: NOPASSWD: FAIL2BAN Defaults!FAIL2BAN !logfile, !syslog, !pam_session ``` -### Metrics +## Metrics - fail2ban - tags: @@ -48,9 +49,9 @@ Defaults!FAIL2BAN !logfile, !syslog, !pam_session - failed (integer, count) - banned (integer, count) -### Example Output +## Example Output -``` +```shell # fail2ban-client status sshd Status for the jail: sshd |- Filter @@ -63,6 +64,6 @@ Status for the jail: sshd `- Banned IP list: 192.168.0.1 192.168.0.2 ``` -``` +```shell fail2ban,jail=sshd failed=5i,banned=2i 1495868667000000000 ``` diff --git a/plugins/inputs/fail2ban/fail2ban.go b/plugins/inputs/fail2ban/fail2ban.go index 37afb87b68d80..819c15797ecce 100644 --- a/plugins/inputs/fail2ban/fail2ban.go +++ b/plugins/inputs/fail2ban/fail2ban.go @@ -1,17 +1,22 @@ +//go:generate ../../../tools/readme_config_includer/generator package fail2ban import ( + _ "embed" "errors" "fmt" "os/exec" - "strings" - "strconv" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( execCommand = exec.Command // execCommand is used to mock commands in tests. ) @@ -21,11 +26,6 @@ type Fail2ban struct { UseSudo bool } -var sampleConfig = ` - ## Use sudo to run fail2ban-client - use_sudo = false -` - var metricsTargets = []struct { target string field string @@ -40,14 +40,30 @@ var metricsTargets = []struct { }, } -func (f *Fail2ban) Description() string { - return "Read metrics from fail2ban." 
-} +const cmd = "fail2ban-client" -func (f *Fail2ban) SampleConfig() string { +func (*Fail2ban) SampleConfig() string { return sampleConfig } +func (f *Fail2ban) Init() error { + // Set defaults + if f.path == "" { + path, err := exec.LookPath(cmd) + if err != nil { + return fmt.Errorf("looking up %q failed: %v", cmd, err) + } + f.path = path + } + + // Check parameters + if f.path == "" { + return fmt.Errorf("%q not found", cmd) + } + + return nil +} + func (f *Fail2ban) Gather(acc telegraf.Accumulator) error { if len(f.path) == 0 { return errors.New("fail2ban-client not found: verify that fail2ban is installed and that fail2ban-client is in your PATH") @@ -119,13 +135,7 @@ func extractCount(line string) (string, int) { } func init() { - f := Fail2ban{} - path, _ := exec.LookPath("fail2ban-client") - if len(path) > 0 { - f.path = path - } inputs.Add("fail2ban", func() telegraf.Input { - f := f - return &f + return &Fail2ban{} }) } diff --git a/plugins/inputs/fail2ban/fail2ban_test.go b/plugins/inputs/fail2ban/fail2ban_test.go index b28d824ee3aed..0bf17f574ee02 100644 --- a/plugins/inputs/fail2ban/fail2ban_test.go +++ b/plugins/inputs/fail2ban/fail2ban_test.go @@ -7,6 +7,8 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) @@ -50,10 +52,9 @@ func TestGather(t *testing.T) { execCommand = fakeExecCommand defer func() { execCommand = exec.Command }() var acc testutil.Accumulator - err := f.Gather(&acc) - if err != nil { - t.Fatal(err) - } + + require.NoError(t, f.Init()) + require.NoError(t, f.Gather(&acc)) fields1 := map[string]interface{}{ "banned": 2, @@ -92,7 +93,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { return cmd } -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -101,25 +102,37 @@ func TestHelperProcess(t *testing.T) { cmd, args := args[3], args[4:] if !strings.HasSuffix(cmd, "fail2ban-client") { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // os.Exit called intentionally os.Exit(1) } if len(args) == 1 && args[0] == "status" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if len(args) == 2 && args[0] == "status" { if args[1] == "sshd" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusSshdOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if args[1] == "postfix" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusPostfixOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } else if args[1] == "dovecot" { + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, execStatusDovecotOutput) + //nolint:revive // os.Exit called intentionally os.Exit(0) } } + //nolint:errcheck,revive // Test will fail anyway fmt.Fprint(os.Stdout, "invalid argument") + //nolint:revive // os.Exit called intentionally os.Exit(1) } diff --git a/plugins/inputs/fail2ban/sample.conf b/plugins/inputs/fail2ban/sample.conf new file mode 100644 index 0000000000000..eaf845a196897 --- /dev/null +++ b/plugins/inputs/fail2ban/sample.conf @@ -0,0 +1,4 @@ +# Read metrics from fail2ban. 
+[[inputs.fail2ban]] + ## Use sudo to run fail2ban-client + use_sudo = false diff --git a/plugins/inputs/fibaro/README.md b/plugins/inputs/fibaro/README.md index 54c20310224b3..f1831ba690f99 100644 --- a/plugins/inputs/fibaro/README.md +++ b/plugins/inputs/fibaro/README.md @@ -1,11 +1,12 @@ # Fibaro Input Plugin -The Fibaro plugin makes HTTP calls to the Fibaro controller API to gather values of hooked devices. -Those values could be true (1) or false (0) for switches, percentage for dimmers, temperature, etc. +The Fibaro plugin makes HTTP calls to the Fibaro controller API to gather values +of hooked devices. Those values could be true (1) or false (0) for switches, +percentage for dimmers, temperature, etc. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Read devices value(s) from a Fibaro controller [[inputs.fibaro]] ## Required Fibaro controller address/hostname. @@ -20,7 +21,7 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers # timeout = "5s" ``` -### Metrics: +## Metrics - fibaro - tags: @@ -36,10 +37,9 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers - value (float) - value2 (float, when available from device) +## Example Output -### Example Output: - -``` +```shell fibaro,deviceId=9,host=vm1,name=Fenêtre\ haute,room=Cuisine,section=Cuisine,type=com.fibaro.FGRM222 energy=2.04,power=0.7,value=99,value2=99 1529996807000000000 fibaro,deviceId=10,host=vm1,name=Escaliers,room=Dégagement,section=Pièces\ communes,type=com.fibaro.binarySwitch value=0 1529996807000000000 fibaro,deviceId=13,host=vm1,name=Porte\ fenêtre,room=Salon,section=Pièces\ communes,type=com.fibaro.FGRM222 energy=4.33,power=0.7,value=99,value2=99 1529996807000000000 diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 62889cc8dd6f7..a5b523f52e55d 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package fibaro import ( + _ "embed" "encoding/json" "fmt" "net/http" @@ -8,26 +10,15 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) -const defaultTimeout = 5 * time.Second - -const sampleConfig = ` - ## Required Fibaro controller address/hostname. - ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available - url = "http://:80" - - ## Required credentials to access the API (http://) - username = "" - password = "" +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string - ## Amount of time allowed to complete the HTTP request - # timeout = "5s" -` - -const description = "Read devices value(s) from a Fibaro controller" +const defaultTimeout = 5 * time.Second // Fibaro contains connection information type Fibaro struct { @@ -37,7 +28,7 @@ type Fibaro struct { Username string `toml:"username"` Password string `toml:"password"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` client *http.Client } @@ -78,12 +69,6 @@ type Devices struct { } `json:"properties"` } -// Description returns a string explaining the purpose of this plugin -func (f *Fibaro) Description() string { return description } - -// SampleConfig returns text explaining how plugin should be configured -func (f *Fibaro) SampleConfig() string { return sampleConfig } - // getJSON connects, authenticates and reads JSON payload returned by Fibaro box func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { var requestURL = f.URL + path @@ -101,7 +86,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", requestURL, resp.StatusCode, http.StatusText(resp.StatusCode), @@ -119,15 +104,18 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { return nil } +func (*Fibaro) SampleConfig() string { + return sampleConfig +} + // Gather fetches all required information to output metrics func (f *Fibaro) Gather(acc telegraf.Accumulator) error { - if f.client == nil { f.client = &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: f.Timeout.Duration, + Timeout: time.Duration(f.Timeout), } } @@ -160,7 +148,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { for _, device := range devices { // skip device in some cases if device.RoomID == 0 || - device.Enabled == false || + !device.Enabled || device.Properties.Dead == "true" || device.Type == "com.fibaro.zwaveDevice" { continue @@ -222,7 +210,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("fibaro", func() telegraf.Input { return &Fibaro{ - Timeout: internal.Duration{Duration: defaultTimeout}, + Timeout: config.Duration(defaultTimeout), } }) } diff --git a/plugins/inputs/fibaro/fibaro_test.go b/plugins/inputs/fibaro/fibaro_test.go index 32a1447e3ef4d..dac8bc6fdf47a 100644 --- a/plugins/inputs/fibaro/fibaro_test.go +++ b/plugins/inputs/fibaro/fibaro_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -162,7 +161,8 @@ func TestJSONSuccess(t *testing.T) { payload = devicesJSON } w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, payload) + _, err := fmt.Fprintln(w, payload) + require.NoError(t, err) })) defer ts.Close() @@ -178,7 +178,7 @@ func TestJSONSuccess(t *testing.T) { require.NoError(t, err) // Gather should add 5 metrics - assert.Equal(t, uint64(5), acc.NMetrics()) + require.Equal(t, uint64(5), acc.NMetrics()) // Ensure fields / values are correct - Device 1 tags := map[string]string{"deviceId": "1", "section": "Section 1", "room": "Room 1", "name": "Device 1", "type": "com.fibaro.binarySwitch"} diff --git a/plugins/inputs/fibaro/sample.conf b/plugins/inputs/fibaro/sample.conf new file mode 
100644 index 0000000000000..cc38d783f468f --- /dev/null +++ b/plugins/inputs/fibaro/sample.conf @@ -0,0 +1,12 @@ +# Read devices value(s) from a Fibaro controller +[[inputs.fibaro]] + ## Required Fibaro controller address/hostname. + ## Note: at the time of writing this plugin, Fibaro only implemented http - no https available + url = "http://:80" + + ## Required credentials to access the API (http://) + username = "" + password = "" + + ## Amount of time allowed to complete the HTTP request + # timeout = "5s" diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md index ef0fb90b0796c..bcae1029d1000 100644 --- a/plugins/inputs/file/README.md +++ b/plugins/inputs/file/README.md @@ -1,29 +1,47 @@ # File Input Plugin -The file plugin parses the **complete** contents of a file **every interval** using -the selected [input data format][]. +The file plugin parses the **complete** contents of a file **every interval** +using the selected [input data format][]. **Note:** If you wish to parse only newly appended lines use the [tail][] input plugin instead. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Parse a complete file each interval [[inputs.file]] ## Files to parse each interval. Accept standard unix glob matching rules, ## as well as ** to match recursive files and directories. files = ["/tmp/metrics.out"] + ## Character encoding to use when interpreting the file contents. Invalid + ## characters are replaced using the unicode replacement character. When set + ## to the empty string the data is not decoded to text. + ## ex: character_encoding = "utf-8" + ## character_encoding = "utf-16le" + ## character_encoding = "utf-16be" + ## character_encoding = "" + # character_encoding = "" + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. + ## to disable. Be cautious when file name variation is high, as this can increase + ## the cardinality significantly. Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality # file_tag = "" ``` +## Metrics + +The format of metrics produced by this plugin depends on the content and data +format of the file. + [input data format]: /docs/DATA_FORMATS_INPUT.md [tail]: /plugins/inputs/tail diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go index e431bc6df9f15..c75fc94723cd3 100644 --- a/plugins/inputs/file/file.go +++ b/plugins/inputs/file/file.go @@ -1,63 +1,39 @@ +//go:generate ../../../tools/readme_config_includer/generator package file import ( + _ "embed" "fmt" - "io/ioutil" + "io" "os" "path/filepath" "github.com/dimchansky/utfbom" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/common/encoding" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/parsers" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + type File struct { Files []string `toml:"files"` FileTag string `toml:"file_tag"` CharacterEncoding string `toml:"character_encoding"` - parser parsers.Parser - filenames []string - decoder *encoding.Decoder + parserFunc telegraf.ParserFunc + filenames []string + decoder *encoding.Decoder } -const sampleConfig = ` - ## Files to parse each interval. Accept standard unix glob matching rules, - ## as well as ** to match recursive files and directories. - files = ["/tmp/metrics.out"] - - ## Name a tag containing the name of the file the data was parsed from. Leave empty - ## to disable. - # file_tag = "" - - ## Character encoding to use when interpreting the file contents. Invalid - ## characters are replaced using the unicode replacement character. When set - ## to the empty string the data is not decoded to text. - ## ex: character_encoding = "utf-8" - ## character_encoding = "utf-16le" - ## character_encoding = "utf-16be" - ## character_encoding = "" - # character_encoding = "" - - ## The dataformat to be read from files - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` - -// SampleConfig returns the default configuration of the Input -func (f *File) SampleConfig() string { +func (*File) SampleConfig() string { return sampleConfig } -func (f *File) Description() string { - return "Parse a complete file each interval" -} - func (f *File) Init() error { var err error f.decoder, err = encoding.NewDecoder(f.CharacterEncoding) @@ -85,8 +61,8 @@ func (f *File) Gather(acc telegraf.Accumulator) error { return nil } -func (f *File) SetParser(p parsers.Parser) { - f.parser = p +func (f *File) SetParserFunc(fn telegraf.ParserFunc) { + f.parserFunc = fn } func (f *File) refreshFilePaths() error { @@ -94,11 +70,11 @@ func (f *File) refreshFilePaths() error { for _, file := range f.Files { g, err := globpath.Compile(file) if err != nil { - return fmt.Errorf("could not compile glob %v: %v", file, err) + return fmt.Errorf("could not compile glob %q: %w", file, err) } files := g.Match() if len(files) <= 0 { - return fmt.Errorf("could not find file: %v", file) + return fmt.Errorf("could not find file(s): %v", file) } allFiles = append(allFiles, files...) } @@ -115,11 +91,19 @@ func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { defer file.Close() r, _ := utfbom.Skip(f.decoder.Reader(file)) - fileContents, err := ioutil.ReadAll(r) + fileContents, err := io.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("could not read %q: %w", filename, err) + } + parser, err := f.parserFunc() + if err != nil { + return nil, fmt.Errorf("could not instantiate parser: %w", err) + } + metrics, err := parser.Parse(fileContents) if err != nil { - return nil, fmt.Errorf("E! 
Error file: %v could not be read, %s", filename, err) + return metrics, fmt.Errorf("could not parse %q: %w", filename, err) } - return f.parser.Parse(fileContents) + return metrics, err } func init() { diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go index 427ff25d8c789..8eb0c7cb012e2 100644 --- a/plugins/inputs/file/file_test.go +++ b/plugins/inputs/file/file_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package file import ( @@ -6,16 +12,19 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/csv" + "github.com/influxdata/telegraf/plugins/parsers/grok" + "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestRefreshFilePaths(t *testing.T) { wd, err := os.Getwd() + require.NoError(t, err) + r := File{ Files: []string{filepath.Join(wd, "dev/testfiles/**.log")}, } @@ -24,7 +33,7 @@ func TestRefreshFilePaths(t *testing.T) { err = r.refreshFilePaths() require.NoError(t, err) - assert.Equal(t, 2, len(r.filenames)) + require.Equal(t, 2, len(r.filenames)) } func TestFileTag(t *testing.T) { @@ -35,23 +44,20 @@ func TestFileTag(t *testing.T) { Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")}, FileTag: "filename", } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) - parserConfig := parsers.Config{ - DataFormat: "json", - } - nParser, err := parsers.NewParser(&parserConfig) - assert.NoError(t, err) - r.parser = nParser + r.SetParserFunc(func() (telegraf.Parser, error) { + p := &json.Parser{} + err := p.Init() + return p, err + }) - err = r.Gather(&acc) - require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) for _, m := range acc.Metrics { for key, value := range m.Tags { - assert.Equal(t, r.FileTag, key) - assert.Equal(t, filepath.Base(r.Files[0]), value) + require.Equal(t, r.FileTag, key) + require.Equal(t, filepath.Base(r.Files[0]), value) } } } @@ -62,19 +68,17 @@ func TestJSONParserCompile(t *testing.T) { r := File{ Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")}, } - err := r.Init() - require.NoError(t, err) - parserConfig := parsers.Config{ - DataFormat: "json", - TagKeys: []string{"parent_ignored_child"}, - } - nParser, err := parsers.NewParser(&parserConfig) - assert.NoError(t, err) - r.parser = nParser + require.NoError(t, r.Init()) + + r.SetParserFunc(func() (telegraf.Parser, error) { + p := &json.Parser{TagKeys: []string{"parent_ignored_child"}} + err := p.Init() + return p, err + }) - r.Gather(&acc) - assert.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) - assert.Equal(t, 5, len(acc.Metrics[0].Fields)) + require.NoError(t, r.Gather(&acc)) + require.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) + require.Equal(t, 5, len(acc.Metrics[0].Fields)) } func TestGrokParser(t *testing.T) { @@ -86,17 +90,19 @@ func TestGrokParser(t *testing.T) { err := r.Init() require.NoError(t, err) - parserConfig := parsers.Config{ - DataFormat: "grok", - GrokPatterns: []string{"%{COMMON_LOG_FORMAT}"}, - } + r.SetParserFunc(func() (telegraf.Parser, error) { + parser := &grok.Parser{ + Patterns: 
[]string{"%{COMMON_LOG_FORMAT}"}, + Log: testutil.Logger{}, + } + err := parser.Init() - nParser, err := parsers.NewParser(&parserConfig) - r.parser = nParser - assert.NoError(t, err) + return parser, err + }) err = r.Gather(&acc) - assert.Equal(t, len(acc.Metrics), 2) + require.NoError(t, err) + require.Len(t, acc.Metrics, 2) } func TestCharacterEncoding(t *testing.T) { @@ -174,7 +180,7 @@ func TestCharacterEncoding(t *testing.T) { tests := []struct { name string plugin *File - csv *csv.Config + csv csv.Parser file string }{ { @@ -183,7 +189,7 @@ func TestCharacterEncoding(t *testing.T) { Files: []string{"testdata/mtr-utf-8.csv"}, CharacterEncoding: "", }, - csv: &csv.Config{ + csv: csv.Parser{ MetricName: "file", SkipRows: 1, ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, @@ -196,7 +202,7 @@ func TestCharacterEncoding(t *testing.T) { Files: []string{"testdata/mtr-utf-8.csv"}, CharacterEncoding: "utf-8", }, - csv: &csv.Config{ + csv: csv.Parser{ MetricName: "file", SkipRows: 1, ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, @@ -209,7 +215,7 @@ func TestCharacterEncoding(t *testing.T) { Files: []string{"testdata/mtr-utf-16le.csv"}, CharacterEncoding: "utf-16le", }, - csv: &csv.Config{ + csv: csv.Parser{ MetricName: "file", SkipRows: 1, ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, @@ -222,7 +228,7 @@ func TestCharacterEncoding(t *testing.T) { Files: []string{"testdata/mtr-utf-16be.csv"}, CharacterEncoding: "utf-16be", }, - csv: &csv.Config{ + csv: csv.Parser{ MetricName: "file", SkipRows: 1, ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, @@ -235,9 +241,11 @@ func TestCharacterEncoding(t *testing.T) { err := tt.plugin.Init() require.NoError(t, err) - parser, err := csv.NewParser(tt.csv) - require.NoError(t, err) - tt.plugin.SetParser(parser) + tt.plugin.SetParserFunc(func() (telegraf.Parser, error) { + parser := tt.csv + err := parser.Init() + return &parser, err + }) var acc testutil.Accumulator err = tt.plugin.Gather(&acc) @@ -247,3 +255,120 @@ func TestCharacterEncoding(t *testing.T) { }) } } + +func TestStatefulParsers(t *testing.T) { + expected := []telegraf.Metric{ + testutil.MustMetric("file", + map[string]string{ + "dest": "example.org", + "hop": "1", + "ip": "12.122.114.5", + }, + map[string]interface{}{ + "avg": 21.55, + "best": 19.34, + "loss": 0.0, + "snt": 10, + "status": "OK", + "stdev": 2.05, + "worst": 26.83, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("file", + map[string]string{ + "dest": "example.org", + "hop": "2", + "ip": "192.205.32.238", + }, + map[string]interface{}{ + "avg": 25.11, + "best": 20.8, + "loss": 0.0, + "snt": 10, + "status": "OK", + "stdev": 6.03, + "worst": 38.85, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("file", + map[string]string{ + "dest": "example.org", + "hop": "3", + "ip": "152.195.85.133", + }, + map[string]interface{}{ + "avg": 20.18, + "best": 19.75, + "loss": 0.0, + "snt": 10, + "status": "OK", + "stdev": 0.0, + "worst": 20.78, + }, + time.Unix(0, 0), + ), + testutil.MustMetric("file", + map[string]string{ + "dest": "example.org", + "hop": "4", + "ip": "93.184.216.34", + }, + map[string]interface{}{ + "avg": 24.02, + "best": 19.75, + "loss": 0.0, + "snt": 10, + "status": "OK", + "stdev": 4.67, + "worst": 32.41, + }, + time.Unix(0, 0), + ), + } + + 
tests := []struct { + name string + plugin *File + csv csv.Parser + file string + count int + }{ + { + name: "read file twice", + plugin: &File{ + Files: []string{"testdata/mtr-utf-8.csv"}, + CharacterEncoding: "", + }, + csv: csv.Parser{ + MetricName: "file", + SkipRows: 1, + ColumnNames: []string{"", "", "status", "dest", "hop", "ip", "loss", "snt", "", "", "avg", "best", "worst", "stdev"}, + TagColumns: []string{"dest", "hop", "ip"}, + }, + count: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.plugin.Init() + require.NoError(t, err) + + tt.plugin.SetParserFunc(func() (telegraf.Parser, error) { + parser := tt.csv + err := parser.Init() + return &parser, err + }) + + var acc testutil.Accumulator + for i := 0; i < tt.count; i++ { + require.NoError(t, tt.plugin.Gather(&acc)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + acc.ClearMetrics() + } + }) + } +} diff --git a/plugins/inputs/file/sample.conf b/plugins/inputs/file/sample.conf new file mode 100644 index 0000000000000..706d1bfff6481 --- /dev/null +++ b/plugins/inputs/file/sample.conf @@ -0,0 +1,27 @@ +# Parse a complete file each interval +[[inputs.file]] + ## Files to parse each interval. Accept standard unix glob matching rules, + ## as well as ** to match recursive files and directories. + files = ["/tmp/metrics.out"] + + ## Character encoding to use when interpreting the file contents. Invalid + ## characters are replaced using the unicode replacement character. When set + ## to the empty string the data is not decoded to text. + ## ex: character_encoding = "utf-8" + ## character_encoding = "utf-16le" + ## character_encoding = "utf-16be" + ## character_encoding = "" + # character_encoding = "" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. Be cautious when file name variation is high, as this can increase + ## the cardinality significantly. Read more about cardinality here: + ## https://docs.influxdata.com/influxdb/cloud/reference/glossary/#series-cardinality + # file_tag = "" diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md index 81fc75908e798..0842a4722c355 100644 --- a/plugins/inputs/filecount/README.md +++ b/plugins/inputs/filecount/README.md @@ -2,14 +2,11 @@ Reports the number and total size of files in specified directories. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Count files in a directory [[inputs.filecount]] - ## Directory to gather stats about. - ## deprecated in 1.9; use the directories option - # directory = "/var/cache/apt/archives" - ## Directories to gather stats about. ## This accept standard unit glob matching rules, but with the addition of ## ** as a "super asterisk". ie:
- count (integer) - size_bytes (integer) -### Example Output: +## Example Output -``` +```shell filecount,directory=/var/cache/apt count=7i,size_bytes=7438336i 1530034445000000000 filecount,directory=/tmp count=17i,size_bytes=28934786i 1530034445000000000 ``` diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go index 30815541c8448..8e317198867c0 100644 --- a/plugins/inputs/filecount/filecount.go +++ b/plugins/inputs/filecount/filecount.go @@ -1,76 +1,40 @@ +//go:generate ../../../tools/readme_config_includer/generator package filecount import ( + _ "embed" "os" "path/filepath" "time" + "github.com/karrick/godirwalk" + "github.com/pkg/errors" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/karrick/godirwalk" - "github.com/pkg/errors" ) -const sampleConfig = ` - ## Directory to gather stats about. - ## deprecated in 1.9; use the directories option - # directory = "/var/cache/apt/archives" - - ## Directories to gather stats about. - ## This accept standard unit glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/** -> recursively find all directories in /var/log and count files in each directories - ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories - ## /var/log -> count all files in /var/log and all of its subdirectories - directories = ["/var/cache/apt/archives"] - - ## Only count files that match the name pattern. Defaults to "*". - name = "*.deb" - - ## Count files in subdirectories. Defaults to true. - recursive = false - - ## Only count regular files. Defaults to true. - regular_only = true - - ## Follow all symlinks while walking the directory tree. Defaults to false. - follow_symlinks = false - - ## Only count files that are at least this size. If size is - ## a negative number, only count files that are smaller than the - ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... - ## Without quotes and units, interpreted as size in bytes. - size = "0B" - - ## Only count files that have not been touched for at least this - ## duration. If mtime is negative, only count files that have been - ## touched in this duration. Defaults to "0s". - mtime = "0s" -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string type FileCount struct { - Directory string // deprecated in 1.9 + Directory string `toml:"directory" deprecated:"1.9.0;use 'directories' instead"` Directories []string Name string Recursive bool RegularOnly bool FollowSymlinks bool - Size internal.Size - MTime internal.Duration `toml:"mtime"` + Size config.Size + MTime config.Duration `toml:"mtime"` fileFilters []fileFilterFunc globPaths []globpath.GlobPath Fs fileSystem Log telegraf.Logger } -func (_ *FileCount) Description() string { - return "Count files in a directory" -} - -func (_ *FileCount) SampleConfig() string { return sampleConfig } - type fileFilterFunc func(os.FileInfo) (bool, error) func rejectNilFilters(filters []fileFilterFunc) []fileFilterFunc { @@ -108,7 +72,7 @@ func (fc *FileCount) regularOnlyFilter() fileFilterFunc { } func (fc *FileCount) sizeFilter() fileFilterFunc { - if fc.Size.Size == 0 { + if fc.Size == 0 { return nil } @@ -116,22 +80,22 @@ func (fc *FileCount) sizeFilter() fileFilterFunc { if !f.Mode().IsRegular() { return false, nil } - if fc.Size.Size < 0 { - return f.Size() < -fc.Size.Size, nil + if fc.Size < 0 { + return f.Size() < -int64(fc.Size), nil } - return f.Size() >= fc.Size.Size, nil + return f.Size() >= int64(fc.Size), nil } } func (fc *FileCount) mtimeFilter() fileFilterFunc { - if fc.MTime.Duration == 0 { + if time.Duration(fc.MTime) == 0 { return nil } return func(f os.FileInfo) (bool, error) { - age := absDuration(fc.MTime.Duration) + age := absDuration(time.Duration(fc.MTime)) mtime := time.Now().Add(-age) - if fc.MTime.Duration < 0 { + if time.Duration(fc.MTime) < 0 { return f.ModTime().After(mtime), nil } return f.ModTime().Before(mtime), nil @@ -244,6 +208,10 @@ func (fc *FileCount) filter(file os.FileInfo) (bool, error) { return true, nil } +func (*FileCount) SampleConfig() string { + return sampleConfig +} + func (fc *FileCount) Gather(acc telegraf.Accumulator) error { if fc.globPaths == nil { fc.initGlobPaths(acc) @@ -292,7 +260,6 @@ func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) { fc.globPaths = append(fc.globPaths, *glob) } } - } func NewFileCount() *FileCount { @@ -303,8 +270,8 @@ func NewFileCount() *FileCount { Recursive: true, RegularOnly: true, FollowSymlinks: false, - Size: internal.Size{Size: 0}, - MTime: internal.Duration{Duration: 0}, + Size: config.Size(0), + MTime: config.Duration(0), fileFilters: nil, Fs: osFS{}, } diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 568ee07b5d458..d02c28fb6f170 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package filecount import ( @@ -9,7 +15,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -30,7 +36,7 @@ func TestNoFiltersOnChildDir(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(600))) } @@ -43,7 +49,7 
@@ func TestNoRecursiveButSuperMeta(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(200))) @@ -72,7 +78,7 @@ func TestDoubleAndSimpleStar(t *testing.T) { tags := map[string]string{"directory": getTestdataDir() + "/subdir/nested2"} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(400))) @@ -90,12 +96,12 @@ func TestRegularOnlyFilter(t *testing.T) { func TestSizeFilter(t *testing.T) { fc := getNoFilterFileCount() - fc.Size = internal.Size{Size: -100} + fc.Size = config.Size(-100) matches := []string{"foo", "bar", "baz", "subdir/quux", "subdir/quuz"} fileCountEquals(t, fc, len(matches), 0) - fc.Size = internal.Size{Size: 100} + fc.Size = config.Size(100) matches = []string{"qux", "subdir/nested2//qux"} fileCountEquals(t, fc, len(matches), 800) @@ -106,14 +112,14 @@ func TestMTimeFilter(t *testing.T) { fileAge := time.Since(mtime) - (60 * time.Second) fc := getNoFilterFileCount() - fc.MTime = internal.Duration{Duration: -fileAge} + fc.MTime = config.Duration(-fileAge) matches := []string{"foo", "bar", "qux", "subdir/", "subdir/quux", "subdir/quuz", "subdir/nested2", "subdir/nested2/qux"} fileCountEquals(t, fc, len(matches), 5096) - fc.MTime = internal.Duration{Duration: fileAge} + fc.MTime = config.Duration(fileAge) matches = []string{"baz"} fileCountEquals(t, fc, len(matches), 0) } @@ -170,8 +176,8 @@ func getNoFilterFileCount() FileCount { Name: "*", Recursive: true, RegularOnly: false, - Size: internal.Size{Size: 0}, - MTime: internal.Duration{Duration: 0}, + Size: config.Size(0), + MTime: config.Duration(0), fileFilters: nil, Fs: getFakeFileSystem(getTestdataDir()), } @@ -208,31 +214,29 @@ func getFakeFileSystem(basePath string) fakeFileSystem { var dmask uint32 = 0666 // set directory bit - dmask |= (1 << uint(32-1)) + dmask |= 1 << uint(32-1) // create a lookup map for getting "files" from the "filesystem" fileList := map[string]fakeFileInfo{ - basePath: {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, - basePath + "/bar": {name: "bar", filemode: uint32(fmask), modtime: mtime}, - basePath + "/baz": {name: "baz", filemode: uint32(fmask), modtime: olderMtime}, - basePath + "/qux": {name: "qux", size: int64(400), filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/subdir/quux": {name: "quux", filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir/quuz": {name: "quuz", filemode: uint32(fmask), modtime: mtime}, - basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: uint32(dmask), modtime: mtime, isdir: true}, - basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: int64(400)}, + basePath: {name: "testdata", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/foo": {name: "foo", filemode: fmask, modtime: mtime}, + basePath + "/bar": {name: "bar", filemode: fmask, 
modtime: mtime}, + basePath + "/baz": {name: "baz", filemode: fmask, modtime: olderMtime}, + basePath + "/qux": {name: "qux", size: int64(400), filemode: fmask, modtime: mtime}, + basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/subdir/quux": {name: "quux", filemode: fmask, modtime: mtime}, + basePath + "/subdir/quuz": {name: "quuz", filemode: fmask, modtime: mtime}, + basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: dmask, modtime: mtime, isdir: true}, + basePath + "/subdir/nested2/qux": {name: "qux", filemode: fmask, modtime: mtime, size: int64(400)}, } - fs := fakeFileSystem{files: fileList} - return fs - + return fakeFileSystem{files: fileList} } func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) { tags := map[string]string{"directory": getTestdataDir()} acc := testutil.Accumulator{} - acc.GatherError(fc.Gather) + require.NoError(t, acc.GatherError(fc.Gather)) require.True(t, acc.HasPoint("filecount", tags, "count", int64(expectedCount))) require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(expectedSize))) } diff --git a/plugins/inputs/filecount/filesystem_helpers.go b/plugins/inputs/filecount/filesystem_helpers.go index 2bd6c095142cf..f43bb4ad5f394 100644 --- a/plugins/inputs/filecount/filesystem_helpers.go +++ b/plugins/inputs/filecount/filesystem_helpers.go @@ -69,5 +69,4 @@ func (f fakeFileSystem) Stat(name string) (os.FileInfo, error) { return fakeInfo, nil } return nil, &os.PathError{Op: "Stat", Path: name, Err: errors.New("No such file or directory")} - } diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index 08bb15a2e59cf..a3a3310d3fb4e 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package filecount import ( @@ -48,11 +54,12 @@ func TestRealFS(t *testing.T) { fs = getTestFileSystem() // now, the same test as above will return an error as the file doesn't exist in our fake fs expectedError := "Stat " + getTestdataDir() + "/qux: No such file or directory" - fileInfo, err = fs.Stat(getTestdataDir() + "/qux") - require.Equal(t, expectedError, err.Error()) + _, err = fs.Stat(getTestdataDir() + "/qux") + require.Error(t, err, expectedError) // and verify that what we DO expect to find, we do fileInfo, err = fs.Stat("/testdata/foo") require.NoError(t, err) + require.NotNil(t, fileInfo) } func getTestFileSystem() fakeFileSystem { @@ -77,14 +84,12 @@ func getTestFileSystem() fakeFileSystem { var dmask uint32 = 0666 // set directory bit - dmask |= (1 << uint(32-1)) + dmask |= 1 << uint(32-1) fileList := map[string]fakeFileInfo{ - "/testdata": {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, - "/testdata/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, + "/testdata": {name: "testdata", size: int64(4096), filemode: dmask, modtime: mtime, isdir: true}, + "/testdata/foo": {name: "foo", filemode: fmask, modtime: mtime}, } - fs := fakeFileSystem{files: fileList} - return fs - + return fakeFileSystem{files: fileList} } diff --git a/plugins/inputs/filecount/sample.conf b/plugins/inputs/filecount/sample.conf new file mode 100644 index 
0000000000000..93244d601086e --- /dev/null +++ b/plugins/inputs/filecount/sample.conf @@ -0,0 +1,32 @@ +# Count files in a directory +[[inputs.filecount]] + ## Directories to gather stats about. + ## These accept standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk". ie: + ## /var/log/** -> recursively find all directories in /var/log and count files in each directory + ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory + ## /var/log -> count all files in /var/log and all of its subdirectories + directories = ["/var/cache/apt", "/tmp"] + + ## Only count files that match the name pattern. Defaults to "*". + name = "*" + + ## Count files in subdirectories. Defaults to true. + recursive = true + + ## Only count regular files. Defaults to true. + regular_only = true + + ## Follow all symlinks while walking the directory tree. Defaults to false. + follow_symlinks = false + + ## Only count files that are at least this size. If size is + ## a negative number, only count files that are smaller than the + ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... + ## Without quotes and units, interpreted as size in bytes. + size = "0B" + + ## Only count files that have not been touched for at least this + ## duration. If mtime is negative, only count files that have been + ## touched in this duration. Defaults to "0s". + mtime = "0s" diff --git a/plugins/inputs/filestat/README.md b/plugins/inputs/filestat/README.md index 840cafb53c06a..63ed975e666cf 100644 --- a/plugins/inputs/filestat/README.md +++ b/plugins/inputs/filestat/README.md @@ -1,10 +1,10 @@ -# filestat Input Plugin +# Filestat Input Plugin The filestat plugin gathers metrics about file existence, size, and other stats. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Read stats about given file(s) [[inputs.filestat]] ## Files to gather stats about. @@ -16,22 +16,22 @@ The filestat plugin gathers metrics about file existence, size, and other stats. md5 = false ``` -### Measurements & Fields: +## Measurements & Fields - filestat - - exists (int, 0 | 1) - - size_bytes (int, bytes) - - modification_time (int, unix time nanoseconds) - - md5 (optional, string) + - exists (int, 0 | 1) + - size_bytes (int, bytes) + - modification_time (int, unix time nanoseconds) + - md5 (optional, string) -### Tags: +## Tags - All measurements have the following tags: - - file (the path the to file, as specified in the config) + - file (the path to the file, as specified in the config) -### Example Output: +## Example Output -``` +```shell $ telegraf --config /etc/telegraf/telegraf.conf --input-filter filestat --test * Plugin: filestat, Collection 1 > filestat,file=/tmp/foo/bar,host=tyrion exists=0i 1507218518192154351 diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go index bf8ea6c160361..1983dbd4f6ee5 100644 --- a/plugins/inputs/filestat/filestat.go +++ b/plugins/inputs/filestat/filestat.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package filestat import ( "crypto/md5" + _ "embed" "fmt" "io" "os" @@ -11,21 +13,9 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -const sampleConfig = ` - ## Files to gather stats about. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk".
ie: - ## "/var/log/**.log" -> recursively find all .log files in /var/log - ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log - ## "/var/log/apache.log" -> just tail the apache log file - ## - ## See https://github.com/gobwas/glob for more examples - ## - files = ["/var/log/**.log"] - - ## If true, read the entire file and calculate an md5 checksum. - md5 = false -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string type FileStat struct { Md5 bool @@ -35,20 +25,25 @@ type FileStat struct { // maps full file paths to globmatch obj globs map[string]*globpath.GlobPath + + // files that were missing - we only log the first time it's not found. + missingFiles map[string]bool + // files that had an error in Stat - we only log the first error. + filesWithErrors map[string]bool } func NewFileStat() *FileStat { return &FileStat{ - globs: make(map[string]*globpath.GlobPath), + globs: make(map[string]*globpath.GlobPath), + missingFiles: make(map[string]bool), + filesWithErrors: make(map[string]bool), } } -func (*FileStat) Description() string { - return "Read stats about given file(s)" +func (*FileStat) SampleConfig() string { + return sampleConfig } -func (*FileStat) SampleConfig() string { return sampleConfig } - func (f *FileStat) Gather(acc telegraf.Accumulator) error { var err error @@ -85,22 +80,33 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error { fileInfo, err := os.Stat(fileName) if os.IsNotExist(err) { fields["exists"] = int64(0) + acc.AddFields("filestat", fields, tags) + if !f.missingFiles[fileName] { + f.Log.Warnf("File %q not found", fileName) + f.missingFiles[fileName] = true + } + continue } + f.missingFiles[fileName] = false if fileInfo == nil { - f.Log.Errorf("Unable to get info for file %q, possible permissions issue", - fileName) + if !f.filesWithErrors[fileName] { + f.filesWithErrors[fileName] = true + f.Log.Errorf("Unable to get info for file %q: %v", + fileName, err) + } } else { + f.filesWithErrors[fileName] = false fields["size_bytes"] = fileInfo.Size() fields["modification_time"] = fileInfo.ModTime().UnixNano() } if f.Md5 { - md5, err := getMd5(fileName) + md5Hash, err := getMd5(fileName) if err != nil { acc.AddError(err) } else { - fields["md5_sum"] = md5 + fields["md5_sum"] = md5Hash } } diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index a38d3b0aacdc4..ac2a9f9a9f75b 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -1,102 +1,125 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package filestat import ( - "runtime" - "strings" + "os" + "path/filepath" "testing" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" +) + +var ( + testdataDir = getTestdataDir() ) func TestGatherNoMd5(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Files = []string{ - dir + "log1.log", - dir + "log2.log", - "/non/existant/file", + filepath.Join(testdataDir, "log1.log"), + filepath.Join(testdataDir, "log2.log"), + filepath.Join(testdataDir, "non_existent_file"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + 
"log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) tags3 := map[string]string{ - "file": "/non/existant/file", + "file": filepath.Join(testdataDir, "non_existent_file"), } require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0))) } func TestGatherExplicitFiles(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "log1.log", - dir + "log2.log", - "/non/existant/file", + filepath.Join(testdataDir, "log1.log"), + filepath.Join(testdataDir, "log2.log"), + filepath.Join(testdataDir, "non_existent_file"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags3 := map[string]string{ - "file": "/non/existant/file", + "file": filepath.Join(testdataDir, "non_existent_file"), } require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(0))) } +func TestNonExistentFile(t *testing.T) { + fs := NewFileStat() + fs.Log = testutil.Logger{} + fs.Md5 = true + fs.Files = []string{ + "/non/existant/file", + } + acc := testutil.Accumulator{} + require.NoError(t, acc.GatherError(fs.Gather)) + + acc.AssertContainsFields(t, "filestat", map[string]interface{}{"exists": int64(0)}) + require.False(t, acc.HasField("filestat", "error")) + require.False(t, acc.HasField("filestat", "md5_sum")) + require.False(t, acc.HasField("filestat", "size_bytes")) + require.False(t, acc.HasField("filestat", "modification_time")) +} + func TestGatherGlob(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "*.log", + filepath.Join(testdataDir, "*.log"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) @@ -104,33 +127,32 @@ func TestGatherGlob(t *testing.T) { } func 
TestGatherSuperAsterisk(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ - dir + "**", + filepath.Join(testdataDir, "**"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags1, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ - "file": dir + "log2.log", + "file": filepath.Join(testdataDir, "log2.log"), } require.True(t, acc.HasPoint("filestat", tags2, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags2, "exists", int64(1))) require.True(t, acc.HasPoint("filestat", tags2, "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags3 := map[string]string{ - "file": dir + "test.conf", + "file": filepath.Join(testdataDir, "test.conf"), } require.True(t, acc.HasPoint("filestat", tags3, "size_bytes", int64(104))) require.True(t, acc.HasPoint("filestat", tags3, "exists", int64(1))) @@ -138,18 +160,17 @@ func TestGatherSuperAsterisk(t *testing.T) { } func TestModificationTime(t *testing.T) { - dir := getTestdataDir() fs := NewFileStat() fs.Log = testutil.Logger{} fs.Files = []string{ - dir + "log1.log", + filepath.Join(testdataDir, "log1.log"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": dir + "log1.log", + "file": filepath.Join(testdataDir, "log1.log"), } require.True(t, acc.HasPoint("filestat", tags1, "size_bytes", int64(0))) require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(1))) @@ -160,30 +181,34 @@ func TestNoModificationTime(t *testing.T) { fs := NewFileStat() fs.Log = testutil.Logger{} fs.Files = []string{ - "/non/existant/file", + filepath.Join(testdataDir, "non_existent_file"), } acc := testutil.Accumulator{} - acc.GatherError(fs.Gather) + require.NoError(t, acc.GatherError(fs.Gather)) tags1 := map[string]string{ - "file": "/non/existant/file", + "file": filepath.Join(testdataDir, "non_existent_file"), } require.True(t, acc.HasPoint("filestat", tags1, "exists", int64(0))) require.False(t, acc.HasInt64Field("filestat", "modification_time")) } func TestGetMd5(t *testing.T) { - dir := getTestdataDir() - md5, err := getMd5(dir + "test.conf") - assert.NoError(t, err) - assert.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5) + md5, err := getMd5(filepath.Join(testdataDir, "test.conf")) + require.NoError(t, err) + require.Equal(t, "5a7e9b77fa25e7bb411dbd17cf403c1f", md5) - md5, err = getMd5("/tmp/foo/bar/fooooo") - assert.Error(t, err) + _, err = getMd5("/tmp/foo/bar/fooooo") + require.Error(t, err) } func getTestdataDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, "filestat_test.go", "testdata/", 1) + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") } diff --git a/plugins/inputs/filestat/sample.conf b/plugins/inputs/filestat/sample.conf new file mode 100644 index 0000000000000..93d7a9917a839 --- /dev/null +++ b/plugins/inputs/filestat/sample.conf @@ -0,0 +1,9 @@ +# Read stats about given file(s) +[[inputs.filestat]] + ## Files to gather stats 
about. + ## These accept standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk". See https://github.com/gobwas/glob. + files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"] + + ## If true, read the entire file and calculate an md5 checksum. + md5 = false diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md index 7e1f351fa0b7f..2c46a6e6fd1ca 100644 --- a/plugins/inputs/fireboard/README.md +++ b/plugins/inputs/fireboard/README.md @@ -4,35 +4,38 @@ The fireboard plugin gathers the real time temperature data from fireboard thermometers. In order to use this input plugin, you'll need to sign up to use the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html). -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read real time temps from fireboard.io servers [[inputs.fireboard]] ## Specify auth token for your account auth_token = "invalidAuthToken" ## You can override the fireboard server URL if necessary # url = https://fireboard.io/api/v1/devices.json ## You can set a different http_timeout if you need to - # http_timeout = 4 + ## You should set a string using a number and time indicator, + ## for example "12s" for 12 seconds. + # http_timeout = "4s" ``` -#### auth_token +### auth_token In lieu of requiring a username and password, this plugin requires an authentication token that you can generate using the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html#Authentication). -#### url +### url While there should be no reason to override the URL, the option is available in case Fireboard changes their site, etc. -#### http_timeout +### http_timeout If you need to increase the HTTP timeout, you can do so here. You can set this value in seconds. The default value is four (4) seconds. -### Metrics +## Metrics The Fireboard REST API docs have good examples of the data that is available, currently this input only returns the real time temperatures. Temperature @@ -47,12 +50,12 @@ values are included if they are less than a minute old. - fields: - temperature (float, unit) -### Example Output +## Example Output This section shows example output in Line Protocol format. You can often use `telegraf --input-filter --test` or use the `file` output to get this information. -``` +```shell fireboard,channel=2,host=patas-mbp,scale=Farenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000 ``` diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go index a92930aae9598..11ca858848165 100644 --- a/plugins/inputs/fireboard/fireboard.go +++ b/plugins/inputs/fireboard/fireboard.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package fireboard import ( + _ "embed" "encoding/json" "fmt" "net/http" @@ -8,25 +10,29 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + // Fireboard gathers statistics from the fireboard.io servers type Fireboard struct { - AuthToken string `toml:"auth_token"` - URL string `toml:"url"` - HTTPTimeout internal.Duration `toml:"http_timeout"` + AuthToken string `toml:"auth_token"` + URL string `toml:"url"` + HTTPTimeout config.Duration `toml:"http_timeout"` client *http.Client } // NewFireboard return a new instance of Fireboard with a default http client func NewFireboard() *Fireboard { - tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second} client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return &Fireboard{client: client} } @@ -45,50 +51,30 @@ type fireboardStats struct { Latesttemps []RTT `json:"latest_temps"` } -// A sample configuration to only gather stats from localhost, default port. -const sampleConfig = ` - ## Specify auth token for your account - auth_token = "invalidAuthToken" - ## You can override the fireboard server URL if necessary - # url = https://fireboard.io/api/v1/devices.json - ## You can set a different http_timeout if you need to - ## You should set a string using an number and time indicator - ## for example "12s" for 12 seconds. - # http_timeout = "4s" -` - -// SampleConfig Returns a sample configuration for the plugin -func (r *Fireboard) SampleConfig() string { +func (*Fireboard) SampleConfig() string { return sampleConfig } -// Description Returns a description of the plugin -func (r *Fireboard) Description() string { - return "Read real time temps from fireboard.io servers" -} - // Init the things func (r *Fireboard) Init() error { - if len(r.AuthToken) == 0 { - return fmt.Errorf("You must specify an authToken") + return fmt.Errorf("you must specify an authToken") } if len(r.URL) == 0 { r.URL = "https://fireboard.io/api/v1/devices.json" } // Have a default timeout of 4s - if r.HTTPTimeout.Duration == 0 { - r.HTTPTimeout.Duration = time.Second * 4 + if r.HTTPTimeout == 0 { + r.HTTPTimeout = config.Duration(time.Second * 4) } - r.client.Timeout = r.HTTPTimeout.Duration + r.client.Timeout = time.Duration(r.HTTPTimeout) return nil } // Gather Reads stats from all configured servers. 
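+// It issues a GET request against the configured URL using the account's
+// auth token and emits the temperature readings returned in latest_temps.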
func (r *Fireboard) Gather(acc telegraf.Accumulator) error { - // Perform the GET request to the fireboard servers req, err := http.NewRequest("GET", r.URL, nil) if err != nil { diff --git a/plugins/inputs/fireboard/fireboard_test.go b/plugins/inputs/fireboard/fireboard_test.go index a5e93a4533e59..8fe1c21bd757d 100644 --- a/plugins/inputs/fireboard/fireboard_test.go +++ b/plugins/inputs/fireboard/fireboard_test.go @@ -16,7 +16,8 @@ func TestFireboard(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/fireboard/sample.conf b/plugins/inputs/fireboard/sample.conf new file mode 100644 index 0000000000000..9f2b0319db427 --- /dev/null +++ b/plugins/inputs/fireboard/sample.conf @@ -0,0 +1,10 @@ +# Read real time temps from fireboard.io servers +[[inputs.fireboard]] + ## Specify auth token for your account + auth_token = "invalidAuthToken" + ## You can override the fireboard server URL if necessary + # url = https://fireboard.io/api/v1/devices.json + ## You can set a different http_timeout if you need to + ## You should set a string using a number and time indicator, + ## for example "12s" for 12 seconds. + # http_timeout = "4s" diff --git a/plugins/inputs/fluentd/README.md b/plugins/inputs/fluentd/README.md index 3fabbddb75012..9dc87ffc080a5 100644 --- a/plugins/inputs/fluentd/README.md +++ b/plugins/inputs/fluentd/README.md @@ -1,13 +1,18 @@ # Fluentd Input Plugin -The fluentd plugin gathers metrics from plugin endpoint provided by [in_monitor plugin](https://docs.fluentd.org/input/monitor_agent). -This plugin understands data provided by /api/plugin.json resource (/api/config.json is not covered). +The fluentd plugin gathers metrics from the plugin endpoint provided by +[in_monitor plugin][1]. This plugin understands data provided by /api/plugin.json +resource (/api/config.json is not covered). -You might need to adjust your fluentd configuration, in order to reduce series cardinality in case your fluentd restarts frequently. Every time fluentd starts, `plugin_id` value is given a new random value. -According to [fluentd documentation](https://docs.fluentd.org/configuration/config-file#common-plugin-parameter), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`. +You might need to adjust your fluentd configuration in order to reduce series +cardinality in case your fluentd restarts frequently. Every time fluentd starts, +the `plugin_id` value is given a new random value. According to the [fluentd +documentation][2], you can add an `@id` parameter for each plugin to avoid +this behaviour and define a custom `plugin_id`. example configuration with `@id` parameter for http plugin: -``` + +```text @type http @id http @@ -15,9 +20,12 @@ example configuration with `@id` parameter for http plugin: ``` -### Configuration: +[1]: https://docs.fluentd.org/input/monitor_agent +[2]: https://docs.fluentd.org/configuration/config-file#common-plugin-parameter + +## Configuration -```toml +```toml @sample.conf # Read metrics exposed by fluentd in_monitor plugin [[inputs.fluentd]] ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint).
@@ -29,30 +37,41 @@ example configuration with `@id` parameter for http plugin: ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) exclude = [ - "monitor_agent", - "dummy", + "monitor_agent", + "dummy", ] ``` -### Measurements & Fields: +## Measurements & Fields Fields may vary depending on the plugin type - fluentd - - retry_count (float, unit) - - buffer_queue_length (float, unit) - - buffer_total_queued_size (float, unit) + - retry_count (float, unit) + - buffer_queue_length (float, unit) + - buffer_total_queued_size (float, unit) + - rollback_count (float, unit) + - flush_time_count (float, unit) + - slow_flush_count (float, unit) + - emit_count (float, unit) + - emit_records (float, unit) + - emit_size (float, unit) + - write_count (float, unit) + - buffer_stage_length (float, unit) + - buffer_queue_byte_size (float, unit) + - buffer_stage_byte_size (float, unit) + - buffer_available_buffer_space_ratios (float, unit) -### Tags: +## Tags - All measurements have the following tags: - - plugin_id (unique plugin id) - - plugin_type (type of the plugin e.g. s3) + - plugin_id (unique plugin id) + - plugin_type (type of the plugin e.g. s3) - plugin_category (plugin category e.g. output) -### Example Output: +## Example Output -``` +```shell $ telegraf --config fluentd.conf --input-filter fluentd --test * Plugin: inputs.fluentd, Collection 1 > fluentd,host=T440s,plugin_id=object:9f748c,plugin_category=input,plugin_type=dummy buffer_total_queued_size=0,buffer_queue_length=0,retry_count=0 1492006105000000000 @@ -60,5 +79,5 @@ $ telegraf --config fluentd.conf --input-filter fluentd --test > fluentd,plugin_id=object:820190,plugin_category=input,plugin_type=monitor_agent,host=T440s retry_count=0,buffer_total_queued_size=0,buffer_queue_length=0 1492006105000000000 > fluentd,plugin_id=object:c5e054,plugin_category=output,plugin_type=stdout,host=T440s buffer_queue_length=0,retry_count=0,buffer_total_queued_size=0 1492006105000000000 > fluentd,plugin_type=s3,host=T440s,plugin_id=object:bd7a90,plugin_category=output buffer_queue_length=0,retry_count=0,buffer_total_queued_size=0 1492006105000000000 - +> fluentd,plugin_id=output_td,plugin_category=output,plugin_type=tdlog,host=T440s buffer_available_buffer_space_ratios=100,buffer_queue_byte_size=0,buffer_queue_length=0,buffer_stage_byte_size=0,buffer_stage_length=0,buffer_total_queued_size=0,emit_count=0,emit_records=0,flush_time_count=0,retry_count=0,rollback_count=0,slow_flush_count=0,write_count=0 1651474085000000000 ``` diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index 7d4a0cd5eecb4..1a5d667b198e9 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -1,9 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package fluentd import ( + _ "embed" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "time" @@ -12,24 +14,11 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -const ( - measurement = "fluentd" - description = "Read metrics exposed by fluentd in_monitor plugin" - sampleConfig = ` - ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). - ## - ## Endpoint: - ## - only one URI is allowed - ## - https is not supported - endpoint = "http://localhost:24220/api/plugins.json" - - ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) - exclude = [ - "monitor_agent", - "dummy", - ] -` -) +// DO NOT REMOVE THE NEXT TWO LINES!
This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +const measurement = "fluentd" // Fluentd - plugin main structure type Fluentd struct { @@ -43,12 +32,23 @@ type endpointInfo struct { } type pluginData struct { - PluginID string `json:"plugin_id"` - PluginType string `json:"type"` - PluginCategory string `json:"plugin_category"` - RetryCount *float64 `json:"retry_count"` - BufferQueueLength *float64 `json:"buffer_queue_length"` - BufferTotalQueuedSize *float64 `json:"buffer_total_queued_size"` + PluginID string `json:"plugin_id"` + PluginType string `json:"type"` + PluginCategory string `json:"plugin_category"` + RetryCount *float64 `json:"retry_count"` + BufferQueueLength *float64 `json:"buffer_queue_length"` + BufferTotalQueuedSize *float64 `json:"buffer_total_queued_size"` + RollbackCount *float64 `json:"rollback_count"` + EmitRecords *float64 `json:"emit_records"` + EmitSize *float64 `json:"emit_size"` + EmitCount *float64 `json:"emit_count"` + WriteCount *float64 `json:"write_count"` + SlowFlushCount *float64 `json:"slow_flush_count"` + FlushTimeCount *float64 `json:"flush_time_count"` + BufferStageLength *float64 `json:"buffer_stage_length"` + BufferStageByteSize *float64 `json:"buffer_stage_byte_size"` + BufferQueueByteSize *float64 `json:"buffer_queue_byte_size"` + AvailBufferSpaceRatios *float64 `json:"buffer_available_buffer_space_ratios"` } // parse JSON from fluentd Endpoint @@ -62,40 +62,33 @@ func parse(data []byte) (datapointArray []pluginData, err error) { var endpointData endpointInfo if err = json.Unmarshal(data, &endpointData); err != nil { - err = fmt.Errorf("Processing JSON structure") - return + err = fmt.Errorf("processing JSON structure") + return nil, err } - for _, point := range endpointData.Payload { - datapointArray = append(datapointArray, point) - } - - return + datapointArray = append(datapointArray, endpointData.Payload...) 
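+	// Counters absent from the JSON stay nil *float64 pointers here, which
+	// lets Gather distinguish a missing field from a genuine zero value.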
+ return datapointArray, err } -// Description - display description -func (h *Fluentd) Description() string { return description } - -// SampleConfig - generate configuration -func (h *Fluentd) SampleConfig() string { return sampleConfig } +func (*Fluentd) SampleConfig() string { + return sampleConfig +} // Gather - Main code responsible for gathering, processing and creating metrics func (h *Fluentd) Gather(acc telegraf.Accumulator) error { - _, err := url.Parse(h.Endpoint) if err != nil { - return fmt.Errorf("Invalid URL \"%s\"", h.Endpoint) + return fmt.Errorf("invalid URL \"%s\"", h.Endpoint) } if h.client == nil { - tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } h.client = client @@ -104,15 +97,15 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { resp, err := h.client.Get(h.Endpoint) if err != nil { - return fmt.Errorf("Unable to perform HTTP client GET on \"%s\": %s", h.Endpoint, err) + return fmt.Errorf("unable to perform HTTP client GET on \"%s\": %v", h.Endpoint, err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { - return fmt.Errorf("Unable to read the HTTP body \"%s\": %s", string(body), err) + return fmt.Errorf("unable to read the HTTP body \"%s\": %v", string(body), err) } if resp.StatusCode != http.StatusOK { @@ -122,12 +115,11 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { dataPoints, err := parse(body) if err != nil { - return fmt.Errorf("Problem with parsing") + return fmt.Errorf("problem with parsing") } // Go through all plugins one by one for _, p := range dataPoints { - skip := false // Check if this specific type was excluded in configuration @@ -149,8 +141,8 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { if p.BufferQueueLength != nil { tmpFields["buffer_queue_length"] = *p.BufferQueueLength - } + if p.RetryCount != nil { tmpFields["retry_count"] = *p.RetryCount } @@ -159,7 +151,64 @@ func (h *Fluentd) Gather(acc telegraf.Accumulator) error { tmpFields["buffer_total_queued_size"] = *p.BufferTotalQueuedSize } - if !((p.BufferQueueLength == nil) && (p.RetryCount == nil) && (p.BufferTotalQueuedSize == nil)) { + if p.RollbackCount != nil { + tmpFields["rollback_count"] = *p.RollbackCount + } + + if p.EmitRecords != nil { + tmpFields["emit_records"] = *p.EmitRecords + } + + if p.EmitCount != nil { + tmpFields["emit_count"] = *p.EmitCount + } + + if p.EmitSize != nil { + tmpFields["emit_size"] = *p.EmitSize + } + + if p.WriteCount != nil { + tmpFields["write_count"] = *p.WriteCount + } + + if p.SlowFlushCount != nil { + tmpFields["slow_flush_count"] = *p.SlowFlushCount + } + + if p.FlushTimeCount != nil { + tmpFields["flush_time_count"] = *p.FlushTimeCount + } + + if p.BufferStageLength != nil { + tmpFields["buffer_stage_length"] = *p.BufferStageLength + } + + if p.BufferStageByteSize != nil { + tmpFields["buffer_stage_byte_size"] = *p.BufferStageByteSize + } + + if p.BufferQueueByteSize != nil { + tmpFields["buffer_queue_byte_size"] = *p.BufferQueueByteSize + } + + if p.AvailBufferSpaceRatios != nil { + tmpFields["buffer_available_buffer_space_ratios"] = *p.AvailBufferSpaceRatios + } + + if !((p.BufferQueueLength == nil) && + (p.RetryCount == nil) && + (p.BufferTotalQueuedSize == nil) && + (p.EmitCount == nil) && + (p.EmitRecords == nil) && + (p.EmitSize == nil) && + 
(p.WriteCount == nil) && + (p.FlushTimeCount == nil) && + (p.SlowFlushCount == nil) && + (p.RollbackCount == nil) && + (p.BufferStageLength == nil) && + (p.BufferStageByteSize == nil) && + (p.BufferQueueByteSize == nil) && + (p.AvailBufferSpaceRatios == nil)) { acc.AddFields(measurement, tmpFields, tmpTags) } } diff --git a/plugins/inputs/fluentd/fluentd_test.go b/plugins/inputs/fluentd/fluentd_test.go index c7699c3384906..9cd67ecdc3a6f 100644 --- a/plugins/inputs/fluentd/fluentd_test.go +++ b/plugins/inputs/fluentd/fluentd_test.go @@ -8,8 +8,9 @@ import ( "net/url" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) // sampleJSON from fluentd version '0.14.9' @@ -87,8 +88,53 @@ const sampleJSON = ` }, "output_plugin": true, "buffer_queue_length": 0, + "retry_count": 0, + "buffer_total_queued_size": 0 + }, + { + "plugin_id": "object:output_td_1", + "plugin_category": "output", + "type": "tdlog", + "config": { + "@type": "tdlog", + "@id": "output_td", + "apikey": "xxxxxx", + "auto_create_table": "" + }, + "output_plugin": true, + "buffer_queue_length": 0, "buffer_total_queued_size": 0, - "retry_count": 0 + "retry_count": 0, + "emit_records": 0, + "emit_size": 0, + "emit_count": 0, + "write_count": 0, + "rollback_count": 0, + "slow_flush_count": 0, + "flush_time_count": 0, + "buffer_stage_length": 0, + "buffer_stage_byte_size": 0, + "buffer_queue_byte_size": 0, + "buffer_available_buffer_space_ratios": 0 + }, + { + "plugin_id": "object:output_td_2", + "plugin_category": "output", + "type": "tdlog", + "config": { + "@type": "tdlog", + "@id": "output_td", + "apikey": "xxxxxx", + "auto_create_table": "" + }, + "output_plugin": true, + "buffer_queue_length": 0, + "buffer_total_queued_size": 0, + "retry_count": 0, + "rollback_count": 0, + "emit_records": 0, + "slow_flush_count": 0, + "buffer_available_buffer_space_ratios": 0 } ] } @@ -96,14 +142,14 @@ const sampleJSON = ` var ( zero float64 - err error - pluginOutput []pluginData expectedOutput = []pluginData{ // {"object:f48698", "dummy", "input", nil, nil, nil}, // {"object:e27138", "dummy", "input", nil, nil, nil}, // {"object:d74060", "monitor_agent", "input", nil, nil, nil}, - {"object:11a5e2c", "stdout", "output", (*float64)(&zero), nil, nil}, - {"object:11237ec", "s3", "output", (*float64)(&zero), (*float64)(&zero), (*float64)(&zero)}, + {"object:11a5e2c", "stdout", "output", &zero, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}, + {"object:11237ec", "s3", "output", &zero, &zero, &zero, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}, + {"object:output_td_1", "tdlog", "output", &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero, &zero}, + {"object:output_td_2", "tdlog", "output", &zero, &zero, &zero, &zero, &zero, nil, nil, nil, &zero, nil, nil, nil, nil, &zero}, } fluentdTest = &Fluentd{ Endpoint: "http://localhost:8081", @@ -111,14 +157,13 @@ var ( ) func Test_parse(t *testing.T) { - t.Log("Testing parser function") + t.Logf("JSON (%s) ", sampleJSON) _, err := parse([]byte(sampleJSON)) if err != nil { t.Error(err) } - } func Test_Gather(t *testing.T) { @@ -126,12 +171,16 @@ func Test_Gather(t *testing.T) { ts := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") - fmt.Fprintf(w, "%s", string(sampleJSON)) + _, err := fmt.Fprintf(w, "%s", string(sampleJSON)) + require.NoError(t, err) })) requestURL, err 
:= url.Parse(fluentdTest.Endpoint) + require.NoError(t, err) + require.NotNil(t, requestURL) - ts.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + ts.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoError(t, err) ts.Start() @@ -148,16 +197,44 @@ func Test_Gather(t *testing.T) { t.Errorf("acc.HasMeasurement: expected fluentd") } - assert.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"]) - assert.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"]) - assert.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"]) - assert.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"]) - - assert.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"]) - assert.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"]) - assert.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"]) - assert.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"]) - assert.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"]) - assert.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"]) - + require.Equal(t, expectedOutput[0].PluginID, acc.Metrics[0].Tags["plugin_id"]) + require.Equal(t, expectedOutput[0].PluginType, acc.Metrics[0].Tags["plugin_type"]) + require.Equal(t, expectedOutput[0].PluginCategory, acc.Metrics[0].Tags["plugin_category"]) + require.Equal(t, *expectedOutput[0].RetryCount, acc.Metrics[0].Fields["retry_count"]) + + require.Equal(t, expectedOutput[1].PluginID, acc.Metrics[1].Tags["plugin_id"]) + require.Equal(t, expectedOutput[1].PluginType, acc.Metrics[1].Tags["plugin_type"]) + require.Equal(t, expectedOutput[1].PluginCategory, acc.Metrics[1].Tags["plugin_category"]) + require.Equal(t, *expectedOutput[1].RetryCount, acc.Metrics[1].Fields["retry_count"]) + require.Equal(t, *expectedOutput[1].BufferQueueLength, acc.Metrics[1].Fields["buffer_queue_length"]) + require.Equal(t, *expectedOutput[1].BufferTotalQueuedSize, acc.Metrics[1].Fields["buffer_total_queued_size"]) + + require.Equal(t, expectedOutput[2].PluginID, acc.Metrics[2].Tags["plugin_id"]) + require.Equal(t, expectedOutput[2].PluginType, acc.Metrics[2].Tags["plugin_type"]) + require.Equal(t, expectedOutput[2].PluginCategory, acc.Metrics[2].Tags["plugin_category"]) + require.Equal(t, *expectedOutput[2].RetryCount, acc.Metrics[2].Fields["retry_count"]) + require.Equal(t, *expectedOutput[2].BufferQueueLength, acc.Metrics[2].Fields["buffer_queue_length"]) + require.Equal(t, *expectedOutput[2].BufferTotalQueuedSize, acc.Metrics[2].Fields["buffer_total_queued_size"]) + require.Equal(t, *expectedOutput[2].EmitRecords, acc.Metrics[2].Fields["emit_records"]) + require.Equal(t, *expectedOutput[2].EmitSize, acc.Metrics[2].Fields["emit_size"]) + require.Equal(t, *expectedOutput[2].EmitCount, acc.Metrics[2].Fields["emit_count"]) + require.Equal(t, *expectedOutput[2].RollbackCount, acc.Metrics[2].Fields["rollback_count"]) + require.Equal(t, *expectedOutput[2].SlowFlushCount, acc.Metrics[2].Fields["slow_flush_count"]) + require.Equal(t, *expectedOutput[2].WriteCount, acc.Metrics[2].Fields["write_count"]) + require.Equal(t, *expectedOutput[2].FlushTimeCount, acc.Metrics[2].Fields["flush_time_count"]) + require.Equal(t, *expectedOutput[2].BufferStageLength, 
acc.Metrics[2].Fields["buffer_stage_length"]) + require.Equal(t, *expectedOutput[2].BufferStageByteSize, acc.Metrics[2].Fields["buffer_stage_byte_size"]) + require.Equal(t, *expectedOutput[2].BufferQueueByteSize, acc.Metrics[2].Fields["buffer_queue_byte_size"]) + require.Equal(t, *expectedOutput[2].AvailBufferSpaceRatios, acc.Metrics[2].Fields["buffer_available_buffer_space_ratios"]) + + require.Equal(t, expectedOutput[3].PluginID, acc.Metrics[3].Tags["plugin_id"]) + require.Equal(t, expectedOutput[3].PluginType, acc.Metrics[3].Tags["plugin_type"]) + require.Equal(t, expectedOutput[3].PluginCategory, acc.Metrics[3].Tags["plugin_category"]) + require.Equal(t, *expectedOutput[3].RetryCount, acc.Metrics[3].Fields["retry_count"]) + require.Equal(t, *expectedOutput[3].BufferQueueLength, acc.Metrics[3].Fields["buffer_queue_length"]) + require.Equal(t, *expectedOutput[3].BufferTotalQueuedSize, acc.Metrics[3].Fields["buffer_total_queued_size"]) + require.Equal(t, *expectedOutput[3].EmitRecords, acc.Metrics[3].Fields["emit_records"]) + require.Equal(t, *expectedOutput[3].RollbackCount, acc.Metrics[3].Fields["rollback_count"]) + require.Equal(t, *expectedOutput[3].SlowFlushCount, acc.Metrics[3].Fields["slow_flush_count"]) + require.Equal(t, *expectedOutput[3].AvailBufferSpaceRatios, acc.Metrics[3].Fields["buffer_available_buffer_space_ratios"]) } diff --git a/plugins/inputs/fluentd/sample.conf b/plugins/inputs/fluentd/sample.conf new file mode 100644 index 0000000000000..d92741f1e3572 --- /dev/null +++ b/plugins/inputs/fluentd/sample.conf @@ -0,0 +1,14 @@ +# Read metrics exposed by fluentd in_monitor plugin +[[inputs.fluentd]] + ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). + ## + ## Endpoint: + ## - only one URI is allowed + ## - https is not supported + endpoint = "http://localhost:24220/api/plugins.json" + + ## Define which plugins have to be excluded (based on "type" field - e.g. monitor_agent) + exclude = [ + "monitor_agent", + "dummy", + ] diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md index 46127082e69c5..06491e95ba780 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -5,14 +5,15 @@ Gather repository information from [GitHub][] hosted repositories. **Note:** Telegraf also contains the [webhook][] input which can be used as an alternative method for collecting repository information. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Gather repository information from GitHub hosted repositories. [[inputs.github]] ## List of repositories to monitor repositories = [ - "influxdata/telegraf", - "influxdata/influxdb" + "influxdata/telegraf", + "influxdata/influxdb" ] ## Github API access token. Unauthenticated requests are limited to 60 per hour. @@ -23,9 +24,17 @@ alternative method for collecting repository information. ## Timeout for HTTP requests. # http_timeout = "5s" + + ## List of additional fields to query. + ## NOTE: Getting those fields might involve issuing additional API-calls, so please + ## make sure you do not exceed the rate-limit of GitHub. + ## + ## Available fields are: + ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) + # additional_fields = [] ``` -### Metrics +## Metrics - github_repository - tags: @@ -44,7 +53,7 @@ alternative method for collecting repository information. 
When the [internal][] input is enabled: -+ internal_github +- internal_github - tags: - access_token - An obfuscated reference to the configured access token or "Unauthenticated" - fields: @@ -52,11 +61,22 @@ When the [internal][] input is enabled: - remaining - How many requests you have remaining (per hour) - blocks - How many requests have been blocked due to rate limit -### Example Output +When specifying `additional_fields` the plugin will collect the listed +properties. **NOTE:** Querying these additional fields might require additional +API-calls, so make sure you don't exceed GitHub's query rate-limit by +specifying too many additional fields. Below we list the available options +with the required API-calls and the resulting fields: -``` +- "pull-requests" (2 API-calls per repository) + - fields: + - open_pull_requests (int) + - closed_pull_requests (int) + +## Example Output + +```shell github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000 -internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000 +internal_github,access_token=Unauthenticated closed_pull_requests=3522i,rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i,open_pull_requests=260i 1552653551000000000 ``` [GitHub]: https://www.github.com diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go index a26923f3f305c..428740ed64b51 100644 --- a/plugins/inputs/github/github.go +++ b/plugins/inputs/github/github.go @@ -1,28 +1,36 @@ +//go:generate ../../../tools/readme_config_includer/generator package github import ( "context" + _ "embed" "fmt" "net/http" "strings" "sync" "time" - "github.com/google/go-github/v32/github" + githubLib "github.com/google/go-github/v32/github" + "golang.org/x/oauth2" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" - "golang.org/x/oauth2" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // GitHub - plugin main structure type GitHub struct { - Repositories []string `toml:"repositories"` - AccessToken string `toml:"access_token"` - EnterpriseBaseURL string `toml:"enterprise_base_url"` - HTTPTimeout internal.Duration `toml:"http_timeout"` - githubClient *github.Client + Repositories []string `toml:"repositories"` + AccessToken string `toml:"access_token"` + AdditionalFields []string `toml:"additional_fields"` + EnterpriseBaseURL string `toml:"enterprise_base_url"` + HTTPTimeout config.Duration `toml:"http_timeout"` + githubClient *githubLib.Client obfuscatedToken string @@ -31,40 +39,13 @@ type GitHub struct { RateRemaining selfstat.Stat } -const sampleConfig = ` - ## List of repositories to monitor. - repositories = [ - "influxdata/telegraf", - "influxdata/influxdb" - ] - - ## Github API access token. Unauthenticated requests are limited to 60 per hour. - # access_token = "" - - ## Github API enterprise url. Github Enterprise accounts must specify their base url. - # enterprise_base_url = "" - - ## Timeout for HTTP requests. - # http_timeout = "5s" -` - -// SampleConfig returns sample configuration for this plugin.
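For orientation, the pull-request counts described above come from GitHub's issue-search API. A compilable sketch of the approach, mirroring `getPullRequestFields` later in this patch (the unauthenticated client in `main` is purely illustrative and subject to the 60-requests/hour limit):

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v32/github"
)

// countPullRequests issues one search query per class, which is why the
// README warns that "pull-requests" costs two API-calls per repository.
func countPullRequests(ctx context.Context, client *github.Client, owner, repo string) (map[string]interface{}, error) {
	fields := make(map[string]interface{})
	for _, class := range []string{"open", "closed"} {
		query := fmt.Sprintf("repo:%s/%s is:pr is:%s", owner, repo, class)
		result, _, err := client.Search.Issues(ctx, query, nil)
		if err != nil {
			return nil, err
		}
		fields[class+"_pull_requests"] = result.GetTotal()
	}
	return fields, nil
}

func main() {
	fields, err := countPullRequests(context.Background(), github.NewClient(nil), "influxdata", "telegraf")
	fmt.Println(fields, err)
}
```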
-func (g *GitHub) SampleConfig() string { - return sampleConfig -} - -// Description returns the plugin description. -func (g *GitHub) Description() string { - return "Gather repository information from GitHub hosted repositories." -} - // Create GitHub Client -func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) { +func (g *GitHub) createGitHubClient(ctx context.Context) (*githubLib.Client, error) { httpClient := &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, }, - Timeout: g.HTTPTimeout.Duration, + Timeout: time.Duration(g.HTTPTimeout), } g.obfuscatedToken = "Unauthenticated" @@ -84,11 +65,15 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) return g.newGithubClient(httpClient) } -func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) { +func (g *GitHub) newGithubClient(httpClient *http.Client) (*githubLib.Client, error) { if g.EnterpriseBaseURL != "" { - return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) + return githubLib.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) } - return github.NewClient(httpClient), nil + return githubLib.NewClient(httpClient), nil +} + +func (*GitHub) SampleConfig() string { + return sampleConfig } // Gather GitHub Metrics @@ -97,7 +82,6 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { if g.githubClient == nil { githubClient, err := g.createGitHubClient(ctx) - if err != nil { return err } @@ -127,23 +111,35 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { } repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository) - - if _, ok := err.(*github.RateLimitError); ok { - g.RateLimitErrors.Incr(1) - } - + g.handleRateLimit(response, err) if err != nil { acc.AddError(err) return } - g.RateLimit.Set(int64(response.Rate.Limit)) - g.RateRemaining.Set(int64(response.Rate.Remaining)) - now := time.Now() tags := getTags(repositoryInfo) fields := getFields(repositoryInfo) + for _, field := range g.AdditionalFields { + switch field { + case "pull-requests": + // Pull request properties + addFields, err := g.getPullRequestFields(ctx, owner, repository) + if err != nil { + acc.AddError(err) + continue + } + + for k, v := range addFields { + fields[k] = v + } + default: + acc.AddError(fmt.Errorf("unknown additional field %q", field)) + continue + } + } + acc.AddFields("github_repository", fields, tags, now) }(repository, acc) } @@ -152,7 +148,16 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error { return nil } -func splitRepositoryName(repositoryName string) (string, string, error) { +func (g *GitHub) handleRateLimit(response *githubLib.Response, err error) { + if err == nil { + g.RateLimit.Set(int64(response.Rate.Limit)) + g.RateRemaining.Set(int64(response.Rate.Remaining)) + } else if _, ok := err.(*githubLib.RateLimitError); ok { + g.RateLimitErrors.Incr(1) + } +} + +func splitRepositoryName(repositoryName string) (owner string, repository string, err error) { splits := strings.SplitN(repositoryName, "/", 2) if len(splits) != 2 { @@ -162,7 +167,7 @@ func splitRepositoryName(repositoryName string) (string, string, error) { return splits[0], splits[1], nil } -func getLicense(rI *github.Repository) string { +func getLicense(rI *githubLib.Repository) string { if licenseName := rI.GetLicense().GetName(); licenseName != "" { return licenseName } @@ -170,7 +175,7 @@ func getLicense(rI *github.Repository) string { return "None" } -func getTags(repositoryInfo 
*github.Repository) map[string]string { +func getTags(repositoryInfo *githubLib.Repository) map[string]string { return map[string]string{ "owner": repositoryInfo.GetOwner().GetLogin(), "name": repositoryInfo.GetName(), @@ -179,7 +184,7 @@ func getTags(repositoryInfo *github.Repository) map[string]string { } } -func getFields(repositoryInfo *github.Repository) map[string]interface{} { +func getFields(repositoryInfo *githubLib.Repository) map[string]interface{} { return map[string]interface{}{ "stars": repositoryInfo.GetStargazersCount(), "subscribers": repositoryInfo.GetSubscribersCount(), @@ -191,10 +196,36 @@ func getFields(repositoryInfo *github.Repository) map[string]interface{} { } } +func (g *GitHub) getPullRequestFields(ctx context.Context, owner, repo string) (map[string]interface{}, error) { + options := githubLib.SearchOptions{ + TextMatch: false, + ListOptions: githubLib.ListOptions{ + PerPage: 100, + Page: 1, + }, + } + + classes := []string{"open", "closed"} + fields := make(map[string]interface{}) + for _, class := range classes { + q := fmt.Sprintf("repo:%s/%s is:pr is:%s", owner, repo, class) + searchResult, response, err := g.githubClient.Search.Issues(ctx, q, &options) + g.handleRateLimit(response, err) + if err != nil { + return fields, err + } + + f := fmt.Sprintf("%s_pull_requests", class) + fields[f] = searchResult.GetTotal() + } + + return fields, nil +} + func init() { inputs.Add("github", func() telegraf.Input { return &GitHub{ - HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPTimeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/github/sample.conf b/plugins/inputs/github/sample.conf new file mode 100644 index 0000000000000..5e19f1f325b3d --- /dev/null +++ b/plugins/inputs/github/sample.conf @@ -0,0 +1,24 @@ +# Gather repository information from GitHub hosted repositories. +[[inputs.github]] + ## List of repositories to monitor + repositories = [ + "influxdata/telegraf", + "influxdata/influxdb" + ] + + ## Github API access token. Unauthenticated requests are limited to 60 per hour. + # access_token = "" + + ## Github API enterprise url. Github Enterprise accounts must specify their base url. + # enterprise_base_url = "" + + ## Timeout for HTTP requests. + # http_timeout = "5s" + + ## List of additional fields to query. + ## NOTE: Getting those fields might involve issuing additional API-calls, so please + ## make sure you do not exceed the rate-limit of GitHub. + ## + ## Available fields are: + ## - pull-requests -- number of open and closed pull requests (2 API-calls per repository) + # additional_fields = [] diff --git a/plugins/inputs/gnmi/README.md b/plugins/inputs/gnmi/README.md index aa940f76d4e14..ae42ebffd2bef 100644 --- a/plugins/inputs/gnmi/README.md +++ b/plugins/inputs/gnmi/README.md @@ -1,14 +1,20 @@ # gNMI (gRPC Network Management Interface) Input Plugin -This plugin consumes telemetry data based on the [gNMI](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md) Subscribe method. TLS is supported for authentication and encryption. This input plugin is vendor-agnostic and is supported on any platform that supports the gNMI spec. +This plugin consumes telemetry data based on the [gNMI][1] Subscribe method. TLS +is supported for authentication and encryption. This input plugin is +vendor-agnostic and is supported on any platform that supports the gNMI spec. 
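To make the Subscribe flow concrete before the README diff continues, here is a bare-bones client sketch, independent of Telegraf; the target address and path are placeholders, and TLS and credentials are omitted for brevity:

```go
package main

import (
	"context"
	"fmt"
	"log"

	gnmiLib "github.com/openconfig/gnmi/proto/gnmi"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder target; real deployments would configure TLS and credentials.
	conn, err := grpc.Dial("10.49.234.114:57777", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := gnmiLib.NewGNMIClient(conn).Subscribe(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// One STREAM subscription to a sampled path, mirroring the shape of the
	// request the plugin builds in newSubscribeRequest.
	req := &gnmiLib.SubscribeRequest{
		Request: &gnmiLib.SubscribeRequest_Subscribe{
			Subscribe: &gnmiLib.SubscriptionList{
				Mode:     gnmiLib.SubscriptionList_STREAM,
				Encoding: gnmiLib.Encoding_PROTO,
				Subscription: []*gnmiLib.Subscription{{
					Path: &gnmiLib.Path{Elem: []*gnmiLib.PathElem{
						{Name: "interfaces"}, {Name: "interface"},
						{Name: "state"}, {Name: "counters"},
					}},
					Mode: gnmiLib.SubscriptionMode_SAMPLE,
				}},
			},
		},
	}
	if err := stream.Send(req); err != nil {
		log.Fatal(err)
	}
	for {
		reply, err := stream.Recv() // telemetry updates arrive here
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(reply)
	}
}
```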
-For Cisco devices: -It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. +For Cisco devices: +It has been optimized to support gNMI telemetry as produced by Cisco IOS XR +(64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. -### Configuration +[1]: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md -```toml +## Configuration + +```toml @sample.conf +# gNMI telemetry input plugin [[inputs.gnmi]] ## Address and port of the gNMI GRPC server addresses = ["10.49.234.114:57777"] @@ -64,10 +70,23 @@ It has been optimized to support gNMI telemetry as produced by Cisco IOS XR (64- ## If suppression is enabled, send updates at least every X seconds anyway # heartbeat_interval = "60s" -``` -### Example Output + #[[inputs.gnmi.subscription]] + # name = "descr" + # origin = "openconfig-interfaces" + # path = "/interfaces/interface/state/description" + # subscription_mode = "on_change" + + ## If tag_only is set, the subscription in question will be utilized to maintain a map of + ## tags to apply to other measurements emitted by the plugin, by matching path keys + ## All fields from the tag-only subscription will be applied as tags to other readings, + ## in the format `<subscription name>/<field name>`. + # tag_only = true ``` -ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115 in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000 -ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115 out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000 + +## Example Output + +```shell +ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115,descr/description=Foo in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000 +ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115,descr/description=Bar out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000 ``` diff --git a/plugins/inputs/gnmi/gnmi.go b/plugins/inputs/gnmi/gnmi.go index 09332cc29f532..b7b8bc22b86e9 100644 --- a/plugins/inputs/gnmi/gnmi.go +++ b/plugins/inputs/gnmi/gnmi.go @@ -1,9 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package gnmi import ( "bytes" "context" "crypto/tls" + _ "embed" "encoding/json" "fmt" "io" @@ -14,18 +16,24 @@ import ( "sync" "time" + "github.com/google/gnxi/utils/xpath" +
gnmiLib "github.com/openconfig/gnmi/proto/gnmi" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" internaltls "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" - "github.com/openconfig/gnmi/proto/gnmi" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // gNMI plugin instance type GNMI struct { Addresses []string `toml:"addresses"` @@ -44,17 +52,20 @@ type GNMI struct { Password string // Redial - Redial internal.Duration + Redial config.Duration // GRPC TLS settings EnableTLS bool `toml:"enable_tls"` internaltls.ClientConfig // Internal state - aliases map[string]string - acc telegraf.Accumulator - cancel context.CancelFunc - wg sync.WaitGroup + internalAliases map[string]string + acc telegraf.Accumulator + cancel context.CancelFunc + wg sync.WaitGroup + // Lookup/device+name/key/value + lookup map[string]map[string]map[string]interface{} + lookupMutex sync.Mutex Log telegraf.Logger } @@ -66,12 +77,19 @@ type Subscription struct { Path string // Subscription mode and interval - SubscriptionMode string `toml:"subscription_mode"` - SampleInterval internal.Duration `toml:"sample_interval"` + SubscriptionMode string `toml:"subscription_mode"` + SampleInterval config.Duration `toml:"sample_interval"` // Duplicate suppression - SuppressRedundant bool `toml:"suppress_redundant"` - HeartbeatInterval internal.Duration `toml:"heartbeat_interval"` + SuppressRedundant bool `toml:"suppress_redundant"` + HeartbeatInterval config.Duration `toml:"heartbeat_interval"` + + // Mark this subscription as a tag-only lookup source, not emitting any metric + TagOnly bool `toml:"tag_only"` +} + +func (*GNMI) SampleConfig() string { + return sampleConfig } // Start the http listener service @@ -79,14 +97,17 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { var err error var ctx context.Context var tlscfg *tls.Config - var request *gnmi.SubscribeRequest + var request *gnmiLib.SubscribeRequest c.acc = acc ctx, c.cancel = context.WithCancel(context.Background()) + c.lookupMutex.Lock() + c.lookup = make(map[string]map[string]map[string]interface{}) + c.lookupMutex.Unlock() // Validate configuration if request, err = c.newSubscribeRequest(); err != nil { return err - } else if c.Redial.Duration.Nanoseconds() <= 0 { + } else if time.Duration(c.Redial).Nanoseconds() <= 0 { return fmt.Errorf("redial duration must be positive") } @@ -102,9 +123,9 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { } // Invert explicit alias list and prefill subscription names - c.aliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) + c.internalAliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) for _, subscription := range c.Subscriptions { - var gnmiLongPath, gnmiShortPath *gnmi.Path + var gnmiLongPath, gnmiShortPath *gnmiLib.Path // Build the subscription path without keys if gnmiLongPath, err = parsePath(subscription.Origin, subscription.Path, ""); err != nil { @@ -114,8 +135,14 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { return err } - longPath, _ := 
c.handlePath(gnmiLongPath, nil, "") - shortPath, _ := c.handlePath(gnmiShortPath, nil, "") + longPath, _, err := c.handlePath(gnmiLongPath, nil, "") + if err != nil { + return fmt.Errorf("handling long-path failed: %v", err) + } + shortPath, _, err := c.handlePath(gnmiShortPath, nil, "") + if err != nil { + return fmt.Errorf("handling short-path failed: %v", err) + } name := subscription.Name // If the user didn't provide a measurement name, use last path element @@ -123,12 +150,19 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { name = path.Base(shortPath) } if len(name) > 0 { - c.aliases[longPath] = name - c.aliases[shortPath] = name + c.internalAliases[longPath] = name + c.internalAliases[shortPath] = name + } + + if subscription.TagOnly { + // Create the top-level lookup for this tag + c.lookupMutex.Lock() + c.lookup[name] = make(map[string]map[string]interface{}) + c.lookupMutex.Unlock() } } - for alias, path := range c.Aliases { - c.aliases[path] = alias + for alias, encodingPath := range c.Aliases { + c.internalAliases[encodingPath] = alias } // Create a goroutine for each device, dial and subscribe @@ -143,7 +177,7 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { select { case <-ctx.Done(): - case <-time.After(c.Redial.Duration): + case <-time.After(time.Duration(c.Redial)): } } }(addr) @@ -152,24 +186,24 @@ func (c *GNMI) Start(acc telegraf.Accumulator) error { } // Create a new gNMI SubscribeRequest -func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { +func (c *GNMI) newSubscribeRequest() (*gnmiLib.SubscribeRequest, error) { // Create subscription objects - subscriptions := make([]*gnmi.Subscription, len(c.Subscriptions)) + subscriptions := make([]*gnmiLib.Subscription, len(c.Subscriptions)) for i, subscription := range c.Subscriptions { gnmiPath, err := parsePath(subscription.Origin, subscription.Path, "") if err != nil { return nil, err } - mode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)] + mode, ok := gnmiLib.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)] if !ok { return nil, fmt.Errorf("invalid subscription mode %s", subscription.SubscriptionMode) } - subscriptions[i] = &gnmi.Subscription{ + subscriptions[i] = &gnmiLib.Subscription{ Path: gnmiPath, - Mode: gnmi.SubscriptionMode(mode), - SampleInterval: uint64(subscription.SampleInterval.Duration.Nanoseconds()), + Mode: gnmiLib.SubscriptionMode(mode), + SampleInterval: uint64(time.Duration(subscription.SampleInterval).Nanoseconds()), SuppressRedundant: subscription.SuppressRedundant, - HeartbeatInterval: uint64(subscription.HeartbeatInterval.Duration.Nanoseconds()), + HeartbeatInterval: uint64(time.Duration(subscription.HeartbeatInterval).Nanoseconds()), } } @@ -183,12 +217,12 @@ func (c *GNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { return nil, fmt.Errorf("unsupported encoding %s", c.Encoding) } - return &gnmi.SubscribeRequest{ - Request: &gnmi.SubscribeRequest_Subscribe{ - Subscribe: &gnmi.SubscriptionList{ + return &gnmiLib.SubscribeRequest{ + Request: &gnmiLib.SubscribeRequest_Subscribe{ + Subscribe: &gnmiLib.SubscriptionList{ Prefix: gnmiPath, - Mode: gnmi.SubscriptionList_STREAM, - Encoding: gnmi.Encoding(gnmi.Encoding_value[strings.ToUpper(c.Encoding)]), + Mode: gnmiLib.SubscriptionList_STREAM, + Encoding: gnmiLib.Encoding(gnmiLib.Encoding_value[strings.ToUpper(c.Encoding)]), Subscription: subscriptions, UpdatesOnly: c.UpdatesOnly, }, @@ -197,7 +231,7 @@ func (c *GNMI) newSubscribeRequest() 
(*gnmi.SubscribeRequest, error) { } // SubscribeGNMI and extract telemetry data -func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmi.SubscribeRequest) error { +func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmiLib.SubscribeRequest) error { var opt grpc.DialOption if tlscfg != nil { opt = grpc.WithTransportCredentials(credentials.NewTLS(tlscfg)) @@ -211,7 +245,7 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co } defer client.Close() - subscribeClient, err := gnmi.NewGNMIClient(client).Subscribe(ctx) + subscribeClient, err := gnmiLib.NewGNMIClient(client).Subscribe(ctx) if err != nil { return fmt.Errorf("failed to setup subscription: %v", err) } @@ -227,7 +261,7 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co c.Log.Debugf("Connection to gNMI device %s established", address) defer c.Log.Debugf("Connection to gNMI device %s closed", address) for ctx.Err() == nil { - var reply *gnmi.SubscribeResponse + var reply *gnmiLib.SubscribeResponse if reply, err = subscribeClient.Recv(); err != nil { if err != io.EOF && ctx.Err() == nil { return fmt.Errorf("aborted gNMI subscription: %v", err) @@ -240,21 +274,27 @@ func (c *GNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Co return nil } -// HandleSubscribeResponse message from gNMI and parse contained telemetry data -func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResponse) { - // Check if response is a gNMI Update and if we have a prefix to derive the measurement name - response, ok := reply.Response.(*gnmi.SubscribeResponse_Update) - if !ok { - return +func (c *GNMI) handleSubscribeResponse(address string, reply *gnmiLib.SubscribeResponse) { + switch response := reply.Response.(type) { + case *gnmiLib.SubscribeResponse_Update: + c.handleSubscribeResponseUpdate(address, response) + case *gnmiLib.SubscribeResponse_Error: + c.Log.Errorf("Subscribe error (%d), %q", response.Error.Code, response.Error.Message) } +} +// Handle SubscribeResponse_Update message from gNMI and parse contained telemetry data +func (c *GNMI) handleSubscribeResponseUpdate(address string, response *gnmiLib.SubscribeResponse_Update) { var prefix, prefixAliasPath string grouper := metric.NewSeriesGrouper() timestamp := time.Unix(0, response.Update.Timestamp) prefixTags := make(map[string]string) if response.Update.Prefix != nil { - prefix, prefixAliasPath = c.handlePath(response.Update.Prefix, prefixTags, "") + var err error + if prefix, prefixAliasPath, err = c.handlePath(response.Update.Prefix, prefixTags, ""); err != nil { + c.Log.Errorf("handling path %q failed: %v", response.Update.Prefix, err) + } } prefixTags["source"], _, _ = net.SplitHostPort(address) prefixTags["path"] = prefix @@ -277,21 +317,47 @@ func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResp // Lookup alias if alias-path has changed if aliasPath != lastAliasPath { name = prefix - if alias, ok := c.aliases[aliasPath]; ok { + if alias, ok := c.internalAliases[aliasPath]; ok { name = alias } else { c.Log.Debugf("No measurement alias for gNMI path: %s", name) } } + // Update tag lookups and discard rest of update + subscriptionKey := tags["source"] + "/" + tags["name"] + c.lookupMutex.Lock() + if _, ok := c.lookup[name]; ok { + // We are subscribed to this, so add the fields to the lookup-table + if _, ok := c.lookup[name][subscriptionKey]; !ok { + 
c.lookup[name][subscriptionKey] = make(map[string]interface{}) + } + for k, v := range fields { + c.lookup[name][subscriptionKey][path.Base(k)] = v + } + c.lookupMutex.Unlock() + // Do not process the data further as we only subscribed here for the lookup table + continue + } + + // Apply lookups if present + for subscriptionName, values := range c.lookup { + if annotations, ok := values[subscriptionKey]; ok { + for k, v := range annotations { + tags[subscriptionName+"/"+k] = fmt.Sprint(v) + } + } + } + c.lookupMutex.Unlock() + // Group metrics for k, v := range fields { key := k - if len(aliasPath) < len(key) { + if len(aliasPath) < len(key) && len(aliasPath) != 0 { // This may not be an exact prefix, due to naming style // conversion on the key. key = key[len(aliasPath)+1:] - } else { + } else if len(aliasPath) >= len(key) { // Otherwise use the last path element as the field key. key = path.Base(key) @@ -304,55 +370,60 @@ func (c *GNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResp } } - grouper.Add(name, tags, timestamp, key, v) + if err := grouper.Add(name, tags, timestamp, key, v); err != nil { + c.Log.Errorf("cannot add to grouper: %v", err) + } } lastAliasPath = aliasPath } // Add grouped measurements - for _, metric := range grouper.Metrics() { - c.acc.AddMetric(metric) + for _, metricToAdd := range grouper.Metrics() { + c.acc.AddMetric(metricToAdd) } } // HandleTelemetryField and add it to a measurement -func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { - path, aliasPath := c.handlePath(update.Path, tags, prefix) +func (c *GNMI) handleTelemetryField(update *gnmiLib.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { + gpath, aliasPath, err := c.handlePath(update.Path, tags, prefix) + if err != nil { + c.Log.Errorf("handling path %q failed: %v", update.Path, err) + } var value interface{} var jsondata []byte // Make sure a value is actually set if update.Val == nil || update.Val.Value == nil { - c.Log.Infof("Discarded empty or legacy type value with path: %q", path) + c.Log.Infof("Discarded empty or legacy type value with path: %q", gpath) return aliasPath, nil } switch val := update.Val.Value.(type) { - case *gnmi.TypedValue_AsciiVal: + case *gnmiLib.TypedValue_AsciiVal: value = val.AsciiVal - case *gnmi.TypedValue_BoolVal: + case *gnmiLib.TypedValue_BoolVal: value = val.BoolVal - case *gnmi.TypedValue_BytesVal: + case *gnmiLib.TypedValue_BytesVal: value = val.BytesVal - case *gnmi.TypedValue_DecimalVal: + case *gnmiLib.TypedValue_DecimalVal: value = float64(val.DecimalVal.Digits) / math.Pow(10, float64(val.DecimalVal.Precision)) - case *gnmi.TypedValue_FloatVal: + case *gnmiLib.TypedValue_FloatVal: value = val.FloatVal - case *gnmi.TypedValue_IntVal: + case *gnmiLib.TypedValue_IntVal: value = val.IntVal - case *gnmi.TypedValue_StringVal: + case *gnmiLib.TypedValue_StringVal: value = val.StringVal - case *gnmi.TypedValue_UintVal: + case *gnmiLib.TypedValue_UintVal: value = val.UintVal - case *gnmi.TypedValue_JsonIetfVal: + case *gnmiLib.TypedValue_JsonIetfVal: jsondata = val.JsonIetfVal - case *gnmi.TypedValue_JsonVal: + case *gnmiLib.TypedValue_JsonVal: jsondata = val.JsonVal } - name := strings.Replace(path, "-", "_", -1) + name := strings.ReplaceAll(gpath, "-", "_") fields := make(map[string]interface{}) if value != nil { fields[name] = value @@ -361,38 +432,47 @@ func (c *GNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, 
c.acc.AddError(fmt.Errorf("failed to parse JSON value: %v", err)) } else { flattener := jsonparser.JSONFlattener{Fields: fields} - flattener.FullFlattenJSON(name, value, true, true) + if err := flattener.FullFlattenJSON(name, value, true, true); err != nil { + c.acc.AddError(fmt.Errorf("failed to flatten JSON: %v", err)) + } } } return aliasPath, fields } // Parse path to path-buffer and tag-field -func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string) (string, string) { - var aliasPath string +func (c *GNMI) handlePath(gnmiPath *gnmiLib.Path, tags map[string]string, prefix string) (pathBuffer string, aliasPath string, err error) { builder := bytes.NewBufferString(prefix) // Prefix with origin - if len(path.Origin) > 0 { - builder.WriteString(path.Origin) - builder.WriteRune(':') + if len(gnmiPath.Origin) > 0 { + if _, err := builder.WriteString(gnmiPath.Origin); err != nil { + return "", "", err + } + if _, err := builder.WriteRune(':'); err != nil { + return "", "", err + } } // Parse generic keys from prefix - for _, elem := range path.Elem { + for _, elem := range gnmiPath.Elem { if len(elem.Name) > 0 { - builder.WriteRune('/') - builder.WriteString(elem.Name) + if _, err := builder.WriteRune('/'); err != nil { + return "", "", err + } + if _, err := builder.WriteString(elem.Name); err != nil { + return "", "", err + } } name := builder.String() - if _, exists := c.aliases[name]; exists { + if _, exists := c.internalAliases[name]; exists { aliasPath = name } if tags != nil { for key, val := range elem.Key { - key = strings.Replace(key, "-", "_", -1) + key = strings.ReplaceAll(key, "-", "_") // Use short-form of key if possible if _, exists := tags[key]; exists { @@ -400,76 +480,22 @@ func (c *GNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string } else { tags[key] = val } - } } } - return builder.String(), aliasPath + return builder.String(), aliasPath, nil } //ParsePath from XPath-like string to gNMI path structure -func parsePath(origin string, path string, target string) (*gnmi.Path, error) { - var err error - gnmiPath := gnmi.Path{Origin: origin, Target: target} - - if len(path) > 0 && path[0] != '/' { - return nil, fmt.Errorf("path does not start with a '/': %s", path) - } - - elem := &gnmi.PathElem{} - start, name, value, end := 0, -1, -1, -1 - - path = path + "/" - - for i := 0; i < len(path); i++ { - if path[i] == '[' { - if name >= 0 { - break - } - if end < 0 { - end = i - elem.Key = make(map[string]string) - } - name = i + 1 - } else if path[i] == '=' { - if name <= 0 || value >= 0 { - break - } - value = i + 1 - } else if path[i] == ']' { - if name <= 0 || value <= name { - break - } - elem.Key[path[name:value-1]] = strings.Trim(path[value:i], "'\"") - name, value = -1, -1 - } else if path[i] == '/' { - if name < 0 { - if end < 0 { - end = i - } - - if end > start { - elem.Name = path[start:end] - gnmiPath.Elem = append(gnmiPath.Elem, elem) - gnmiPath.Element = append(gnmiPath.Element, path[start:i]) - } - - start, name, value, end = i+1, -1, -1, -1 - elem = &gnmi.PathElem{} - } - } - } - - if name >= 0 || value >= 0 { - err = fmt.Errorf("Invalid gNMI path: %s", path) - } - +func parsePath(origin string, pathToParse string, target string) (*gnmiLib.Path, error) { + gnmiPath, err := xpath.ToGNMIPath(pathToParse) if err != nil { return nil, err } - - return &gnmiPath, nil + gnmiPath.Origin = origin + gnmiPath.Target = target + return gnmiPath, err } // Stop listener and cleanup @@ -478,73 +504,6 @@ func (c *GNMI) Stop() { 
c.wg.Wait() } -const sampleConfig = ` - ## Address and port of the gNMI GRPC server - addresses = ["10.49.234.114:57777"] - - ## define credentials - username = "cisco" - password = "cisco" - - ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") - # encoding = "proto" - - ## redial in case of failures after - redial = "10s" - - ## enable client-side TLS and define CA to authenticate the device - # enable_tls = true - # tls_ca = "/etc/telegraf/ca.pem" - # insecure_skip_verify = true - - ## define client-side TLS certificate & key to authenticate to the device - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## gNMI subscription prefix (optional, can usually be left empty) - ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths - # origin = "" - # prefix = "" - # target = "" - - ## Define additional aliases to map telemetry encoding paths to simple measurement names - #[inputs.gnmi.aliases] - # ifcounters = "openconfig:/interfaces/interface/state/counters" - - [[inputs.gnmi.subscription]] - ## Name of the measurement that will be emitted - name = "ifcounters" - - ## Origin and path of the subscription - ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths - ## - ## origin usually refers to a (YANG) data model implemented by the device - ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) - ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr - origin = "openconfig-interfaces" - path = "/interfaces/interface/state/counters" - - # Subscription mode (one of: "target_defined", "sample", "on_change") and interval - subscription_mode = "sample" - sample_interval = "10s" - - ## Suppress redundant transmissions when measured values are unchanged - # suppress_redundant = false - - ## If suppression is enabled, send updates at least every X seconds anyway - # heartbeat_interval = "60s" -` - -// SampleConfig of plugin -func (c *GNMI) SampleConfig() string { - return sampleConfig -} - -// Description of plugin -func (c *GNMI) Description() string { - return "gNMI telemetry input plugin" -} - // Gather plugin measurements (unused) func (c *GNMI) Gather(_ telegraf.Accumulator) error { return nil @@ -553,7 +512,7 @@ func (c *GNMI) Gather(_ telegraf.Accumulator) error { func New() telegraf.Input { return &GNMI{ Encoding: "proto", - Redial: internal.Duration{Duration: 10 * time.Second}, + Redial: config.Duration(10 * time.Second), } } diff --git a/plugins/inputs/gnmi/gnmi_test.go b/plugins/inputs/gnmi/gnmi_test.go index c74fbcd4a5164..e2078a676f8ef 100644 --- a/plugins/inputs/gnmi/gnmi_test.go +++ b/plugins/inputs/gnmi/gnmi_test.go @@ -9,54 +9,53 @@ import ( "testing" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/testutil" - "github.com/openconfig/gnmi/proto/gnmi" - "github.com/stretchr/testify/assert" + gnmiLib "github.com/openconfig/gnmi/proto/gnmi" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) func TestParsePath(t *testing.T) { path := "/foo/bar/bla[shoo=woo][shoop=/woop/]/z" parsed, err := parsePath("theorigin", path, "thetarget") - assert.NoError(t, err) - assert.Equal(t, parsed.Origin, "theorigin") - 
assert.Equal(t, parsed.Target, "thetarget") - assert.Equal(t, parsed.Element, []string{"foo", "bar", "bla[shoo=woo][shoop=/woop/]", "z"}) - assert.Equal(t, parsed.Elem, []*gnmi.PathElem{{Name: "foo"}, {Name: "bar"}, - {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}) + require.NoError(t, err) + require.Equal(t, "theorigin", parsed.Origin) + require.Equal(t, "thetarget", parsed.Target) + require.Equal(t, []*gnmiLib.PathElem{{Name: "foo"}, {Name: "bar"}, + {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}, parsed.Elem) parsed, err = parsePath("", "", "") - assert.NoError(t, err) - assert.Equal(t, *parsed, gnmi.Path{}) + require.NoError(t, err) + require.Equal(t, &gnmiLib.Path{}, parsed) parsed, err = parsePath("", "/foo[[", "") - assert.Nil(t, parsed) - assert.Equal(t, errors.New("Invalid gNMI path: /foo[[/"), err) + require.Nil(t, parsed) + require.NotNil(t, err) } type MockServer struct { - SubscribeF func(gnmi.GNMI_SubscribeServer) error + SubscribeF func(gnmiLib.GNMI_SubscribeServer) error GRPCServer *grpc.Server } -func (s *MockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { +func (s *MockServer) Capabilities(context.Context, *gnmiLib.CapabilityRequest) (*gnmiLib.CapabilityResponse, error) { return nil, nil } -func (s *MockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { +func (s *MockServer) Get(context.Context, *gnmiLib.GetRequest) (*gnmiLib.GetResponse, error) { return nil, nil } -func (s *MockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { +func (s *MockServer) Set(context.Context, *gnmiLib.SetRequest) (*gnmiLib.SetResponse, error) { return nil, nil } -func (s *MockServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { +func (s *MockServer) Subscribe(server gnmiLib.GNMI_SubscribeServer) error { return s.SubscribeF(server) } @@ -66,18 +65,18 @@ func TestWaitError(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { return fmt.Errorf("testerror") }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) plugin := &GNMI{ Log: testutil.Logger{}, Addresses: []string{listener.Addr().String()}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), } var acc testutil.Accumulator @@ -107,7 +106,7 @@ func TestUsernamePassword(t *testing.T) { grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { metadata, ok := metadata.FromIncomingContext(server.Context()) if !ok { return errors.New("failed to get metadata") @@ -127,7 +126,7 @@ func TestUsernamePassword(t *testing.T) { }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) plugin := &GNMI{ Log: testutil.Logger{}, @@ -135,7 +134,7 @@ func TestUsernamePassword(t *testing.T) { Username: "theusername", Password: "thepassword", Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), } var acc testutil.Accumulator @@ -159,12 +158,12 @@ func TestUsernamePassword(t *testing.T) { errors.New("aborted gNMI subscription: rpc 
error: code = Unknown desc = success")) } -func mockGNMINotification() *gnmi.Notification { - return &gnmi.Notification{ +func mockGNMINotification() *gnmiLib.Notification { + return &gnmiLib.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmi.Path{ + Prefix: &gnmiLib.Path{ Origin: "type", - Elem: []*gnmi.PathElem{ + Elem: []*gnmiLib.PathElem{ { Name: "model", Key: map[string]string{"foo": "bar"}, @@ -172,35 +171,35 @@ func mockGNMINotification() *gnmi.Notification { }, Target: "subscription", }, - Update: []*gnmi.Update{ + Update: []*gnmiLib.Update{ { - Path: &gnmi.Path{ - Elem: []*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "some"}, { Name: "path", Key: map[string]string{"name": "str", "uint64": "1234"}}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 5678}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_IntVal{IntVal: 5678}}, }, { - Path: &gnmi.Path{ - Elem: []*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "other"}, {Name: "path"}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foobar"}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "foobar"}}, }, { - Path: &gnmi.Path{ - Elem: []*gnmi.PathElem{ + Path: &gnmiLib.Path{ + Elem: []*gnmiLib.PathElem{ {Name: "other"}, {Name: "this"}, }, }, - Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "that"}}, + Val: &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_StringVal{StringVal: "that"}}, }, }, } @@ -218,7 +217,7 @@ func TestNotification(t *testing.T) { plugin: &GNMI{ Log: testutil.Logger{}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), Subscriptions: []Subscription{ { Name: "alias", @@ -229,15 +228,20 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) + err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) + if err != nil { + return err + } + err = server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}) + if err != nil { + return err + } notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, }, expected: []telegraf.Metric{ @@ -302,7 +306,7 @@ func TestNotification(t *testing.T) { plugin: &GNMI{ Log: testutil.Logger{}, Encoding: "proto", - Redial: internal.Duration{Duration: 1 * time.Second}, + Redial: config.Duration(1 * time.Second), Subscriptions: []Subscription{ { Name: "PHY_COUNTERS", @@ -313,14 +317,14 @@ func TestNotification(t *testing.T) { }, }, server: &MockServer{ - 
SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { - response := &gnmi.SubscribeResponse{ - Response: &gnmi.SubscribeResponse_Update{ - Update: &gnmi.Notification{ + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { + response := &gnmiLib.SubscribeResponse{ + Response: &gnmiLib.SubscribeResponse_Update{ + Update: &gnmiLib.Notification{ Timestamp: 1543236572000000000, - Prefix: &gnmi.Path{ + Prefix: &gnmiLib.Path{ Origin: "type", - Elem: []*gnmi.PathElem{ + Elem: []*gnmiLib.PathElem{ { Name: "state", }, @@ -337,19 +341,18 @@ func TestNotification(t *testing.T) { }, Target: "subscription", }, - Update: []*gnmi.Update{ + Update: []*gnmiLib.Update{ { - Path: &gnmi.Path{}, - Val: &gnmi.TypedValue{ - Value: &gnmi.TypedValue_IntVal{IntVal: 42}, + Path: &gnmiLib.Path{}, + Val: &gnmiLib.TypedValue{ + Value: &gnmiLib.TypedValue_IntVal{IntVal: 42}, }, }, }, }, }, } - server.Send(response) - return nil + return server.Send(response) }, }, expected: []telegraf.Metric{ @@ -367,6 +370,126 @@ func TestNotification(t *testing.T) { ), }, }, + { + name: "tagged update pair", + plugin: &GNMI{ + Log: testutil.Logger{}, + Encoding: "proto", + Redial: config.Duration(1 * time.Second), + Subscriptions: []Subscription{ + { + Name: "oc-intf-desc", + Origin: "openconfig-interfaces", + Path: "/interfaces/interface/state/description", + SubscriptionMode: "on_change", + TagOnly: true, + }, + { + Name: "oc-intf-counters", + Origin: "openconfig-interfaces", + Path: "/interfaces/interface/state/counters", + SubscriptionMode: "sample", + }, + }, + }, + server: &MockServer{ + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { + tagResponse := &gnmiLib.SubscribeResponse{ + Response: &gnmiLib.SubscribeResponse_Update{ + Update: &gnmiLib.Notification{ + Timestamp: 1543236571000000000, + Prefix: &gnmiLib.Path{}, + Update: []*gnmiLib.Update{ + { + Path: &gnmiLib.Path{ + Origin: "", + Elem: []*gnmiLib.PathElem{ + { + Name: "interfaces", + }, + { + Name: "interface", + Key: map[string]string{"name": "Ethernet1"}, + }, + { + Name: "state", + }, + { + Name: "description", + }, + }, + Target: "", + }, + Val: &gnmiLib.TypedValue{ + Value: &gnmiLib.TypedValue_StringVal{StringVal: "foo"}, + }, + }, + }, + }, + }, + } + if err := server.Send(tagResponse); err != nil { + return err + } + if err := server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_SyncResponse{SyncResponse: true}}); err != nil { + return err + } + taggedResponse := &gnmiLib.SubscribeResponse{ + Response: &gnmiLib.SubscribeResponse_Update{ + Update: &gnmiLib.Notification{ + Timestamp: 1543236572000000000, + Prefix: &gnmiLib.Path{}, + Update: []*gnmiLib.Update{ + { + Path: &gnmiLib.Path{ + Origin: "", + Elem: []*gnmiLib.PathElem{ + { + Name: "interfaces", + }, + { + Name: "interface", + Key: map[string]string{"name": "Ethernet1"}, + }, + { + Name: "state", + }, + { + Name: "counters", + }, + { + Name: "in-broadcast-pkts", + }, + }, + Target: "", + }, + Val: &gnmiLib.TypedValue{ + Value: &gnmiLib.TypedValue_IntVal{IntVal: 42}, + }, + }, + }, + }, + }, + } + return server.Send(taggedResponse) + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "oc-intf-counters", + map[string]string{ + "path": "", + "source": "127.0.0.1", + "name": "Ethernet1", + "oc-intf-desc/description": "foo", + }, + map[string]interface{}{ + "in_broadcast_pkts": 42, + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { @@ -378,7 +501,7 @@ func TestNotification(t *testing.T) { grpcServer := grpc.NewServer() 
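The "tagged update pair" case above is the behavioural test for the new tag lookup; the mechanism is easier to follow isolated in a self-contained sketch (values mirror the test's expectations):

```go
package main

import "fmt"

func main() {
	// The plugin keeps lookup[subscription name][source+"/"+name][field] = value
	// for every tag-only subscription.
	lookup := map[string]map[string]map[string]interface{}{
		"oc-intf-desc": {
			"127.0.0.1/Ethernet1": {"description": "foo"},
		},
	}

	// Tags of a later, non-tag-only update for the same interface.
	tags := map[string]string{"source": "127.0.0.1", "name": "Ethernet1"}
	subscriptionKey := tags["source"] + "/" + tags["name"]

	// Annotations are applied as "<subscription name>/<field>" tags.
	for subscriptionName, values := range lookup {
		if annotations, ok := values[subscriptionKey]; ok {
			for k, v := range annotations {
				tags[subscriptionName+"/"+k] = fmt.Sprint(v)
			}
		}
	}

	fmt.Println(tags["oc-intf-desc/description"]) // foo
}
```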
tt.server.GRPCServer = grpcServer - gnmi.RegisterGNMIServer(grpcServer, tt.server) + gnmiLib.RegisterGNMIServer(grpcServer, tt.server) var acc testutil.Accumulator err = tt.plugin.Start(&acc) @@ -403,6 +526,29 @@ func TestNotification(t *testing.T) { } } +type MockLogger struct { + telegraf.Logger + lastFormat string + lastArgs []interface{} +} + +func (l *MockLogger) Errorf(format string, args ...interface{}) { + l.lastFormat = format + l.lastArgs = args +} + +func TestSubscribeResponseError(t *testing.T) { + me := "mock error message" + var mc uint32 = 7 + ml := &MockLogger{} + plugin := &GNMI{Log: ml} + // TODO: FIX SA1019: gnmi.Error is deprecated: Do not use. + errorResponse := &gnmiLib.SubscribeResponse_Error{Error: &gnmiLib.Error{Message: me, Code: mc}} + plugin.handleSubscribeResponse("127.0.0.1:0", &gnmiLib.SubscribeResponse{Response: errorResponse}) + require.NotEmpty(t, ml.lastFormat) + require.Equal(t, []interface{}{mc, me}, ml.lastArgs) +} + func TestRedial(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) @@ -411,19 +557,18 @@ func TestRedial(t *testing.T) { Log: testutil.Logger{}, Addresses: []string{listener.Addr().String()}, Encoding: "proto", - Redial: internal.Duration{Duration: 10 * time.Millisecond}, + Redial: config.Duration(10 * time.Millisecond), } grpcServer := grpc.NewServer() gnmiServer := &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) var wg sync.WaitGroup wg.Add(1) @@ -447,17 +592,16 @@ func TestRedial(t *testing.T) { grpcServer = grpc.NewServer() gnmiServer = &MockServer{ - SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + SubscribeF: func(server gnmiLib.GNMI_SubscribeServer) error { notification := mockGNMINotification() notification.Prefix.Elem[0].Key["foo"] = "bar2" notification.Update[0].Path.Elem[1].Key["name"] = "str2" - notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} - server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) - return nil + notification.Update[0].Val = &gnmiLib.TypedValue{Value: &gnmiLib.TypedValue_BoolVal{BoolVal: false}} + return server.Send(&gnmiLib.SubscribeResponse{Response: &gnmiLib.SubscribeResponse_Update{Update: notification}}) }, GRPCServer: grpcServer, } - gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + gnmiLib.RegisterGNMIServer(grpcServer, gnmiServer) wg.Add(1) go func() { diff --git a/plugins/inputs/gnmi/sample.conf b/plugins/inputs/gnmi/sample.conf new file mode 100644 index 0000000000000..b10f7e984e620 --- /dev/null +++ b/plugins/inputs/gnmi/sample.conf @@ -0,0 +1,68 @@ +# gNMI telemetry input plugin +[[inputs.gnmi]] + ## Address and port of the gNMI GRPC server + addresses = ["10.49.234.114:57777"] + + ## define credentials + username = "cisco" + password = "cisco" + + ## gNMI encoding requested (one of: "proto", "json", "json_ietf", "bytes") + # encoding = "proto" + + ## redial in case of failures after + redial = "10s" + + ## enable client-side TLS and define CA to authenticate the device + # 
enable_tls = true + # tls_ca = "/etc/telegraf/ca.pem" + # insecure_skip_verify = true + + ## define client-side TLS certificate & key to authenticate to the device + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## gNMI subscription prefix (optional, can usually be left empty) + ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths + # origin = "" + # prefix = "" + # target = "" + + ## Define additional aliases to map telemetry encoding paths to simple measurement names + # [inputs.gnmi.aliases] + # ifcounters = "openconfig:/interfaces/interface/state/counters" + + [[inputs.gnmi.subscription]] + ## Name of the measurement that will be emitted + name = "ifcounters" + + ## Origin and path of the subscription + ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths + ## + ## origin usually refers to a (YANG) data model implemented by the device + ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) + ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr + origin = "openconfig-interfaces" + path = "/interfaces/interface/state/counters" + + # Subscription mode (one of: "target_defined", "sample", "on_change") and interval + subscription_mode = "sample" + sample_interval = "10s" + + ## Suppress redundant transmissions when measured values are unchanged + # suppress_redundant = false + + ## If suppression is enabled, send updates at least every X seconds anyway + # heartbeat_interval = "60s" + + #[[inputs.gnmi.subscription]] + # name = "descr" + # origin = "openconfig-interfaces" + # path = "/interfaces/interface/state/description" + # subscription_mode = "on_change" + + ## If tag_only is set, the subscription in question will be utilized to maintain a map of + ## tags to apply to other measurements emitted by the plugin, by matching path keys + ## All fields from the tag-only subscription will be applied as tags to other readings, + ## in the format <name of tag-only subscription>_<field name>. + # tag_only = true diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md index 6a835f1d60a4f..a50e8615fc3c6 100644 --- a/plugins/inputs/graylog/README.md +++ b/plugins/inputs/graylog/README.md @@ -4,37 +4,41 @@ The Graylog plugin can collect data from remote Graylog service URLs. Plugin currently support two type of end points:- -- multiple (Ex http://[graylog-server-ip]:12900/system/metrics/multiple) -- namespace (Ex http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}) +- multiple (e.g. `http://[graylog-server-ip]:9000/api/system/metrics/multiple`) +- namespace (e.g. `http://[graylog-server-ip]:9000/api/system/metrics/namespace/{namespace}`) -End Point can be a mix of one multiple end point and several namespaces end points +End Point can be a mix of one multiple end point and several namespaces end +points +Note: if namespace end point specified metrics array will be ignored for that +call. -Note: if namespace end point specified metrics array will be ignored for that call. +## Configuration -### Configuration: - -```toml +```toml @sample.conf # Read flattened metrics from one or more GrayLog HTTP endpoints [[inputs.graylog]] ## API endpoint, currently supported API: ## - ## - multiple (Ex http://<host>:12900/system/metrics/multiple) - ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace}) + ## - multiple (e.g. 
http://<host>:9000/api/system/metrics/multiple) + ## - namespace (e.g. http://<host>:9000/api/system/metrics/namespace/{namespace}) ## ## For namespace endpoint, the metrics array will be ignored for that call. ## Endpoint can contain namespace and multiple type calls. ## - ## Please check http://[graylog-server-ip]:12900/api-browser for full list + ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list ## of endpoints servers = [ - "http://[graylog-server-ip]:12900/system/metrics/multiple", + "http://[graylog-server-ip]:9000/api/system/metrics/multiple", ] + ## Set timeout (default 5 seconds) + # timeout = "5s" + ## Metrics list ## List of metrics can be found on Graylog webservice documentation. ## Or by hitting the web service api at: - ## http://[graylog-host]:12900/system/metrics + ## http://[graylog-host]:9000/api/system/metrics metrics = [ "jvm.cl.loaded", "jvm.memory.pools.Metaspace.committed" @@ -52,4 +56,5 @@ Note: if namespace end point specified metrics array will be ignored for that ca # insecure_skip_verify = false ``` -Please refer to GrayLog metrics api browser for full metric end points http://host:12900/api-browser +Please refer to GrayLog metrics api browser for full metric end points: +`http://host:9000/api/api-browser` diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index be5f8fc60aaa4..dfe29bd38e7d8 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -1,11 +1,13 @@ +//go:generate ../../../tools/readme_config_includer/generator package graylog import ( "bytes" + _ "embed" "encoding/base64" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -14,12 +16,16 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type ResponseMetrics struct { - total int Metrics []Metric `json:"metrics"` } @@ -31,12 +37,13 @@ type Metric struct { } type GrayLog struct { - Servers []string - Metrics []string - Username string - Password string - tls.ClientConfig + Servers []string `toml:"servers"` + Metrics []string `toml:"metrics"` + Username string `toml:"username"` + Password string `toml:"password"` + Timeout config.Duration `toml:"timeout"` + tls.ClientConfig client HTTPClient } @@ -75,50 +82,10 @@ func (c *RealHTTPClient) HTTPClient() *http.Client { return c.client } -var sampleConfig = ` - ## API endpoint, currently supported API: - ## - ## - multiple (Ex http://<host>:12900/system/metrics/multiple) - ## - namespace (Ex http://<host>:12900/system/metrics/namespace/{namespace}) - ## - ## For namespace endpoint, the metrics array will be ignored for that call. - ## Endpoint can contain namespace and multiple type calls. - ## - ## Please check http://[graylog-server-ip]:12900/api-browser for full list - ## of endpoints - servers = [ - "http://[graylog-server-ip]:12900/system/metrics/multiple", - ] - - ## Metrics list - ## List of metrics can be found on Graylog webservice documentation. 
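The graylog hunks in this region thread a new `timeout` option (a `config.Duration` that the `init()` hunk further down defaults to 5s) into the HTTP client, replacing the two hard-coded 3s and 4s deadlines. A minimal standard-library sketch of the resulting client construction:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// buildClient mirrors the shape of the change: one user-configurable
// timeout now drives both the response-header deadline and the overall
// request deadline, which were previously hard-coded independently.
func buildClient(timeout time.Duration) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			ResponseHeaderTimeout: timeout,
		},
		Timeout: timeout,
	}
}

func main() {
	client := buildClient(5 * time.Second) // the new default set in init()
	fmt.Println("client timeout:", client.Timeout)
}
```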
- ## Or by hitting the the web service api at: - ## http://[graylog-host]:12900/system/metrics - metrics = [ - "jvm.cl.loaded", - "jvm.memory.pools.Metaspace.committed" - ] - - ## Username and password - username = "" - password = "" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (h *GrayLog) SampleConfig() string { +func (*GrayLog) SampleConfig() string { return sampleConfig } -func (h *GrayLog) Description() string { - return "Read flattened metrics from one or more GrayLog HTTP endpoints" -} - // Gathers data for all servers. func (h *GrayLog) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup @@ -129,12 +96,12 @@ func (h *GrayLog) Gather(acc telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: time.Duration(h.Timeout), TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: time.Duration(h.Timeout), } h.client.SetHTTPClient(client) } @@ -178,16 +145,16 @@ func (h *GrayLog) gatherServer( if err := json.Unmarshal([]byte(resp), &dat); err != nil { return err } - for _, m_item := range dat.Metrics { + for _, mItem := range dat.Metrics { fields := make(map[string]interface{}) tags := map[string]string{ "server": host, "port": port, - "name": m_item.Name, - "type": m_item.Type, + "name": mItem.Name, + "type": mItem.Type, } - h.flatten(m_item.Fields, fields, "") - acc.AddFields(m_item.FullName, fields, tags) + h.flatten(mItem.Fields, fields, "") + acc.AddFields(mItem.FullName, fields, tags) } return nil } @@ -204,13 +171,13 @@ func (h *GrayLog) flatten(item map[string]interface{}, fields map[string]interfa id = id + "_" } for k, i := range item { - switch i.(type) { + switch i := i.(type) { case int: - fields[id+k] = i.(float64) + fields[id+k] = float64(i) case float64: - fields[id+k] = i.(float64) + fields[id+k] = i case map[string]interface{}: - h.flatten(i.(map[string]interface{}), fields, id+k) + h.flatten(i, fields, id+k) default: } } @@ -234,19 +201,19 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { - return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) + return "", -1, fmt.Errorf("invalid server URL \"%s\"", serverURL) } // Add X-Requested-By header headers["X-Requested-By"] = "Telegraf" if strings.Contains(requestURL.String(), "multiple") { m := &Messagebody{Metrics: h.Metrics} - http_body, err := json.Marshal(m) + httpBody, err := json.Marshal(m) if err != nil { - return "", -1, fmt.Errorf("Invalid list of Metrics %s", h.Metrics) + return "", -1, fmt.Errorf("invalid list of Metrics %s", h.Metrics) } method = "POST" - content = bytes.NewBuffer(http_body) + content = bytes.NewBuffer(httpBody) } req, err := http.NewRequest(method, requestURL.String(), content) if err != nil { @@ -265,14 +232,14 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = 
fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", requestURL.String(), resp.StatusCode, http.StatusText(resp.StatusCode), @@ -286,7 +253,8 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("graylog", func() telegraf.Input { return &GrayLog{ - client: &RealHTTPClient{}, + client: &RealHTTPClient{}, + Timeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index f8008f1d94c66..108d3bc28dad6 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -1,14 +1,14 @@ package graylog import ( - "io/ioutil" + "io" "net/http" "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const validJSON = ` @@ -115,7 +115,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -172,8 +172,8 @@ func TestHttpJson500(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to malformed JSON @@ -183,8 +183,8 @@ func TestHttpJsonBadJson(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to empty string as response object @@ -194,6 +194,6 @@ func TestHttpJsonEmptyResponse(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(graylog[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } diff --git a/plugins/inputs/graylog/sample.conf b/plugins/inputs/graylog/sample.conf new file mode 100644 index 0000000000000..004f22decc156 --- /dev/null +++ b/plugins/inputs/graylog/sample.conf @@ -0,0 +1,38 @@ +# Read flattened metrics from one or more GrayLog HTTP endpoints +[[inputs.graylog]] + ## API endpoint, currently supported API: + ## + ## - multiple (e.g. http://<host>:9000/api/system/metrics/multiple) + ## - namespace (e.g. http://<host>:9000/api/system/metrics/namespace/{namespace}) + ## + ## For namespace endpoint, the metrics array will be ignored for that call. + ## Endpoint can contain namespace and multiple type calls. + ## + ## Please check http://[graylog-server-ip]:9000/api/api-browser for full list + ## of endpoints + servers = [ + "http://[graylog-server-ip]:9000/api/system/metrics/multiple", + ] + + ## Set timeout (default 5 seconds) + # timeout = "5s" + + ## Metrics list + ## List of metrics can be found on Graylog webservice documentation. 
+ ## Or by hitting the web service api at: + ## http://[graylog-host]:9000/api/system/metrics + metrics = [ + "jvm.cl.loaded", + "jvm.memory.pools.Metaspace.committed" + ] + + ## Username and password + username = "" + password = "" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md index 86fbb986b696a..ac584972fb998 100644 --- a/plugins/inputs/haproxy/README.md +++ b/plugins/inputs/haproxy/README.md @@ -1,13 +1,15 @@ # HAProxy Input Plugin -The [HAProxy](http://www.haproxy.org/) input plugin gathers -[statistics](https://cbonte.github.io/haproxy-dconv/1.9/intro.html#3.3.16) -using the [stats socket](https://cbonte.github.io/haproxy-dconv/1.9/management.html#9.3) -or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management.html#9) of a HAProxy server. +The [HAProxy](http://www.haproxy.org/) input plugin gathers [statistics][1] +using the [stats socket][2] or [HTTP statistics page][3] of a HAProxy server. -### Configuration: +[1]: https://cbonte.github.io/haproxy-dconv/1.9/intro.html#3.3.16 +[2]: https://cbonte.github.io/haproxy-dconv/1.9/management.html#9.3 +[3]: https://cbonte.github.io/haproxy-dconv/1.9/management.html#9 -```toml +## Configuration + +```toml @sample.conf # Read metrics of HAProxy, via socket or HTTP stats page [[inputs.haproxy]] ## An array of address to gather stats about. Specify an ip on hostname @@ -40,39 +42,39 @@ or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management. # insecure_skip_verify = false ``` -#### HAProxy Configuration +### HAProxy Configuration -The following information may be useful when getting started, but please -consult the HAProxy documentation for complete and up to date instructions. +The following information may be useful when getting started, but please consult +the HAProxy documentation for complete and up to date instructions. -The [`stats enable`](https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#4-stats%20enable) -option can be used to add unauthenticated access over HTTP using the default -settings. To enable the unix socket begin by reading about the -[`stats socket`](https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#3.1-stats%20socket) -option. +The [`stats enable`][4] option can be used to add unauthenticated access over +HTTP using the default settings. To enable the unix socket begin by reading +about the [`stats socket`][5] option. +[4]: https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#4-stats%20enable +[5]: https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#3.1-stats%20socket -#### servers +### servers Server addresses must explicitly start with 'http' if you wish to use HAProxy -status page. Otherwise, addresses will be assumed to be an UNIX socket and -any protocol (if present) will be discarded. +status page. Otherwise, addresses will be assumed to be an UNIX socket and any +protocol (if present) will be discarded. When using socket names, wildcard expansion is supported so plugin can gather stats from multiple sockets at once. -To use HTTP Basic Auth add the username and password in the userinfo section -of the URL: `http://user:password@1.2.3.4/haproxy?stats`. The credentials are -sent via the `Authorization` header and not using the request URL. 
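As a small self-contained illustration of the point made in the re-wrapped paragraph here, with a made-up address: `net/url` exposes the userinfo section of the URL, and `SetBasicAuth` turns it into an `Authorization` header rather than leaving the credentials in the request line:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical stats address with credentials in the userinfo section.
	u, err := url.Parse("http://user:password@1.2.3.4/haproxy?stats")
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		panic(err)
	}

	// The credentials travel as an Authorization header, not as part of
	// the request URL.
	if u.User != nil {
		pass, _ := u.User.Password()
		req.SetBasicAuth(u.User.Username(), pass)
	}
	fmt.Println(req.Header.Get("Authorization")) // Basic dXNlcjpwYXNzd29yZA==
}
```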
- +To use HTTP Basic Auth add the username and password in the userinfo section of +the URL: `http://user:password@1.2.3.4/haproxy?stats`. The credentials are sent +via the `Authorization` header and not using the request URL. -#### keep_field_names +### keep_field_names By default, some of the fields are renamed from what haproxy calls them. Setting the `keep_field_names` parameter to `true` will result in the plugin keeping the original field names. The following renames are made: + - `pxname` -> `proxy` - `svname` -> `sv` - `act` -> `active_servers` @@ -86,10 +88,10 @@ The following renames are made: - `hrsp_5xx` -> `http_response.5xx` - `hrsp_other` -> `http_response.other` -### Metrics: +## Metrics For more details about collected metrics reference the [HAProxy CSV format -documentation](https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1). +documentation][6]. - haproxy - tags: @@ -110,7 +112,10 @@ documentation](https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1). - `lastsess` (int) - **all other stats** (int) -### Example Output: -``` +[6]: https://cbonte.github.io/haproxy-dconv/1.8/management.html#9.1 + +## Example Output + +```shell haproxy,server=/run/haproxy/admin.sock,proxy=public,sv=FRONTEND,type=frontend http_response.other=0i,req_rate_max=1i,comp_byp=0i,status="OPEN",rate_lim=0i,dses=0i,req_rate=0i,comp_rsp=0i,bout=9287i,comp_in=0i,mode="http",smax=1i,slim=2000i,http_response.1xx=0i,conn_rate=0i,dreq=0i,ereq=0i,iid=2i,rate_max=1i,http_response.2xx=1i,comp_out=0i,intercepted=1i,stot=2i,pid=1i,http_response.5xx=1i,http_response.3xx=0i,http_response.4xx=0i,conn_rate_max=1i,conn_tot=2i,dcon=0i,bin=294i,rate=0i,sid=0i,req_tot=2i,scur=0i,dresp=0i 1513293519000000000 ``` diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 9ec9512ea170c..bcf21349dba6a 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package haproxy import ( + _ "embed" "encoding/csv" "fmt" "io" @@ -18,6 +20,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + //CSV format: https://cbonte.github.io/haproxy-dconv/1.5/configuration.html#9.1 type haproxy struct { @@ -30,56 +36,20 @@ type haproxy struct { client *http.Client } -var sampleConfig = ` - ## An array of address to gather stats about. Specify an ip on hostname - ## with optional port. ie localhost, 10.10.3.33:1936, etc. - ## Make sure you specify the complete path to the stats endpoint - ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats - - ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats - servers = ["http://myhaproxy.com:1936/haproxy?stats"] - - ## Credentials for basic HTTP authentication - # username = "admin" - # password = "admin" - - ## You can also use local socket with standard wildcard globbing. - ## Server address not starting with 'http' will be treated as a possible - ## socket, so both examples below are valid. - # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"] - - ## By default, some of the fields are renamed from what haproxy calls them. - ## Setting this option to true results in the plugin keeping the original - ## field names. 
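The `keep_field_names` option, whose commented default continues directly below, bypasses the `fieldRenames` table shown in full later in this diff. A cut-down sketch of that lookup, assuming (as the plugin does) that unknown columns pass through unchanged:

```go
package main

import "fmt"

// A cut-down version of the rename table applied to haproxy's CSV headers.
var fieldRenames = map[string]string{
	"pxname":   "proxy",
	"svname":   "sv",
	"hrsp_4xx": "http_response.4xx",
}

// fieldName returns the column name as-is when keepFieldNames is set, and
// otherwise applies the rename table, letting unknown columns fall through.
func fieldName(col string, keepFieldNames bool) string {
	if keepFieldNames {
		return col
	}
	if renamed, ok := fieldRenames[col]; ok {
		return renamed
	}
	return col
}

func main() {
	fmt.Println(fieldName("hrsp_4xx", false)) // http_response.4xx
	fmt.Println(fieldName("hrsp_4xx", true))  // hrsp_4xx
}
```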
- # keep_field_names = false - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (r *haproxy) SampleConfig() string { +func (*haproxy) SampleConfig() string { return sampleConfig } -func (r *haproxy) Description() string { - return "Read metrics of haproxy, via socket or csv stats page" -} - // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). -func (g *haproxy) Gather(acc telegraf.Accumulator) error { - if len(g.Servers) == 0 { - return g.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc) +func (h *haproxy) Gather(acc telegraf.Accumulator) error { + if len(h.Servers) == 0 { + return h.gatherServer("http://127.0.0.1:1936/haproxy?stats", acc) } - endpoints := make([]string, 0, len(g.Servers)) - - for _, endpoint := range g.Servers { + endpoints := make([]string, 0, len(h.Servers)) + for _, endpoint := range h.Servers { if strings.HasPrefix(endpoint, "http") { endpoints = append(endpoints, endpoint) continue @@ -96,9 +66,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { if len(matches) == 0 { endpoints = append(endpoints, socketPath) } else { - for _, match := range matches { - endpoints = append(endpoints, match) - } + endpoints = append(endpoints, matches...) } } @@ -107,7 +75,7 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { for _, server := range endpoints { go func(serv string) { defer wg.Done() - if err := g.gatherServer(serv, acc); err != nil { + if err := h.gatherServer(serv, acc); err != nil { acc.AddError(err) } }(server) @@ -117,43 +85,43 @@ func (g *haproxy) Gather(acc telegraf.Accumulator) error { return nil } -func (g *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { +func (h *haproxy) gatherServerSocket(addr string, acc telegraf.Accumulator) error { socketPath := getSocketAddr(addr) c, err := net.Dial("unix", socketPath) if err != nil { - return fmt.Errorf("Could not connect to socket '%s': %s", addr, err) + return fmt.Errorf("could not connect to socket '%s': %s", addr, err) } _, errw := c.Write([]byte("show stat\n")) if errw != nil { - return fmt.Errorf("Could not write to socket '%s': %s", addr, errw) + return fmt.Errorf("could not write to socket '%s': %s", addr, errw) } - return g.importCsvResult(c, acc, socketPath) + return h.importCsvResult(c, acc, socketPath) } -func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { +func (h *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { if !strings.HasPrefix(addr, "http") { - return g.gatherServerSocket(addr, acc) + return h.gatherServerSocket(addr, acc) } - if g.client == nil { - tlsCfg, err := g.ClientConfig.TLSConfig() + if h.client == nil { + tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return err } tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } - g.client = client + h.client = client } if !strings.HasSuffix(addr, ";csv") { @@ -176,11 +144,11 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { addr = u.String() } - if g.Username != "" || g.Password != "" { - req.SetBasicAuth(g.Username, g.Password) + if h.Username != "" 
|| h.Password != "" { + req.SetBasicAuth(h.Username, h.Password) } - res, err := g.client.Do(req) + res, err := h.client.Do(req) if err != nil { return fmt.Errorf("unable to connect to haproxy server '%s': %s", addr, err) } @@ -190,7 +158,7 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { return fmt.Errorf("unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode) } - if err := g.importCsvResult(res.Body, acc, u.Host); err != nil { + if err := h.importCsvResult(res.Body, acc, u.Host); err != nil { return fmt.Errorf("unable to parse stat result from '%s': %s", addr, err) } @@ -202,9 +170,8 @@ func getSocketAddr(sock string) string { if len(socketAddr) >= 2 { return socketAddr[1] - } else { - return socketAddr[0] } + return socketAddr[0] } var typeNames = []string{"frontend", "backend", "server", "listener"} @@ -223,7 +190,7 @@ var fieldRenames = map[string]string{ "hrsp_other": "http_response.other", } -func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { +func (h *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host string) error { csvr := csv.NewReader(r) now := time.Now() @@ -260,7 +227,7 @@ func (g *haproxy) importCsvResult(r io.Reader, acc telegraf.Accumulator, host st colName := headers[i] fieldName := colName - if !g.KeepFieldNames { + if !h.KeepFieldNames { if fieldRename, ok := fieldRenames[colName]; ok { fieldName = fieldRename } diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index e05031f192675..21a1b09c10d02 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -7,12 +7,14 @@ import ( "net" "net/http" "net/http/httptest" + "os" + "path/filepath" "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -25,13 +27,15 @@ func (s statServer) serverSocket(l net.Listener) { } go func(c net.Conn) { + defer c.Close() + buf := make([]byte, 1024) n, _ := c.Read(buf) data := buf[:n] if string(data) == "show stat\n" { + //nolint:errcheck,revive // we return anyway c.Write([]byte(csvOutputSample)) - c.Close() } }(conn) } @@ -43,15 +47,18 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { username, password, ok := r.BasicAuth() if !ok { w.WriteHeader(http.StatusNotFound) - fmt.Fprint(w, "Unauthorized") + _, err := fmt.Fprint(w, "Unauthorized") + require.NoError(t, err) return } if username == "user" && password == "password" { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) - fmt.Fprint(w, "Unauthorized") + _, err := fmt.Fprint(w, "Unauthorized") + require.NoError(t, err) } })) defer ts.Close() @@ -81,13 +88,14 @@ func TestHaproxyGeneratesMetricsWithAuthentication(t *testing.T) { Servers: []string{ts.URL}, } - r.Gather(&acc) + require.NoError(t, r.Gather(&acc)) require.NotEmpty(t, acc.Errors) } func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) })) defer ts.Close() @@ -97,8 +105,7 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { var acc testutil.Accumulator - err := r.Gather(&acc) 
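The test hunks in this part of the patch consistently swap testify's `assert` for `require`; the practical difference is that `require` stops the failing test immediately, so later checks never run against bad state. A minimal illustration of the two flavours:

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Both calls check the same condition; the difference is control flow
// when the check fails.
func TestFlavours(t *testing.T) {
	err := errors.New("boom")
	assert.Error(t, err)  // on failure: records it and keeps running
	require.Error(t, err) // on failure: records it and returns immediately
}
```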
- require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "server": ts.Listener.Addr().String(), @@ -114,12 +121,13 @@ func TestHaproxyGeneratesMetricsWithoutAuthentication(t *testing.T) { func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { var randomNumber int64 var sockets [5]net.Listener - _globmask := "/tmp/test-haproxy*.sock" - _badmask := "/tmp/test-fail-haproxy*.sock" + + _globmask := filepath.Join(os.TempDir(), "test-haproxy*.sock") + _badmask := filepath.Join(os.TempDir(), "test-fail-haproxy*.sock") for i := 0; i < 5; i++ { - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) - sockname := fmt.Sprintf("/tmp/test-haproxy%d.sock", randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) + sockname := filepath.Join(os.TempDir(), fmt.Sprintf("test-haproxy%d.sock", randomNumber)) sock, err := net.Listen("unix", sockname) if err != nil { @@ -127,7 +135,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { } sockets[i] = sock - defer sock.Close() + defer sock.Close() //nolint:revive // done on purpose, closing will be executed properly s := statServer{} go s.serverSocket(sock) @@ -146,7 +154,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { for _, sock := range sockets { tags := map[string]string{ - "server": sock.Addr().String(), + "server": getSocketAddr(sock.Addr().String()), "proxy": "git", "sv": "www", "type": "server", @@ -158,7 +166,7 @@ func TestHaproxyGeneratesMetricsUsingSocket(t *testing.T) { // This mask should not match any socket r.Servers = []string{_badmask} - r.Gather(&acc) + require.NoError(t, r.Gather(&acc)) require.NotEmpty(t, acc.Errors) } @@ -171,12 +179,13 @@ func TestHaproxyDefaultGetFromLocalhost(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv") + require.Contains(t, err.Error(), "127.0.0.1:1936/haproxy?stats/;csv") } func TestHaproxyKeepFieldNames(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, csvOutputSample) + _, err := fmt.Fprint(w, csvOutputSample) + require.NoError(t, err) })) defer ts.Close() @@ -187,8 +196,7 @@ func TestHaproxyKeepFieldNames(t *testing.T) { var acc testutil.Accumulator - err := r.Gather(&acc) - require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "server": ts.Listener.Addr().String(), diff --git a/plugins/inputs/haproxy/sample.conf b/plugins/inputs/haproxy/sample.conf new file mode 100644 index 0000000000000..6efe33f9dfbf6 --- /dev/null +++ b/plugins/inputs/haproxy/sample.conf @@ -0,0 +1,30 @@ +# Read metrics of HAProxy, via socket or HTTP stats page +[[inputs.haproxy]] + ## An array of address to gather stats about. Specify an ip on hostname + ## with optional port. ie localhost, 10.10.3.33:1936, etc. + ## Make sure you specify the complete path to the stats endpoint + ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats + servers = ["http://myhaproxy.com:1936/haproxy?stats"] + + ## You can also use local socket with standard wildcard globbing. + ## Server address not starting with 'http' will be treated as a possible + ## socket, so both examples below are valid. 
+ # servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"] + + ## By default, some of the fields are renamed from what haproxy calls them. + ## Setting this option to true results in the plugin keeping the original + ## field names. + # keep_field_names = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/hddtemp/README.md b/plugins/inputs/hddtemp/README.md index d2d3e4f13ec89..0a9631db42a03 100644 --- a/plugins/inputs/hddtemp/README.md +++ b/plugins/inputs/hddtemp/README.md @@ -4,9 +4,10 @@ This plugin reads data from hddtemp daemon. Hddtemp should be installed and its daemon running. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Monitor disks' temperatures using hddtemp [[inputs.hddtemp]] ## By default, telegraf gathers temps data from all disks detected by the ## hddtemp. @@ -19,7 +20,7 @@ Hddtemp should be installed and its daemon running. # devices = ["sda", "*"] ``` -### Metrics +## Metrics - hddtemp - tags: @@ -31,10 +32,9 @@ Hddtemp should be installed and its daemon running. - fields: - temperature +## Example output -### Example output - -``` +```shell hddtemp,source=server1,unit=C,status=,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000 hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=38i 148165564700000000 hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=36i 1481655647000000000 diff --git a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go index a3fda2abd2013..41d513e4011e3 100644 --- a/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/go-hddtemp/hddtemp_test.go @@ -2,8 +2,9 @@ package hddtemp import ( "net" - "reflect" "testing" + + "github.com/stretchr/testify/require" ) func TestFetch(t *testing.T) { @@ -11,10 +12,7 @@ func TestFetch(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -24,18 +22,12 @@ func TestFetch(t *testing.T) { Unit: "C", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func TestFetchWrongAddress(t *testing.T) { _, err := New().Fetch("127.0.0.1:1") - - if err == nil { - t.Error("expecting err to be non-nil") - } + require.Error(t, err) } func TestFetchStatus(t *testing.T) { @@ -43,10 +35,7 @@ func TestFetchStatus(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -57,10 +46,7 @@ func TestFetchStatus(t *testing.T) { Status: "SLP", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func TestFetchTwoDisks(t *testing.T) { @@ -68,10 +54,7 @@ func TestFetchTwoDisks(t *testing.T) { defer l.Close() disks, err := New().Fetch(l.Addr().String()) - - if err != nil { - t.Error("expecting err to be nil") - } + require.NoError(t, err) expected := []Disk{ { @@ -88,28 +71,20 @@ func TestFetchTwoDisks(t *testing.T) { 
Status: "SLP", }, } - - if !reflect.DeepEqual(expected, disks) { - t.Error("disks' slice is different from expected") - } + require.Equal(t, expected, disks, "disks' slice is different from expected") } func serve(t *testing.T, data []byte) net.Listener { l, err := net.Listen("tcp", "127.0.0.1:0") - - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) go func(t *testing.T) { conn, err := l.Accept() + require.NoError(t, err) - if err != nil { - t.Fatal(err) - } - - conn.Write(data) - conn.Close() + _, err = conn.Write(data) + require.NoError(t, err) + require.NoError(t, conn.Close()) }(t) return l diff --git a/plugins/inputs/hddtemp/hddtemp.go b/plugins/inputs/hddtemp/hddtemp.go index 0f084ac219bff..4904e6ea2393a 100644 --- a/plugins/inputs/hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/hddtemp.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package hddtemp import ( + _ "embed" "net" "github.com/influxdata/telegraf" @@ -8,6 +10,10 @@ import ( gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const defaultAddress = "127.0.0.1:7634" type HDDTemp struct { @@ -20,10 +26,6 @@ type Fetcher interface { Fetch(address string) ([]gohddtemp.Disk, error) } -func (_ *HDDTemp) Description() string { - return "Monitor disks' temperatures using hddtemp" -} - var hddtempSampleConfig = ` ## By default, telegraf gathers temps data from all disks detected by the ## hddtemp. @@ -36,8 +38,8 @@ var hddtempSampleConfig = ` # devices = ["sda", "*"] ` -func (_ *HDDTemp) SampleConfig() string { - return hddtempSampleConfig +func (*HDDTemp) SampleConfig() string { + return sampleConfig } func (h *HDDTemp) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index f299c2ac66c4b..44be91bb28bf9 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -3,16 +3,16 @@ package hddtemp import ( "testing" - hddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" + "github.com/influxdata/telegraf/testutil" ) type mockFetcher struct { } -func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { +func (h *mockFetcher) Fetch(_ string) ([]hddtemp.Disk, error) { return []hddtemp.Disk{ { DeviceName: "Disk1", @@ -27,24 +27,23 @@ func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { Unit: "C", }, }, nil - } func newMockFetcher() *mockFetcher { return &mockFetcher{} } func TestFetch(t *testing.T) { - hddtemp := &HDDTemp{ + hddTemp := &HDDTemp{ fetcher: newMockFetcher(), Address: "localhost", Devices: []string{"*"}, } acc := &testutil.Accumulator{} - err := hddtemp.Gather(acc) + err := hddTemp.Gather(acc) require.NoError(t, err) - assert.Equal(t, acc.NFields(), 2) + require.Equal(t, acc.NFields(), 2) var tests = []struct { fields map[string]interface{} @@ -79,5 +78,4 @@ func TestFetch(t *testing.T) { for _, test := range tests { acc.AssertContainsTaggedFields(t, "hddtemp", test.fields, test.tags) } - } diff --git a/plugins/inputs/hddtemp/sample.conf b/plugins/inputs/hddtemp/sample.conf new file mode 100644 index 0000000000000..2bd0a1cf99876 --- /dev/null +++ 
b/plugins/inputs/hddtemp/sample.conf @@ -0,0 +1,11 @@ +# Monitor disks' temperatures using hddtemp +[[inputs.hddtemp]] + ## By default, telegraf gathers temps data from all disks detected by the + ## hddtemp. + ## + ## Only collect temps from the selected disks. + ## + ## A * as the device name will return the temperature values of all disks. + ## + # address = "127.0.0.1:7634" + # devices = ["sda", "*"] diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 59abd82562672..f94917a7b309d 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -1,11 +1,14 @@ # HTTP Input Plugin -The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The endpoint should have metrics formatted in one of the supported [input data formats](../../../docs/DATA_FORMATS_INPUT.md). Each data format has its own unique set of configuration options which can be added to the input configuration. +The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The +endpoint should have metrics formatted in one of the supported [input data +formats](../../../docs/DATA_FORMATS_INPUT.md). Each data format has its own +unique set of configuration options which can be added to the input +configuration. +## Configuration -### Configuration: - -```toml +```toml @sample.conf # Read formatted metrics from one or more HTTP endpoints [[inputs.http]] ## One or more URLs from which to read formatted metrics @@ -34,6 +37,15 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The # username = "username" # password = "pa$$word" + ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. + # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://indentityprovider/oauth2/v1/token" + # scopes = ["urn:opc:idm:__myscopes__"] + + ## HTTP Proxy support + # http_proxy_url = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -41,6 +53,16 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "5m" + ## Amount of time allowed to complete the HTTP request # timeout = "5s" @@ -55,12 +77,24 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ``` -### Metrics: +## Metrics -The metrics collected by this input plugin will depend on the configured `data_format` and the payload returned by the HTTP endpoint(s). +The metrics collected by this input plugin will depend on the configured +`data_format` and the payload returned by the HTTP endpoint(s). The default values below are added if the input format does not specify a value: - http - tags: - url + +## Optional Cookie Authentication Settings + +The optional Cookie Authentication Settings will retrieve a cookie from the +given authorization endpoint, and use it in subsequent API requests. 
This is +useful for services that do not provide OAuth or Basic Auth authentication, +e.g. the [Tesla Powerwall API][tesla], which uses a Cookie Auth Body to retrieve +an authorization cookie. The Cookie Auth Renewal interval will renew the +authorization by retrieving a new cookie at the given interval. + +[tesla]: https://www.tesla.com/support/energy/powerwall/own/monitoring-from-home-network diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c247d40076620..c290ecf7028eb 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -1,21 +1,28 @@ +//go:generate ../../../tools/readme_config_includer/generator package http import ( + "bytes" + "context" + _ "embed" "fmt" "io" "io/ioutil" "net/http" + "os" "strings" "sync" - "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/common/tls" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/parsers" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type HTTP struct { URLs []string `toml:"urls"` Method string `toml:"method"` @@ -27,92 +34,32 @@ type HTTP struct { // HTTP Basic Auth Credentials Username string `toml:"username"` Password string `toml:"password"` - tls.ClientConfig // Absolute path to file with Bearer token BearerToken string `toml:"bearer_token"` SuccessStatusCodes []int `toml:"success_status_codes"` - Timeout internal.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` - client *http.Client + httpconfig.HTTPClientConfig - // The parser will automatically be set by Telegraf core code because - // this plugin implements the ParserInput interface (i.e. the SetParser method) - parser parsers.Parser + client *http.Client + parserFunc telegraf.ParserFunc } -var sampleConfig = ` - ## One or more URLs from which to read formatted metrics - urls = [ - "http://localhost/metrics" - ] - - ## HTTP method - # method = "GET" - - ## Optional HTTP headers - # headers = {"X-Special-Header" = "Special-Value"} - - ## Optional file with Bearer token - ## file content is added as an Authorization header - # bearer_token = "/path/to/file" - - ## Optional HTTP Basic Auth Credentials - # username = "username" - # password = "pa$$word" - - ## HTTP entity-body to send with POST/PUT requests. - # body = "" - - ## HTTP Content-Encoding for write request body, can be set to "gzip" to - ## compress body or "identity" to apply no encoding. - # content_encoding = "identity" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Amount of time allowed to complete the HTTP request - # timeout = "5s" - - ## List of success status codes - # success_status_codes = [200] - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - # data_format = "influx" -` - -// SampleConfig returns the default configuration of the Input func (*HTTP) SampleConfig() string { return sampleConfig } -// Description returns a one-sentence description on the Input -func (*HTTP) Description() string { - return "Read formatted metrics from one or more HTTP endpoints" -} - func (h *HTTP) Init() error { - tlsCfg, err := h.ClientConfig.TLSConfig() + ctx := context.Background() + client, err := h.HTTPClientConfig.CreateClient(ctx, h.Log) if err != nil { return err } - h.client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: h.Timeout.Duration, - } + h.client = client // Set default as [200] if len(h.SuccessStatusCodes) == 0 { @@ -140,9 +87,9 @@ func (h *HTTP) Gather(acc telegraf.Accumulator) error { return nil } -// SetParser takes the data_format from the config and finds the right parser for that format -func (h *HTTP) SetParser(parser parsers.Parser) { - h.parser = parser +// SetParserFunc takes the data_format from the config and finds the right parser for that format +func (h *HTTP) SetParserFunc(fn telegraf.ParserFunc) { + h.parserFunc = fn } // Gathers data from a particular URL @@ -160,7 +107,6 @@ func (h *HTTP) gatherURL( if err != nil { return err } - defer body.Close() request, err := http.NewRequest(h.Method, url, body) if err != nil { @@ -168,7 +114,7 @@ func (h *HTTP) gatherURL( } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return err } @@ -213,14 +159,19 @@ func (h *HTTP) gatherURL( h.SuccessStatusCodes) } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { - return err + return fmt.Errorf("reading body failed: %v", err) } - metrics, err := h.parser.Parse(b) + // Instantiate a new parser for the new data to avoid trouble with stateful parsers + parser, err := h.parserFunc() if err != nil { - return err + return fmt.Errorf("instantiating parser failed: %v", err) + } + metrics, err := parser.Parse(b) + if err != nil { + return fmt.Errorf("parsing metrics failed: %v", err) } for _, metric := range metrics { @@ -233,23 +184,31 @@ func (h *HTTP) gatherURL( return nil } -func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) { +func makeRequestBodyReader(contentEncoding, body string) (io.Reader, error) { + if body == "" { + return nil, nil + } + var reader io.Reader = strings.NewReader(body) if contentEncoding == "gzip" { rc, err := internal.CompressWithGzip(reader) if err != nil { return nil, err } - return rc, nil + data, err := ioutil.ReadAll(rc) + if err != nil { + return nil, err + } + return bytes.NewReader(data), nil } - return ioutil.NopCloser(reader), nil + + return reader, nil } func init() { inputs.Add("http", func() telegraf.Input { return &HTTP{ - Timeout: internal.Duration{Duration: time.Second * 5}, - Method: "GET", + Method: "GET", } }) } diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 993eda7321c0f..eec056979b701 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -3,18 +3,27 @@ package http_test import ( "compress/gzip" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" + "net/url" "testing" + "time" - plugin 
"github.com/influxdata/telegraf/plugins/inputs/http" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" + "github.com/influxdata/telegraf/plugins/common/oauth" + httpplugin "github.com/influxdata/telegraf/plugins/inputs/http" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" + "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) -func TestHTTPwithJSONFormat(t *testing.T) { +func TestHTTPWithJSONFormat(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { _, _ = w.Write([]byte(simpleJSON)) @@ -24,20 +33,21 @@ func TestHTTPwithJSONFormat(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, + Log: testutil.Logger{}, } metricName := "metricName" - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "metricName", + plugin.SetParserFunc(func() (telegraf.Parser, error) { + p := &json.Parser{MetricName: "metricName"} + err := p.Init() + return p, err }) - plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) require.Len(t, acc.Metrics, 1) @@ -47,7 +57,7 @@ func TestHTTPwithJSONFormat(t *testing.T) { require.Equal(t, metric.Measurement, metricName) require.Len(t, acc.Metrics[0].Fields, 1) require.Equal(t, acc.Metrics[0].Fields["a"], 1.2) - require.Equal(t, acc.Metrics[0].Tags["url"], url) + require.Equal(t, acc.Metrics[0].Tags["url"], address) } func TestHTTPHeaders(t *testing.T) { @@ -66,20 +76,54 @@ func TestHTTPHeaders(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, Headers: map[string]string{header: headerValue}, + Log: testutil.Logger{}, + } + + plugin.SetParserFunc(func() (telegraf.Parser, error) { + p := &json.Parser{MetricName: "metricName"} + err := p.Init() + return p, err + }) + + var acc testutil.Accumulator + require.NoError(t, plugin.Init()) + require.NoError(t, acc.GatherError(plugin.Gather)) +} + +func TestHTTPContentLengthHeader(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/endpoint" { + if r.Header.Get("Content-Length") != "" { + _, _ = w.Write([]byte(simpleJSON)) + } else { + w.WriteHeader(http.StatusForbidden) + } + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer fakeServer.Close() + + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, + Headers: map[string]string{}, + Body: "{}", + Log: testutil.Logger{}, } - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "metricName", + plugin.SetParserFunc(func() (telegraf.Parser, error) { + p := &json.Parser{MetricName: "metricName"} + err := p.Init() + return p, err }) - plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, 
acc.GatherError(plugin.Gather)) } @@ -89,20 +133,20 @@ func TestInvalidStatusCode(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, + Log: testutil.Logger{}, } - metricName := "metricName" - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: metricName, + plugin.SetParserFunc(func() (telegraf.Parser, error) { + p := &json.Parser{MetricName: "metricName"} + err := p.Init() + return p, err }) - plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.Error(t, acc.GatherError(plugin.Gather)) } @@ -112,21 +156,21 @@ func TestSuccessStatusCodes(t *testing.T) { })) defer fakeServer.Close() - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, SuccessStatusCodes: []int{200, 202}, + Log: testutil.Logger{}, } - metricName := "metricName" - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: metricName, + plugin.SetParserFunc(func() (telegraf.Parser, error) { + p := &json.Parser{MetricName: "metricName"} + err := p.Init() + return p, err }) - plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -140,19 +184,20 @@ func TestMethod(t *testing.T) { })) defer fakeServer.Close() - plugin := &plugin.HTTP{ + plugin := &httpplugin.HTTP{ URLs: []string{fakeServer.URL}, Method: "POST", + Log: testutil.Logger{}, } - p, _ := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: "metricName", + plugin.SetParserFunc(func() (telegraf.Parser, error) { + p := &json.Parser{MetricName: "metricName"} + err := p.Init() + return p, err }) - plugin.SetParser(p) var acc testutil.Accumulator - plugin.Init() + require.NoError(t, plugin.Init()) require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -161,26 +206,32 @@ const simpleJSON = ` "a": 1.2 } ` +const simpleCSVWithHeader = ` +# Simple CSV with header(s) +a,b,c +1.2,3.1415,ok +` func TestBodyAndContentEncoding(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() - url := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + address := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) tests := []struct { name string - plugin *plugin.HTTP + plugin *httpplugin.HTTP queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) }{ { name: "no body", - plugin: &plugin.HTTP{ + plugin: &httpplugin.HTTP{ Method: "POST", - URLs: []string{url}, + URLs: []string{address}, + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte(""), body) w.WriteHeader(http.StatusOK) @@ -188,13 +239,14 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "post body", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "POST", Body: "test", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) 
w.WriteHeader(http.StatusOK) @@ -202,13 +254,14 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "get method body is sent", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "GET", Body: "test", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -216,18 +269,19 @@ func TestBodyAndContentEncoding(t *testing.T) { }, { name: "gzip encoding", - plugin: &plugin.HTTP{ - URLs: []string{url}, + plugin: &httpplugin.HTTP{ + URLs: []string{address}, Method: "GET", Body: "test", ContentEncoding: "gzip", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.Header.Get("Content-Encoding"), "gzip") gr, err := gzip.NewReader(r.Body) require.NoError(t, err) - body, err := ioutil.ReadAll(gr) + body, err := io.ReadAll(gr) require.NoError(t, err) require.Equal(t, []byte("test"), body) w.WriteHeader(http.StatusOK) @@ -240,15 +294,153 @@ func TestBodyAndContentEncoding(t *testing.T) { tt.queryHandlerFunc(t, w, r) }) - parser, err := parsers.NewParser(&parsers.Config{DataFormat: "influx"}) - require.NoError(t, err) + tt.plugin.SetParserFunc(func() (telegraf.Parser, error) { + return parsers.NewParser(&parsers.Config{DataFormat: "influx"}) + }) + + var acc testutil.Accumulator + require.NoError(t, tt.plugin.Init()) + require.NoError(t, tt.plugin.Gather(&acc)) + }) + } +} - tt.plugin.SetParser(parser) +type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + +func TestOAuthClientCredentialsGrant(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + var token = "2YotnFZFEjr1zCsicMWpAA" + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *httpplugin.HTTP + tokenHandler TestHandlerFunc + handler TestHandlerFunc + }{ + { + name: "no credentials", + plugin: &httpplugin.HTTP{ + URLs: []string{u.String()}, + Log: testutil.Logger{}, + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Len(t, r.Header["Authorization"], 0) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "success", + plugin: &httpplugin.HTTP{ + URLs: []string{u.String() + "/write"}, + HTTPClientConfig: httpconfig.HTTPClientConfig{ + OAuth2Config: oauth.OAuth2Config{ + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + }, + Log: testutil.Logger{}, + }, + tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + values := url.Values{} + values.Add("access_token", token) + values.Add("token_type", "bearer") + values.Add("expires_in", "3600") + _, err := w.Write([]byte(values.Encode())) + require.NoError(t, err) + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) + w.WriteHeader(http.StatusOK) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + tt.handler(t, w, r) + case "/token": + tt.tokenHandler(t, w, r) + } + }) + 
+ tt.plugin.SetParserFunc(func() (telegraf.Parser, error) { + p := &value.Parser{ + MetricName: "metric", + DataType: "string", + } + err := p.Init() + return p, err + }) + + err = tt.plugin.Init() + require.NoError(t, err) var acc testutil.Accumulator - tt.plugin.Init() err = tt.plugin.Gather(&acc) require.NoError(t, err) }) } } + +func TestHTTPWithCSVFormat(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/endpoint" { + _, _ = w.Write([]byte(simpleCSVWithHeader)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer fakeServer.Close() + + address := fakeServer.URL + "/endpoint" + plugin := &httpplugin.HTTP{ + URLs: []string{address}, + Log: testutil.Logger{}, + } + + plugin.SetParserFunc(func() (telegraf.Parser, error) { + parser := &csv.Parser{ + MetricName: "metricName", + SkipRows: 3, + ColumnNames: []string{"a", "b", "c"}, + TagColumns: []string{"c"}, + } + err := parser.Init() + return parser, err + }) + + expected := []telegraf.Metric{ + testutil.MustMetric("metricName", + map[string]string{ + "url": address, + "c": "ok", + }, + map[string]interface{}{ + "a": 1.2, + "b": 3.1415, + }, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, plugin.Init()) + require.NoError(t, acc.GatherError(plugin.Gather)) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + + // Run the parser a second time to test for correct stateful handling + acc.ClearMetrics() + require.NoError(t, acc.GatherError(plugin.Gather)) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} diff --git a/plugins/inputs/http/sample.conf b/plugins/inputs/http/sample.conf new file mode 100644 index 0000000000000..2d8b6116eff7b --- /dev/null +++ b/plugins/inputs/http/sample.conf @@ -0,0 +1,66 @@ +# Read formatted metrics from one or more HTTP endpoints +[[inputs.http]] + ## One or more URLs from which to read formatted metrics + urls = [ + "http://localhost/metrics" + ] + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP entity-body to send with POST/PUT requests. + # body = "" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" + + ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2. 
+ # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://indentityprovider/oauth2/v1/token" + # scopes = ["urn:opc:idm:__myscopes__"] + + ## HTTP Proxy support + # http_proxy_url = "" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Optional Cookie authentication + # cookie_auth_url = "https://localhost/authMe" + # cookie_auth_method = "POST" + # cookie_auth_username = "username" + # cookie_auth_password = "pa$$word" + # cookie_auth_headers = '{"Content-Type": "application/json", "X-MY-HEADER":"hello"}' + # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}' + ## cookie_auth_renewal not set or set to "0" will auth once and never renew the cookie + # cookie_auth_renewal = "5m" + + ## Amount of time allowed to complete the HTTP request + # timeout = "5s" + + ## List of success status codes + # success_status_codes = [200] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + # data_format = "influx" + diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md index 05e48058667ef..fefacae42e9f6 100644 --- a/plugins/inputs/http_listener_v2/README.md +++ b/plugins/inputs/http_listener_v2/README.md @@ -1,23 +1,29 @@ # HTTP Listener v2 Input Plugin HTTP Listener v2 is a service input plugin that listens for metrics sent via -HTTP. Metrics may be sent in any supported [data format][data_format]. +HTTP. Metrics may be sent in any supported [data format][data_format]. For +metrics in [InfluxDB Line Protocol][line_protocol] it's recommended to use the +[`influxdb_listener`][influxdb_listener] or +[`influxdb_v2_listener`][influxdb_v2_listener] instead. **Note:** The plugin previously known as `http_listener` has been renamed `influxdb_listener`. If you would like Telegraf to act as a proxy/relay for -InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener]. +InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener] or +[`influxdb_v2_listener`][influxdb_v2_listener]. -### Configuration: +## Configuration -This is a sample configuration for the plugin. - -```toml +```toml @sample.conf +# Generic HTTP write listener [[inputs.http_listener_v2]] ## Address and port to host HTTP listener on service_address = ":8080" - ## Path to listen to. - # path = "/telegraf" + ## Paths to listen to. + # paths = ["/telegraf"] + + ## Save path as http_listener_v2_path tag if set to true + # path_tag = false ## HTTP methods to accept. # methods = ["POST", "PUT"] @@ -60,26 +66,32 @@ This is a sample configuration for the plugin. data_format = "influx" ``` -### Metrics: +## Metrics -Metrics are collected from the part of the request specified by the `data_source` param and are parsed depending on the value of `data_format`. +Metrics are collected from the part of the request specified by the +`data_source` param and are parsed depending on the value of `data_format`. 
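+
+For example, with `path_tag = true` each metric parsed from a request to
+`/telegraf` also carries the tag `http_listener_v2_path=/telegraf`.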
-### Troubleshooting: +## Troubleshooting -**Send Line Protocol** -``` +Send Line Protocol: + +```shell curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -**Send JSON** -``` +Send JSON: + +```shell curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary '{"value1": 42, "value2": 42}' ``` -**Send query params** -``` +Send query params: + +```shell curl -i -XGET 'http://localhost:8080/telegraf?host=server01&value=0.42' ``` [data_format]: /docs/DATA_FORMATS_INPUT.md [influxdb_listener]: /plugins/inputs/influxdb_listener/README.md +[line_protocol]: https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/ +[influxdb_v2_listener]: /plugins/inputs/influxdb_v2_listener/README.md diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go index 1023c0d10bcf5..ed5dda2525347 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -1,10 +1,13 @@ +//go:generate ../../../tools/readme_config_includer/generator package http_listener_v2 import ( "compress/gzip" "crypto/subtle" "crypto/tls" - "io/ioutil" + _ "embed" + "errors" + "io" "net" "net/http" "net/url" @@ -12,21 +15,29 @@ import ( "sync" "time" + "github.com/golang/snappy" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // defaultMaxBodySize is the default maximum request body size, in bytes. // if the request body is over this size, we will return an HTTP 413 error. // 500 MB const defaultMaxBodySize = 500 * 1024 * 1024 const ( - body = "body" - query = "query" + body = "body" + query = "query" + pathTag = "http_listener_v2_path" ) // TimeFunc provides a timestamp for the metrics @@ -35,22 +46,27 @@ type TimeFunc func() time.Time // HTTPListenerV2 is an input plugin that collects external metrics sent via HTTP type HTTPListenerV2 struct { ServiceAddress string `toml:"service_address"` - Path string `toml:"path"` + Path string `toml:"path" deprecated:"1.20.0;use 'paths' instead"` + Paths []string `toml:"paths"` + PathTag bool `toml:"path_tag"` Methods []string `toml:"methods"` DataSource string `toml:"data_source"` - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - MaxBodySize internal.Size `toml:"max_body_size"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + MaxBodySize config.Size `toml:"max_body_size"` Port int `toml:"port"` BasicUsername string `toml:"basic_username"` BasicPassword string `toml:"basic_password"` HTTPHeaderTags map[string]string `toml:"http_header_tags"` + tlsint.ServerConfig + tlsConf *tls.Config TimeFunc Log telegraf.Logger - wg sync.WaitGroup + wg sync.WaitGroup + close chan struct{} listener net.Listener @@ -58,62 +74,10 @@ type HTTPListenerV2 struct { acc telegraf.Accumulator } -const sampleConfig = ` - ## Address and port to host HTTP listener on - service_address = ":8080" - - ## Path to listen to. 
- # path = "/telegraf" - - ## HTTP methods to accept. - # methods = ["POST", "PUT"] - - ## maximum duration before timing out read of the request - # read_timeout = "10s" - ## maximum duration before timing out write of the response - # write_timeout = "10s" - - ## Maximum allowed http request body size in bytes. - ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) - # max_body_size = "500MB" - - ## Part of the request to consume. Available options are "body" and - ## "query". - # data_source = "body" - - ## Set one or more allowed client CA certificate file names to - ## enable mutually authenticated TLS connections - # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Add service certificate and key - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Optional username and password to accept for HTTP basic authentication. - ## You probably want to make sure you have TLS configured above for this. - # basic_username = "foobar" - # basic_password = "barfoo" - - ## Optional setting to map http headers into tags - ## If the http header is not present on the request, no corresponding tag will be added - ## If multiple instances of the http header are present, only the first value will be used - # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` - -func (h *HTTPListenerV2) SampleConfig() string { +func (*HTTPListenerV2) SampleConfig() string { return sampleConfig } -func (h *HTTPListenerV2) Description() string { - return "Generic HTTP write listener" -} - func (h *HTTPListenerV2) Gather(_ telegraf.Accumulator) error { return nil } @@ -124,30 +88,66 @@ func (h *HTTPListenerV2) SetParser(parser parsers.Parser) { // Start starts the http listener service. 
func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } - if h.ReadTimeout.Duration < time.Second { - h.ReadTimeout.Duration = time.Second * 10 + if h.ReadTimeout < config.Duration(time.Second) { + h.ReadTimeout = config.Duration(time.Second * 10) + } + if h.WriteTimeout < config.Duration(time.Second) { + h.WriteTimeout = config.Duration(time.Second * 10) } - if h.WriteTimeout.Duration < time.Second { - h.WriteTimeout.Duration = time.Second * 10 + + // Append h.Path to h.Paths + if h.Path != "" && !choice.Contains(h.Path, h.Paths) { + h.Paths = append(h.Paths, h.Path) } h.acc = acc - tlsConf, err := h.ServerConfig.TLSConfig() - if err != nil { - return err - } + server := h.createHTTPServer() + + h.wg.Add(1) + go func() { + defer h.wg.Done() + if err := server.Serve(h.listener); err != nil { + if !errors.Is(err, net.ErrClosed) { + h.Log.Errorf("Serve failed: %v", err) + } + close(h.close) + } + }() + + h.Log.Infof("Listening on %s", h.listener.Addr().String()) - server := &http.Server{ + return nil +} + +func (h *HTTPListenerV2) createHTTPServer() *http.Server { + return &http.Server{ Addr: h.ServiceAddress, Handler: h, - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, - TLSConfig: tlsConf, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), + TLSConfig: h.tlsConf, + } +} + +// Stop cleans up all resources +func (h *HTTPListenerV2) Stop() { + if h.listener != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive + h.listener.Close() + } + h.wg.Wait() +} + +func (h *HTTPListenerV2) Init() error { + tlsConf, err := h.ServerConfig.TLSConfig() + if err != nil { + return err } var listener net.Listener @@ -159,30 +159,17 @@ func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { if err != nil { return err } + h.tlsConf = tlsConf h.listener = listener h.Port = listener.Addr().(*net.TCPAddr).Port - h.wg.Add(1) - go func() { - defer h.wg.Done() - server.Serve(h.listener) - }() - - h.Log.Infof("Listening on %s", listener.Addr().String()) - return nil } -// Stop cleans up all resources -func (h *HTTPListenerV2) Stop() { - h.listener.Close() - h.wg.Wait() -} - func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { handler := h.serveWrite - if req.URL.Path != h.Path { + if !choice.Contains(req.URL.Path, h.Paths) { handler = http.NotFound } @@ -190,9 +177,18 @@ func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { } func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) { + select { + case <-h.close: + res.WriteHeader(http.StatusGone) + return + default: + } + // Check that the content length is not too large for us to handle. 
- if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res) + if req.ContentLength > int64(h.MaxBodySize) { + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return } @@ -205,7 +201,9 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } } if !isAcceptedMethod { - methodNotAllowed(res) + if err := methodNotAllowed(res); err != nil { + h.Log.Debugf("error in method-not-allowed: %v", err) + } return } @@ -226,7 +224,9 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) metrics, err := h.Parse(bytes) if err != nil { h.Log.Debugf("Parse error: %s", err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } @@ -238,6 +238,10 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } } + if h.PathTag { + m.AddTag(pathTag, req.URL.Path) + } + h.acc.AddMetric(m) } @@ -245,28 +249,60 @@ func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) } func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) ([]byte, bool) { - body := req.Body + encoding := req.Header.Get("Content-Encoding") - // Handle gzip request bodies - if req.Header.Get("Content-Encoding") == "gzip" { - var err error - body, err = gzip.NewReader(req.Body) + switch encoding { + case "gzip": + r, err := gzip.NewReader(req.Body) if err != nil { h.Log.Debug(err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } - defer body.Close() - } - - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) - bytes, err := ioutil.ReadAll(body) - if err != nil { - tooLarge(res) - return nil, false + defer r.Close() + maxReader := http.MaxBytesReader(res, r, int64(h.MaxBodySize)) + bytes, err := io.ReadAll(maxReader) + if err != nil { + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } + return nil, false + } + return bytes, true + case "snappy": + defer req.Body.Close() + bytes, err := io.ReadAll(req.Body) + if err != nil { + h.Log.Debug(err.Error()) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } + return nil, false + } + // snappy block format is only supported by decode/encode not snappy reader/writer + bytes, err = snappy.Decode(nil, bytes) + if err != nil { + h.Log.Debug(err.Error()) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } + return nil, false + } + return bytes, true + default: + defer req.Body.Close() + bytes, err := io.ReadAll(req.Body) + if err != nil { + h.Log.Debug(err.Error()) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } + return nil, false + } + return bytes, true } - - return bytes, true } func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request) ([]byte, bool) { @@ -275,34 +311,34 @@ func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request query, err := url.QueryUnescape(rawQuery) if err != nil { h.Log.Debugf("Error parsing query: %s", err.Error()) - badRequest(res) + if err := badRequest(res); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return nil, false } return []byte(query), true } -func tooLarge(res http.ResponseWriter) { +func tooLarge(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") 
res.WriteHeader(http.StatusRequestEntityTooLarge) - res.Write([]byte(`{"error":"http: request body too large"}`)) + _, err := res.Write([]byte(`{"error":"http: request body too large"}`)) + return err } -func methodNotAllowed(res http.ResponseWriter) { +func methodNotAllowed(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusMethodNotAllowed) - res.Write([]byte(`{"error":"http: method not allowed"}`)) -} - -func internalServerError(res http.ResponseWriter) { - res.Header().Set("Content-Type", "application/json") - res.WriteHeader(http.StatusInternalServerError) + _, err := res.Write([]byte(`{"error":"http: method not allowed"}`)) + return err } -func badRequest(res http.ResponseWriter) { +func badRequest(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(`{"error":"http: bad request"}`)) + _, err := res.Write([]byte(`{"error":"http: bad request"}`)) + return err } func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { @@ -311,7 +347,7 @@ func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.Re if !ok || subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 || subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 { http.Error(res, "Unauthorized.", http.StatusUnauthorized) return } @@ -326,9 +361,10 @@ func init() { return &HTTPListenerV2{ ServiceAddress: ":8080", TimeFunc: time.Now, - Path: "/telegraf", + Paths: []string{"/telegraf"}, Methods: []string{"POST", "PUT"}, DataSource: body, + close: make(chan struct{}), } }) } diff --git a/plugins/inputs/http_listener_v2/http_listener_v2_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go index 4457fcacda79d..fda803ef22abe 100644 --- a/plugins/inputs/http_listener_v2/http_listener_v2_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -4,19 +4,22 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" "net/http" "net/url" + "os" "runtime" "strconv" "sync" "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/golang/snappy" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/form_urlencoded" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -52,8 +55,9 @@ func newTestHTTPListenerV2() *HTTPListenerV2 { Methods: []string{"POST"}, Parser: parser, TimeFunc: time.Now, - MaxBodySize: internal.Size{Size: 70000}, + MaxBodySize: config.Size(70000), DataSource: "body", + close: make(chan struct{}), } return listener } @@ -76,6 +80,7 @@ func newTestHTTPSListenerV2() *HTTPListenerV2 { Parser: parser, ServerConfig: *pki.TLSServerConfig(), TimeFunc: time.Now, + close: make(chan struct{}), } return listener @@ -103,11 +108,33 @@ func createURL(listener *HTTPListenerV2, scheme string, path string, rawquery st return u.String() } +func TestInvalidListenerConfig(t *testing.T) { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + Log: testutil.Logger{}, + ServiceAddress: "address_without_port", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + TimeFunc: time.Now, + MaxBodySize: config.Size(70000), + DataSource: "body", + close: make(chan struct{}), + } + + require.Error(t, listener.Init()) + + // Stop is
called when any ServiceInput fails to start; it must succeed regardless of state + listener.Stop() +} + func TestWriteHTTPSNoClientAuth(t *testing.T) { listener := newTestHTTPSListenerV2() listener.TLSAllowedCACerts = nil acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -124,7 +151,7 @@ func TestWriteHTTPSNoClientAuth(t *testing.T) { // post single message to listener resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -132,13 +159,14 @@ func TestWriteHTTPSWithClientAuth(t *testing.T) { listener := newTestHTTPSListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() // post single message to listener resp, err := getHTTPSClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -146,6 +174,7 @@ func TestWriteHTTPBasicAuth(t *testing.T) { listener := newTestHTTPAuthListener() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -156,7 +185,7 @@ func TestWriteHTTPBasicAuth(t *testing.T) { req.SetBasicAuth(basicUsername, basicPassword) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } @@ -164,13 +193,14 @@ func TestWriteHTTP(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -182,7 +212,7 @@ func TestWriteHTTP(t *testing.T) { // post multiple message to listener resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -198,7 +228,7 @@ func TestWriteHTTP(t *testing.T) { // Post a gigantic metric to the listener and verify that an error is returned: resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) acc.Wait(3) @@ -208,18 +238,77 @@ func TestWriteHTTP(t *testing.T) { ) } +// http listener should add request path as configured path_tag +func TestWriteHTTPWithPathTag(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.PathTag = true + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + 
require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/write"}, + ) +} + +// http listener should add request path as configured path_tag (trimming it before) +func TestWriteHTTPWithMultiplePaths(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.Paths = []string{"/alternative_write"} + listener.PathTag = true + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to /write + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + // post single message to /alternative_write + resp, err = http.Post(createURL(listener, "http", "/alternative_write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(2) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/write"}, + ) + + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "http_listener_v2_path": "/alternative_write"}, + ) +} + // http listener should add a newline at the end of the buffer if it's not there func TestWriteHTTPNoNewline(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -238,17 +327,19 @@ func TestWriteHTTPExactMaxBodySize(t *testing.T) { Path: "/write", Methods: []string{"POST"}, Parser: parser, - MaxBodySize: internal.Size{Size: int64(len(hugeMetric))}, + MaxBodySize: config.Size(len(hugeMetric)), TimeFunc: time.Now, + close: make(chan struct{}), } acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -261,17 +352,19 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { Path: "/write", Methods: []string{"POST"}, Parser: parser, - MaxBodySize: internal.Size{Size: 4096}, + MaxBodySize: config.Size(4096), TimeFunc: time.Now, + close: make(chan struct{}), } acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 413, resp.StatusCode) } @@
-280,10 +373,11 @@ func TestWriteHTTPGzippedData(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) @@ -293,6 +388,7 @@ func TestWriteHTTPGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", @@ -306,6 +402,42 @@ } } +// test that writing snappy data works +func TestWriteHTTPSnappyData(t *testing.T) { + listener := newTestHTTPListenerV2() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + testData := "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + encodedData := snappy.Encode(nil, []byte(testData)) + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(encodedData)) + require.NoError(t, err) + req.Header.Set("Content-Encoding", "snappy") + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + t.Log("Test client request failed. Error: ", err) + } + require.NoErrorf(t, resp.Body.Close(), "Test client close failed. Error: %v", err) + require.NoError(t, err) + require.EqualValues(t, 204, resp.StatusCode) + + hostTags := []string{"server01"} + acc.Wait(1) + + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } +} + // writes 25,000 metrics to the listener with 10 different writers func TestWriteHTTPHighTraffic(t *testing.T) { if runtime.GOOS == "darwin" { @@ -314,6 +446,7 @@ func TestWriteHTTPHighTraffic(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -325,15 +458,21 @@ func TestWriteHTTPHighTraffic(t *testing.T) { defer innerwg.Done() for i := 0; i < 500; i++ { resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + if err != nil { + return + } + if err := resp.Body.Close(); err != nil { + return + } + if resp.StatusCode != 204 { + return + } } }(&wg) } wg.Wait() - listener.Gather(acc) + require.NoError(t, listener.Gather(acc)) acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) @@ -343,13 +482,14 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() // post single message to listener resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 404, resp.StatusCode) } @@ -357,13 +497,14 @@ func TestWriteHTTPInvalid(t *testing.T) { listener :=
newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) } @@ -371,13 +512,14 @@ func TestWriteHTTPEmpty(t *testing.T) { listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -386,6 +528,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) { listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"} acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -397,7 +540,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) { resp, err := http.DefaultClient.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -409,7 +552,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) { // post single message to listener resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -424,6 +567,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) { listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "Present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"} acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -435,7 +579,7 @@ func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) { resp, err := http.DefaultClient.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -449,18 +593,23 @@ func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) { } func TestWriteHTTPQueryParams(t *testing.T) { - parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"}) + parser := form_urlencoded.Parser{ + MetricName: "query_measurement", + TagKeys: []string{"tagKey"}, + } + listener := newTestHTTPListenerV2() listener.DataSource = "query" - listener.Parser = parser + listener.Parser = &parser acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() resp, err := http.Post(createURL(listener, "http", "/write", "tagKey=tagValue&fieldKey=42"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, 
resp.StatusCode) acc.Wait(1) @@ -471,11 +620,16 @@ func TestWriteHTTPQueryParams(t *testing.T) { } func TestWriteHTTPFormData(t *testing.T) { - parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"}) + parser := form_urlencoded.Parser{ + MetricName: "query_measurement", + TagKeys: []string{"tagKey"}, + } + listener := newTestHTTPListenerV2() - listener.Parser = parser + listener.Parser = &parser acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) require.NoError(t, listener.Start(acc)) defer listener.Stop() @@ -484,7 +638,7 @@ func TestWriteHTTPFormData(t *testing.T) { "fieldKey": {"42"}, }) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) diff --git a/plugins/inputs/http_listener_v2/sample.conf b/plugins/inputs/http_listener_v2/sample.conf new file mode 100644 index 0000000000000..102c90e7799aa --- /dev/null +++ b/plugins/inputs/http_listener_v2/sample.conf @@ -0,0 +1,50 @@ +# Generic HTTP write listener +[[inputs.http_listener_v2]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Paths to listen to. + # paths = ["/telegraf"] + + ## Save path as http_listener_v2_path tag if set to true + # path_tag = false + + ## HTTP methods to accept. + # methods = ["POST", "PUT"] + + ## maximum duration before timing out read of the request + # read_timeout = "10s" + ## maximum duration before timing out write of the response + # write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) + # max_body_size = "500MB" + + ## Part of the request to consume. Available options are "body" and + ## "query". + # data_source = "body" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + # basic_username = "foobar" + # basic_password = "barfoo" + + ## Optional setting to map http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 4e01bc0bbdfaf..be74d4d754d10 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -2,13 +2,11 @@ This input plugin checks HTTP/HTTPS connections. -### Configuration: +## Configuration -```toml +```toml @sample.conf # HTTP/HTTPS request given an address a method and a timeout [[inputs.http_response]] - ## address is Deprecated in 1.12, use 'urls' - ## List of urls to query. # urls = ["http://localhost"] @@ -63,6 +61,8 @@ This input plugin checks HTTP/HTTPS connections. 
# tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Use the given name as the SNI server name on each URL + # tls_server_name = "" ## HTTP Request Headers (all values must be strings) # [inputs.http_response.headers] @@ -77,7 +77,7 @@ This input plugin checks HTTP/HTTPS connections. # interface = "eth0" ``` -### Metrics: +## Metrics - http_response - tags: @@ -91,14 +91,17 @@ This input plugin checks HTTP/HTTPS connections. - response_string_match (int, 0 = mismatch / body read error, 1 = match) - response_status_code_match (int, 0 = mismatch, 1 = match) - http_response_code (int, response status code) - - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) + - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) - result_code (int, [see below](#result--result_code)) -#### `result` / `result_code` +### `result` / `result_code` -Upon finishing polling the target server, the plugin registers the result of the operation in the `result` tag, and adds a numeric field called `result_code` corresponding with that tag value. +Upon finishing polling the target server, the plugin registers the result of the +operation in the `result` tag, and adds a numeric field called `result_code` +corresponding with that tag value. -This tag is used to expose network and plugin errors. HTTP errors are considered a successful connection. +This tag is used to expose network and plugin errors. HTTP errors are considered +a successful connection. |Tag value |Corresponding field value|Description| -------------------------------|-------------------------|-----------| @@ -110,9 +113,8 @@ This tag is used to expose network and plugin errors. HTTP errors are considered |dns_error | 5 |There was a DNS error while attempting to connect to the host| |response_status_code_mismatch | 6 |The option `response_status_code_match` was used, and the status code of the response didn't match the value.| +## Example Output -### Example Output: - -``` +```shell http_response,method=GET,result=success,server=http://github.com,status_code=200 content_length=87878i,http_response_code=200i,response_time=0.937655534,result_code=0i,result_type="success" 1565839598000000000 ``` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index bd3078e490c33..cec3468523b90 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -1,13 +1,15 @@ +//go:generate ../../../tools/readme_config_includer/generator package http_response import ( + _ "embed" "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" + "os" "regexp" "strconv" "strings" @@ -15,11 +17,15 @@ import ( "unicode/utf8" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( // defaultResponseBodyMaxSize is the default maximum response body size, in bytes. // if the response body is over this size, we will raise a body_read_error. 
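// Illustrative sketch (an editorial aside, not part of the upstream patch):
// the plugin enforces this limit by reading at most one byte more than the
// configured maximum (see the io.LimitReader call later in this file), which
// lets it tell a body of exactly the maximum size apart from an oversized
// one. The same pattern in a minimal, self-contained form:
//
//	package main
//
//	import (
//		"fmt"
//		"io"
//		"strings"
//	)
//
//	func main() {
//		const maxSize = 4
//		body := strings.NewReader("hello") // 5 bytes: one over the limit
//		buf, err := io.ReadAll(io.LimitReader(body, maxSize+1))
//		if err == nil && len(buf) > maxSize {
//			fmt.Println("body_read_error: response body too large")
//			return
//		}
//		fmt.Printf("read %d bytes\n", len(buf))
//	}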
@@ -28,19 +34,19 @@ const ( // HTTPResponse struct type HTTPResponse struct { - Address string // deprecated in 1.12 + Address string `toml:"address" deprecated:"1.12.0;use 'urls' instead"` URLs []string `toml:"urls"` HTTPProxy string `toml:"http_proxy"` Body string Method string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration HTTPHeaderTags map[string]string `toml:"http_header_tags"` Headers map[string]string FollowRedirects bool // Absolute path to file with Bearer token - BearerToken string `toml:"bearer_token"` - ResponseBodyField string `toml:"response_body_field"` - ResponseBodyMaxSize internal.Size `toml:"response_body_max_size"` + BearerToken string `toml:"bearer_token"` + ResponseBodyField string `toml:"response_body_field"` + ResponseBodyMaxSize config.Size `toml:"response_body_max_size"` ResponseStringMatch string ResponseStatusCode int Interface string @@ -52,101 +58,22 @@ type HTTPResponse struct { Log telegraf.Logger compiledStringMatch *regexp.Regexp - client *http.Client + client httpClient } -// Description returns the plugin Description -func (h *HTTPResponse) Description() string { - return "HTTP/HTTPS request given an address a method and a timeout" -} - -var sampleConfig = ` - ## Deprecated in 1.12, use 'urls' - ## Server address (default http://localhost) - # address = "http://localhost" - - ## List of urls to query. - # urls = ["http://localhost"] - - ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) - # http_proxy = "http://localhost:8888" - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## HTTP Request Method - # method = "GET" - - ## Whether to follow redirects from the server (defaults to false) - # follow_redirects = false - - ## Optional file with Bearer token - ## file content is added as an Authorization header - # bearer_token = "/path/to/file" - - ## Optional HTTP Basic Auth Credentials - # username = "username" - # password = "pa$$word" - - ## Optional HTTP Request Body - # body = ''' - # {'fake':'data'} - # ''' - - ## Optional name of the field that will contain the body of the response. - ## By default it is set to an empty String indicating that the body's content won't be added - # response_body_field = '' - - ## Maximum allowed HTTP response body size in bytes. - ## 0 means to use the default of 32MiB. - ## If the response body size exceeds this limit a "body_read_error" will be raised - # response_body_max_size = "32MiB" - - ## Optional substring or regex match in body of the response (case sensitive) - # response_string_match = "\"service_status\": \"up\"" - # response_string_match = "ok" - # response_string_match = "\".*_status\".?:.?\"up\"" - - ## Expected response status code. - ## The status code of the response is compared to this value. If they match, the field - ## "response_status_code_match" will be 1, otherwise it will be 0. If the - ## expected status code is 0, the check is disabled and the field won't be added. 
- # response_status_code = 0 - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## HTTP Request Headers (all values must be strings) - # [inputs.http_response.headers] - # Host = "github.com" - - ## Optional setting to map response http headers into tags - ## If the http header is not present on the request, no corresponding tag will be added - ## If multiple instances of the http header are present, only the first value will be used - # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} - - ## Interface to use when dialing an address - # interface = "eth0" -` - -// SampleConfig returns the plugin SampleConfig -func (h *HTTPResponse) SampleConfig() string { - return sampleConfig +type httpClient interface { + Do(req *http.Request) (*http.Response, error) } // ErrRedirectAttempted indicates that a redirect occurred var ErrRedirectAttempted = errors.New("redirect") // Set the proxy. A configured proxy overwrites the system wide proxy. -func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { - if http_proxy == "" { +func getProxyFunc(httpProxy string) func(*http.Request) (*url.URL, error) { + if httpProxy == "" { return http.ProxyFromEnvironment } - proxyURL, err := url.Parse(http_proxy) + proxyURL, err := url.Parse(httpProxy) if err != nil { return func(_ *http.Request) (*url.URL, error) { return nil, errors.New("bad proxy: " + err.Error()) @@ -157,9 +84,9 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { } } -// createHttpClient creates an http client which will timeout at the specified +// createHTTPClient creates an http client which will timeout at the specified // timeout period and can follow redirects if specified -func (h *HTTPResponse) createHttpClient() (*http.Client, error) { +func (h *HTTPResponse) createHTTPClient() (*http.Client, error) { tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -181,10 +108,10 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) { DisableKeepAlives: true, TLSClientConfig: tlsCfg, }, - Timeout: h.ResponseTimeout.Duration, + Timeout: time.Duration(h.ResponseTimeout), } - if h.FollowRedirects == false { + if !h.FollowRedirects { client.CheckRedirect = func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse } @@ -213,8 +140,8 @@ func localAddress(interfaceName string) (net.Addr, error) { return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName) } -func setResult(result_string string, fields map[string]interface{}, tags map[string]string) { - result_codes := map[string]int{ +func setResult(resultString string, fields map[string]interface{}, tags map[string]string) { + resultCodes := map[string]int{ "success": 0, "response_string_mismatch": 1, "body_read_error": 2, @@ -224,9 +151,9 @@ func setResult(result_string string, fields map[string]interface{}, tags map[str "response_status_code_mismatch": 6, } - tags["result"] = result_string - fields["result_type"] = result_string - fields["result_code"] = result_codes[result_string] + tags["result"] = resultString + fields["result_type"] = resultString + fields["result_code"] = resultCodes[resultString] } func setError(err error, fields map[string]interface{}, tags map[string]string) error { @@ -235,18 +162,18 @@ func setError(err error, fields map[string]interface{}, tags map[string]string) return 
timeoutError } - urlErr, isUrlErr := err.(*url.Error) - if !isUrlErr { + urlErr, isURLErr := err.(*url.Error) + if !isURLErr { return nil } opErr, isNetErr := (urlErr.Err).(*net.OpError) if isNetErr { switch e := (opErr.Err).(type) { - case (*net.DNSError): + case *net.DNSError: setResult("dns_error", fields, tags) return e - case (*net.ParseError): + case *net.ParseError: // Parse error has to do with parsing of IP addresses, so we // group it with address errors setResult("address_error", fields, tags) @@ -273,7 +200,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } if h.BearerToken != "" { - token, err := ioutil.ReadFile(h.BearerToken) + token, err := os.ReadFile(h.BearerToken) if err != nil { return nil, nil, err } @@ -295,7 +222,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // Start Timer start := time.Now() resp, err := h.client.Do(request) - response_time := time.Since(start).Seconds() + responseTime := time.Since(start).Seconds() // If an error in returned, it means we are dealing with a network error, as // HTTP error codes do not generate errors in the net/http library @@ -304,20 +231,16 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] h.Log.Debugf("Network error while polling %s: %s", u, err.Error()) // Get error details - netErr := setError(err, fields, tags) - - // If recognize the returned error, get out - if netErr != nil { - return fields, tags, nil + if setError(err, fields, tags) == nil { + // Any error not recognized by `set_error` is considered a "connection_failed" + setResult("connection_failed", fields, tags) } - // Any error not recognized by `set_error` is considered a "connection_failed" - setResult("connection_failed", fields, tags) return fields, tags, nil } if _, ok := fields["response_time"]; !ok { - fields["response_time"] = response_time + fields["response_time"] = responseTime } // This function closes the response body, as @@ -336,12 +259,12 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] tags["status_code"] = strconv.Itoa(resp.StatusCode) fields["http_response_code"] = resp.StatusCode - if h.ResponseBodyMaxSize.Size == 0 { - h.ResponseBodyMaxSize.Size = defaultResponseBodyMaxSize + if h.ResponseBodyMaxSize == 0 { + h.ResponseBodyMaxSize = config.Size(defaultResponseBodyMaxSize) } - bodyBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, h.ResponseBodyMaxSize.Size+1)) + bodyBytes, err := io.ReadAll(io.LimitReader(resp.Body, int64(h.ResponseBodyMaxSize)+1)) // Check first if the response body size exceeds the limit. 
- if err == nil && int64(len(bodyBytes)) > h.ResponseBodyMaxSize.Size { + if err == nil && int64(len(bodyBytes)) > int64(h.ResponseBodyMaxSize) { h.setBodyReadError("The body of the HTTP Response is too large", bodyBytes, fields, tags) return fields, tags, nil } else if err != nil { @@ -392,8 +315,8 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] } // Set result in case of a body read error -func (h *HTTPResponse) setBodyReadError(error_msg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) { - h.Log.Debugf(error_msg) +func (h *HTTPResponse) setBodyReadError(errorMsg string, bodyBytes []byte, fields map[string]interface{}, tags map[string]string) { + h.Log.Debugf(errorMsg) setResult("body_read_error", fields, tags) fields["content_length"] = len(bodyBytes) if h.ResponseStringMatch != "" { @@ -401,6 +324,10 @@ func (h *HTTPResponse) setBodyReadError(error_msg string, bodyBytes []byte, fiel } } +func (*HTTPResponse) SampleConfig() string { + return sampleConfig +} + // Gather gets all metric fields and tags and returns any errors it encounters func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { // Compile the body regex if it exist @@ -408,13 +335,13 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { var err error h.compiledStringMatch, err = regexp.Compile(h.ResponseStringMatch) if err != nil { - return fmt.Errorf("Failed to compile regular expression %s : %s", h.ResponseStringMatch, err) + return fmt.Errorf("failed to compile regular expression %s : %s", h.ResponseStringMatch, err) } } // Set default values - if h.ResponseTimeout.Duration < time.Second { - h.ResponseTimeout.Duration = time.Second * 5 + if h.ResponseTimeout < config.Duration(time.Second) { + h.ResponseTimeout = config.Duration(time.Second * 5) } // Check send and expected string if h.Method == "" { @@ -425,13 +352,12 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { if h.Address == "" { h.URLs = []string{"http://localhost"} } else { - h.Log.Warn("'address' deprecated in telegraf 1.12, please use 'urls'") h.URLs = []string{h.Address} } } if h.client == nil { - client, err := h.createHttpClient() + client, err := h.createHTTPClient() if err != nil { return err } @@ -446,7 +372,7 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { } if addr.Scheme != "http" && addr.Scheme != "https" { - acc.AddError(errors.New("Only http and https are supported")) + acc.AddError(errors.New("only http and https are supported")) continue } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index adf4e7999aa94..0d537f5358433 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -1,20 +1,27 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when https://github.com/influxdata/telegraf/issues/8451 is fixed + package http_response import ( "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" + "net/url" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Receives a list with fields that are expected to be absent @@ -82,21 +89,26 
@@ func checkTags(t *testing.T, tags map[string]interface{}, acc *testutil.Accumula func setUpTestMux() http.Handler { mux := http.NewServeMux() + // Ignore all returned errors below as the tests will fail anyway mux.HandleFunc("/redirect", func(w http.ResponseWriter, req *http.Request) { http.Redirect(w, req, "/good", http.StatusMovedPermanently) }) mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) { w.Header().Set("Server", "MyTestServer") w.Header().Set("Content-Type", "application/json; charset=utf-8") + //nolint:errcheck,revive fmt.Fprintf(w, "hit the good page!") }) mux.HandleFunc("/invalidUTF8", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive w.Write([]byte{0xff, 0xfe, 0xfd}) }) mux.HandleFunc("/noheader", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive fmt.Fprintf(w, "hit the good page!") }) mux.HandleFunc("/jsonresponse", func(w http.ResponseWriter, req *http.Request) { + //nolint:errcheck,revive fmt.Fprintf(w, "\"service_status\": \"up\", \"healthy\" : \"true\"") }) mux.HandleFunc("/badredirect", func(w http.ResponseWriter, req *http.Request) { @@ -107,10 +119,12 @@ func setUpTestMux() http.Handler { http.Error(w, "method wasn't post", http.StatusMethodNotAllowed) return } + //nolint:errcheck,revive fmt.Fprintf(w, "used post correctly!") }) mux.HandleFunc("/musthaveabody", func(w http.ResponseWriter, req *http.Request) { - body, err := ioutil.ReadAll(req.Body) + body, err := io.ReadAll(req.Body) + //nolint:errcheck,revive req.Body.Close() if err != nil { http.Error(w, "couldn't read request body", http.StatusBadRequest) @@ -120,11 +134,11 @@ func setUpTestMux() http.Handler { http.Error(w, "body was empty", http.StatusBadRequest) return } + //nolint:errcheck,revive fmt.Fprintf(w, "sent a body!") }) mux.HandleFunc("/twosecondnap", func(w http.ResponseWriter, req *http.Request) { time.Sleep(time.Second * 2) - return }) mux.HandleFunc("/nocontent", func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusNoContent) @@ -154,17 +168,17 @@ func checkOutput(t *testing.T, acc *testutil.Accumulator, presentFields map[stri func TestHeaders(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cHeader := r.Header.Get("Content-Type") - assert.Equal(t, "Hello", r.Host) - assert.Equal(t, "application/json", cHeader) + require.Equal(t, "Hello", r.Host) + require.Equal(t, "application/json", cHeader) w.WriteHeader(http.StatusOK) })) defer ts.Close() h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL, + URLs: []string{ts.URL}, Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 2}, + ResponseTimeout: config.Duration(time.Second * 2), Headers: map[string]string{ "Content-Type": "application/json", "Host": "Hello", @@ -198,10 +212,10 @@ func TestFields(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -236,10 +250,10 @@ func TestResponseBodyField(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 
20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -271,10 +285,10 @@ func TestResponseBodyField(t *testing.T) { // Invalid UTF-8 String h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/invalidUTF8", + URLs: []string{ts.URL + "/invalidUTF8"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -305,14 +319,14 @@ func TestResponseBodyMaxSize(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, - ResponseBodyMaxSize: internal.Size{Size: 5}, + ResponseBodyMaxSize: config.Size(5), FollowRedirects: true, } @@ -339,10 +353,10 @@ func TestHTTPHeaderTags(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, Headers: map[string]string{ "Content-Type": "application/json", @@ -374,10 +388,10 @@ func TestHTTPHeaderTags(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/noheader", + URLs: []string{ts.URL + "/noheader"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, Headers: map[string]string{ "Content-Type": "application/json", @@ -400,10 +414,10 @@ func TestHTTPHeaderTags(t *testing.T) { // Connection failed h = &HTTPResponse{ Log: testutil.Logger{}, - Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here + URLs: []string{"https:/nonexistent.nonexistent"}, // Any non-routable IP works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, FollowRedirects: false, } @@ -456,10 +470,10 @@ func TestInterface(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -495,10 +509,10 @@ func TestRedirects(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/redirect", + URLs: []string{ts.URL + "/redirect"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -526,10 +540,10 @@ func TestRedirects(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/badredirect", + URLs: []string{ts.URL + "/badredirect"}, Body: "{ 'test': 
'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -563,10 +577,10 @@ func TestMethod(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "POST", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -594,10 +608,10 @@ func TestMethod(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -626,10 +640,10 @@ func TestMethod(t *testing.T) { //check that lowercase methods work correctly h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/mustbepostmethod", + URLs: []string{ts.URL + "/mustbepostmethod"}, Body: "{ 'test': 'data'}", Method: "head", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -663,10 +677,10 @@ func TestBody(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/musthaveabody", + URLs: []string{ts.URL + "/musthaveabody"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -694,9 +708,9 @@ func TestBody(t *testing.T) { h = &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/musthaveabody", + URLs: []string{ts.URL + "/musthaveabody"}, Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -728,11 +742,11 @@ func TestStringMatch(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "hit the good page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -766,11 +780,11 @@ func TestStringMatchJson(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/jsonresponse", + URLs: []string{ts.URL + "/jsonresponse"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "\"service_status\": \"up\"", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -804,11 +818,11 @@ func TestStringMatchFail(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "hit the bad page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: 
config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -847,10 +861,10 @@ func TestTimeout(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/twosecondnap", + URLs: []string{ts.URL + "/twosecondnap"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second}, + ResponseTimeout: config.Duration(time.Second), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -881,11 +895,11 @@ func TestBadRegex(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", ResponseStringMatch: "bad regex:[[", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -901,15 +915,25 @@ func TestBadRegex(t *testing.T) { checkOutput(t, &acc, nil, nil, absentFields, absentTags) } +type fakeClient struct { + statusCode int + err error +} + +func (f *fakeClient) Do(_ *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: f.statusCode}, f.err +} + func TestNetworkErrors(t *testing.T) { // DNS error h := &HTTPResponse{ Log: testutil.Logger{}, - Address: "https://nonexistent.nonexistent", // Any non-resolvable URL works here + URLs: []string{"https://nonexistent.nonexistent"}, // Any non-resolvable URL works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), FollowRedirects: false, + client: &fakeClient{err: &url.Error{Err: &net.OpError{Err: &net.DNSError{Err: "DNS error"}}}}, } var acc testutil.Accumulator @@ -932,10 +956,10 @@ func TestNetworkErrors(t *testing.T) { // Connection failed h = &HTTPResponse{ Log: testutil.Logger{}, - Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here + URLs: []string{"https:/nonexistent.nonexistent"}, // Any non-routable IP works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), FollowRedirects: false, } @@ -967,7 +991,7 @@ func TestContentLength(t *testing.T) { URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -998,7 +1022,7 @@ func TestContentLength(t *testing.T) { URLs: []string{ts.URL + "/musthaveabody"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Headers: map[string]string{ "Content-Type": "application/json", }, @@ -1032,7 +1056,8 @@ func TestRedirect(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Location", "http://example.org") w.WriteHeader(http.StatusMovedPermanently) - w.Write([]byte("test")) + _, err := w.Write([]byte("test")) + require.NoError(t, err) }) plugin := &HTTPResponse{ @@ -1075,17 +1100,17 @@ func TestRedirect(t *testing.T) { func TestBasicAuth(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { aHeader := r.Header.Get("Authorization") - assert.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader) + 
require.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader) w.WriteHeader(http.StatusOK) })) defer ts.Close() h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, Body: "{ 'test': 'data'}", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), Username: "me", Password: "mypassword", Headers: map[string]string{ @@ -1121,9 +1146,9 @@ func TestStatusCodeMatchFail(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/nocontent", + URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusOK, - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), } var acc testutil.Accumulator @@ -1154,9 +1179,9 @@ func TestStatusCodeMatch(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/nocontent", + URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusNoContent, - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), } var acc testutil.Accumulator @@ -1187,10 +1212,10 @@ func TestStatusCodeAndStringMatch(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/good", + URLs: []string{ts.URL + "/good"}, ResponseStatusCode: http.StatusOK, ResponseStringMatch: "hit the good page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), } var acc testutil.Accumulator @@ -1222,10 +1247,10 @@ func TestStatusCodeAndStringMatchFail(t *testing.T) { h := &HTTPResponse{ Log: testutil.Logger{}, - Address: ts.URL + "/nocontent", + URLs: []string{ts.URL + "/nocontent"}, ResponseStatusCode: http.StatusOK, ResponseStringMatch: "hit the good page", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: config.Duration(time.Second * 20), } var acc testutil.Accumulator @@ -1249,3 +1274,40 @@ func TestStatusCodeAndStringMatchFail(t *testing.T) { } checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) } + +func TestSNI(t *testing.T) { + ts := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, "super-special-hostname.example.com", r.TLS.ServerName) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + "/good"}, + Method: "GET", + ResponseTimeout: config.Duration(time.Second * 20), + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + ServerName: "super-special-hostname.example.com", + }, + } + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} diff --git a/plugins/inputs/http_response/sample.conf b/plugins/inputs/http_response/sample.conf new file mode 100644 index 0000000000000..fd594fdcddee4 --- /dev/null +++ b/plugins/inputs/http_response/sample.conf @@ -0,0 +1,70 @@ +# HTTP/HTTPS request given an address a method and a timeout +[[inputs.http_response]] + ## 
List of urls to query. + # urls = ["http://localhost"] + + ## Set http_proxy (telegraf uses the system wide proxy settings if it's not set) + # http_proxy = "http://localhost:8888" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## HTTP Request Method + # method = "GET" + + ## Whether to follow redirects from the server (defaults to false) + # follow_redirects = false + + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" + + ## Optional HTTP Request Body + # body = ''' + # {'fake':'data'} + # ''' + + ## Optional name of the field that will contain the body of the response. + ## By default it is set to an empty string indicating that the body's content won't be added + # response_body_field = '' + + ## Maximum allowed HTTP response body size in bytes. + ## 0 means to use the default of 32MiB. + ## If the response body size exceeds this limit a "body_read_error" will be raised + # response_body_max_size = "32MiB" + + ## Optional substring or regex match in body of the response (case sensitive) + # response_string_match = "\"service_status\": \"up\"" + # response_string_match = "ok" + # response_string_match = "\".*_status\".?:.?\"up\"" + + ## Expected response status code. + ## The status code of the response is compared to this value. If they match, the field + ## "response_status_code_match" will be 1, otherwise it will be 0. If the + ## expected status code is 0, the check is disabled and the field won't be added. + # response_status_code = 0 + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + ## Use the given name as the SNI server name on each URL + # tls_server_name = "" + + ## HTTP Request Headers (all values must be strings) + # [inputs.http_response.headers] + # Host = "github.com" + + ## Optional setting to map response http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + + ## Interface to use when dialing an address + # interface = "eth0" diff --git a/plugins/inputs/httpjson/README.md b/plugins/inputs/httpjson/README.md index 19fe014457734..37f73ba67226d 100644 --- a/plugins/inputs/httpjson/README.md +++ b/plugins/inputs/httpjson/README.md @@ -1,12 +1,14 @@ # HTTP JSON Input Plugin -The httpjson plugin collects data from HTTP URLs which respond with JSON. It flattens the JSON and finds all numeric values, treating them as floats. +**DEPRECATED in Telegraf v1.6: Use [HTTP input plugin][] as replacement** -Deprecated (1.6): use the [http](../http) input. +The httpjson plugin collects data from HTTP URLs which respond with JSON. It +flattens the JSON and finds all numeric values, treating them as floats. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Read flattened metrics from one or more JSON HTTP endpoints [[inputs.httpjson]] ## NOTE This plugin only reads numerical measurements, strings and booleans ## will be ignored. @@ -54,28 +56,33 @@ Deprecated (1.6): use the [http](../http) input.
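Note: the standalone `sample.conf` files added here and the `toml @sample.conf` fence markers in the READMEs are two halves of one pattern: the sample configuration lives in a single file, is embedded into the plugin binary via `go:embed`, and is spliced into the README by the `readme_config_includer` generator named in the `go:generate` directive. A minimal sketch of the embedding half, assuming a `sample.conf` file sits next to the source file (the trimmed-down plugin type is illustrative only):

```go
package httpjson

import (
	_ "embed" // blank import needed for the //go:embed directive below
)

// The contents of sample.conf are compiled into the binary, replacing the
// hand-maintained sampleConfig string literal removed elsewhere in this patch.
//
//go:embed sample.conf
var sampleConfig string

// HTTPJSON stands in for the full plugin struct defined in httpjson.go.
type HTTPJSON struct{}

// SampleConfig returns the embedded TOML, e.g. for `telegraf config` output.
func (*HTTPJSON) SampleConfig() string {
	return sampleConfig
}
```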
# apiVersion = "v1" ``` -### Measurements & Fields: +## Measurements & Fields - httpjson - - response_time (float): Response time in seconds + - response_time (float): Response time in seconds -Additional fields are dependant on the response of the remote service being polled. +Additional fields are dependant on the response of the remote service being +polled. -### Tags: +## Tags - All measurements have the following tags: - - server: HTTP origin as defined in configuration as `servers`. - -Any top level keys listed under `tag_keys` in the configuration are added as tags. Top level keys are defined as keys in the root level of the object in a single object response, or in the root level of each object within an array of objects. + - server: HTTP origin as defined in configuration as `servers`. +Any top level keys listed under `tag_keys` in the configuration are added as +tags. Top level keys are defined as keys in the root level of the object in a +single object response, or in the root level of each object within an array of +objects. -### Examples Output: +## Examples Output -This plugin understands responses containing a single JSON object, or a JSON Array of Objects. +This plugin understands responses containing a single JSON object, or a JSON +Array of Objects. **Object Output:** Given the following response body: + ```json { "a": 0.5, @@ -87,9 +94,12 @@ Given the following response body: "service": "service01" } ``` + The following metric is produced: -`httpjson,server=http://localhost:9999/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001` +```shell +httpjson,server=http://localhost:9999/stats/ b_d=0.1,a=0.5,b_e=5,response_time=0.001 +``` Note that only numerical values are extracted and the type is float. @@ -102,11 +112,14 @@ If `tag_keys` is included in the configuration: Then the `service` tag will also be added: -`httpjson,server=http://localhost:9999/stats/,service=service01 b_d=0.1,a=0.5,b_e=5,response_time=0.001` +```shell +httpjson,server=http://localhost:9999/stats/,service=service01 b_d=0.1,a=0.5,b_e=5,response_time=0.001 +``` **Array Output:** -If the service returns an array of objects, one metric is be created for each object: +If the service returns an array of objects, one metric is be created for each +object: ```json [ @@ -131,5 +144,9 @@ If the service returns an array of objects, one metric is be created for each ob ] ``` -`httpjson,server=http://localhost:9999/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003` -`httpjson,server=http://localhost:9999/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003` +```shell +httpjson,server=http://localhost:9999/stats/,service=service01 a=0.5,b_d=0.1,b_e=5,response_time=0.003 +httpjson,server=http://localhost:9999/stats/,service=service02 a=0.6,b_d=0.2,b_e=6,response_time=0.003 +``` + +[HTTP input plugin]: ../http/README.md diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index a5f5e47aad68e..f6d8f241364f7 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -1,9 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package httpjson import ( "bytes" + _ "embed" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -11,23 +13,27 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - 
"github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/json" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( utf8BOM = []byte("\xef\xbb\xbf") ) -// HttpJson struct -type HttpJson struct { - Name string +// HTTPJSON struct +type HTTPJSON struct { + Name string `toml:"name" deprecated:"1.3.0;use 'name_override', 'name_suffix', 'name_prefix' instead"` Servers []string Method string TagKeys []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration Parameters map[string]string Headers map[string]string tls.ClientConfig @@ -66,63 +72,12 @@ func (c *RealHTTPClient) HTTPClient() *http.Client { return c.client } -var sampleConfig = ` - ## NOTE This plugin only reads numerical measurements, strings and booleans - ## will be ignored. - - ## Name for the service being polled. Will be appended to the name of the - ## measurement e.g. httpjson_webserver_stats - ## - ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. - name = "webserver_stats" - - ## URL of each server in the service's cluster - servers = [ - "http://localhost:9999/stats/", - "http://localhost:9998/stats/", - ] - ## Set response_timeout (default 5 seconds) - response_timeout = "5s" - - ## HTTP method to use: GET or POST (case-sensitive) - method = "GET" - - ## List of tag names to extract from top-level of JSON server response - # tag_keys = [ - # "my_tag_1", - # "my_tag_2" - # ] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## HTTP parameters (all values must be strings). For "GET" requests, data - ## will be included in the query. For "POST" requests, data will be included - ## in the request body as "x-www-form-urlencoded". - # [inputs.httpjson.parameters] - # event_type = "cpu_spike" - # threshold = "0.75" - - ## HTTP Headers (all values must be strings) - # [inputs.httpjson.headers] - # X-Auth-Token = "my-xauth-token" - # apiVersion = "v1" -` - -func (h *HttpJson) SampleConfig() string { +func (*HTTPJSON) SampleConfig() string { return sampleConfig } -func (h *HttpJson) Description() string { - return "Read flattened metrics from one or more JSON HTTP endpoints" -} - // Gathers data for all servers. 
-func (h *HttpJson) Gather(acc telegraf.Accumulator) error { +func (h *HTTPJSON) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup if h.client.HTTPClient() == nil { @@ -131,12 +86,12 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error { return err } tr := &http.Transport{ - ResponseHeaderTimeout: h.ResponseTimeout.Duration, + ResponseHeaderTimeout: time.Duration(h.ResponseTimeout), TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: h.ResponseTimeout.Duration, + Timeout: time.Duration(h.ResponseTimeout), } h.client.SetHTTPClient(client) } @@ -162,7 +117,7 @@ func (h *HttpJson) Gather(acc telegraf.Accumulator) error { // // Returns: // error: Any error that may have occurred -func (h *HttpJson) gatherServer( +func (h *HTTPJSON) gatherServer( acc telegraf.Accumulator, serverURL string, ) error { @@ -171,23 +126,22 @@ func (h *HttpJson) gatherServer( return err } - var msrmnt_name string + var msrmntName string if h.Name == "" { - msrmnt_name = "httpjson" + msrmntName = "httpjson" } else { - msrmnt_name = "httpjson_" + h.Name + msrmntName = "httpjson_" + h.Name } tags := map[string]string{ "server": serverURL, } - parser, err := parsers.NewParser(&parsers.Config{ - DataFormat: "json", - MetricName: msrmnt_name, + parser := &json.Parser{ + MetricName: msrmntName, TagKeys: h.TagKeys, DefaultTags: tags, - }) - if err != nil { + } + if err := parser.Init(); err != nil { return err } @@ -207,7 +161,7 @@ func (h *HttpJson) gatherServer( return nil } -// Sends an HTTP request to the server using the HttpJson object's HTTPClient. +// Sends an HTTP request to the server using the HTTPJSON object's HTTPClient. // This request can be either a GET or a POST. // Parameters: // serverURL: endpoint to send request to @@ -215,7 +169,7 @@ func (h *HttpJson) gatherServer( // Returns: // string: body of the response // error : Any error that may have occurred -func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { +func (h *HTTPJSON) sendRequest(serverURL string) (string, float64, error) { // Prepare URL requestURL, err := url.Parse(serverURL) if err != nil { @@ -263,7 +217,7 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { defer resp.Body.Close() responseTime := time.Since(start).Seconds() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return string(body), responseTime, err } @@ -285,11 +239,9 @@ func (h *HttpJson) sendRequest(serverURL string) (string, float64, error) { func init() { inputs.Add("httpjson", func() telegraf.Input { - return &HttpJson{ - client: &RealHTTPClient{}, - ResponseTimeout: internal.Duration{ - Duration: 5 * time.Second, - }, + return &HTTPJSON{ + client: &RealHTTPClient{}, + ResponseTimeout: config.Duration(5 * time.Second), } }) } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 90975919959e8..c522ebe9978d2 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -2,15 +2,15 @@ package httpjson import ( "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const validJSON = ` @@ -143,7 +143,7 @@ func (c *mockHTTPClient) MakeRequest(req *http.Request) (*http.Response, error) resp.StatusCode = 405 // Method not allowed } - resp.Body = 
ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -154,15 +154,15 @@ func (c *mockHTTPClient) HTTPClient() *http.Client { return nil } -// Generates a pointer to an HttpJson object that uses a mock HTTP client. +// Generates a pointer to an HTTPJSON object that uses a mock HTTP client. // Parameters: // response : Body of the response that the mock HTTP client should return // statusCode: HTTP status code the mock HTTP client should return // // Returns: -// *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client -func genMockHttpJson(response string, statusCode int) []*HttpJson { - return []*HttpJson{ +// *HTTPJSON: Pointer to an HTTPJSON object that uses the generated mock HTTP client +func genMockHTTPJSON(response string, statusCode int) []*HTTPJSON { + return []*HTTPJSON{ { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ @@ -206,13 +206,13 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson { // Test that the proper values are ignored or collected func TestHttpJson200(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 200) + httpjson := genMockHTTPJSON(validJSON, 200) for _, service := range httpjson { var acc testutil.Accumulator err := acc.GatherError(service.Gather) require.NoError(t, err) - assert.Equal(t, 12, acc.NFields()) + require.Equal(t, 12, acc.NFields()) // Set responsetime for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 @@ -231,13 +231,14 @@ func TestHttpJson200(t *testing.T) { func TestHttpJsonGET_URL(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { key := r.FormValue("api_key") - assert.Equal(t, "mykey", key) + require.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err := fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL + "?api_key=mykey"}, Name: "", Method: "GET", @@ -303,13 +304,14 @@ func TestHttpJsonGET(t *testing.T) { } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { key := r.FormValue("api_key") - assert.Equal(t, "mykey", key) + require.Equal(t, "mykey", key) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err := fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL}, Name: "", Method: "GET", @@ -375,15 +377,16 @@ func TestHttpJsonPOST(t *testing.T) { "api_key": "mykey", } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - body, err := ioutil.ReadAll(r.Body) - assert.NoError(t, err) - assert.Equal(t, "api_key=mykey", string(body)) + body, err := io.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, "api_key=mykey", string(body)) w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, validJSON2) + _, err = fmt.Fprintln(w, validJSON2) + require.NoError(t, err) })) defer ts.Close() - a := HttpJson{ + a := HTTPJSON{ Servers: []string{ts.URL}, Name: "", Method: "POST", @@ -445,50 +448,50 @@ func TestHttpJsonPOST(t *testing.T) { // Test response to HTTP 500 func TestHttpJson500(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 500) + httpjson := genMockHTTPJSON(validJSON, 500) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + 
require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to HTTP 405 func TestHttpJsonBadMethod(t *testing.T) { - httpjson := genMockHttpJson(validJSON, 200) + httpjson := genMockHTTPJSON(validJSON, 200) httpjson[0].Method = "NOT_A_REAL_METHOD" var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to malformed JSON func TestHttpJsonBadJson(t *testing.T) { - httpjson := genMockHttpJson(invalidJSON, 200) + httpjson := genMockHTTPJSON(invalidJSON, 200) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.Error(t, err) - assert.Equal(t, 0, acc.NFields()) + require.Error(t, err) + require.Equal(t, 0, acc.NFields()) } // Test response to empty string as response object func TestHttpJsonEmptyResponse(t *testing.T) { - httpjson := genMockHttpJson(empty, 200) + httpjson := genMockHTTPJSON(empty, 200) var acc testutil.Accumulator err := acc.GatherError(httpjson[0].Gather) - assert.NoError(t, err) + require.NoError(t, err) } // Test that the proper values are ignored or collected func TestHttpJson200Tags(t *testing.T) { - httpjson := genMockHttpJson(validJSONTags, 200) + httpjson := genMockHTTPJSON(validJSONTags, 200) for _, service := range httpjson { if service.Name == "other_webapp" { @@ -499,7 +502,7 @@ func TestHttpJson200Tags(t *testing.T) { p.Fields["response_time"] = 1.0 } require.NoError(t, err) - assert.Equal(t, 4, acc.NFields()) + require.Equal(t, 4, acc.NFields()) for _, srv := range service.Servers { tags := map[string]string{"server": srv, "role": "master", "build": "123"} fields := map[string]interface{}{"value": float64(15), "response_time": float64(1)} @@ -526,7 +529,7 @@ const validJSONArrayTags = ` // Test that array data is collected correctly func TestHttpJsonArray200Tags(t *testing.T) { - httpjson := genMockHttpJson(validJSONArrayTags, 200) + httpjson := genMockHTTPJSON(validJSONArrayTags, 200) for _, service := range httpjson { if service.Name == "other_webapp" { @@ -537,22 +540,22 @@ func TestHttpJsonArray200Tags(t *testing.T) { p.Fields["response_time"] = 1.0 } require.NoError(t, err) - assert.Equal(t, 8, acc.NFields()) - assert.Equal(t, uint64(4), acc.NMetrics()) + require.Equal(t, 8, acc.NFields()) + require.Equal(t, uint64(4), acc.NMetrics()) for _, m := range acc.Metrics { if m.Tags["role"] == "master" { - assert.Equal(t, "123", m.Tags["build"]) - assert.Equal(t, float64(15), m.Fields["value"]) - assert.Equal(t, float64(1), m.Fields["response_time"]) - assert.Equal(t, "httpjson_"+service.Name, m.Measurement) + require.Equal(t, "123", m.Tags["build"]) + require.Equal(t, float64(15), m.Fields["value"]) + require.Equal(t, float64(1), m.Fields["response_time"]) + require.Equal(t, "httpjson_"+service.Name, m.Measurement) } else if m.Tags["role"] == "slave" { - assert.Equal(t, "456", m.Tags["build"]) - assert.Equal(t, float64(17), m.Fields["value"]) - assert.Equal(t, float64(1), m.Fields["response_time"]) - assert.Equal(t, "httpjson_"+service.Name, m.Measurement) + require.Equal(t, "456", m.Tags["build"]) + require.Equal(t, float64(17), m.Fields["value"]) + require.Equal(t, float64(1), m.Fields["response_time"]) + require.Equal(t, "httpjson_"+service.Name, m.Measurement) } else { - assert.FailNow(t, "unknown metric") + require.FailNow(t, "unknown metric") } } } @@ -563,7 +566,7 @@ var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]") // TestHttpJsonBOM tests that UTF-8 JSON 
with a BOM can be parsed func TestHttpJsonBOM(t *testing.T) { - httpjson := genMockHttpJson(string(jsonBOM), 200) + httpjson := genMockHTTPJSON(string(jsonBOM), 200) for _, service := range httpjson { if service.Name == "other_webapp" { diff --git a/plugins/inputs/httpjson/sample.conf b/plugins/inputs/httpjson/sample.conf new file mode 100644 index 0000000000000..0a6f6ea3efbc0 --- /dev/null +++ b/plugins/inputs/httpjson/sample.conf @@ -0,0 +1,46 @@ +# Read flattened metrics from one or more JSON HTTP endpoints +[[inputs.httpjson]] + ## NOTE This plugin only reads numerical measurements, strings and booleans + ## will be ignored. + + ## Name for the service being polled. Will be appended to the name of the + ## measurement e.g. "httpjson_webserver_stats". + ## + ## Deprecated (1.3.0): Use name_override, name_suffix, name_prefix instead. + name = "webserver_stats" + + ## URL of each server in the service's cluster + servers = [ + "http://localhost:9999/stats/", + "http://localhost:9998/stats/", + ] + ## Set response_timeout (default 5 seconds) + response_timeout = "5s" + + ## HTTP method to use: GET or POST (case-sensitive) + method = "GET" + + ## Tags to extract from top-level of JSON server response. + # tag_keys = [ + # "my_tag_1", + # "my_tag_2" + # ] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Request Parameters (all values must be strings). For "GET" requests, data + ## will be included in the query. For "POST" requests, data will be included + ## in the request body as "x-www-form-urlencoded". + # [inputs.httpjson.parameters] + # event_type = "cpu_spike" + # threshold = "0.75" + + ## HTTP Request Headers (all values must be strings). + # [inputs.httpjson.headers] + # X-Auth-Token = "my-xauth-token" + # apiVersion = "v1" diff --git a/plugins/inputs/hugepages/README.md b/plugins/inputs/hugepages/README.md new file mode 100644 index 0000000000000..b7cc1f094e302 --- /dev/null +++ b/plugins/inputs/hugepages/README.md @@ -0,0 +1,68 @@ +# Hugepages Input Plugin + +Transparent Huge Pages (THP) is a Linux memory management system that reduces +the overhead of Translation Lookaside Buffer (TLB) lookups on machines with +large amounts of memory by using larger memory pages. + +Consult <https://www.kernel.org/doc/html/latest/admin-guide/mm/transhuge.html> +for more details. + +## Configuration + +```toml @sample.conf +# Gathers huge pages measurements.
+[[inputs.hugepages]] + ## Supported huge page types: + ## - "root" - based on root huge page control directory: /sys/kernel/mm/hugepages + ## - "per_node" - based on per NUMA node directories: /sys/devices/system/node/node[0-9]*/hugepages + ## - "meminfo" - based on /proc/meminfo file + # types = ["root", "per_node"] +``` + +## Measurements + +**The following measurements are supported by the Hugepages plugin:** + +- hugepages_root (gathered from root huge page control directory: `/sys/kernel/mm/hugepages`) + - tags: + - size_kb (integer, kB) + - fields: + - free (integer) + - mempolicy (integer) + - overcommit (integer) + - reserved (integer) + - surplus (integer) + - total (integer) +- hugepages_per_node (gathered from per NUMA node directories: `/sys/devices/system/node/node[0-9]*/hugepages`) + - tags: + - size_kb (integer, kB) + - node (integer) + - fields: + - free (integer) + - surplus (integer) + - total (integer) +- hugepages_meminfo (gathered from `/proc/meminfo` file) + - The fields `total`, `free`, `reserved`, and `surplus` are counts of pages of default size. Fields with suffix `_kb` are in kilobytes. + - fields: + - anonymous_kb (integer, kB) + - file_kb (integer, kB) + - free (integer) + - reserved (integer) + - shared_kb (integer, kB) + - size_kb (integer, kB) + - surplus (integer) + - tlb_kb (integer, kB) + - total (integer) + +## Example Output + +```text +$ ./telegraf -config telegraf.conf -input-filter hugepages -test +> hugepages_root,host=ubuntu,size_kb=1048576 free=0i,mempolicy=8i,overcommit=0i,reserved=0i,surplus=0i,total=8i 1646258020000000000 +> hugepages_root,host=ubuntu,size_kb=2048 free=883i,mempolicy=2048i,overcommit=0i,reserved=0i,surplus=0i,total=2048i 1646258020000000000 +> hugepages_per_node,host=ubuntu,size_kb=1048576,node=0 free=0i,surplus=0i,total=4i 1646258020000000000 +> hugepages_per_node,host=ubuntu,size_kb=2048,node=0 free=434i,surplus=0i,total=1024i 1646258020000000000 +> hugepages_per_node,host=ubuntu,size_kb=1048576,node=1 free=0i,surplus=0i,total=4i 1646258020000000000 +> hugepages_per_node,host=ubuntu,size_kb=2048,node=1 free=449i,surplus=0i,total=1024i 1646258020000000000 +> hugepages_meminfo,host=ubuntu anonymous_kb=0i,file_kb=0i,free=883i,reserved=0i,shared_kb=0i,size_kb=2048i,surplus=0i,tlb_kb=12582912i,total=2048i 1646258020000000000 +``` diff --git a/plugins/inputs/hugepages/hugepages.go b/plugins/inputs/hugepages/hugepages.go new file mode 100644 index 0000000000000..8929c422e8733 --- /dev/null +++ b/plugins/inputs/hugepages/hugepages.go @@ -0,0 +1,281 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux +// +build linux + +package hugepages + +import ( + "bytes" + _ "embed" + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + +const ( + // path to root huge page control directory + rootHugepagePath = "/sys/kernel/mm/hugepages" + // path where per NUMA node statistics are kept + numaNodePath = "/sys/devices/system/node" + // path to the meminfo file + meminfoPath = "/proc/meminfo" + + rootHugepages = "root" + perNodeHugepages = "per_node" + meminfoHugepages = "meminfo" +) + +var ( + newlineByte = []byte("\n") + colonByte = []byte(":") + + hugepagesMetricsRoot = map[string]string{ + "free_hugepages": "free", + "nr_hugepages": "total", + "nr_hugepages_mempolicy": "mempolicy", + "nr_overcommit_hugepages": "overcommit", + "resv_hugepages": "reserved", + "surplus_hugepages": "surplus", + } + + hugepagesMetricsPerNUMANode = map[string]string{ + "free_hugepages": "free", + "nr_hugepages": "total", + "surplus_hugepages": "surplus", + } + + hugepagesMetricsFromMeminfo = map[string]string{ + "HugePages_Total": "total", + "HugePages_Free": "free", + "HugePages_Rsvd": "reserved", + "HugePages_Surp": "surplus", + "Hugepagesize": "size_kb", + "Hugetlb": "tlb_kb", + "AnonHugePages": "anonymous_kb", + "ShmemHugePages": "shared_kb", + "FileHugePages": "file_kb", + } +) + +type Hugepages struct { + Types []string `toml:"types"` + + gatherRoot bool + gatherPerNode bool + gatherMeminfo bool + + rootHugepagePath string + numaNodePath string + meminfoPath string +} + +func (*Hugepages) SampleConfig() string { + return sampleConfig +} + +func (h *Hugepages) Init() error { + err := h.parseHugepagesConfig() + if err != nil { + return err + } + + h.rootHugepagePath = rootHugepagePath + h.numaNodePath = numaNodePath + h.meminfoPath = meminfoPath + + return nil +} + +func (h *Hugepages) Gather(acc telegraf.Accumulator) error { + if h.gatherRoot { + if err := h.gatherRootStats(acc); err != nil { + return fmt.Errorf("gathering root stats failed: %v", err) + } + } + + if h.gatherPerNode { + if err := h.gatherStatsPerNode(acc); err != nil { + return fmt.Errorf("gathering per node stats failed: %v", err) + } + } + + if h.gatherMeminfo { + if err := h.gatherStatsFromMeminfo(acc); err != nil { + return fmt.Errorf("gathering meminfo stats failed: %v", err) + } + } + + return nil +} + +// gatherRootStats collects root hugepages statistics +func (h *Hugepages) gatherRootStats(acc telegraf.Accumulator) error { + return h.gatherFromHugepagePath(acc, "hugepages_"+rootHugepages, h.rootHugepagePath, hugepagesMetricsRoot, nil) +} + +// gatherStatsPerNode collects hugepages statistics per NUMA node +func (h *Hugepages) gatherStatsPerNode(acc telegraf.Accumulator) error { + nodeDirs, err := ioutil.ReadDir(h.numaNodePath) + if err != nil { + return err + } + + // read metrics from: node*/hugepages/hugepages-*/* + for _, nodeDir := range nodeDirs { + if !nodeDir.IsDir() || !strings.HasPrefix(nodeDir.Name(), "node") { + continue + } + + nodeNumber := strings.TrimPrefix(nodeDir.Name(), "node") + _, err := strconv.Atoi(nodeNumber) + if err != nil { + continue + } + + perNodeTags := map[string]string{ + "node": nodeNumber, + } + hugepagesPath := filepath.Join(h.numaNodePath, nodeDir.Name(), "hugepages") + err = h.gatherFromHugepagePath(acc, "hugepages_"+perNodeHugepages, hugepagesPath, hugepagesMetricsPerNUMANode, perNodeTags) + if err != nil { + return err + } + } + return nil +} + +func (h *Hugepages) gatherFromHugepagePath(acc telegraf.Accumulator, measurement, path string, fileFilter map[string]string, defaultTags map[string]string) error { + // read metrics from: hugepages/hugepages-*/* + hugepagesDirs,
err := ioutil.ReadDir(path) + if err != nil { + return fmt.Errorf("reading root dir failed: %v", err) + } + + for _, hugepagesDir := range hugepagesDirs { + if !hugepagesDir.IsDir() || !strings.HasPrefix(hugepagesDir.Name(), "hugepages-") { + continue + } + + hugepagesSize := strings.TrimPrefix(strings.TrimSuffix(hugepagesDir.Name(), "kB"), "hugepages-") + _, err := strconv.Atoi(hugepagesSize) + if err != nil { + continue + } + + metricsPath := filepath.Join(path, hugepagesDir.Name()) + metricFiles, err := ioutil.ReadDir(metricsPath) + if err != nil { + return fmt.Errorf("reading metric dir failed: %v", err) + } + + metrics := make(map[string]interface{}) + for _, metricFile := range metricFiles { + metricName, ok := fileFilter[metricFile.Name()] + if mode := metricFile.Mode(); !mode.IsRegular() || !ok { + continue + } + + metricFullPath := filepath.Join(metricsPath, metricFile.Name()) + metricBytes, err := ioutil.ReadFile(metricFullPath) + if err != nil { + return err + } + + metricValue, err := strconv.Atoi(string(bytes.TrimSuffix(metricBytes, newlineByte))) + if err != nil { + return fmt.Errorf("failed to convert content of '%s': %v", metricFullPath, err) + } + + metrics[metricName] = metricValue + } + + if len(metrics) == 0 { + continue + } + + tags := make(map[string]string) + for key, value := range defaultTags { + tags[key] = value + } + tags["size_kb"] = hugepagesSize + + acc.AddFields(measurement, metrics, tags) + } + return nil +} + +// gatherStatsFromMeminfo collects hugepages statistics from meminfo file +func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error { + meminfo, err := ioutil.ReadFile(h.meminfoPath) + if err != nil { + return err + } + + metrics := make(map[string]interface{}) + lines := bytes.Split(meminfo, newlineByte) + for _, line := range lines { + fields := bytes.Fields(line) + if len(fields) < 2 { + continue + } + fieldName := string(bytes.TrimSuffix(fields[0], colonByte)) + metricName, ok := hugepagesMetricsFromMeminfo[fieldName] + if !ok { + continue + } + + fieldValue, err := strconv.Atoi(string(fields[1])) + if err != nil { + return fmt.Errorf("failed to convert content of '%s': %v", fieldName, err) + } + + metrics[metricName] = fieldValue + } + + acc.AddFields("hugepages_"+meminfoHugepages, metrics, map[string]string{}) + return nil +} + +func (h *Hugepages) parseHugepagesConfig() error { + // default + if h.Types == nil { + h.gatherRoot = true + h.gatherMeminfo = true + return nil + } + + // empty array + if len(h.Types) == 0 { + return fmt.Errorf("plugin was configured with nothing to read") + } + + for _, hugepagesType := range h.Types { + switch hugepagesType { + case rootHugepages: + h.gatherRoot = true + case perNodeHugepages: + h.gatherPerNode = true + case meminfoHugepages: + h.gatherMeminfo = true + default: + return fmt.Errorf("provided hugepages type `%s` is not valid", hugepagesType) + } + } + + return nil +} + +func init() { + inputs.Add("hugepages", func() telegraf.Input { + return &Hugepages{} + }) +} diff --git a/plugins/inputs/hugepages/hugepages_notlinux.go b/plugins/inputs/hugepages/hugepages_notlinux.go new file mode 100644 index 0000000000000..c2bd4f5c625f2 --- /dev/null +++ b/plugins/inputs/hugepages/hugepages_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux +// +build !linux + +package hugepages diff --git a/plugins/inputs/hugepages/hugepages_test.go b/plugins/inputs/hugepages/hugepages_test.go new file mode 100644 index 0000000000000..fa67ea7d6ca64 --- /dev/null +++ b/plugins/inputs/hugepages/hugepages_test.go @@ 
-0,0 +1,228 @@ +//go:build linux +// +build linux + +package hugepages + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestInit(t *testing.T) { + t.Run("when no config is provided then all fields should be set to default values", func(t *testing.T) { + h := Hugepages{} + err := h.Init() + + require.NoError(t, err) + require.True(t, h.gatherRoot) + require.False(t, h.gatherPerNode) + require.True(t, h.gatherMeminfo) + require.Equal(t, rootHugepagePath, h.rootHugepagePath) + require.Equal(t, numaNodePath, h.numaNodePath) + require.Equal(t, meminfoPath, h.meminfoPath) + }) + + t.Run("when empty hugepages types is provided then plugin should fail to initialize", func(t *testing.T) { + h := Hugepages{Types: []string{}} + err := h.Init() + + require.Error(t, err) + require.Contains(t, err.Error(), "plugin was configured with nothing to read") + }) + + t.Run("when valid hugepages types is provided then proper flags should be set", func(t *testing.T) { + h := Hugepages{Types: []string{"root", "per_node", "meminfo"}} + err := h.Init() + + require.NoError(t, err) + require.True(t, h.gatherRoot) + require.True(t, h.gatherPerNode) + require.True(t, h.gatherMeminfo) + }) + + t.Run("when hugepages types contains not supported value then plugin should fail to initialize", func(t *testing.T) { + h := Hugepages{Types: []string{"root", "per_node", "linux_hdd", "meminfo"}} + err := h.Init() + + require.Error(t, err) + require.Contains(t, err.Error(), "provided hugepages type") + }) +} + +func TestGather(t *testing.T) { + t.Run("when root hugepages type is enabled then gather all root metrics successfully", func(t *testing.T) { + h := Hugepages{ + rootHugepagePath: "./testdata/valid/mm/hugepages", + gatherRoot: true, + } + + acc := &testutil.Accumulator{} + require.NoError(t, h.Gather(acc)) + + expectedFields := map[string]interface{}{ + "free": 883, + "reserved": 0, + "surplus": 0, + "mempolicy": 2048, + "total": 2048, + "overcommit": 0, + } + acc.AssertContainsTaggedFields(t, "hugepages_root", expectedFields, map[string]string{"size_kb": "2048"}) + + expectedFields = map[string]interface{}{ + "free": 0, + "reserved": 0, + "surplus": 0, + "mempolicy": 8, + "total": 8, + "overcommit": 0, + } + acc.AssertContainsTaggedFields(t, "hugepages_root", expectedFields, map[string]string{"size_kb": "1048576"}) + }) + + t.Run("when per node hugepages type is enabled then gather all per node metrics successfully", func(t *testing.T) { + h := Hugepages{ + numaNodePath: "./testdata/valid/node", + gatherPerNode: true, + } + + acc := &testutil.Accumulator{} + require.NoError(t, h.Gather(acc)) + + expectedFields := map[string]interface{}{ + "free": 434, + "surplus": 0, + "total": 1024, + } + acc.AssertContainsTaggedFields(t, "hugepages_per_node", expectedFields, map[string]string{"size_kb": "2048", "node": "0"}) + + expectedFields = map[string]interface{}{ + "free": 449, + "surplus": 0, + "total": 1024, + } + acc.AssertContainsTaggedFields(t, "hugepages_per_node", expectedFields, map[string]string{"size_kb": "2048", "node": "1"}) + + expectedFields = map[string]interface{}{ + "free": 0, + "surplus": 0, + "total": 4, + } + acc.AssertContainsTaggedFields(t, "hugepages_per_node", expectedFields, map[string]string{"size_kb": "1048576", "node": "0"}) + + expectedFields = map[string]interface{}{ + "free": 0, + "surplus": 0, + "total": 4, + } + acc.AssertContainsTaggedFields(t, "hugepages_per_node", expectedFields, map[string]string{"size_kb": "1048576", 
"node": "1"}) + }) + + t.Run("when meminfo hugepages type is enabled then gather all meminfo metrics successfully", func(t *testing.T) { + h := Hugepages{ + meminfoPath: "./testdata/valid/meminfo", + gatherMeminfo: true, + } + + acc := &testutil.Accumulator{} + require.NoError(t, h.Gather(acc)) + + expectedFields := map[string]interface{}{ + "anonymous_kb": 0, + "shared_kb": 0, + "file_kb": 0, + "total": 2048, + "free": 883, + "reserved": 0, + "surplus": 0, + "size_kb": 2048, + "tlb_kb": 12582912, + } + acc.AssertContainsFields(t, "hugepages_meminfo", expectedFields) + }) + + t.Run("when root hugepages type is enabled but path is invalid then return error", func(t *testing.T) { + h := Hugepages{ + rootHugepagePath: "./testdata/not_existing_path", + gatherRoot: true, + } + + acc := &testutil.Accumulator{} + require.Error(t, h.Gather(acc)) + }) + + t.Run("when root hugepages type is enabled but files/directories don't have proper naming then gather no metrics", func(t *testing.T) { + h := Hugepages{ + rootHugepagePath: "./testdata/invalid/1/node0/hugepages", + gatherRoot: true, + } + + acc := &testutil.Accumulator{} + require.NoError(t, h.Gather(acc)) + require.Nil(t, acc.Metrics) + }) + + t.Run("when root hugepages type is enabled but metric file doesn't contain number then return error", func(t *testing.T) { + h := Hugepages{ + rootHugepagePath: "./testdata/invalid/2/node1/hugepages", + gatherRoot: true, + } + + acc := &testutil.Accumulator{} + require.Error(t, h.Gather(acc)) + }) + + t.Run("when per node hugepages type is enabled but path is invalid then return error", func(t *testing.T) { + h := Hugepages{ + numaNodePath: "./testdata/not_existing_path", + gatherPerNode: true, + } + + acc := &testutil.Accumulator{} + require.Error(t, h.Gather(acc)) + }) + + t.Run("when per node hugepages type is enabled but files/directories don't have proper naming then gather no metrics", func(t *testing.T) { + h := Hugepages{ + numaNodePath: "./testdata/invalid/1", + gatherPerNode: true, + } + + acc := &testutil.Accumulator{} + require.NoError(t, h.Gather(acc)) + require.Nil(t, acc.Metrics) + }) + + t.Run("when per node hugepages type is enabled but metric file doesn't contain number then return error", func(t *testing.T) { + h := Hugepages{ + numaNodePath: "./testdata/invalid/2/", + gatherPerNode: true, + } + + acc := &testutil.Accumulator{} + require.Error(t, h.Gather(acc)) + }) + + t.Run("when meminfo hugepages type is enabled but path is invalid then return error", func(t *testing.T) { + h := Hugepages{ + meminfoPath: "./testdata/not_existing_path", + gatherMeminfo: true, + } + + acc := &testutil.Accumulator{} + require.Error(t, h.Gather(acc)) + }) + + t.Run("when per node hugepages type is enabled but any metric doesn't contain number then return error", func(t *testing.T) { + h := Hugepages{ + meminfoPath: "./testdata/invalid/meminfo", + gatherMeminfo: true, + } + + acc := &testutil.Accumulator{} + require.Error(t, h.Gather(acc)) + }) +} diff --git a/plugins/inputs/hugepages/sample.conf b/plugins/inputs/hugepages/sample.conf new file mode 100644 index 0000000000000..c4c1f5de360d4 --- /dev/null +++ b/plugins/inputs/hugepages/sample.conf @@ -0,0 +1,7 @@ +# Gathers huge pages measurements. 
+[[inputs.hugepages]] + ## Supported huge page types: + ## - "root" - based on root huge page control directory: /sys/kernel/mm/hugepages + ## - "per_node" - based on per NUMA node directories: /sys/devices/system/node/node[0-9]*/hugepages + ## - "meminfo" - based on /proc/meminfo file + # types = ["root", "per_node"] diff --git a/plugins/inputs/hugepages/testdata/invalid/1/anode3/dir_lock b/plugins/inputs/hugepages/testdata/invalid/1/anode3/dir_lock new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-1048576kB/free_hugepages/dir_lock b/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-1048576kB/free_hugepages/dir_lock new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-1048576kB/nry_hugepages b/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-1048576kB/nry_hugepages new file mode 100644 index 0000000000000..eb08bc0b0bc3a --- /dev/null +++ b/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-1048576kB/nry_hugepages @@ -0,0 +1 @@ +240 diff --git a/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-2048kB b/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-2048kB new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-aaaa1048576kB/free_hugepages b/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-aaaa1048576kB/free_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages-aaaa1048576kB/free_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages1048576kB/free_hugepages b/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages1048576kB/free_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/invalid/1/node0/hugepages/hugepages1048576kB/free_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/invalid/1/node1 b/plugins/inputs/hugepages/testdata/invalid/1/node1 new file mode 100644 index 0000000000000..896266208f3cf --- /dev/null +++ b/plugins/inputs/hugepages/testdata/invalid/1/node1 @@ -0,0 +1 @@ +whatever \ No newline at end of file diff --git a/plugins/inputs/hugepages/testdata/invalid/1/node4b/dir_lock b/plugins/inputs/hugepages/testdata/invalid/1/node4b/dir_lock new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/inputs/hugepages/testdata/invalid/2/node1/hugepages/hugepages-1048576kB/nr_hugepages b/plugins/inputs/hugepages/testdata/invalid/2/node1/hugepages/hugepages-1048576kB/nr_hugepages new file mode 100644 index 0000000000000..4ae3c5803c087 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/invalid/2/node1/hugepages/hugepages-1048576kB/nr_hugepages @@ -0,0 +1 @@ +fourty two \ No newline at end of file diff --git a/plugins/inputs/hugepages/testdata/invalid/meminfo b/plugins/inputs/hugepages/testdata/invalid/meminfo new file mode 100644 index 0000000000000..2133623930633 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/invalid/meminfo @@ -0,0 +1,8 @@ +AnonHugePages: 0 kB +ShmemHugePages: 0 kB +HugePages_Total: 2048 +HugePages_Free: sixtynine +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +Hugetlb: 12582912 kB diff --git 
a/plugins/inputs/hugepages/testdata/valid/meminfo b/plugins/inputs/hugepages/testdata/valid/meminfo new file mode 100644 index 0000000000000..16395b60bc116 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/meminfo @@ -0,0 +1,51 @@ +MemTotal: 264026876 kB +MemFree: 260102944 kB +MemAvailable: 260015468 kB +Buffers: 115268 kB +Cached: 1203416 kB +SwapCached: 0 kB +Active: 599752 kB +Inactive: 950072 kB +Active(anon): 2740 kB +Inactive(anon): 224176 kB +Active(file): 597012 kB +Inactive(file): 725896 kB +Unevictable: 0 kB +Mlocked: 0 kB +SwapTotal: 8388604 kB +SwapFree: 8388604 kB +Dirty: 0 kB +Writeback: 0 kB +AnonPages: 231220 kB +Mapped: 317748 kB +Shmem: 5848 kB +KReclaimable: 170796 kB +Slab: 347860 kB +SReclaimable: 170796 kB +SUnreclaim: 177064 kB +KernelStack: 13776 kB +PageTables: 10756 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 140139896 kB +Committed_AS: 2661568 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 264276 kB +VmallocChunk: 0 kB +Percpu: 40896 kB +HardwareCorrupted: 0 kB +AnonHugePages: 0 kB +ShmemHugePages: 0 kB +ShmemPmdMapped: 0 kB +FileHugePages: 0 kB +FilePmdMapped: 0 kB +HugePages_Total: 2048 +HugePages_Free: 883 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +Hugetlb: 12582912 kB +DirectMap4k: 312056 kB +DirectMap2M: 6930432 kB +DirectMap1G: 263192576 kB diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/free_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/free_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/free_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_hugepages new file mode 100644 index 0000000000000..45a4fb75db864 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_hugepages @@ -0,0 +1 @@ +8 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_hugepages_mempolicy b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_hugepages_mempolicy new file mode 100644 index 0000000000000..45a4fb75db864 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_hugepages_mempolicy @@ -0,0 +1 @@ +8 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_overcommit_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_overcommit_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/nr_overcommit_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/resv_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/resv_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/resv_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/surplus_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/surplus_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ 
b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-1048576kB/surplus_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/free_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/free_hugepages new file mode 100644 index 0000000000000..d1935688f8d64 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/free_hugepages @@ -0,0 +1 @@ +883 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_hugepages new file mode 100644 index 0000000000000..c873496a2275b --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_hugepages @@ -0,0 +1 @@ +2048 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy new file mode 100644 index 0000000000000..c873496a2275b --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy @@ -0,0 +1 @@ +2048 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/resv_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/resv_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/resv_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/surplus_hugepages b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/surplus_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/mm/hugepages/hugepages-2048kB/surplus_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/free_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/free_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/free_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/nr_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/nr_hugepages new file mode 100644 index 0000000000000..b8626c4cff284 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/nr_hugepages @@ -0,0 +1 @@ +4 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/surplus_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/surplus_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-1048576kB/surplus_hugepages @@ -0,0 +1 @@ +0 diff --git 
a/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/free_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/free_hugepages new file mode 100644 index 0000000000000..e828e5d06015c --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/free_hugepages @@ -0,0 +1 @@ +434 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/nr_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/nr_hugepages new file mode 100644 index 0000000000000..d7b1c440c0f3f --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/nr_hugepages @@ -0,0 +1 @@ +1024 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/surplus_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/surplus_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node0/hugepages/hugepages-2048kB/surplus_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/free_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/free_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/free_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/nr_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/nr_hugepages new file mode 100644 index 0000000000000..b8626c4cff284 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/nr_hugepages @@ -0,0 +1 @@ +4 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/surplus_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/surplus_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-1048576kB/surplus_hugepages @@ -0,0 +1 @@ +0 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/free_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/free_hugepages new file mode 100644 index 0000000000000..2b20fd09737d6 --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/free_hugepages @@ -0,0 +1 @@ +449 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/nr_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/nr_hugepages new file mode 100644 index 0000000000000..d7b1c440c0f3f --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/nr_hugepages @@ -0,0 +1 @@ +1024 diff --git a/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/surplus_hugepages b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/surplus_hugepages new file mode 100644 index 0000000000000..573541ac9702d --- /dev/null +++ b/plugins/inputs/hugepages/testdata/valid/node/node1/hugepages/hugepages-2048kB/surplus_hugepages @@ -0,0 +1 @@ +0 diff --git 
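The fixtures above mirror the sysfs layout the hugepages plugin scans: one `hugepages-<size>kB` directory per page size, each holding single-integer counter files such as `nr_hugepages` and `free_hugepages`. A simplified, self-contained sketch of reading that layout follows; the names are illustrative rather than the plugin's actual code, and the real plugin tolerates some malformed entries that this sketch rejects.

```go
// Sketch of consuming the hugepages sysfs layout shown in the fixtures.
// Returns size_kb -> counter name -> value. Illustrative only.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

func gatherRootStats(root string) (map[string]map[string]int, error) {
	entries, err := os.ReadDir(root)
	if err != nil {
		return nil, err
	}
	stats := make(map[string]map[string]int)
	for _, dir := range entries {
		name := dir.Name()
		// Only descend into directories named hugepages-<size>kB.
		if !dir.IsDir() || !strings.HasPrefix(name, "hugepages-") || !strings.HasSuffix(name, "kB") {
			continue
		}
		sizeKB := strings.TrimSuffix(strings.TrimPrefix(name, "hugepages-"), "kB")
		if _, err := strconv.Atoi(sizeKB); err != nil {
			continue // the size must be numeric, e.g. hugepages-2048kB
		}
		files, err := os.ReadDir(filepath.Join(root, name))
		if err != nil {
			return nil, err
		}
		counters := make(map[string]int)
		for _, f := range files {
			if f.IsDir() {
				continue // counters are plain files
			}
			raw, err := os.ReadFile(filepath.Join(root, name, f.Name()))
			if err != nil {
				return nil, err
			}
			// Non-numeric content (like testdata/invalid/2) is an error.
			v, err := strconv.Atoi(strings.TrimSpace(string(raw)))
			if err != nil {
				return nil, fmt.Errorf("%s: %w", f.Name(), err)
			}
			counters[f.Name()] = v
		}
		stats[sizeKB] = counters
	}
	return stats, nil
}

func main() {
	stats, err := gatherRootStats("testdata/valid/mm/hugepages")
	if err != nil {
		panic(err)
	}
	fmt.Println(stats)
}
```

Run against the valid fixtures, this yields the same numbers the TestGather expectations assert (for example, 883 free and 2048 total pages for the 2048 kB size).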
a/plugins/inputs/icinga2/README.md b/plugins/inputs/icinga2/README.md index fb36d36f3730f..0d1fc74122cd7 100644 --- a/plugins/inputs/icinga2/README.md +++ b/plugins/inputs/icinga2/README.md @@ -4,12 +4,14 @@ This plugin gather services & hosts status using Icinga2 Remote API. The icinga2 plugin uses the icinga2 remote API to gather status on running services and hosts. You can read Icinga2's documentation for their remote API -[here](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api) +[here][1]. -### Configuration: +[1]: https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api -```toml -# Description +## Configuration + +```toml @sample.conf +# Gather Icinga2 status [[inputs.icinga2]] ## Required Icinga2 server address # server = "https://localhost:5665" @@ -32,24 +34,24 @@ services and hosts. You can read Icinga2's documentation for their remote API # insecure_skip_verify = true ``` -### Measurements & Fields: +## Measurements & Fields - All measurements have the following fields: - - name (string) - - state_code (int) + - name (string) + - state_code (int) -### Tags: +## Tags - All measurements have the following tags: - - check_command - The short name of the check command - - display_name - The name of the service or host - - state - The state: UP/DOWN for hosts, OK/WARNING/CRITICAL/UNKNOWN for services - - source - The icinga2 host - - port - The icinga2 port - - scheme - The icinga2 protocol (http/https) - - server - The server the check_command is running for + - check_command - The short name of the check command + - display_name - The name of the service or host + - state - The state: UP/DOWN for hosts, OK/WARNING/CRITICAL/UNKNOWN for services + - source - The icinga2 host + - port - The icinga2 port + - scheme - The icinga2 protocol (http/https) + - server - The server the check_command is running for -### Sample Queries: +## Sample Queries ```sql SELECT * FROM "icinga2_services" WHERE state_code = 0 AND time > now() - 24h // Service with OK status @@ -58,9 +60,9 @@ SELECT * FROM "icinga2_services" WHERE state_code = 2 AND time > now() - 24h // SELECT * FROM "icinga2_services" WHERE state_code = 3 AND time > now() - 24h // Service with UNKNOWN status ``` -### Example Output: +## Example Output -``` +```text $ ./telegraf -config telegraf.conf -input-filter icinga2 -test icinga2_hosts,display_name=router-fr.eqx.fr,check_command=hostalive-custom,host=test-vm,source=localhost,port=5665,scheme=https,state=ok name="router-fr.eqx.fr",state=0 1492021603000000000 ``` diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go index 5ec0bb43db319..408aed560256f 100644 --- a/plugins/inputs/icinga2/icinga2.go +++ b/plugins/inputs/icinga2/icinga2.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package icinga2 import ( + _ "embed" "encoding/json" "fmt" "net/http" @@ -8,17 +10,21 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type Icinga2 struct { Server string ObjectType string Username string Password string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig Log telegraf.Logger @@ -50,39 +56,13 @@ var levels = []string{"ok", "warning", "critical", "unknown"} type ObjectType string -var sampleConfig = ` - ## Required Icinga2 server address - # server = "https://localhost:5665" - - ## Required Icinga2 object type ("services" or "hosts") - # object_type = "services" - - ## Credentials for basic HTTP authentication - # username = "admin" - # password = "admin" - - ## Maximum time to receive response. - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = true - ` - -func (i *Icinga2) Description() string { - return "Gather Icinga2 status" -} - -func (i *Icinga2) SampleConfig() string { +func (*Icinga2) SampleConfig() string { return sampleConfig } func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { for _, check := range checks { - url, err := url.Parse(i.Server) + serverURL, err := url.Parse(i.Server) if err != nil { i.Log.Error(err.Error()) continue @@ -106,16 +86,16 @@ func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { "check_command": check.Attrs.CheckCommand, "source": source, "state": levels[state], - "server": url.Hostname(), - "scheme": url.Scheme, - "port": url.Port(), + "server": serverURL.Hostname(), + "scheme": serverURL.Scheme, + "port": serverURL.Port(), } acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags) } } -func (i *Icinga2) createHttpClient() (*http.Client, error) { +func (i *Icinga2) createHTTPClient() (*http.Client, error) { tlsCfg, err := i.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -125,36 +105,36 @@ func (i *Icinga2) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: i.ResponseTimeout.Duration, + Timeout: time.Duration(i.ResponseTimeout), } return client, nil } func (i *Icinga2) Gather(acc telegraf.Accumulator) error { - if i.ResponseTimeout.Duration < time.Second { - i.ResponseTimeout.Duration = time.Second * 5 + if i.ResponseTimeout < config.Duration(time.Second) { + i.ResponseTimeout = config.Duration(time.Second * 5) } if i.client == nil { - client, err := i.createHttpClient() + client, err := i.createHTTPClient() if err != nil { return err } i.client = client } - requestUrl := "%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command" + requestURL := "%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command" // Note: attrs=host_name is only valid for 'services' requests, using check.Attrs.HostName for the host // 'hosts' requests will need to use attrs=name only, using check.Attrs.Name for the host if i.ObjectType == "services" { - requestUrl += "&attrs=host_name" + requestURL += "&attrs=host_name" } - url := fmt.Sprintf(requestUrl, i.Server, i.ObjectType) + address := fmt.Sprintf(requestURL, i.Server, i.ObjectType) - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -171,7 +151,7 @@ func (i *Icinga2) Gather(acc telegraf.Accumulator) error { defer resp.Body.Close() result := Result{} - json.NewDecoder(resp.Body).Decode(&result) + err = 
json.NewDecoder(resp.Body).Decode(&result) if err != nil { return err } @@ -186,7 +166,7 @@ func init() { return &Icinga2{ Server: "https://localhost:5665", ObjectType: "services", - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/icinga2/icinga2_test.go b/plugins/inputs/icinga2/icinga2_test.go index 13055ed8c2d16..2a965877aeada 100644 --- a/plugins/inputs/icinga2/icinga2_test.go +++ b/plugins/inputs/icinga2/icinga2_test.go @@ -7,6 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGatherServicesStatus(t *testing.T) { @@ -30,7 +31,7 @@ func TestGatherServicesStatus(t *testing.T) { ` checks := Result{} - json.Unmarshal([]byte(s), &checks) + require.NoError(t, json.Unmarshal([]byte(s), &checks)) icinga2 := new(Icinga2) icinga2.Log = testutil.Logger{} @@ -84,7 +85,7 @@ func TestGatherHostsStatus(t *testing.T) { ` checks := Result{} - json.Unmarshal([]byte(s), &checks) + require.NoError(t, json.Unmarshal([]byte(s), &checks)) var acc testutil.Accumulator diff --git a/plugins/inputs/icinga2/sample.conf b/plugins/inputs/icinga2/sample.conf new file mode 100644 index 0000000000000..f7d4332c24736 --- /dev/null +++ b/plugins/inputs/icinga2/sample.conf @@ -0,0 +1,21 @@ +# Gather Icinga2 status +[[inputs.icinga2]] + ## Required Icinga2 server address + # server = "https://localhost:5665" + + ## Required Icinga2 object type ("services" or "hosts") + # object_type = "services" + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## Maximum time to receive response. + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true diff --git a/plugins/inputs/infiniband/README.md b/plugins/inputs/infiniband/README.md index bc5b03543c375..fb630ae3a42ef 100644 --- a/plugins/inputs/infiniband/README.md +++ b/plugins/inputs/infiniband/README.md @@ -6,14 +6,15 @@ system. These are the counters that can be found in **Supported Platforms**: Linux -### Configuration +## Configuration -```toml +```toml @sample.conf +# Gets counters from all InfiniBand cards and ports installed [[inputs.infiniband]] # no configuration ``` -### Metrics +## Metrics Actual metrics depend on the InfiniBand devices, the plugin uses a simple mapping from counter -> counter value. @@ -49,10 +50,8 @@ mapping from counter -> counter value. 
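As the infiniband README notes, the plugin maps each counter name straight to a field value; the `addStats` helper in the diff below tags each sample with its card and port. A self-contained sketch of that mapping, with `StatEntry` standing in for `rdmamap.RdmaStatEntry`:

```go
// Sketch of the counter -> field mapping; StatEntry stands in for
// rdmamap.RdmaStatEntry, and the two maps match what telegraf's
// Accumulator.AddFields receives as fields and tags.
package main

import "fmt"

type StatEntry struct {
	Name  string
	Value uint64
}

// statsToFields tags a sample with its card and port and copies every
// counter into a fields map, one field per counter name.
func statsToFields(dev, port string, stats []StatEntry) (map[string]string, map[string]interface{}) {
	tags := map[string]string{"device": dev, "port": port}
	fields := make(map[string]interface{}, len(stats))
	for _, entry := range stats {
		fields[entry.Name] = entry.Value
	}
	return tags, fields
}

func main() {
	tags, fields := statsToFields("mlx5_0", "1", []StatEntry{
		{Name: "port_rcv_packets", Value: 801977655075},
		{Name: "link_downed", Value: 0},
	})
	fmt.Println(tags, fields)
}
```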
- unicast_xmit_packets (integer) - VL15_dropped (integer) +## Example Output - -### Example Output - -``` +```shell infiniband,device=mlx5_0,port=1 VL15_dropped=0i,excessive_buffer_overrun_errors=0i,link_downed=0i,link_error_recovery=0i,local_link_integrity_errors=0i,multicast_rcv_packets=0i,multicast_xmit_packets=0i,port_rcv_constraint_errors=0i,port_rcv_data=237159415345822i,port_rcv_errors=0i,port_rcv_packets=801977655075i,port_rcv_remote_physical_errors=0i,port_rcv_switch_relay_errors=0i,port_xmit_constraint_errors=0i,port_xmit_data=238334949937759i,port_xmit_discards=0i,port_xmit_packets=803162651391i,port_xmit_wait=4294967295i,symbol_error=0i,unicast_rcv_packets=801977655075i,unicast_xmit_packets=803162651391i 1573125558000000000 ``` diff --git a/plugins/inputs/infiniband/infiniband.go b/plugins/inputs/infiniband/infiniband.go index 65e1d6c712998..a33a05db17f1a 100644 --- a/plugins/inputs/infiniband/infiniband.go +++ b/plugins/inputs/infiniband/infiniband.go @@ -1,22 +1,28 @@ +//go:generate ../../../tools/readme_config_includer/generator package infiniband import ( + _ "embed" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Stores the configuration values for the infiniband plugin - as there are no // config values, this is intentionally empty type Infiniband struct { Log telegraf.Logger `toml:"-"` } -// Sample configuration for plugin -var InfinibandConfig = `` - -func (_ *Infiniband) SampleConfig() string { - return InfinibandConfig +func (*Infiniband) SampleConfig() string { + return sampleConfig } -func (_ *Infiniband) Description() string { - return "Gets counters from all InfiniBand cards and ports installed" +// Initialise plugin +func init() { + inputs.Add("infiniband", func() telegraf.Input { return &Infiniband{} }) } diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go index 48cd8a428900d..9ccb7dc2fc110 100644 --- a/plugins/inputs/infiniband/infiniband_linux.go +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -1,18 +1,18 @@ +//go:build linux // +build linux package infiniband import ( "fmt" + "strconv" + "github.com/Mellanox/rdmamap" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" - "strconv" ) // Gather statistics from our infiniband cards -func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { - +func (i *Infiniband) Gather(acc telegraf.Accumulator) error { rdmaDevices := rdmamap.GetRdmaDeviceList() if len(rdmaDevices) == 0 { @@ -41,7 +41,6 @@ func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { // Add the statistics to the accumulator func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) { - // Allow users to filter by card and port tags := map[string]string{"device": dev, "port": port} fields := make(map[string]interface{}) @@ -52,8 +51,3 @@ func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegr acc.AddFields("infiniband", fields, tags) } - -// Initialise plugin -func init() { - inputs.Add("infiniband", func() telegraf.Input { return &Infiniband{} }) -} diff --git a/plugins/inputs/infiniband/infiniband_notlinux.go b/plugins/inputs/infiniband/infiniband_notlinux.go index 5b19672d975d8..8ad6731c17bd7 100644 --- a/plugins/inputs/infiniband/infiniband_notlinux.go +++ b/plugins/inputs/infiniband/infiniband_notlinux.go @@ 
-1,3 +1,4 @@ +//go:build !linux // +build !linux package infiniband diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go index 6c4bb24587f4a..c382a1fdf9dd0 100644 --- a/plugins/inputs/infiniband/infiniband_test.go +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package infiniband @@ -38,7 +39,7 @@ func TestInfiniband(t *testing.T) { "port": "1", } - sample_rdmastats_entries := []rdmamap.RdmaStatEntry{ + sampleRdmastatsEntries := []rdmamap.RdmaStatEntry{ { Name: "excessive_buffer_overrun_errors", Value: uint64(0), @@ -127,8 +128,7 @@ func TestInfiniband(t *testing.T) { var acc testutil.Accumulator - addStats("m1x5_0", "1", sample_rdmastats_entries, &acc) + addStats("m1x5_0", "1", sampleRdmastatsEntries, &acc) acc.AssertContainsTaggedFields(t, "infiniband", fields, tags) - } diff --git a/plugins/inputs/infiniband/sample.conf b/plugins/inputs/infiniband/sample.conf new file mode 100644 index 0000000000000..c8bb609d28af8 --- /dev/null +++ b/plugins/inputs/infiniband/sample.conf @@ -0,0 +1,3 @@ +# Gets counters from all InfiniBand cards and ports installed +[[inputs.infiniband]] + # no configuration diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index e17bd7072438b..9435e037dfc26 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -1,13 +1,16 @@ # InfluxDB Input Plugin -The InfluxDB plugin will collect metrics on the given InfluxDB servers. +The InfluxDB plugin will collect metrics on the given InfluxDB servers. Read our +[documentation][1] for detailed information about `influxdb` metrics. This plugin can also gather metrics from endpoints that expose InfluxDB-formatted endpoints. See below for more information. -### Configuration: +[1]: https://docs.influxdata.com/platform/monitoring/influxdata-platform/tools/measurements-internal/ -```toml +## Configuration + +```toml @sample.conf # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints [[inputs.influxdb]] ## Works with InfluxDB debug endpoints out of the box, @@ -35,51 +38,233 @@ InfluxDB-formatted endpoints. See below for more information. timeout = "5s" ``` -### Measurements & Fields - -**Note:** The measurements and fields included in this plugin are dynamically built from the InfluxDB source, and may vary between versions: - -- influxdb - - n_shards: The total number of shards in the specified database. -- influxdb_database: The database metrics are being collected from. -- influxdb_httpd: The URL to listen for network requests. By default, `http://localhost:8086/debug/var`. -- influxdb_measurement: The measurement that metrics are collected from. -- influxdb_memstats: Statistics about the memory allocator in the specified database. - - heap_inuse: The number of bytes in in-use spans. - - heap_released: The number of bytes of physical memory returned to the OS. - - mspan_inuse: The number of bytes in in-use mspans. - - total_alloc: The cumulative bytes allocated for heap objects. - - sys: The total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. - - mallocs: The total number of heap objects allocated. (The total number of live objects are frees.) - - frees: The cumulative number of freed (live) heap objects. - - heap_idle: The number of bytes of idle heap objects. 
- - pause_total_ns: The total time garbage collection cycles are paused in nanoseconds. - - lookups: The number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. - - heap_sys: The number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. - - mcache_sys: The bytes of memory obtained from the OS for mcache structures. - - next_gc: The target heap size of the next garbage collection cycle. - - gc_cpu_fraction: The fraction of CPU time used by the garbage collection cycle. - - other_sys: The number of bytes of memory used other than heap_sys, stacks_sys, mspan_sys, mcache_sys, buckhash_sys, and gc_sys. - - alloc: The currently allocated number of bytes of heap objects. - - stack_inuse: The number of bytes in in-use stacks. - - stack_sys: The total number of bytes of memory obtained from the stack in use. - - buck_hash_sys: The bytes of memory in profiling bucket hash tables. - - gc_sys: The bytes of memory in garbage collection metadata. - - num_gc: The number of completed garbage collection cycles. - - heap_alloc: The size, in bytes, of all heap objects. - - heap_objects: The number of allocated heap objects. - - mspan_sys: The bytes of memory obtained from the OS for mspan. - - mcache_inuse: The bytes of allocated mcache structures. - - last_gc: Time the last garbage collection finished, as nanoseconds since 1970 (the UNIX epoch). -- influxdb_shard: The shard metrics are collected from. -- influxdb_subscriber: The InfluxDB subscription that metrics are collected from. -- influxdb_tsm1_cache: The TSM cache that metrics are collected from. -- influxdb_tsm1_wal: The TSM Write Ahead Log (WAL) that metrics are collected from. -- influxdb_write: The total writes to the specified database. - -### Example Output: +## Measurements & Fields -``` +**Note:** The measurements and fields included in this plugin are dynamically +built from the InfluxDB source, and may vary between versions: + +- **influxdb_ae** _(Enterprise Only)_ : Statistics related to the Anti-Entropy (AE) engine in InfluxDB Enterprise clusters. + - **bytesRx**: Number of bytes received by the data node. + - **errors**: Total number of anti-entropy jobs that have resulted in errors. + - **jobs**: Total number of jobs executed by the data node. + - **jobsActive**: Number of active (currently executing) jobs. +- **influxdb_cluster** _(Enterprise Only)_ : Statistics related to the clustering features of the data nodes in InfluxDB Enterprise clusters. + - **copyShardReq**: Number of internal requests made to copy a shard from one data node to another. + - **createIteratorReq**: Number of read requests from other data nodes in the cluster. + - **expandSourcesReq**: Number of remote node requests made to find measurements on this node that match a particular regular expression. + - **fieldDimensionsReq**: Number of remote node requests for information about the fields and associated types, and tag keys of measurements on this data node. + - **iteratorCostReq**: Number of internal requests for iterator cost. + - **openConnections**: Tracks the number of open connections being handled by the data node + (including logical connections multiplexed onto a single yamux connection). + - **removeShardReq**: Number of internal requests to delete a shard from this data node. Exclusively incremented by use of the influxd-ctl remove shard command. + - **writeShardFail**: Total number of internal write requests from a remote node that failed. 
+ - **writeShardPointsReq**: Number of points in every internal write request from any remote node, regardless of success. + - **writeShardReq**: Number of internal write requests from a remote data node, regardless of success. +- **influxdb_cq**: Metrics related to continuous queries (CQs). + - **queryFail**: Total number of continuous queries that executed but failed. + - **queryOk**: Total number of continuous queries that executed successfully. +- **influxdb_database**: Database metrics are collected from. + - **numMeasurements**: Current number of measurements in the specified database. + - **numSeries**: Current series cardinality of the specified database. +- **influxdb_hh** _(Enterprise Only)_ : Events resulting in new hinted handoff (HH) processors in InfluxDB Enterprise clusters. + - **writeShardReq**: Number of initial write requests handled by the hinted handoff engine for a remote node. + - **writeShardReqPoints**: Number of write requests for each point in the initial request to the hinted handoff engine for a remote node. +- **influxdb_hh_database** _(Enterprise Only)_ : Aggregates all hinted handoff queues for a single database and node. + - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. + - **bytesWritten**: Total number of bytes written to the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files. + - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. + - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. + - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node. + - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node. + - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node fr + - **writeShardReq**: Total number of every write batch request enqueued into the hinted handoff queue. + - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue. +- **influxdb_hh_processor** _(Enterprise Only)_: Statistics stored for a single queue (shard). + - **bytesRead**: Size, in bytes, of points read from the hinted handoff queue and sent to its destination data node. + - **bytesWritten**: Total number of bytes written to the hinted handoff queue. + - **queueBytes**: Total number of bytes remaining in the hinted handoff queue. + - **queueDepth**: Total number of segments in the hinted handoff queue. The HH queue is a sequence of 10MB “segment” files. + - **writeBlocked**: Number of writes blocked because the number of concurrent HH requests exceeds the limit. + - **writeDropped**: Number of writes dropped from the HH queue because the write appeared to be corrupted. + - **writeNodeReq**: Total number of write requests that succeeded in writing a batch to the destination node. + - **writeNodeReqFail**: Total number of write requests that failed in writing a batch of data from the hinted handoff queue to the destination node. 
+ - **writeNodeReqPoints**: Total number of points successfully written from the HH queue to the destination node fr + - **writeShardReq**: Total number of every write batch request enqueued into the hinted handoff queue. + - **writeShardReqPoints**: Total number of points enqueued into the hinted handoff queue. +- **influxdb_httpd**: Metrics related to the InfluxDB HTTP server. + - **authFail**: Number of HTTP requests that were aborted due to authentication being required, but not supplied or incorrect. + - **clientError**: Number of HTTP responses due to client errors, with a 4XX HTTP status code. + - **fluxQueryReq**: Number of Flux query requests served. + - **fluxQueryReqDurationNs**: Duration (wall-time), in nanoseconds, spent executing Flux query requests. + - **pingReq**: Number of times InfluxDB HTTP server served the /ping HTTP endpoint. + - **pointsWrittenDropped**: Number of points dropped by the storage engine. + - **pointsWrittenFail**: Number of points accepted by the HTTP /write endpoint, but unable to be persisted. + - **pointsWrittenOK**: Number of points successfully accepted and persisted by the HTTP /write endpoint. + - **promReadReq**: Number of read requests to the Prometheus /read endpoint. + - **promWriteReq**: Number of write requests to the Prometheus /write endpoint. + - **queryReq**: Number of query requests. + - **queryReqDurationNs**: Total query request duration, in nanosecond (ns). + - **queryRespBytes**: Total number of bytes returned in query responses. + - **recoveredPanics**: Total number of panics recovered by the HTTP handler. + - **req**: Total number of HTTP requests served. + - **reqActive**: Number of currently active requests. + - **reqDurationNs**: Duration (wall time), in nanoseconds, spent inside HTTP requests. + - **serverError**: Number of HTTP responses due to server errors. + - **statusReq**: Number of status requests served using the HTTP /status endpoint. + - **valuesWrittenOK**: Number of values (fields) successfully accepted and persisted by the HTTP /write endpoint. + - **writeReq**: Number of write requests served using the HTTP /write endpoint. + - **writeReqActive**: Number of currently active write requests. + - **writeReqBytes**: Total number of bytes of line protocol data received by write requests, using the HTTP /write endpoint. + - **writeReqDurationNs**: Duration, in nanoseconds, of write requests served using the /write HTTP endpoint. +- **influxdb_memstats**: Statistics about the memory allocator in the specified database. + - **Alloc**: Number of bytes allocated to heap objects. + - **BuckHashSys**: Number of bytes of memory in profiling bucket hash tables. + - **Frees**: Cumulative count of heap objects freed. + - **GCCPUFraction**: fraction of InfluxDB's available CPU time used by the garbage collector (GC) since InfluxDB started. + - **GCSys**: Number of bytes of memory in garbage collection metadata. + - **HeapAlloc**: Number of bytes of allocated heap objects. + - **HeapIdle**: Number of bytes in idle (unused) spans. + - **HeapInuse**: Number of bytes in in-use spans. + - **HeapObjects**: Number of allocated heap objects. + - **HeapReleased**: Number of bytes of physical memory returned to the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. + - **LastGC**: Time the last garbage collection finished. + - **Lookups**: Number of pointer lookups performed by the runtime. + - **MCacheInuse**: Number of bytes of allocated mcache structures. 
+ - **MCacheSys**: Number of bytes of memory obtained from the OS for mcache structures. + - **MSpanInuse**: Number of bytes of allocated mspan structures. + - **MSpanSys**: Number of bytes of memory obtained from the OS for mspan structures. + - **Mallocs**: Cumulative count of heap objects allocated. + - **NextGC**: Target heap size of the next GC cycle. + - **NumForcedGC**: Number of GC cycles that were forced by the application calling the GC function. + - **NumGC**: Number of completed GC cycles. + - **OtherSys**: Number of bytes of memory in miscellaneous off-heap runtime allocations. + - **PauseTotalNs**: Cumulative nanoseconds in GC stop-the-world pauses since the program started. + - **StackInuse**: Number of bytes in stack spans. + - **StackSys**: Number of bytes of stack memory obtained from the OS. + - **Sys**: Total bytes of memory obtained from the OS. + - **TotalAlloc**: Cumulative bytes allocated for heap objects. +- **influxdb_queryExecutor**: Metrics related to usage of the Query Executor of the InfluxDB engine. + - **queriesActive**: Number of active queries currently being handled. + - **queriesExecuted**: Number of queries executed (started). + - **queriesFinished**: Number of queries that have finished executing. + - **queryDurationNs**: Total duration, in nanoseconds, of executed queries. + - **recoveredPanics**: Number of panics recovered by the Query Executor. +- **influxdb_rpc** _(Enterprise Only)_ : Statistics related to the use of RPC calls within InfluxDB Enterprise clusters. + - **idleStreams**: Number of idle multiplexed streams across all live TCP connections. + - **liveConnections**: Current number of live TCP connections to other nodes. + - **liveStreams**: Current number of live multiplexed streams across all live TCP connections. + - **rpcCalls**: Total number of RPC calls made to remote nodes. + - **rpcFailures**: Total number of RPC failures, which are RPCs that did not recover. + - **rpcReadBytes**: Total number of RPC bytes read. + - **rpcRetries**: Total number of RPC calls that retried at least once. + - **rpcWriteBytes**: Total number of RPC bytes written. + - **singleUse**: Total number of single-use connections opened using Dial. + - **singleUseOpen**: Number of single-use connections currently open. + - **totalConnections**: Total number of TCP connections that have been established. + - **totalStreams**: Total number of streams established. +- **influxdb_runtime**: Subset of memstat record statistics for the Go memory allocator. + - **Alloc**: Currently allocated number of bytes of heap objects. + - **Frees**: Cumulative number of freed (live) heap objects. + - **HeapAlloc**: Size, in bytes, of all heap objects. + - **HeapIdle**: Number of bytes of idle heap objects. + - **HeapInUse**: Number of bytes in in-use spans. + - **HeapObjects**: Number of allocated heap objects. + - **HeapReleased**: Number of bytes of physical memory returned to the OS. + - **HeapSys**: Number of bytes of heap memory obtained from the OS. Measures the amount of virtual address space reserved for the heap. + - **Lookups**: Number of pointer lookups performed by the runtime. Primarily useful for debugging runtime internals. + - **Mallocs**: Total number of heap objects allocated. The total number of live objects is Frees. + - **NumGC**: Number of completed GC (garbage collection) cycles. + - **NumGoroutine**: Total number of Go routines. + - **PauseTotalNs**: Total duration, in nanoseconds, of total GC (garbage collection) pauses. 
+ - **Sys**: Total number of bytes of memory obtained from the OS. Measures the virtual address space reserved by the Go runtime for the heap, stacks, and other internal data structures. + - **TotalAlloc**: Total number of bytes allocated for heap objects. This statistic does not decrease when objects are freed. +- **influxdb_shard**: Metrics related to InfluxDB shards. + - **diskBytes**: Size, in bytes, of the shard, including the size of the data directory and the WAL directory. + - **fieldsCreate**: Number of fields created. + - **indexType**: Type of index, either inmem or tsi1. + - **n_shards**: Total number of shards in the specified database. + - **seriesCreate**: Number of series created. + - **writeBytes**: Number of bytes written to the shard. + - **writePointsDropped**: Number of requests to write points that were dropped from a write. + - **writePointsErr**: Number of requests to write points that failed to be written due to errors. + - **writePointsOk**: Number of points written successfully. + - **writeReq**: Total number of write requests. + - **writeReqErr**: Total number of write requests that failed due to errors. + - **writeReqOk**: Total number of successful write requests. +- **influxdb_subscriber**: InfluxDB subscription metrics. + - **createFailures**: Number of subscriptions that failed to be created. + - **pointsWritten**: Total number of points that were successfully written to subscribers. + - **writeFailures**: Total number of batches that failed to be written to subscribers. +- **influxdb_tsm1_cache**: TSM cache metrics. + - **cacheAgeMs**: Duration, in milliseconds, since the cache was last snapshotted at sample time. + - **cachedBytes**: Total number of bytes that have been written into snapshots. + - **diskBytes**: Size, in bytes, of on-disk snapshots. + - **memBytes**: Size, in bytes, of in-memory cache. + - **snapshotCount**: Current level (number) of active snapshots. + - **WALCompactionTimeMs**: Duration, in milliseconds, that the commit lock is held while compacting snapshots. + - **writeDropped**: Total number of writes dropped due to timeouts. + - **writeErr**: Total number of writes that failed. + - **writeOk**: Total number of successful writes. +- **influxdb_tsm1_engine**: TSM storage engine metrics. + - **cacheCompactionDuration** Duration (wall time), in nanoseconds, spent in cache compactions. + - **cacheCompactionErr** Number of cache compactions that have failed due to errors. + - **cacheCompactions** Total number of cache compactions that have ever run. + - **cacheCompactionsActive** Number of cache compactions that are currently running. + - **tsmFullCompactionDuration** Duration (wall time), in nanoseconds, spent in full compactions. + - **tsmFullCompactionErr** Total number of TSM full compactions that have failed due to errors. + - **tsmFullCompactionQueue** Current number of pending TSM full compactions. + - **tsmFullCompactions** Total number of TSM full compactions that have ever run. + - **tsmFullCompactionsActive** Number of TSM full compactions currently running. + - **tsmLevel1CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 1 compactions. + - **tsmLevel1CompactionErr** Total number of TSM level 1 compactions that have failed due to errors. + - **tsmLevel1CompactionQueue** Current number of pending TSM level 1 compactions. + - **tsmLevel1Compactions** Total number of TSM level 1 compactions that have ever run. + - **tsmLevel1CompactionsActive** Number of TSM level 1 compactions that are currently running.
+ - **tsmLevel2CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 2 compactions. + - **tsmLevel2CompactionErr** Number of TSM level 2 compactions that have failed due to errors. + - **tsmLevel2CompactionQueue** Current number of pending TSM level 2 compactions. + - **tsmLevel2Compactions** Total number of TSM level 2 compactions that have ever run. + - **tsmLevel2CompactionsActive** Number of TSM level 2 compactions that are currently running. + - **tsmLevel3CompactionDuration** Duration (wall time), in nanoseconds, spent in TSM level 3 compactions. + - **tsmLevel3CompactionErr** Number of TSM level 3 compactions that have failed due to errors. + - **tsmLevel3CompactionQueue** Current number of pending TSM level 3 compactions. + - **tsmLevel3Compactions** Total number of TSM level 3 compactions that have ever run. + - **tsmLevel3CompactionsActive** Number of TSM level 3 compactions that are currently running. + - **tsmOptimizeCompactionDuration** Duration (wall time), in nanoseconds, spent during TSM optimize compactions. + - **tsmOptimizeCompactionErr** Total number of TSM optimize compactions that have failed due to errors. + - **tsmOptimizeCompactionQueue** Current number of pending TSM optimize compactions. + - **tsmOptimizeCompactions** Total number of TSM optimize compactions that have ever run. + - **tsmOptimizeCompactionsActive** Number of TSM optimize compactions that are currently running. +- **influxdb_tsm1_filestore**: The TSM file store metrics. + - **diskBytes**: Size, in bytes, of disk usage by the TSM file store. + - **numFiles**: Total number of files in the TSM file store. +- **influxdb_tsm1_wal**: The TSM Write Ahead Log (WAL) metrics. + - **currentSegmentDiskBytes**: Current size, in bytes, of the segment disk. + - **oldSegmentDiskBytes**: Size, in bytes, of the segment disk. + - **writeErr**: Number of writes that failed due to errors. + - **writeOK**: Number of writes that succeeded. +- **influxdb_write**: Metrics related to InfluxDB writes. + - **pointReq**: Total number of points requested to be written. + - **pointReqHH** _(Enterprise only)_: Total number of points received for write by this node and then enqueued into hinted handoff for the destination node. + - **pointReqLocal** _(Enterprise only)_: Total number of point requests that have been attempted to be written into a shard on the same (local) node. + - **pointReqRemote** _(Enterprise only)_: Total number of points received for write by this node but needed to be forwarded into a shard on a remote node. + - **pointsWrittenOK**: Number of points written to the HTTP /write endpoint and persisted successfully. + - **req**: Total number of batches requested to be written. + - **subWriteDrop**: Total number of batches that failed to be sent to the subscription dispatcher. + - **subWriteOk**: Total number of batches successfully sent to the subscription dispatcher. + - **valuesWrittenOK**: Number of values (fields) written to the HTTP /write endpoint and persisted successfully. + - **writeDrop**: Total number of write requests for points that have been dropped due to timestamps not matching any existing retention policies. + - **writeError**: Total number of batches of points that were not successfully written, due to a failure to write to a local or remote shard. + - **writeOk**: Total number of batches of points written at the requested consistency level. 
+ - **writePartial** _(Enterprise only)_: Total number of batches written to at least one node, but did not meet the requested consistency level. + - **writeTimeout**: Total number of write requests that failed to complete within the default write timeout duration. + +## Example Output + +```sh telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test * Plugin: influxdb, Collection 1 > influxdb_database,database=_internal,host=tyrion,url=http://localhost:8086/debug/vars numMeasurements=10,numSeries=29 1463590500247354636 @@ -111,7 +296,7 @@ telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test > influxdb_shard,host=tyrion n_shards=4i 1463590500247354636 ``` -### InfluxDB-formatted endpoints +## InfluxDB-formatted endpoints The influxdb plugin can collect InfluxDB-formatted data from JSON endpoints. Whether associated with an Influx database or not. diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index d7eb66153034a..4e88e99eec84f 100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package influxdb import ( "bytes" + _ "embed" "encoding/json" "errors" "io" @@ -10,11 +12,16 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( maxErrorResponseBodyLength = 1024 ) @@ -33,45 +40,17 @@ func (e *APIError) Error() string { } type InfluxDB struct { - URLs []string `toml:"urls"` - Username string `toml:"username"` - Password string `toml:"password"` - Timeout internal.Duration `toml:"timeout"` + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client } -func (*InfluxDB) Description() string { - return "Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints" -} - func (*InfluxDB) SampleConfig() string { - return ` - ## Works with InfluxDB debug endpoints out of the box, - ## but other services can use this format too. - ## See the influxdb plugin's README for more details. - - ## Multiple URLs from which to read InfluxDB-formatted JSON - ## Default is "http://localhost:8086/debug/vars". - urls = [ - "http://localhost:8086/debug/vars" - ] - - ## Username and password to send using HTTP Basic Authentication. 
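The `internal.Duration` to `config.Duration` change here repeats the migration applied to the icinga2 plugin above: the field becomes a plain named type instead of a struct wrapping `time.Duration`, so call sites cast with `time.Duration(...)` rather than reading a `.Duration` field. A minimal sketch of the pattern, assuming `config.Duration` is declared as `type Duration time.Duration`:

```go
// Minimal sketch of the config.Duration migration pattern. The local
// Duration type stands in for telegraf's config.Duration.
package main

import (
	"fmt"
	"net/http"
	"time"
)

type Duration time.Duration // mimics config.Duration

type Plugin struct {
	Timeout Duration `toml:"timeout"`
}

func (p *Plugin) client() *http.Client {
	// Fall back to 5s when the configured timeout is below one second,
	// matching the guard used by the icinga2 plugin above.
	if p.Timeout < Duration(time.Second) {
		p.Timeout = Duration(5 * time.Second)
	}
	// A named type has no .Duration field; cast it instead.
	return &http.Client{Timeout: time.Duration(p.Timeout)}
}

func main() {
	p := &Plugin{Timeout: Duration(10 * time.Second)}
	fmt.Println(p.client().Timeout) // 10s
}
```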
- # username = "" - # password = "" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## http request & header timeout - timeout = "5s" -` + return sampleConfig } func (i *InfluxDB) Gather(acc telegraf.Accumulator) error { @@ -86,10 +65,10 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error { } i.client = &http.Client{ Transport: &http.Transport{ - ResponseHeaderTimeout: i.Timeout.Duration, + ResponseHeaderTimeout: time.Duration(i.Timeout), TLSClientConfig: tlsCfg, }, - Timeout: i.Timeout.Duration, + Timeout: time.Duration(i.Timeout), } } @@ -318,7 +297,7 @@ func readResponseError(resp *http.Response) error { func init() { inputs.Add("influxdb", func() telegraf.Input { return &InfluxDB{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index 27ea81b6d7dd6..93a02a19e56a7 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -14,7 +14,8 @@ import ( func TestBasic(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(basicJSON)) + _, err := w.Write([]byte(basicJSON)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -61,7 +62,8 @@ func TestBasic(t *testing.T) { func TestInfluxDB(t *testing.T) { fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(influxReturn)) + _, err := w.Write([]byte(influxReturn)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -121,7 +123,8 @@ func TestInfluxDB(t *testing.T) { func TestInfluxDB2(t *testing.T) { fakeInfluxServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(influxReturn2)) + _, err := w.Write([]byte(influxReturn2)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -146,7 +149,8 @@ func TestInfluxDB2(t *testing.T) { func TestErrorHandling(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte("not json")) + _, err := w.Write([]byte("not json")) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -164,7 +168,8 @@ func TestErrorHandling(t *testing.T) { func TestErrorHandling404(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(basicJSON)) + _, err := w.Write([]byte(basicJSON)) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -182,7 +187,8 @@ func TestErrorHandling404(t *testing.T) { func TestErrorResponse(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"error": "unable to parse authentication credentials"}`)) + _, err := w.Write([]byte(`{"error": "unable to parse authentication credentials"}`)) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/influxdb/sample.conf 
b/plugins/inputs/influxdb/sample.conf new file mode 100644 index 0000000000000..668a13c151576 --- /dev/null +++ b/plugins/inputs/influxdb/sample.conf @@ -0,0 +1,25 @@ +# Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +[[inputs.influxdb]] + ## Works with InfluxDB debug endpoints out of the box, + ## but other services can use this format too. + ## See the influxdb plugin's README for more details. + + ## Multiple URLs from which to read InfluxDB-formatted JSON + ## Default is "http://localhost:8086/debug/vars". + urls = [ + "http://localhost:8086/debug/vars" + ] + + ## Username and password to send using HTTP Basic Authentication. + # username = "" + # password = "" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## http request & header timeout + timeout = "5s" diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md index aae77fb965f7a..755b7de5c0d63 100644 --- a/plugins/inputs/influxdb_listener/README.md +++ b/plugins/inputs/influxdb_listener/README.md @@ -18,9 +18,10 @@ receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Accept metrics over InfluxDB 1.x HTTP API [[inputs.influxdb_listener]] ## Address and port to host HTTP listener on service_address = ":8186" @@ -62,18 +63,24 @@ submits data to InfluxDB determines the destination database. ## You probably want to make sure you have TLS configured above for this. # basic_username = "foobar" # basic_password = "barfoo" + + ## Influx line protocol parser + ## 'internal' is the default. 'upstream' is a newer parser that is faster + ## and more memory efficient. + # parser_type = "internal" ``` -### Metrics: +## Metrics Metrics are created from InfluxDB Line Protocol in the request body. -### Troubleshooting: +## Troubleshooting **Example Query:** -``` + +```sh curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -[influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/guides/writing_data/ +[influxdb_http_api]: https://docs.influxdata.com/influxdb/v1.8/guides/write_data/ [http_listener_v2]: /plugins/inputs/http_listener_v2/README.md diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go index 07d27ebbd934d..f2cfdf6fe2667 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -1,9 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package influxdb_listener import ( "compress/gzip" "context" "crypto/tls" + _ "embed" "encoding/json" "fmt" "net" @@ -11,13 +13,19 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream" "github.com/influxdata/telegraf/selfstat" ) +// DO NOT REMOVE THE NEXT TWO LINES! 
This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( // defaultMaxBodySize is the default maximum request body size, in bytes. // if the request body is over this size, we will return an HTTP 413 error. @@ -29,14 +37,15 @@ type InfluxDBListener struct { port int tlsint.ServerConfig - ReadTimeout internal.Duration `toml:"read_timeout"` - WriteTimeout internal.Duration `toml:"write_timeout"` - MaxBodySize internal.Size `toml:"max_body_size"` - MaxLineSize internal.Size `toml:"max_line_size"` // deprecated in 1.14; ignored - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - DatabaseTag string `toml:"database_tag"` - RetentionPolicyTag string `toml:"retention_policy_tag"` + ReadTimeout config.Duration `toml:"read_timeout"` + WriteTimeout config.Duration `toml:"write_timeout"` + MaxBodySize config.Size `toml:"max_body_size"` + MaxLineSize config.Size `toml:"max_line_size" deprecated:"1.14.0;parser now handles lines of unlimited length and option is ignored"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + DatabaseTag string `toml:"database_tag"` + RetentionPolicyTag string `toml:"retention_policy_tag"` + ParserType string `toml:"parser_type"` timeFunc influx.TimeFunc @@ -60,51 +69,10 @@ type InfluxDBListener struct { mux http.ServeMux } -const sampleConfig = ` - ## Address and port to host InfluxDB listener on - service_address = ":8186" - - ## maximum duration before timing out read of the request - read_timeout = "10s" - ## maximum duration before timing out write of the response - write_timeout = "10s" - - ## Maximum allowed HTTP request body size in bytes. - ## 0 means to use the default of 32MiB. - max_body_size = "32MiB" - - ## Optional tag name used to store the database. - ## If the write has a database in the query string then it will be kept in this tag name. - ## This tag can be used in downstream outputs. - ## The default value of nothing means it will be off and the database will not be recorded. - # database_tag = "" - - ## If set the retention policy specified in the write query will be added as - ## the value of this tag name. - # retention_policy_tag = "" - - ## Set one or more allowed client CA certificate file names to - ## enable mutually authenticated TLS connections - tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Add service certificate and key - tls_cert = "/etc/telegraf/cert.pem" - tls_key = "/etc/telegraf/key.pem" - - ## Optional username and password to accept for HTTP basic authentication. - ## You probably want to make sure you have TLS configured above for this. 
- # basic_username = "foobar" - # basic_password = "barfoo" -` - -func (h *InfluxDBListener) SampleConfig() string { +func (*InfluxDBListener) SampleConfig() string { return sampleConfig } -func (h *InfluxDBListener) Description() string { - return "Accept metrics over InfluxDB 1.x HTTP API" -} - func (h *InfluxDBListener) Gather(_ telegraf.Accumulator) error { return nil } @@ -137,19 +105,15 @@ func (h *InfluxDBListener) Init() error { h.authFailures = selfstat.Register("influxdb_listener", "auth_failures", tags) h.routes() - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize - } - - if h.MaxLineSize.Size != 0 { - h.Log.Warnf("Use of deprecated configuration: 'max_line_size'; parser now handles lines of unlimited length and option is ignored") + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } - if h.ReadTimeout.Duration < time.Second { - h.ReadTimeout.Duration = time.Second * 10 + if h.ReadTimeout < config.Duration(time.Second) { + h.ReadTimeout = config.Duration(time.Second * 10) } - if h.WriteTimeout.Duration < time.Second { - h.WriteTimeout.Duration = time.Second * 10 + if h.WriteTimeout < config.Duration(time.Second) { + h.WriteTimeout = config.Duration(time.Second * 10) } return nil @@ -167,8 +131,8 @@ func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error { h.server = http.Server{ Addr: h.ServiceAddress, Handler: h, - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, + ReadTimeout: time.Duration(h.ReadTimeout), + WriteTimeout: time.Duration(h.WriteTimeout), TLSConfig: tlsConf, } @@ -221,7 +185,10 @@ func (h *InfluxDBListener) handleQuery() http.HandlerFunc { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.WriteHeader(http.StatusOK) - res.Write([]byte("{\"results\":[]}")) + _, err := res.Write([]byte("{\"results\":[]}")) + if err != nil { + h.Log.Debugf("error writing result in handleQuery: %v", err) + } } } @@ -236,7 +203,9 @@ func (h *InfluxDBListener) handlePing() http.HandlerFunc { res.Header().Set("Content-Type", "application/json") res.WriteHeader(http.StatusOK) b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above - res.Write(b) + if _, err := res.Write(b); err != nil { + h.Log.Debugf("error writing result in handlePing: %v", err) + } } else { res.WriteHeader(http.StatusNoContent) } @@ -252,116 +221,240 @@ func (h *InfluxDBListener) handleDefault() http.HandlerFunc { func (h *InfluxDBListener) handleWrite() http.HandlerFunc { return func(res http.ResponseWriter, req *http.Request) { - defer h.writesServed.Incr(1) - // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res) - return + if h.ParserType == "upstream" { + h.handleWriteUpstreamParser(res, req) + } else { + h.handleWriteInternalParser(res, req) } + } +} + +func (h *InfluxDBListener) handleWriteInternalParser(res http.ResponseWriter, req *http.Request) { + defer h.writesServed.Incr(1) + // Check that the content length is not too large for us to handle. 
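+ // (requests without a Content-Length header report ContentLength == -1 and
+ // skip this early check; the http.MaxBytesReader call below still caps them)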
+ if req.ContentLength > int64(h.MaxBodySize) { + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } + return + } + + db := req.URL.Query().Get("db") + rp := req.URL.Query().Get("rp") - db := req.URL.Query().Get("db") - rp := req.URL.Query().Get("rp") - - body := req.Body - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) - // Handle gzip request bodies - if req.Header.Get("Content-Encoding") == "gzip" { - var err error - body, err = gzip.NewReader(body) - if err != nil { - h.Log.Debugf("Error decompressing request body: %v", err.Error()) - badRequest(res, err.Error()) - return + body := req.Body + body = http.MaxBytesReader(res, body, int64(h.MaxBodySize)) + // Handle gzip request bodies + if req.Header.Get("Content-Encoding") == "gzip" { + var err error + body, err = gzip.NewReader(body) + if err != nil { + h.Log.Debugf("Error decompressing request body: %v", err.Error()) + if err := badRequest(res, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) } - defer body.Close() + return } + defer body.Close() + } - parser := influx.NewStreamParser(body) - parser.SetTimeFunc(h.timeFunc) + parser := influx.NewStreamParser(body) + parser.SetTimeFunc(h.timeFunc) - precisionStr := req.URL.Query().Get("precision") - if precisionStr != "" { - precision := getPrecisionMultiplier(precisionStr) - parser.SetTimePrecision(precision) + precisionStr := req.URL.Query().Get("precision") + if precisionStr != "" { + precision := getPrecisionMultiplier(precisionStr) + parser.SetTimePrecision(precision) + } + + var m telegraf.Metric + var err error + var parseErrorCount int + var lastPos int + var firstParseErrorStr string + for { + select { + case <-req.Context().Done(): + // Shutting down before parsing is finished. + res.WriteHeader(http.StatusServiceUnavailable) + return + default: } - var m telegraf.Metric - var err error - var parseErrorCount int - var lastPos int = 0 - var firstParseErrorStr string - for { - select { - case <-req.Context().Done(): - // Shutting down before parsing is finished. 
- res.WriteHeader(http.StatusServiceUnavailable) - return - default: + m, err = parser.Next() + pos := parser.Position() + h.bytesRecv.Incr(int64(pos - lastPos)) + lastPos = pos + + // Continue parsing metrics even if some are malformed + if parseErr, ok := err.(*influx.ParseError); ok { + parseErrorCount++ + errStr := parseErr.Error() + if firstParseErrorStr == "" { + firstParseErrorStr = errStr } + continue + } else if err != nil { + // Either we're exiting cleanly (err == + // influx.EOF) or there's an unexpected error + break + } - m, err = parser.Next() - pos := parser.Position() - h.bytesRecv.Incr(int64(pos - lastPos)) - lastPos = pos - - // Continue parsing metrics even if some are malformed - if parseErr, ok := err.(*influx.ParseError); ok { - parseErrorCount += 1 - errStr := parseErr.Error() - if firstParseErrorStr == "" { - firstParseErrorStr = errStr - } - continue - } else if err != nil { - // Either we're exiting cleanly (err == - // influx.EOF) or there's an unexpected error - break - } + if h.DatabaseTag != "" && db != "" { + m.AddTag(h.DatabaseTag, db) + } - if h.DatabaseTag != "" && db != "" { - m.AddTag(h.DatabaseTag, db) - } + if h.RetentionPolicyTag != "" && rp != "" { + m.AddTag(h.RetentionPolicyTag, rp) + } - if h.RetentionPolicyTag != "" && rp != "" { - m.AddTag(h.RetentionPolicyTag, rp) - } + h.acc.AddMetric(m) + } + if err != influx.EOF { + h.Log.Debugf("Error parsing the request body: %v", err.Error()) + if err := badRequest(res, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } + return + } + if parseErrorCount > 0 { + var partialErrorString string + switch parseErrorCount { + case 1: + partialErrorString = firstParseErrorStr + case 2: + partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr) + default: + partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1) + } + if err := partialWrite(res, partialErrorString); err != nil { + h.Log.Debugf("error in partial-write: %v", err) + } + return + } - h.acc.AddMetric(m) + // http request success + res.WriteHeader(http.StatusNoContent) +} +func (h *InfluxDBListener) handleWriteUpstreamParser(res http.ResponseWriter, req *http.Request) { + defer h.writesServed.Incr(1) + // Check that the content length is not too large for us to handle. 
+ if req.ContentLength > int64(h.MaxBodySize) { + if err := tooLarge(res); err != nil { + h.Log.Debugf("error in too-large: %v", err) } - if err != influx.EOF { - h.Log.Debugf("Error parsing the request body: %v", err.Error()) - badRequest(res, err.Error()) + return + } + + db := req.URL.Query().Get("db") + rp := req.URL.Query().Get("rp") + + body := req.Body + body = http.MaxBytesReader(res, body, int64(h.MaxBodySize)) + // Handle gzip request bodies + if req.Header.Get("Content-Encoding") == "gzip" { + var err error + body, err = gzip.NewReader(body) + if err != nil { + h.Log.Debugf("Error decompressing request body: %v", err.Error()) + if err := badRequest(res, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } - if parseErrorCount > 0 { - var partialErrorString string - switch parseErrorCount { - case 1: - partialErrorString = fmt.Sprintf("%s", firstParseErrorStr) - case 2: - partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr) - default: - partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1) - } - partialWrite(res, partialErrorString) + defer body.Close() + } + + parser := influx_upstream.NewStreamParser(body) + parser.SetTimeFunc(influx_upstream.TimeFunc(h.timeFunc)) + + precisionStr := req.URL.Query().Get("precision") + if precisionStr != "" { + precision := getPrecisionMultiplier(precisionStr) + parser.SetTimePrecision(precision) + } + + if req.ContentLength >= 0 { + h.bytesRecv.Incr(req.ContentLength) + } + + var m telegraf.Metric + var err error + var parseErrorCount int + var firstParseErrorStr string + for { + select { + case <-req.Context().Done(): + // Shutting down before parsing is finished. + res.WriteHeader(http.StatusServiceUnavailable) return + default: + } + + m, err = parser.Next() + + // Continue parsing metrics even if some are malformed + if parseErr, ok := err.(*influx_upstream.ParseError); ok { + parseErrorCount++ + errStr := parseErr.Error() + if firstParseErrorStr == "" { + firstParseErrorStr = errStr + } + continue + } else if err != nil { + // Either we're exiting cleanly (err == + // influx.ErrEOF) or there's an unexpected error + break + } + + if h.DatabaseTag != "" && db != "" { + m.AddTag(h.DatabaseTag, db) + } + + if h.RetentionPolicyTag != "" && rp != "" { + m.AddTag(h.RetentionPolicyTag, rp) } - // http request success - res.WriteHeader(http.StatusNoContent) + h.acc.AddMetric(m) + } + if err != influx_upstream.ErrEOF { + h.Log.Debugf("Error parsing the request body: %v", err.Error()) + if err := badRequest(res, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } + return } + if parseErrorCount > 0 { + var partialErrorString string + switch parseErrorCount { + case 1: + partialErrorString = firstParseErrorStr + case 2: + partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr) + default: + partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1) + } + if err := partialWrite(res, partialErrorString); err != nil { + h.Log.Debugf("error in partial-write: %v", err) + } + return + } + + // http request success + res.WriteHeader(http.StatusNoContent) } -func tooLarge(res http.ResponseWriter) { +func tooLarge(res http.ResponseWriter) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.Header().Set("X-Influxdb-Error", "http: request body too large") 
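+ // (all headers must be set before WriteHeader below; net/http ignores
+ // changes to the header map once the status line has been written)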
res.WriteHeader(http.StatusRequestEntityTooLarge) - res.Write([]byte(`{"error":"http: request body too large"}`)) + _, err := res.Write([]byte(`{"error":"http: request body too large"}`)) + return err } -func badRequest(res http.ResponseWriter, errString string) { +func badRequest(res http.ResponseWriter, errString string) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") if errString == "" { @@ -369,15 +462,17 @@ func badRequest(res http.ResponseWriter, errString string) { } res.Header().Set("X-Influxdb-Error", errString) res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + _, err := res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + return err } -func partialWrite(res http.ResponseWriter, errString string) { +func partialWrite(res http.ResponseWriter, errString string) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Version", "1.0") res.Header().Set("X-Influxdb-Error", errString) res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + _, err := res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) + return err } func getPrecisionMultiplier(precision string) time.Duration { diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go index d3dc552192007..f0bfc695c98a3 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" ) @@ -20,9 +20,7 @@ func newListener() *InfluxDBListener { acc: &testutil.NopAccumulator{}, bytesRecv: selfstat.Register("influxdb_listener", "bytes_received", map[string]string{}), writesServed: selfstat.Register("influxdb_listener", "writes_served", map[string]string{}), - MaxBodySize: internal.Size{ - Size: defaultMaxBodySize, - }, + MaxBodySize: config.Size(defaultMaxBodySize), } return listener } diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go index 5c934e371bfc7..f60c69fe083cd 100644 --- a/plugins/inputs/influxdb_listener/influxdb_listener_test.go +++ b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -4,19 +4,21 @@ import ( "bytes" "crypto/tls" "crypto/x509" - "io/ioutil" + "fmt" "net/http" "net/url" + "os" "runtime" "strconv" "sync" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) const ( @@ -43,7 +45,13 @@ cpu,host=c value1=1` ) var ( - pki = testutil.NewPKI("../../../testutil/pki") + pki = testutil.NewPKI("../../../testutil/pki") + parserTestCases = []struct { + parser string + }{ + {"upstream"}, + {"internal"}, + } ) func newTestListener() *InfluxDBListener { @@ -117,7 +125,7 @@ func TestWriteSecureNoClientAuth(t *testing.T) { // post single message to listener resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, 
resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -132,7 +140,7 @@ func TestWriteSecureWithClientAuth(t *testing.T) { // post single message to listener resp, err := getSecureClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -151,59 +159,64 @@ func TestWriteBasicAuth(t *testing.T) { req.SetBasicAuth(basicUsername, basicPassword) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } func TestWriteKeepDatabase(t *testing.T) { testMsgWithDB := "cpu_load_short,host=server01,database=wrongdb value=12.0 1422568543702900257\n" - listener := newTestListener() - listener.DatabaseTag = "database" - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser + listener.DatabaseTag = "database" - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01", "database": "mydb"}, - ) + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) - // post single message to listener with a database tag in it already. It should be clobbered. - resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "database": "mydb"}, + ) - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01", "database": "mydb"}, - ) + // post single message to listener with a database tag in it already. It should be clobbered. 
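+ // ("clobbered" = the db=mydb query parameter wins and the metric's own
+ // database=wrongdb tag is overwritten, as the assertions below verify)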
+ resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01", "database": "mydb"}, - ) + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "database": "mydb"}, + ) - // post multiple message to listener - resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "database": "mydb"}, + ) - acc.Wait(2) - hostTags := []string{"server02", "server03", - "server04", "server05", "server06"} - for _, hostTag := range hostTags { - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": hostTag, "database": "mydb"}, - ) + // post multiple messages to listener + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(2) + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag, "database": "mydb"}, + ) + } + }) } } @@ -218,7 +231,7 @@ func TestWriteRetentionPolicyTag(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/write", "rp=myrp"), "", bytes.NewBuffer([]byte("cpu time_idle=42"))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.Equal(t, 204, resp.StatusCode) @@ -240,192 +253,227 @@ // http listener should add a newline at the end of the buffer if it's not there func TestWriteNoNewline(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01"}, - ) + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) + }) + } } func TestPartialWrite(t *testing.T) { - listener := newTestListener() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser
%s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testPartial))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 400, resp.StatusCode) + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testPartial))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 400, resp.StatusCode) - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "cpu", - map[string]interface{}{"value1": float64(1)}, - map[string]string{"host": "a"}, - ) - acc.AssertContainsTaggedFields(t, "cpu", - map[string]interface{}{"value1": float64(1)}, - map[string]string{"host": "c"}, - ) + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu", + map[string]interface{}{"value1": float64(1)}, + map[string]string{"host": "a"}, + ) + acc.AssertContainsTaggedFields(t, "cpu", + map[string]interface{}{"value1": float64(1)}, + map[string]string{"host": "c"}, + ) + }) + } } func TestWriteMaxLineSizeIncrease(t *testing.T) { - listener := &InfluxDBListener{ - Log: testutil.Logger{}, - ServiceAddress: "localhost:0", - timeFunc: time.Now, - } + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + ParserType: tc.parser, + timeFunc: time.Now, + } - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - // Post a gigantic metric to the listener and verify that it writes OK this time: - resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + // Post a gigantic metric to the listener and verify that it writes OK this time: + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + }) + } } func TestWriteVerySmallMaxBody(t *testing.T) { - listener := &InfluxDBListener{ - Log: testutil.Logger{}, - ServiceAddress: "localhost:0", - MaxBodySize: internal.Size{Size: 4096}, - timeFunc: time.Now, - } + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + MaxBodySize: config.Size(4096), + ParserType: tc.parser, + timeFunc: time.Now, + } - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - resp, err := 
http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 413, resp.StatusCode) + for _, parser := range []string{"internal", "upstream"} { + listener.ParserType = parser + + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 413, resp.StatusCode) + } + }) + } } func TestWriteLargeLine(t *testing.T) { - listener := &InfluxDBListener{ - Log: testutil.Logger{}, - ServiceAddress: "localhost:0", - timeFunc: func() time.Time { - return time.Unix(123456789, 0) - }, - } + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + ParserType: tc.parser, + timeFunc: func() time.Time { + return time.Unix(123456789, 0) + }, + } - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) - require.NoError(t, err) - resp.Body.Close() - //todo: with the new parser, long lines aren't a problem. Do we need to skip them? - //require.EqualValues(t, 400, resp.StatusCode) - - expected := testutil.MustMetric( - "super_long_metric", - map[string]string{"foo": "bar"}, - map[string]interface{}{ - "clients": 42, - "connected_followers": 43, - "evicted_keys": 44, - "expired_keys": 45, - "instantaneous_ops_per_sec": 46, - "keyspace_hitrate": 47.0, - "keyspace_hits": 48, - "keyspace_misses": 49, - "latest_fork_usec": 50, - "master_repl_offset": 51, - "mem_fragmentation_ratio": 52.58, - "pubsub_channels": 53, - "pubsub_patterns": 54, - "rdb_changes_since_last_save": 55, - "repl_backlog_active": 56, - "repl_backlog_histlen": 57, - "repl_backlog_size": 58, - "sync_full": 59, - "sync_partial_err": 60, - "sync_partial_ok": 61, - "total_commands_processed": 62, - "total_connections_received": 63, - "uptime": 64, - "used_cpu_sys": 65.07, - "used_cpu_sys_children": 66.0, - "used_cpu_user": 67.1, - "used_cpu_user_children": 68.0, - "used_memory": 692048, - "used_memory_lua": 70792, - "used_memory_peak": 711128, - "used_memory_rss": 7298144, - }, - time.Unix(123456789, 0), - ) - - m, ok := acc.Get("super_long_metric") - require.True(t, ok) - testutil.RequireMetricEqual(t, expected, testutil.FromTestMetric(m)) - - hostTags := []string{"server02", "server03", - "server04", "server05", "server06"} - acc.Wait(len(hostTags)) - for _, hostTag := range hostTags { - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": hostTag}, - ) + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + //todo: with the new parser, long lines aren't a problem. Do we need to skip them? 
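+ // (only max_body_size bounds a request with the streaming parsers, which
+ // is why the old 400 assertion below stays disabled)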
+ //require.EqualValues(t, 400, resp.StatusCode) + + expected := testutil.MustMetric( + "super_long_metric", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "clients": 42, + "connected_followers": 43, + "evicted_keys": 44, + "expired_keys": 45, + "instantaneous_ops_per_sec": 46, + "keyspace_hitrate": 47.0, + "keyspace_hits": 48, + "keyspace_misses": 49, + "latest_fork_usec": 50, + "master_repl_offset": 51, + "mem_fragmentation_ratio": 52.58, + "pubsub_channels": 53, + "pubsub_patterns": 54, + "rdb_changes_since_last_save": 55, + "repl_backlog_active": 56, + "repl_backlog_histlen": 57, + "repl_backlog_size": 58, + "sync_full": 59, + "sync_partial_err": 60, + "sync_partial_ok": 61, + "total_commands_processed": 62, + "total_connections_received": 63, + "uptime": 64, + "used_cpu_sys": 65.07, + "used_cpu_sys_children": 66.0, + "used_cpu_user": 67.1, + "used_cpu_user_children": 68.0, + "used_memory": 692048, + "used_memory_lua": 70792, + "used_memory_peak": 711128, + "used_memory_rss": 7298144, + }, + time.Unix(123456789, 0), + ) + + m, ok := acc.Get("super_long_metric") + require.True(t, ok) + testutil.RequireMetricEqual(t, expected, testutil.FromTestMetric(m)) + + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + acc.Wait(len(hostTags)) + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } + }) } } // test that writing gzipped data works func TestWriteGzippedData(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") - require.NoError(t, err) + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) - require.NoError(t, err) - req.Header.Set("Content-Encoding", "gzip") + data, err := os.ReadFile("./testdata/testmsgs.gz") + require.NoError(t, err) - client := &http.Client{} - resp, err := client.Do(req) - require.NoError(t, err) - require.EqualValues(t, 204, resp.StatusCode) + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) + require.NoError(t, err) + req.Header.Set("Content-Encoding", "gzip") - hostTags := []string{"server02", "server03", - "server04", "server05", "server06"} - acc.Wait(len(hostTags)) - for _, hostTag := range hostTags { - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": hostTag}, - ) + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + acc.Wait(len(hostTags)) + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } + }) } } @@ -434,6 +482,10 @@ func TestWriteHighTraffic(t *testing.T) { if runtime.GOOS == 
"darwin" { t.Skip("Skipping due to hang on darwin") } + // resource intensive, large test + if testing.Short() { + t.Skip("Skipping long test in short mode") + } listener := newTestListener() acc := &testutil.Accumulator{} @@ -449,158 +501,249 @@ func TestWriteHighTraffic(t *testing.T) { defer innerwg.Done() for i := 0; i < 500; i++ { resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + if err != nil { + return + } + if err := resp.Body.Close(); err != nil { + return + } + if resp.StatusCode != 204 { + return + } } }(&wg) } wg.Wait() - listener.Gather(acc) + require.NoError(t, listener.Gather(acc)) acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) } func TestReceive404ForInvalidEndpoint(t *testing.T) { - listener := newTestListener() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 404, resp.StatusCode) + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 404, resp.StatusCode) + }) + } } func TestWriteInvalid(t *testing.T) { - listener := newTestListener() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 400, resp.StatusCode) + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 400, resp.StatusCode) + }) + } } func TestWriteEmpty(t *testing.T) { - listener := newTestListener() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", 
bytes.NewBuffer([]byte(emptyMsg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + }) + } } func TestQuery(t *testing.T) { - listener := newTestListener() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - // post query to listener - resp, err := http.Post( - createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) - require.NoError(t, err) - require.EqualValues(t, 200, resp.StatusCode) + // post query to listener + resp, err := http.Post( + createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 200, resp.StatusCode) + }) + } } func TestPing(t *testing.T) { - listener := newTestListener() - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - // post ping to listener - resp, err := http.Post(createURL(listener, "http", "/ping", ""), "", nil) - require.NoError(t, err) - require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) - require.Len(t, resp.Header["Content-Type"], 0) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post ping to listener + resp, err := http.Post(createURL(listener, "http", "/ping", ""), "", nil) + require.NoError(t, err) + require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) + require.Len(t, resp.Header["Content-Type"], 0) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + }) + } } func TestPingVerbose(t *testing.T) { - listener := newTestListener() - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - // post ping to listener - resp, err := http.Post(createURL(listener, "http", "/ping", "verbose=1"), "", nil) - require.NoError(t, err) - require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) - require.Equal(t, "application/json", resp.Header["Content-Type"][0]) - resp.Body.Close() - require.EqualValues(t, 200, resp.StatusCode) + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post ping to listener + resp, err := http.Post(createURL(listener, "http", "/ping", "verbose=1"), "", nil) + 
require.NoError(t, err) + require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) + require.Equal(t, "application/json", resp.Header["Content-Type"][0]) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 200, resp.StatusCode) + }) + } } func TestWriteWithPrecision(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser - msg := "xyzzy value=42 1422568543\n" - resp, err := http.Post( - createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - acc.Wait(1) - require.Equal(t, 1, len(acc.Metrics)) - // When timestamp is provided, the precision parameter is - // overloaded to specify the timestamp's unit - require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time) + msg := "xyzzy value=42 1422568543\n" + resp, err := http.Post( + createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + require.Equal(t, 1, len(acc.Metrics)) + // When timestamp is provided, the precision parameter is + // overloaded to specify the timestamp's unit + require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time) + }) + } } func TestWriteWithPrecisionNoTimestamp(t *testing.T) { - listener := newTestListener() - listener.timeFunc = func() time.Time { - return time.Unix(42, 123456789) + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser + listener.timeFunc = func() time.Time { + return time.Unix(42, 123456789) + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + msg := "xyzzy value=42\n" + resp, err := http.Post( + createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + require.Equal(t, 1, len(acc.Metrics)) + // When timestamp is omitted, the precision parameter actually + // specifies the precision. The timestamp is set to the greatest + // integer unit less than the provided timestamp (floor). 
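+ // e.g. timeFunc above returns 42.123456789s, so with precision=s the
+ // metric time is floored to exactly 42s: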
+ require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time) + }) } +} - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() +func TestWriteUpstreamParseErrors(t *testing.T) { + var tests = []struct { + name string + input string + expected string + }{ + { + name: "one parse error", + input: "foo value=1.0\nfoo value=2asdf2.0\nfoo value=3.0\nfoo value=4.0", + expected: `metric parse error: cannot parse value for field key "value": invalid float value syntax at 2:11`, + }, + { + name: "two parse errors", + input: "foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4.0", + expected: `metric parse error: cannot parse value for field key "value": invalid float value syntax at 1:11 (and 1 other parse error)`, + }, + { + name: "three or more parse errors", + input: "foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4asdf2.0", + expected: `metric parse error: cannot parse value for field key "value": invalid float value syntax at 1:11 (and 2 other parse errors)`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + listener := newTestListener() + listener.ParserType = "upstream" - msg := "xyzzy value=42\n" - resp, err := http.Post( - createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + acc := &testutil.NopAccumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - acc.Wait(1) - require.Equal(t, 1, len(acc.Metrics)) - // When timestamp is omitted, the precision parameter actually - // specifies the precision. The timestamp is set to the greatest - // integer unit less than the provided timestamp (floor). - require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time) + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(tt.input))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 400, resp.StatusCode) + require.Equal(t, tt.expected, resp.Header["X-Influxdb-Error"][0]) + }) + } } func TestWriteParseErrors(t *testing.T) { @@ -638,7 +781,7 @@ func TestWriteParseErrors(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(tt.input))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 400, resp.StatusCode) require.Equal(t, tt.expected, resp.Header["X-Influxdb-Error"][0]) }) diff --git a/plugins/inputs/influxdb_listener/sample.conf b/plugins/inputs/influxdb_listener/sample.conf new file mode 100644 index 0000000000000..f8d4aa8e3c94c --- /dev/null +++ b/plugins/inputs/influxdb_listener/sample.conf @@ -0,0 +1,47 @@ +# Accept metrics over InfluxDB 1.x HTTP API +[[inputs.influxdb_listener]] + ## Address and port to host HTTP listener on + service_address = ":8186" + + ## maximum duration before timing out read of the request + read_timeout = "10s" + ## maximum duration before timing out write of the response + write_timeout = "10s" + + ## Maximum allowed HTTP request body size in bytes. + ## 0 means to use the default of 32MiB. + max_body_size = 0 + + ## Maximum line size allowed to be sent in bytes. 
+ ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored + # max_line_size = 0 + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + + ## Optional tag name used to store the database name. + ## If the write has a database in the query string then it will be kept in this tag name. + ## This tag can be used in downstream outputs. + ## The default value of nothing means it will be off and the database will not be recorded. + ## If you have a tag that is the same as the one specified below, and supply a database, + ## the tag will be overwritten with the database supplied. + # database_tag = "" + + ## If set the retention policy specified in the write query will be added as + ## the value of this tag name. + # retention_policy_tag = "" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + # basic_username = "foobar" + # basic_password = "barfoo" + + ## Influx line protocol parser + ## 'internal' is the default. 'upstream' is a newer parser that is faster + ## and more memory efficient. + # parser_type = "internal" diff --git a/plugins/inputs/influxdb_v2_listener/README.md b/plugins/inputs/influxdb_v2_listener/README.md index 4258e021d85fd..0da74db360736 100644 --- a/plugins/inputs/influxdb_v2_listener/README.md +++ b/plugins/inputs/influxdb_v2_listener/README.md @@ -5,15 +5,16 @@ according to the [InfluxDB HTTP API][influxdb_http_api]. The intent of the plugin is to allow Telegraf to serve as a proxy/router for the `/api/v2/write` endpoint of the InfluxDB HTTP API. -The `/api/v2/write` endpoint supports the `precision` query parameter and can be set -to one of `ns`, `us`, `ms`, `s`. All other parameters are ignored and -defer to the output plugins configuration. +The `/api/v2/write` endpoint supports the `precision` query parameter and can be +set to one of `ns`, `us`, `ms`, `s`. All other parameters are ignored and defer +to the output plugins' configuration. Telegraf minimum version: Telegraf 1.16.0 -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Accept metrics over InfluxDB 2.x HTTP API [[inputs.influxdb_v2_listener]] ## Address and port to host InfluxDB listener on ## (Double check the port. Could be 9999 if using OSS Beta) @@ -40,17 +41,23 @@ Telegraf minimum version: Telegraf 1.16.0 ## Optional token to accept for HTTP authentication. ## You probably want to make sure you have TLS configured above for this. # token = "some-long-shared-secret-token" + + ## Influx line protocol parser + ## 'internal' is the default. 'upstream' is a newer parser that is faster + ## and more memory efficient. + # parser_type = "internal" ``` -### Metrics: +## Metrics Metrics are created from InfluxDB Line Protocol in the request body.
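+
+As a rough sketch (the bucket name and metric values below are illustrative,
+and the listener is assumed to be running on `localhost:8086`), any HTTP
+client can post line protocol to this endpoint, e.g. from Go:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+func main() {
+	// One metric in line protocol; precision=s marks the trailing
+	// timestamp as seconds.
+	body := strings.NewReader("cpu_load_short,host=server01 value=0.64 1434055562")
+	resp, err := http.Post(
+		"http://localhost:8086/api/v2/write?bucket=mybucket&precision=s",
+		"text/plain; charset=utf-8", body)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status) // a successful write returns "204 No Content"
+}
+```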
-### Troubleshooting: +## Troubleshooting **Example Query:** -``` + +```sh curl -i -XPOST 'http://localhost:8086/api/v2/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' ``` -[influxdb_http_api]: https://v2.docs.influxdata.com/v2.0/api/ +[influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/api/ diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go index 30c449f7dd910..33374c8df3a9b 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener.go @@ -1,30 +1,41 @@ +//go:generate ../../../tools/readme_config_includer/generator package influxdb_v2_listener import ( "compress/gzip" "context" "crypto/tls" + _ "embed" "encoding/json" + "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/plugins/parsers/influx/influx_upstream" "github.com/influxdata/telegraf/selfstat" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( // defaultMaxBodySize is the default maximum request body size, in bytes. // if the request body is over this size, we will return an HTTP 413 error. defaultMaxBodySize = 32 * 1024 * 1024 ) +var ErrEOF = errors.New("EOF") + // The BadRequestCode constants keep standard error messages // see: https://v2.docs.influxdata.com/v2.0/api/#operation/PostWrite type BadRequestCode string @@ -39,9 +50,10 @@ type InfluxDBV2Listener struct { port int tlsint.ServerConfig - MaxBodySize internal.Size `toml:"max_body_size"` - Token string `toml:"token"` - BucketTag string `toml:"bucket_tag"` + MaxBodySize config.Size `toml:"max_body_size"` + Token string `toml:"token"` + BucketTag string `toml:"bucket_tag"` + ParserType string `toml:"parser_type"` timeFunc influx.TimeFunc @@ -65,42 +77,10 @@ type InfluxDBV2Listener struct { mux http.ServeMux } -const sampleConfig = ` - ## Address and port to host InfluxDB listener on - ## (Double check the port. Could be 9999 if using OSS Beta) - service_address = ":8086" - - ## Maximum allowed HTTP request body size in bytes. - ## 0 means to use the default of 32MiB. - # max_body_size = "32MiB" - - ## Optional tag to determine the bucket. - ## If the write has a bucket in the query string then it will be kept in this tag name. - ## This tag can be used in downstream outputs. - ## The default value of nothing means it will be off and the database will not be recorded. - # bucket_tag = "" - - ## Set one or more allowed client CA certificate file names to - ## enable mutually authenticated TLS connections - # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Add service certificate and key - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Optional token to accept for HTTP authentication. - ## You probably want to make sure you have TLS configured above for this.
- # token = "some-long-shared-secret-token" -` - -func (h *InfluxDBV2Listener) SampleConfig() string { +func (*InfluxDBV2Listener) SampleConfig() string { return sampleConfig } -func (h *InfluxDBV2Listener) Description() string { - return "Accept metrics over InfluxDB 2.x HTTP API" -} - func (h *InfluxDBV2Listener) Gather(_ telegraf.Accumulator) error { return nil } @@ -134,8 +114,8 @@ func (h *InfluxDBV2Listener) Init() error { h.authFailures = selfstat.Register("influxdb_v2_listener", "auth_failures", tags) h.routes() - if h.MaxBodySize.Size == 0 { - h.MaxBodySize.Size = defaultMaxBodySize + if h.MaxBodySize == 0 { + h.MaxBodySize = config.Size(defaultMaxBodySize) } return nil @@ -210,7 +190,9 @@ func (h *InfluxDBV2Listener) handleReady() http.HandlerFunc { "started": h.startTime.Format(time.RFC3339Nano), "status": "ready", "up": h.timeFunc().Sub(h.startTime).String()}) - res.Write(b) + if _, err := res.Write(b); err != nil { + h.Log.Debugf("error writing in handle-ready: %v", err) + } } } @@ -225,22 +207,26 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { return func(res http.ResponseWriter, req *http.Request) { defer h.writesServed.Incr(1) // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize.Size { - tooLarge(res, h.MaxBodySize.Size) + if req.ContentLength > int64(h.MaxBodySize) { + if err := tooLarge(res, int64(h.MaxBodySize)); err != nil { + h.Log.Debugf("error in too-large: %v", err) + } return } bucket := req.URL.Query().Get("bucket") body := req.Body - body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + body = http.MaxBytesReader(res, body, int64(h.MaxBodySize)) // Handle gzip request bodies if req.Header.Get("Content-Encoding") == "gzip" { var err error body, err = gzip.NewReader(body) if err != nil { h.Log.Debugf("Error decompressing request body: %v", err.Error()) - badRequest(res, Invalid, err.Error()) + if err := badRequest(res, Invalid, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } defer body.Close() @@ -249,30 +235,47 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { var readErr error var bytes []byte //body = http.MaxBytesReader(res, req.Body, 1000000) //p.MaxBodySize.Size) - bytes, readErr = ioutil.ReadAll(body) + bytes, readErr = io.ReadAll(body) if readErr != nil { h.Log.Debugf("Error parsing the request body: %v", readErr.Error()) - badRequest(res, InternalError, readErr.Error()) + if err := badRequest(res, InternalError, readErr.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } - metricHandler := influx.NewMetricHandler() - parser := influx.NewParser(metricHandler) - parser.SetTimeFunc(h.timeFunc) precisionStr := req.URL.Query().Get("precision") - if precisionStr != "" { - precision := getPrecisionMultiplier(precisionStr) - metricHandler.SetTimePrecision(precision) - } var metrics []telegraf.Metric var err error + if h.ParserType == "upstream" { + parser := influx_upstream.NewParser() + parser.SetTimeFunc(influx_upstream.TimeFunc(h.timeFunc)) + + if precisionStr != "" { + precision := getPrecisionMultiplier(precisionStr) + parser.SetTimePrecision(precision) + } - metrics, err = parser.Parse(bytes) + metrics, err = parser.Parse(bytes) + } else { + metricHandler := influx.NewMetricHandler() + parser := influx.NewParser(metricHandler) + parser.SetTimeFunc(h.timeFunc) - if err != influx.EOF && err != nil { + if precisionStr != "" { + precision := getPrecisionMultiplier(precisionStr) + 
metricHandler.SetTimePrecision(precision) + } + + metrics, err = parser.Parse(bytes) + } + + if err != ErrEOF && err != nil { h.Log.Debugf("Error parsing the request body: %v", err.Error()) - badRequest(res, Invalid, err.Error()) + if err := badRequest(res, Invalid, err.Error()); err != nil { + h.Log.Debugf("error in bad-request: %v", err) + } return } @@ -290,7 +293,7 @@ func (h *InfluxDBV2Listener) handleWrite() http.HandlerFunc { } } -func tooLarge(res http.ResponseWriter, maxLength int64) { +func tooLarge(res http.ResponseWriter, maxLength int64) error { res.Header().Set("Content-Type", "application/json") res.Header().Set("X-Influxdb-Error", "http: request body too large") res.WriteHeader(http.StatusRequestEntityTooLarge) @@ -298,10 +301,11 @@ func tooLarge(res http.ResponseWriter, maxLength int64) { "code": fmt.Sprint(Invalid), "message": "http: request body too large", "maxLength": fmt.Sprint(maxLength)}) - res.Write(b) + _, err := res.Write(b) + return err } -func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) { +func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) error { res.Header().Set("Content-Type", "application/json") if errString == "" { errString = "http: bad request" @@ -314,7 +318,8 @@ func badRequest(res http.ResponseWriter, code BadRequestCode, errString string) "op": "", "err": errString, }) - res.Write(b) + _, err := res.Write(b) + return err } func getPrecisionMultiplier(precision string) time.Duration { diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go index e1e2c7090b359..219d59a93863e 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_benchmark_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" ) @@ -20,9 +20,7 @@ func newListener() *InfluxDBV2Listener { acc: &testutil.NopAccumulator{}, bytesRecv: selfstat.Register("influxdb_v2_listener", "bytes_received", map[string]string{}), writesServed: selfstat.Register("influxdb_v2_listener", "writes_served", map[string]string{}), - MaxBodySize: internal.Size{ - Size: defaultMaxBodySize, - }, + MaxBodySize: config.Size(defaultMaxBodySize), } return listener } diff --git a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go index 2a80bb4d351e6..7e2e2421bdc9e 100644 --- a/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go +++ b/plugins/inputs/influxdb_v2_listener/influxdb_v2_listener_test.go @@ -5,18 +5,20 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "io" "net/http" "net/url" + "os" "runtime" "strconv" "sync" "testing" "time" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" ) const ( @@ -42,7 +44,13 @@ cpu,host=c value1=1` ) var ( - pki = testutil.NewPKI("../../../testutil/pki") + pki = testutil.NewPKI("../../../testutil/pki") + parserTestCases = []struct { + parser string + }{ + {"upstream"}, + {"internal"}, + } ) func newTestListener() *InfluxDBV2Listener { @@ -115,7 +123,7 @@ func TestWriteSecureNoClientAuth(t *testing.T) 
{ // post single message to listener resp, err := noClientAuthClient.Post(createURL(listener, "https", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -130,7 +138,7 @@ func TestWriteSecureWithClientAuth(t *testing.T) { // post single message to listener resp, err := getSecureClient().Post(createURL(listener, "https", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) } @@ -149,7 +157,7 @@ func TestWriteTokenAuth(t *testing.T) { req.Header.Set("Authorization", fmt.Sprintf("Token %s", token)) resp, err := client.Do(req) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, http.StatusNoContent, resp.StatusCode) } @@ -167,7 +175,7 @@ func TestWriteKeepBucket(t *testing.T) { // post single message to listener resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -179,7 +187,7 @@ func TestWriteKeepBucket(t *testing.T) { // post single message to listener with a database tag in it already. It should be clobbered. resp, err = http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgWithDB))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) @@ -191,7 +199,7 @@ func TestWriteKeepBucket(t *testing.T) { // post multiple message to listener resp, err = http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(2) @@ -207,77 +215,96 @@ func TestWriteKeepBucket(t *testing.T) { // http listener should add a newline at the end of the buffer if it's not there func TestWriteNoNewline(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) - - acc.Wait(1) - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": "server01"}, - ) + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + 
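// the body had no trailing newline; the listener should still have parsed one metric +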
acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) + }) + } } func TestAllOrNothing(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testPartial))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 400, resp.StatusCode) + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testPartial))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 400, resp.StatusCode) + }) + } } func TestWriteMaxLineSizeIncrease(t *testing.T) { - listener := &InfluxDBV2Listener{ - Log: testutil.Logger{}, - ServiceAddress: "localhost:0", - timeFunc: time.Now, - } - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := &InfluxDBV2Listener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + timeFunc: time.Now, + ParserType: tc.parser, + } - // Post a gigantic metric to the listener and verify that it writes OK this time: - resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // Post a gigantic metric to the listener and verify that it writes OK this time: + resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + }) + } } func TestWriteVerySmallMaxBody(t *testing.T) { - listener := &InfluxDBV2Listener{ - Log: testutil.Logger{}, - ServiceAddress: "localhost:0", - MaxBodySize: internal.Size{Size: 4096}, - timeFunc: time.Now, - } + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := &InfluxDBV2Listener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + MaxBodySize: config.Size(4096), + timeFunc: time.Now, + ParserType: tc.parser, + } - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() - resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 413, resp.StatusCode) + resp, err := 
http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 413, resp.StatusCode) + }) + } } func TestWriteLargeLine(t *testing.T) { @@ -296,7 +323,7 @@ func TestWriteLargeLine(t *testing.T) { resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) //todo: with the new parser, long lines aren't a problem. Do we need to skip them? //require.EqualValues(t, 400, resp.StatusCode) @@ -363,7 +390,7 @@ func TestWriteGzippedData(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + data, err := os.ReadFile("./testdata/testmsgs.gz") require.NoError(t, err) req, err := http.NewRequest("POST", createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), bytes.NewBuffer(data)) @@ -373,6 +400,7 @@ func TestWriteGzippedData(t *testing.T) { client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) hostTags := []string{"server02", "server03", @@ -406,63 +434,84 @@ func TestWriteHighTraffic(t *testing.T) { defer innerwg.Done() for i := 0; i < 500; i++ { resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + if err != nil { + return + } + if err := resp.Body.Close(); err != nil { + return + } + if resp.StatusCode != 204 { + return + } } }(&wg) } wg.Wait() - listener.Gather(acc) + require.NoError(t, listener.Gather(acc)) acc.Wait(25000) require.Equal(t, int64(25000), int64(acc.NMetrics())) } func TestReceive404ForInvalidEndpoint(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 404, resp.StatusCode) + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 404, resp.StatusCode) + }) + } } func TestWriteInvalid(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(badMsg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 400, resp.StatusCode) + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", 
tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(badMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 400, resp.StatusCode) + }) + } } func TestWriteEmpty(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - // post single message to listener - resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(emptyMsg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/api/v2/write", "bucket=mybucket"), "", bytes.NewBuffer([]byte(emptyMsg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + }) + } } func TestReady(t *testing.T) { @@ -479,33 +528,37 @@ func TestReady(t *testing.T) { resp, err := http.Get(createURL(listener, "http", "/api/v2/ready", "")) require.NoError(t, err) require.Equal(t, "application/json", resp.Header["Content-Type"][0]) - bodyBytes, err := ioutil.ReadAll(resp.Body) + bodyBytes, err := io.ReadAll(resp.Body) require.NoError(t, err) require.Contains(t, string(bodyBytes), "\"status\":\"ready\"") - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 200, resp.StatusCode) } func TestWriteWithPrecision(t *testing.T) { - listener := newTestListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Init()) - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - msg := "xyzzy value=42 1422568543\n" - resp, err := http.Post( - createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) - - acc.Wait(1) - require.Equal(t, 1, len(acc.Metrics)) - // When timestamp is provided, the precision parameter is - // overloaded to specify the timestamp's unit - require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time) + for _, tc := range parserTestCases { + t.Run(fmt.Sprintf("parser %s", tc.parser), func(t *testing.T) { + listener := newTestListener() + listener.ParserType = tc.parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + msg := "xyzzy value=42 1422568543\n" + resp, err := http.Post( + createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg))) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + // When timestamp is provided, the precision parameter is + // overloaded to 
specify the timestamp's unit + require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time) + }) + } } func TestWriteWithPrecisionNoTimestamp(t *testing.T) { @@ -523,7 +576,7 @@ func TestWriteWithPrecisionNoTimestamp(t *testing.T) { resp, err := http.Post( createURL(listener, "http", "/api/v2/write", "bucket=mybucket&precision=s"), "", bytes.NewBuffer([]byte(msg))) require.NoError(t, err) - resp.Body.Close() + require.NoError(t, resp.Body.Close()) require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) diff --git a/plugins/inputs/influxdb_v2_listener/sample.conf b/plugins/inputs/influxdb_v2_listener/sample.conf new file mode 100644 index 0000000000000..dec888cfc39bd --- /dev/null +++ b/plugins/inputs/influxdb_v2_listener/sample.conf @@ -0,0 +1,32 @@ +# Accept metrics over InfluxDB 2.x HTTP API +[[inputs.influxdb_v2_listener]] + ## Address and port to host InfluxDB listener on + ## (Double check the port. Could be 9999 if using OSS Beta) + service_address = ":8086" + + ## Maximum allowed HTTP request body size in bytes. + ## 0 means to use the default of 32MiB. + # max_body_size = "32MiB" + + ## Optional tag to determine the bucket. + ## If the write has a bucket in the query string then it will be kept in this tag name. + ## This tag can be used in downstream outputs. + ## The default value of nothing means it will be off and the bucket will not be recorded. + # bucket_tag = "" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Optional token to accept for HTTP authentication. + ## You probably want to make sure you have TLS configured above for this. + # token = "some-long-shared-secret-token" + + ## Influx line protocol parser + ## 'internal' is the default. 'upstream' is a newer parser that is faster + ## and more memory efficient. + # parser_type = "internal" diff --git a/plugins/inputs/intel_pmu/README.md b/plugins/inputs/intel_pmu/README.md new file mode 100644 index 0000000000000..659e4fa475cc1 --- /dev/null +++ b/plugins/inputs/intel_pmu/README.md @@ -0,0 +1,236 @@ +# Intel Performance Monitoring Unit Plugin + +This input plugin exposes Intel PMU (Performance Monitoring Unit) metrics available through the [Linux Perf](https://perf.wiki.kernel.org/index.php/Main_Page) subsystem. + +PMU metrics give insight into the performance and health of the IA processor's internal components, including core and uncore units. With the number of cores increasing and processor topology getting more complex, insight into these metrics is vital to ensure the best CPU performance and utilization. + +Performance counters are CPU hardware registers that count hardware events such as instructions executed, cache-misses suffered, or branches mispredicted. They form a basis for profiling applications to trace dynamic control flow and identify hotspots. + +## Configuration + +```toml @sample.conf +# Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem +[[inputs.intel_pmu]] + ## List of filesystem locations of JSON files that contain PMU event definitions. + event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] + + ## List of core events measurement entities. There can be more than one core_events section.
+ [[inputs.intel_pmu.core_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## A single entry can contain the name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all core events from the provided event_definitions are counted, skipping unresolvable ones. + events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] + + ## Limits the counting of events to the core numbers specified. + ## If absent, events are counted on all cores. + ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. + ## example: cores = ["0,2", "4", "12-16"] + cores = ["0"] + + ## Indicator that the plugin shall attempt to run core_events.events as a single perf group. + ## If absent or set to false, each event is counted individually. Defaults to false. + ## This limits the number of events that can be measured to a maximum of available hardware counters per core. + ## Could vary depending on the type of event and the use of fixed counters. + # perf_group = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + ## Can be applied to any group of events, unrelated to the perf_group setting. + # events_tag = "" + + ## List of uncore event measurement entities. There can be more than one uncore_events section. + [[inputs.intel_pmu.uncore_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## A single entry can contain the name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all uncore events from the provided event_definitions are counted, skipping unresolvable ones. + events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] + + ## Limits the counting of events to the specified sockets. + ## If absent, events are counted on all sockets. + ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. + ## example: sockets = ["0-2"] + sockets = ["0"] + + ## Indicator that the plugin shall provide an aggregated value for multiple units of the same type distributed in an uncore. + ## If absent or set to false, events for each unit are exposed as separate metrics. Defaults to false. + # aggregate_uncore_units = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + # events_tag = "" +``` + +### Modifiers + +Perf modifiers adjust event-specific perf attributes to fulfill particular requirements. Details about the perf attribute structure can be found in the [perf_event_open][man] syscall manual.
+ +The general schema of a configuration's `events` list element: + +```regexp +EVENT_NAME(:(config|config1|config2)=(0x[0-9a-f]{1,16})(p|k|u|h|H|I|G|D))* +``` + +where: + +| Modifier | Underlying attribute | Description | |----------|---------------------------------|-----------------------------| | config | perf_event_attr.config | type-specific configuration | | config1 | perf_event_attr.config1 | extension of config | | config2 | perf_event_attr.config2 | extension of config1 | | p | perf_event_attr.precise_ip | skid constraint | | k | perf_event_attr.exclude_user | don't count user | | u | perf_event_attr.exclude_kernel | don't count kernel | | h / H | perf_event_attr.exclude_guest | don't count in guest | | I | perf_event_attr.exclude_idle | don't count when idle | | G | perf_event_attr.exclude_hv | don't count hypervisor | | D | perf_event_attr.pinned | must always be on PMU | + +## Requirements + +The plugin uses the [iaevents](https://github.com/intel/iaevents) library, a Golang package that makes accessing the Linux kernel's perf interface easier. + +The Intel PMU plugin is only intended for use on **Linux 64-bit** systems. + +Event definition JSON files for specific architectures can be found at [01.org](https://download.01.org/perfmon/). A script to download the event definitions that are appropriate for your system (event_download.py) is available at [pmu-tools](https://github.com/andikleen/pmu-tools). Please keep these files in a safe place on your system. + +## Measuring + +The plugin allows measuring both core and uncore events. During plugin initialization, the event names provided by the user are compared with the event definitions included in the JSON files and translated to perf attributes. Next, those events are activated to start counting. During every Telegraf interval, the plugin reads the measurement for each previously activated event. + +Each core event may be counted separately on every available CPU core. In contrast, uncore events can be placed in many PMUs within a specified CPU package. The plugin allows choosing the core ids (core events) or socket ids (uncore events) on which the counting should be executed. Uncore events are activated separately on all of a socket's PMUs, and can be exposed as separate measurements or summed up into one measurement. + +Obtained measurements are stored as three values: **Raw**, **Enabled** and **Running**. Raw is the total count of the event. Enabled and running are the total time the event was enabled and running. Normally these are the same. If more events are started than there are available counter slots on the PMU, then multiplexing occurs and events only run part of the time. Therefore, the plugin provides a fourth value called **scaled**, which is calculated using the following formula: `raw * enabled / running`. + +Events are measured for all running processes. + +### Core event groups + +Perf allows assembling events as a group. A perf event group is scheduled onto the CPU as a unit: it will be put onto the CPU only if all of the events in the group can be put onto the CPU. This means that the values of the member events can be meaningfully compared with each other (added, divided to get ratios, and so on), since they have counted events for the same set of executed instructions [(source)][man]. + +> **NOTE:** Be aware that the plugin will throw an error when trying to create a > core event group whose size exceeds the available core PMU counters.
The error +> message from perf syscall will be shown as "invalid argument". If you want to +> check how many PMUs are supported by your Intel CPU, you can use the +> [cpuid](https://linux.die.net/man/1/cpuid) command. + +### Note about file descriptors + +The plugin opens a number of file descriptors dependent on number of monitored +CPUs and number of monitored counters. It can easily exceed the default per +process limit of allowed file descriptors. Depending on configuration, it might +be required to increase the limit of opened file descriptors allowed. This can +be done for example by using `ulimit -n command`. + +## Metrics + +On each Telegraf interval, Intel PMU plugin transmits following data: + +### Metric Fields + +| Field | Type | Description | +|---------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------| +| enabled | uint64 | time counter, contains time the associated perf event was enabled | +| running | uint64 | time counter, contains time the event was actually counted | +| raw | uint64 | value counter, contains event count value during the time the event was actually counted | +| scaled | uint64 | value counter, contains approximated value of counter if the event was continuously counted, using scaled = raw * (enabled / running) formula | + +### Metric Tags - common + +| Tag | Description | +|-------|------------------------------| +| host | hostname as read by Telegraf | +| event | name of the event | + +### Metric Tags - core events + +| Tag | Description | +|------------|----------------------------------------------------------------------------------------------------| +| cpu | CPU id as identified by linux OS (either logical cpu id when HT on or physical cpu id when HT off) | +| events_tag | (optional) tag as defined in "intel_pmu.core_events" configuration element | + +### Metric Tags - uncore events + +| Tag | Description | +|-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| socket | socket number as identified by linux OS (physical_package_id) | +| unit_type | type of event-capable PMU that the event was counted for, provides category of PMU that the event was counted for, e.g. cbox for uncore_cbox_1, r2pcie for uncore_r2pcie etc. | +| unit | name of event-capable PMU that the event was counted for, as listed in /sys/bus/event_source/devices/ e.g. uncore_cbox_1, uncore_imc_1 etc. 
Present for non-aggregated uncore events only | +| events_tag| (optional) tag as defined in "intel_pmu.uncore_events" configuration element | + +## Example outputs + +Event group: + +```text +pmu_metric,cpu=0,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871237051i,running=2871237051i,raw=1171711i,scaled=1171711i 1621254096000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz enabled=2871240713i,running=2871240713i,raw=72340716i,scaled=72340716i 1621254096000000000 +pmu_metric,cpu=1,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871118275i,running=2871118275i,raw=1646752i,scaled=1646752i 1621254096000000000 +pmu_metric,cpu=1,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz raw=108802421i,scaled=108802421i,enabled=2871120107i,running=2871120107i 1621254096000000000 +pmu_metric,cpu=2,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,events_tag=unhalted,host=xyz enabled=2871143950i,running=2871143950i,raw=1316834i,scaled=1316834i 1621254096000000000 +pmu_metric,cpu=2,event=CPU_CLK_UNHALTED.THREAD_P_ANY,events_tag=unhalted,host=xyz enabled=2871074681i,running=2871074681i,raw=68728436i,scaled=68728436i 1621254096000000000 +``` + +Uncore event not aggregated: + +```text +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_0,unit_type=cbox enabled=2870630747i,running=2870630747i,raw=183996i,scaled=183996i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_1,unit_type=cbox enabled=2870608194i,running=2870608194i,raw=185703i,scaled=185703i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_2,unit_type=cbox enabled=2870600211i,running=2870600211i,raw=187331i,scaled=187331i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_3,unit_type=cbox enabled=2870593914i,running=2870593914i,raw=184228i,scaled=184228i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_4,unit_type=cbox scaled=195355i,enabled=2870558952i,running=2870558952i,raw=195355i 1621254096000000000 +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit=uncore_cbox_5,unit_type=cbox enabled=2870554131i,running=2870554131i,raw=197756i,scaled=197756i 1621254096000000000 +``` + +Uncore event aggregated: + +```text +pmu_metric,event=UNC_CBO_XSNP_RESPONSE.MISS_XCORE,host=xyz,socket=0,unit_type=cbox enabled=13199712335i,running=13199712335i,raw=467485i,scaled=467485i 1621254412000000000 +``` + +Time multiplexing: + +```text +pmu_metric,cpu=0,event=CPU_CLK_THREAD_UNHALTED.REF_XCLK,host=xyz raw=2947727i,scaled=4428970i,enabled=2201071844i,running=1464935978i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.THREAD_P_ANY,host=xyz running=1465155618i,raw=302553190i,scaled=454511623i,enabled=2201035323i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.REF_XCLK,host=xyz enabled=2200994057i,running=1466812391i,raw=3177535i,scaled=4767982i 1621254412000000000 +pmu_metric,cpu=0,event=CPU_CLK_UNHALTED.REF_XCLK_ANY,host=xyz enabled=2200963921i,running=1470523496i,raw=3359272i,scaled=5027894i 1621254412000000000 +pmu_metric,cpu=0,event=L1D_PEND_MISS.PENDING_CYCLES_ANY,host=xyz enabled=2200933946i,running=1470322480i,raw=23631950i,scaled=35374798i 1621254412000000000 +pmu_metric,cpu=0,event=L1D_PEND_MISS.PENDING_CYCLES,host=xyz 
raw=18767833i,scaled=28169827i,enabled=2200888514i,running=1466317384i 1621254412000000000 +``` + +[man]: https://man7.org/linux/man-pages/man2/perf_event_open.2.html diff --git a/plugins/inputs/intel_pmu/activators.go b/plugins/inputs/intel_pmu/activators.go new file mode 100644 index 0000000000000..1750c72789c00 --- /dev/null +++ b/plugins/inputs/intel_pmu/activators.go @@ -0,0 +1,205 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + + ia "github.com/intel/iaevents" +) + +type placementMaker interface { + makeCorePlacements(cores []int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) + makeUncorePlacements(socket int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) +} + +type iaPlacementMaker struct{} + +func (iaPlacementMaker) makeCorePlacements(cores []int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) { + var err error + var corePlacements []ia.PlacementProvider + + switch len(cores) { + case 0: + return nil, errors.New("no cores provided") + case 1: + corePlacements, err = ia.NewCorePlacements(factory, cores[0]) + if err != nil { + return nil, err + } + default: + corePlacements, err = ia.NewCorePlacements(factory, cores[0], cores[1:]...) + if err != nil { + return nil, err + } + } + return corePlacements, nil +} + +func (iaPlacementMaker) makeUncorePlacements(socket int, factory ia.PlacementFactory) ([]ia.PlacementProvider, error) { + return ia.NewUncoreAllPlacements(factory, socket) +} + +type eventsActivator interface { + activateEvent(ia.Activator, ia.PlacementProvider, ia.Options) (*ia.ActiveEvent, error) + activateGroup(ia.PlacementProvider, []ia.CustomizableEvent) (*ia.ActiveEventGroup, error) + activateMulti(ia.MultiActivator, []ia.PlacementProvider, ia.Options) (*ia.ActiveMultiEvent, error) +} + +type iaEventsActivator struct{} + +func (iaEventsActivator) activateEvent(a ia.Activator, p ia.PlacementProvider, o ia.Options) (*ia.ActiveEvent, error) { + return a.Activate(p, ia.NewEventTargetProcess(-1, 0), o) +} + +func (iaEventsActivator) activateGroup(p ia.PlacementProvider, e []ia.CustomizableEvent) (*ia.ActiveEventGroup, error) { + return ia.ActivateGroup(p, ia.NewEventTargetProcess(-1, 0), e) +} + +func (iaEventsActivator) activateMulti(a ia.MultiActivator, p []ia.PlacementProvider, o ia.Options) (*ia.ActiveMultiEvent, error) { + return a.ActivateMulti(p, ia.NewEventTargetProcess(-1, 0), o) +} + +type entitiesActivator interface { + activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error +} + +type iaEntitiesActivator struct { + placementMaker placementMaker + perfActivator eventsActivator +} + +func (ea *iaEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + for _, coreEventsEntity := range coreEntities { + err := ea.activateCoreEvents(coreEventsEntity) + if err != nil { + return fmt.Errorf("failed to activate core events `%s`: %v", coreEventsEntity.EventsTag, err) + } + } + for _, uncoreEventsEntity := range uncoreEntities { + err := ea.activateUncoreEvents(uncoreEventsEntity) + if err != nil { + return fmt.Errorf("failed to activate uncore events `%s`: %v", uncoreEventsEntity.EventsTag, err) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateCoreEvents(entity *CoreEventEntity) error { + if entity == nil { + return fmt.Errorf("core events entity is nil") + } + if ea.placementMaker == nil { + return fmt.Errorf("placement maker is nil") + } + if entity.PerfGroup { 
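+ // all events of the entity are activated together as a single perf group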
+ err := ea.activateCoreEventsGroup(entity) + if err != nil { + return fmt.Errorf("failed to activate core events group: %v", err) + } + } else { + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("core parsed event is nil") + } + placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, event.custom.Event) + if err != nil { + return fmt.Errorf("failed to create core placements for event `%s`: %v", event.name, err) + } + activeEvent, err := ea.activateEventForPlacements(event, placements) + if err != nil { + return fmt.Errorf("failed to activate core event `%s`: %v", event.name, err) + } + entity.activeEvents = append(entity.activeEvents, activeEvent...) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateUncoreEvents(entity *UncoreEventEntity) error { + if entity == nil { + return fmt.Errorf("uncore events entity is nil") + } + if ea.perfActivator == nil || ea.placementMaker == nil { + return fmt.Errorf("events activator or placement maker is nil") + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("uncore parsed event is nil") + } + perfEvent := event.custom.Event + if perfEvent == nil { + return fmt.Errorf("perf event of `%s` event is nil", event.name) + } + options := event.custom.Options + + for _, socket := range entity.parsedSockets { + placements, err := ea.placementMaker.makeUncorePlacements(socket, perfEvent) + if err != nil { + return fmt.Errorf("failed to create uncore placements for event `%s`: %v", event.name, err) + } + activeMultiEvent, err := ea.perfActivator.activateMulti(perfEvent, placements, options) + if err != nil { + return fmt.Errorf("failed to activate multi event `%s`: %v", event.name, err) + } + events := activeMultiEvent.Events() + entity.activeMultiEvents = append(entity.activeMultiEvents, multiEvent{events, perfEvent, socket}) + } + } + return nil +} + +func (ea *iaEntitiesActivator) activateCoreEventsGroup(entity *CoreEventEntity) error { + if ea.perfActivator == nil || ea.placementMaker == nil { + return fmt.Errorf("missing perf activator or placement maker") + } + if entity == nil || len(entity.parsedEvents) < 1 { + return fmt.Errorf("missing parsed events") + } + + var events []ia.CustomizableEvent + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("core event is nil") + } + events = append(events, event.custom) + } + leader := entity.parsedEvents[0].custom + + placements, err := ea.placementMaker.makeCorePlacements(entity.parsedCores, leader.Event) + if err != nil { + return fmt.Errorf("failed to make core placements: %v", err) + } + + for _, plc := range placements { + activeGroup, err := ea.perfActivator.activateGroup(plc, events) + if err != nil { + return err + } + entity.activeEvents = append(entity.activeEvents, activeGroup.Events()...) 
+ } + return nil +} + +func (ea *iaEntitiesActivator) activateEventForPlacements(event *eventWithQuals, placements []ia.PlacementProvider) ([]*ia.ActiveEvent, error) { + if event == nil { + return nil, fmt.Errorf("core event is nil") + } + if ea.perfActivator == nil { + return nil, fmt.Errorf("missing perf activator") + } + var activeEvents []*ia.ActiveEvent + for _, placement := range placements { + perfEvent := event.custom.Event + options := event.custom.Options + + activeEvent, err := ea.perfActivator.activateEvent(perfEvent, placement, options) + if err != nil { + return nil, fmt.Errorf("failed to activate event `%s`: %v", event.name, err) + } + activeEvents = append(activeEvents, activeEvent) + } + return activeEvents, nil +} diff --git a/plugins/inputs/intel_pmu/activators_test.go b/plugins/inputs/intel_pmu/activators_test.go new file mode 100644 index 0000000000000..28f05710d3e69 --- /dev/null +++ b/plugins/inputs/intel_pmu/activators_test.go @@ -0,0 +1,432 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "testing" + + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +type mockPlacementFactory struct { + err bool +} + +func (m *mockPlacementFactory) NewPlacements(_ string, cpu int, cpus ...int) ([]ia.PlacementProvider, error) { + if m.err { + return nil, errors.New("mock error") + } + placements := []ia.PlacementProvider{ + &ia.Placement{CPU: cpu, PMUType: 4}, + } + for _, cpu := range cpus { + placements = append(placements, &ia.Placement{CPU: cpu, PMUType: 4}) + } + return placements, nil +} + +func TestActivateEntities(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{} + + // more core test cases in TestActivateCoreEvents + t.Run("failed to activate core events", func(t *testing.T) { + tag := "TAG" + mEntities := []*CoreEventEntity{{EventsTag: tag}} + err := mEntitiesActivator.activateEntities(mEntities, nil) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core events `%s`", tag)) + }) + + // more uncore test cases in TestActivateUncoreEvents + t.Run("failed to activate uncore events", func(t *testing.T) { + tag := "TAG" + mEntities := []*UncoreEventEntity{{EventsTag: tag}} + err := mEntitiesActivator.activateEntities(nil, mEntities) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate uncore events `%s`", tag)) + }) + + t.Run("nothing to do", func(t *testing.T) { + err := mEntitiesActivator.activateEntities(nil, nil) + require.NoError(t, err) + }) +} + +func TestActivateUncoreEvents(t *testing.T) { + mActivator := &mockEventsActivator{} + mMaker := &mockPlacementMaker{} + errMock := fmt.Errorf("error mock") + + t.Run("entity is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + err := mEntitiesActivator.activateUncoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "uncore events entity is nil") + }) + + t.Run("event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "uncore parsed event is nil") + }) + + t.Run("perf event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} 
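+ // a parsed event whose embedded perf event is nil must be rejected with an error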
+ name := "event name" + mEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name, custom: ia.CustomizableEvent{Event: nil}}}} + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("perf event of `%s` event is nil", name)) + }) + + t.Run("placement maker and perf activator is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: nil, perfActivator: nil} + err := mEntitiesActivator.activateUncoreEvents(&UncoreEventEntity{}) + require.Error(t, err) + require.Contains(t, err.Error(), "events activator or placement maker is nil") + }) + + t.Run("failed to create placements", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + eventName := "mock event 1" + parsedEvents := []*eventWithQuals{{name: eventName, custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: eventName}}}} + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + + mMaker.On("makeUncorePlacements", parsedEvents[0].custom.Event, mEntity.parsedSockets[0]).Return(nil, errMock).Once() + err := mEntitiesActivator.activateUncoreEvents(mEntity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("ailed to create uncore placements for event `%s`", eventName)) + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate event", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + eventName := "mock event 1" + parsedEvents := []*eventWithQuals{{name: eventName, custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: eventName}}}} + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}} + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + + mMaker.On("makeUncorePlacements", parsedEvents[0].custom.Event, mEntity.parsedSockets[0]).Return(placements, nil).Once() + mActivator.On("activateMulti", parsedEvents[0].custom.Event, placements, parsedEvents[0].custom.Options).Return(nil, errMock).Once() + + err := mEntitiesActivator.activateUncoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate multi event `%s`", eventName)) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("successfully activate core events", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{ + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 2", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 3", Uncore: true}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 4", Uncore: true}}}, + } + mEntity := &UncoreEventEntity{parsedEvents: parsedEvents, parsedSockets: []int{0, 1, 2}} + placements := []ia.PlacementProvider{&ia.Placement{}, &ia.Placement{}, &ia.Placement{}} + + var expectedEvents []multiEvent + for _, event := range parsedEvents { + for _, socket := range mEntity.parsedSockets { + mMaker.On("makeUncorePlacements", event.custom.Event, socket).Return(placements, nil).Once() + newActiveMultiEvent := &ia.ActiveMultiEvent{} + expectedEvents = append(expectedEvents, multiEvent{newActiveMultiEvent.Events(), event.custom.Event, socket}) + 
mActivator.On("activateMulti", event.custom.Event, placements, event.custom.Options).Return(newActiveMultiEvent, nil).Once() + } + } + err := mEntitiesActivator.activateUncoreEvents(mEntity) + + require.NoError(t, err) + require.Equal(t, expectedEvents, mEntity.activeMultiEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestActivateCoreEvents(t *testing.T) { + mMaker := &mockPlacementMaker{} + mActivator := &mockEventsActivator{} + errMock := fmt.Errorf("error mock") + + t.Run("entity is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + err := mEntitiesActivator.activateCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "core events entity is nil") + }) + + t.Run("placement maker is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: nil, perfActivator: mActivator} + err := mEntitiesActivator.activateCoreEvents(&CoreEventEntity{}) + require.Error(t, err) + require.Contains(t, err.Error(), "placement maker is nil") + }) + + t.Run("event is nil", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + mEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "core parsed event is nil") + }) + + t.Run("failed to create placements", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + parsedEvents := []*eventWithQuals{{name: "mock event 1", custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + mMaker.On("makeCorePlacements", mEntity.parsedCores, parsedEvents[0].custom.Event).Return(nil, errMock).Once() + err := mEntitiesActivator.activateCoreEvents(mEntity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to create core placements for event `%s`", parsedEvents[0].name)) + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate event", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{{name: "mock event 1", custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}} + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + event := parsedEvents[0] + plc := placements[0] + mMaker.On("makeCorePlacements", mEntity.parsedCores, event.custom.Event).Return(placements, nil).Once() + mActivator.On("activateEvent", event.custom.Event, plc, event.custom.Options).Return(nil, errMock).Once() + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate core event `%s`", parsedEvents[0].name)) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("failed to activate core events group", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: nil} + mEntity := &CoreEventEntity{PerfGroup: true, parsedEvents: nil} + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.Error(t, err) + 
require.Contains(t, err.Error(), "failed to activate core events group") + }) + + t.Run("successfully activate core events", func(t *testing.T) { + mEntitiesActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + + parsedEvents := []*eventWithQuals{ + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 1"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 2"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 3"}}}, + {custom: ia.CustomizableEvent{Event: &ia.PerfEvent{Name: "mock event 4"}}}, + } + placements := []ia.PlacementProvider{&ia.Placement{CPU: 0}, &ia.Placement{CPU: 1}, &ia.Placement{CPU: 2}} + mEntity := &CoreEventEntity{PerfGroup: false, parsedEvents: parsedEvents, parsedCores: []int{0, 1, 2}} + + var activeEvents []*ia.ActiveEvent + for _, event := range parsedEvents { + mMaker.On("makeCorePlacements", mEntity.parsedCores, event.custom.Event).Return(placements, nil).Once() + for _, plc := range placements { + newActiveEvent := &ia.ActiveEvent{PerfEvent: event.custom.Event} + activeEvents = append(activeEvents, newActiveEvent) + mActivator.On("activateEvent", event.custom.Event, plc, event.custom.Options).Return(newActiveEvent, nil).Once() + } + } + + err := mEntitiesActivator.activateCoreEvents(mEntity) + require.NoError(t, err) + require.Equal(t, activeEvents, mEntity.activeEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestActivateCoreEventsGroup(t *testing.T) { + mMaker := &mockPlacementMaker{} + mActivator := &mockEventsActivator{} + eActivator := &iaEntitiesActivator{placementMaker: mMaker, perfActivator: mActivator} + errMock := errors.New("mock error") + + leader := &ia.PerfEvent{Name: "mock event 1"} + perfEvent2 := &ia.PerfEvent{Name: "mock event 2"} + + parsedEvents := []*eventWithQuals{{custom: ia.CustomizableEvent{Event: leader}}, {custom: ia.CustomizableEvent{Event: perfEvent2}}} + placements := []ia.PlacementProvider{&ia.Placement{}, &ia.Placement{}} + + // cannot populate this struct due to unexported events field + activeGroup := &ia.ActiveEventGroup{} + + mEntity := &CoreEventEntity{ + EventsTag: "mock group", + PerfGroup: true, + parsedEvents: parsedEvents, + parsedCores: nil, + } + + var events []ia.CustomizableEvent + for _, event := range parsedEvents { + events = append(events, event.custom) + } + + t.Run("missing perf activator and placement maker", func(t *testing.T) { + mActivator := &iaEntitiesActivator{} + err := mActivator.activateCoreEventsGroup(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "missing perf activator or placement maker") + }) + + t.Run("missing parsed events", func(t *testing.T) { + mActivator := &iaEntitiesActivator{placementMaker: &mockPlacementMaker{}, perfActivator: &mockEventsActivator{}} + err := mActivator.activateCoreEventsGroup(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "missing parsed events") + }) + + t.Run("nil in parsed event", func(t *testing.T) { + mEntity := &CoreEventEntity{EventsTag: "Nice tag", PerfGroup: true, parsedEvents: []*eventWithQuals{nil, nil}} + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), "core event is nil") + }) + + t.Run("failed to make core placements", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(nil, errMock).Once() + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, 
err.Error(), "failed to make core placements") + mMaker.AssertExpectations(t) + }) + + t.Run("failed to activate group", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(placements, nil).Once() + mActivator.On("activateGroup", placements[0], events).Return(nil, errMock).Once() + + err := eActivator.activateCoreEventsGroup(mEntity) + require.Error(t, err) + require.Contains(t, err.Error(), errMock.Error()) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + var allActive []*ia.ActiveEvent + t.Run("successfully activated group", func(t *testing.T) { + mMaker.On("makeCorePlacements", mEntity.parsedCores, leader).Return(placements, nil).Once() + for _, plc := range placements { + mActivator.On("activateGroup", plc, events).Return(activeGroup, nil).Once() + allActive = append(allActive, activeGroup.Events()...) + } + + err := eActivator.activateCoreEventsGroup(mEntity) + require.NoError(t, err) + require.Equal(t, allActive, mEntity.activeEvents) + mMaker.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestMakeCorePlacements(t *testing.T) { + tests := []struct { + name string + cores []int + perfEvent ia.PlacementFactory + result []ia.PlacementProvider + errMsg string + }{ + {"no cores", nil, &ia.PerfEvent{}, nil, "no cores provided"}, + {"one core placement", []int{1}, &mockPlacementFactory{}, []ia.PlacementProvider{&ia.Placement{CPU: 1, PMUType: 4}}, ""}, + {"multiple core placement", []int{1, 2, 4}, &mockPlacementFactory{}, []ia.PlacementProvider{ + &ia.Placement{CPU: 1, PMUType: 4}, + &ia.Placement{CPU: 2, PMUType: 4}, + &ia.Placement{CPU: 4, PMUType: 4}}, + ""}, + {"placement factory error", []int{1}, &mockPlacementFactory{true}, nil, "mock error"}, + {"placement factory error 2", []int{1, 2, 3}, &mockPlacementFactory{true}, nil, "mock error"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + maker := &iaPlacementMaker{} + providers, err := maker.makeCorePlacements(test.cores, test.perfEvent) + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Nil(t, providers) + require.Contains(t, err.Error(), test.errMsg) + return + } + require.NoError(t, err) + require.Equal(t, test.result, providers) + }) + } +} + +func TestActivateEventForPlacement(t *testing.T) { + placement1 := &ia.Placement{CPU: 0} + placement2 := &ia.Placement{CPU: 1} + placement3 := &ia.Placement{CPU: 2} + + mPlacements := []ia.PlacementProvider{placement1, placement2, placement3} + + mPerfEvent := &ia.PerfEvent{Name: "mock1"} + mOptions := &ia.PerfEventOptions{} + mEvent := &eventWithQuals{name: mPerfEvent.Name, custom: ia.CustomizableEvent{Event: mPerfEvent, Options: mOptions}} + + mPerfActivator := &mockEventsActivator{} + mActivator := &iaEntitiesActivator{perfActivator: mPerfActivator} + + t.Run("event is nil", func(t *testing.T) { + activeEvents, err := mActivator.activateEventForPlacements(nil, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), "core event is nil") + require.Nil(t, activeEvents) + }) + + t.Run("perf activator is nil", func(t *testing.T) { + mActivator := &iaEntitiesActivator{} + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), "missing perf activator") + require.Nil(t, activeEvents) + }) + + t.Run("placements are nil", func(t *testing.T) { + activeEvents, err := mActivator.activateEventForPlacements(mEvent, nil) + require.NoError(t, err) + require.Nil(t, activeEvents) + 
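// nil placements produce no active events and no error +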
}) + + t.Run("activation error", func(t *testing.T) { + mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(nil, errors.New("err")) + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to activate event `%s`", mEvent.name)) + require.Nil(t, activeEvents) + mPerfActivator.AssertExpectations(t) + }) + + t.Run("successfully activated", func(t *testing.T) { + mActiveEvent := &ia.ActiveEvent{} + mActiveEvent2 := &ia.ActiveEvent{} + mActiveEvent3 := &ia.ActiveEvent{} + + mPerfActivator.On("activateEvent", mPerfEvent, placement1, mOptions).Once().Return(mActiveEvent, nil). + On("activateEvent", mPerfEvent, placement2, mOptions).Once().Return(mActiveEvent2, nil). + On("activateEvent", mPerfEvent, placement3, mOptions).Once().Return(mActiveEvent3, nil) + + activeEvents, err := mActivator.activateEventForPlacements(mEvent, mPlacements) + require.NoError(t, err) + require.Len(t, activeEvents, len(mPlacements)) + require.Contains(t, activeEvents, mActiveEvent) + require.Contains(t, activeEvents, mActiveEvent2) + mPerfActivator.AssertExpectations(t) + }) +} diff --git a/plugins/inputs/intel_pmu/config.go b/plugins/inputs/intel_pmu/config.go new file mode 100644 index 0000000000000..c788744e9549b --- /dev/null +++ b/plugins/inputs/intel_pmu/config.go @@ -0,0 +1,239 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "strconv" + "strings" + + "github.com/influxdata/telegraf" +) + +// Maximum size of core IDs or socket IDs (8192). Based on the maximum number of CPUs that the Linux kernel supports. +const maxIDsSize = 1 << 13 + +type entitiesParser interface { + parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) +} + +type configParser struct { + log telegraf.Logger + sys sysInfoProvider +} + +func (cp *configParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) (err error) { + if len(coreEntities) == 0 && len(uncoreEntities) == 0 { + return fmt.Errorf("neither core nor uncore entities configured") + } + + for _, coreEntity := range coreEntities { + if coreEntity == nil { + return fmt.Errorf("core entity is nil") + } + if coreEntity.Events == nil { + if cp.log != nil { + cp.log.Debug("all core events from provided files will be configured") + } + coreEntity.allEvents = true + } else { + events := cp.parseEvents(coreEntity.Events) + if events == nil { + return fmt.Errorf("an empty list of core events was provided") + } + coreEntity.parsedEvents = events + } + + coreEntity.parsedCores, err = cp.parseCores(coreEntity.Cores) + if err != nil { + return fmt.Errorf("error during cores parsing: %v", err) + } + } + + for _, uncoreEntity := range uncoreEntities { + if uncoreEntity == nil { + return fmt.Errorf("uncore entity is nil") + } + if uncoreEntity.Events == nil { + if cp.log != nil { + cp.log.Debug("all uncore events from provided files will be configured") + } + uncoreEntity.allEvents = true + } else { + events := cp.parseEvents(uncoreEntity.Events) + if events == nil { + return fmt.Errorf("an empty list of uncore events was provided") + } + uncoreEntity.parsedEvents = events + } + + uncoreEntity.parsedSockets, err = cp.parseSockets(uncoreEntity.Sockets) + if err != nil { + return fmt.Errorf("error during sockets parsing: %v", err) + } + } + return nil +} + +func (cp *configParser) parseEvents(events []string) []*eventWithQuals { + if len(events) == 0 { +
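// an explicitly empty event list is invalid; returning nil lets parseEntities report it to the user +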
return nil + } + + events, duplications := removeDuplicateStrings(events) + for _, duplication := range duplications { + if cp.log != nil { + cp.log.Warnf("duplicated event `%s` will be removed", duplication) + } + } + return parseEventsWithQualifiers(events) +} + +func (cp *configParser) parseCores(cores []string) ([]int, error) { + if cores == nil { + if cp.log != nil { + cp.log.Debug("all possible cores will be configured") + } + if cp.sys == nil { + return nil, fmt.Errorf("system info provider is nil") + } + cores, err := cp.sys.allCPUs() + if err != nil { + return nil, fmt.Errorf("cannot obtain all cpus: %v", err) + } + return cores, nil + } + if len(cores) == 0 { + return nil, fmt.Errorf("an empty list of cores was provided") + } + + result, err := cp.parseIntRanges(cores) + if err != nil { + return nil, err + } + return result, nil +} + +func (cp *configParser) parseSockets(sockets []string) ([]int, error) { + if sockets == nil { + if cp.log != nil { + cp.log.Debug("all possible sockets will be configured") + } + if cp.sys == nil { + return nil, fmt.Errorf("system info provider is nil") + } + sockets, err := cp.sys.allSockets() + if err != nil { + return nil, fmt.Errorf("cannot obtain all sockets: %v", err) + } + return sockets, nil + } + if len(sockets) == 0 { + return nil, fmt.Errorf("an empty list of sockets was provided") + } + + result, err := cp.parseIntRanges(sockets) + if err != nil { + return nil, err + } + return result, nil +} + +func (cp *configParser) parseIntRanges(ranges []string) ([]int, error) { + var ids []int + var duplicatedIDs []int + var err error + ids, err = parseIDs(ranges) + if err != nil { + return nil, err + } + ids, duplicatedIDs = removeDuplicateValues(ids) + for _, duplication := range duplicatedIDs { + if cp.log != nil { + cp.log.Warnf("duplicated id number `%d` will be removed", duplication) + } + } + return ids, nil +} + +func parseEventsWithQualifiers(events []string) []*eventWithQuals { + var result []*eventWithQuals + + for _, event := range events { + newEventWithQualifiers := &eventWithQuals{} + + split := strings.Split(event, ":") + newEventWithQualifiers.name = split[0] + + if len(split) > 1 { + newEventWithQualifiers.qualifiers = split[1:] + } + result = append(result, newEventWithQualifiers) + } + return result +} + +// parseIDs parses a list of IDs and ID ranges, e.g. "0,1,4" or "0-15", into a slice of ints. +func parseIDs(allIDsStrings []string) ([]int, error) { + var result []int + for _, idsString := range allIDsStrings { + ids := strings.Split(idsString, ",") + + for _, id := range ids { + id := strings.TrimSpace(id) + // range support, e.g. "a-b" + var start, end uint + n, err := fmt.Sscanf(id, "%d-%d", &start, &end) + if err == nil && n == 2 { + if start >= end { + return nil, fmt.Errorf("`%d` is equal to or greater than `%d`", start, end) + } + for ; start <= end; start++ { + if len(result)+1 > maxIDsSize { + return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize) + } + result = append(result, int(start)) + } + continue + } + // Single value + num, err := strconv.Atoi(id) + if err != nil { + return nil, fmt.Errorf("wrong format for id number `%s`: %v", id, err) + } + if len(result)+1 > maxIDsSize { + return nil, fmt.Errorf("requested number of IDs exceeds max size `%d`", maxIDsSize) + } + result = append(result, num) + } + } + return result, nil +} + +func removeDuplicateValues(intSlice []int) (result []int, duplicates []int) { + keys := make(map[int]bool) + + for _, entry := range intSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + result = append(result, entry) + } else { + duplicates =
append(duplicates, entry) + } + } + return result, duplicates +} + +func removeDuplicateStrings(strSlice []string) (result []string, duplicates []string) { + keys := make(map[string]bool) + + for _, entry := range strSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + result = append(result, entry) + } else { + duplicates = append(duplicates, entry) + } + } + return result, duplicates +} diff --git a/plugins/inputs/intel_pmu/config_test.go b/plugins/inputs/intel_pmu/config_test.go new file mode 100644 index 0000000000000..5a0f288e3b443 --- /dev/null +++ b/plugins/inputs/intel_pmu/config_test.go @@ -0,0 +1,230 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "math" + "testing" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestConfigParser_parseEntities(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + e := ia.CustomizableEvent{} + + t.Run("no entities", func(t *testing.T) { + err := mConfigParser.parseEntities(nil, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "neither core nor uncore entities configured") + }) + + // more specific parsing cases in TestConfigParser_parseIntRanges and TestConfigParser_parseEvents + coreTests := []struct { + name string + + coreEntity *CoreEventEntity + parsedCoreEvents []*eventWithQuals + parsedCores []int + coreAll bool + + uncoreEntity *UncoreEventEntity + parsedUncoreEvents []*eventWithQuals + parsedSockets []int + uncoreAll bool + + failMsg string + }{ + {"no events provided", + &CoreEventEntity{Events: nil, Cores: []string{"1"}}, nil, []int{1}, true, + &UncoreEventEntity{Events: nil, Sockets: []string{"0"}}, nil, []int{0}, true, + ""}, + {"uncore entity is nil", + &CoreEventEntity{Events: []string{"EVENT"}, Cores: []string{"1,2"}}, []*eventWithQuals{{"EVENT", nil, e}}, []int{1, 2}, false, + nil, nil, nil, false, + "uncore entity is nil"}, + {"core entity is nil", + nil, nil, nil, false, + &UncoreEventEntity{Events: []string{"EVENT"}, Sockets: []string{"1,2"}}, []*eventWithQuals{{"EVENT", nil, e}}, []int{1, 2}, false, + "core entity is nil"}, + {"error parsing sockets", + &CoreEventEntity{Events: nil, Cores: []string{"1,2"}}, nil, []int{1, 2}, true, + &UncoreEventEntity{Events: []string{"E"}, Sockets: []string{"wrong sockets"}}, []*eventWithQuals{{"E", nil, e}}, nil, false, + "error during sockets parsing"}, + {"error parsing cores", + &CoreEventEntity{Events: nil, Cores: []string{"wrong cpus"}}, nil, nil, true, + &UncoreEventEntity{Events: nil, Sockets: []string{"0,1"}}, nil, []int{0, 1}, true, + "error during cores parsing"}, + {"valid settings", + &CoreEventEntity{Events: []string{"E1", "E2:config=123"}, Cores: []string{"1-5"}}, []*eventWithQuals{{"E1", nil, e}, {"E2", []string{"config=123"}, e}}, []int{1, 2, 3, 4, 5}, false, + &UncoreEventEntity{Events: []string{"E1", "E2", "E3"}, Sockets: []string{"0,2-6"}}, []*eventWithQuals{{"E1", nil, e}, {"E2", nil, e}, {"E3", nil, e}}, []int{0, 2, 3, 4, 5, 6}, false, + ""}, + } + + for _, test := range coreTests { + t.Run(test.name, func(t *testing.T) { + coreEntities := []*CoreEventEntity{test.coreEntity} + uncoreEntities := []*UncoreEventEntity{test.uncoreEntity} + + err := mConfigParser.parseEntities(coreEntities, uncoreEntities) + + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + return + } + 
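// happy path: all parsed fields should be populated on both entities +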
require.NoError(t, err) + require.Equal(t, test.coreAll, test.coreEntity.allEvents) + require.Equal(t, test.parsedCores, test.coreEntity.parsedCores) + require.Equal(t, test.parsedCoreEvents, test.coreEntity.parsedEvents) + + require.Equal(t, test.uncoreAll, test.uncoreEntity.allEvents) + require.Equal(t, test.parsedSockets, test.uncoreEntity.parsedSockets) + require.Equal(t, test.parsedUncoreEvents, test.uncoreEntity.parsedEvents) + }) + } +} + +func TestConfigParser_parseCores(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + + t.Run("no cores provided", func(t *testing.T) { + t.Run("system info provider is nil", func(t *testing.T) { + result, err := (&configParser{}).parseCores(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "system info provider is nil") + require.Nil(t, result) + }) + t.Run("cannot gather all cpus info", func(t *testing.T) { + mSysInfo.On("allCPUs").Return(nil, errors.New("all cpus error")).Once() + result, err := mConfigParser.parseCores(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot obtain all cpus") + require.Nil(t, result) + mSysInfo.AssertExpectations(t) + }) + t.Run("all cpus gathering succeeded", func(t *testing.T) { + allCPUs := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11} + + mSysInfo.On("allCPUs").Return(allCPUs, nil).Once() + result, err := mConfigParser.parseCores(nil) + require.NoError(t, err) + require.Equal(t, allCPUs, result) + mSysInfo.AssertExpectations(t) + }) + }) +} + +func TestConfigParser_parseSockets(t *testing.T) { + mSysInfo := &mockSysInfoProvider{} + mConfigParser := &configParser{ + sys: mSysInfo, + log: testutil.Logger{}, + } + + t.Run("no sockets provided", func(t *testing.T) { + t.Run("system info provider is nil", func(t *testing.T) { + result, err := (&configParser{}).parseSockets(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "system info provider is nil") + require.Nil(t, result) + }) + t.Run("cannot gather all sockets info", func(t *testing.T) { + mSysInfo.On("allSockets").Return(nil, errors.New("all sockets error")).Once() + result, err := mConfigParser.parseSockets(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot obtain all sockets") + require.Nil(t, result) + mSysInfo.AssertExpectations(t) + }) + t.Run("all sockets gathering succeeded", func(t *testing.T) { + allSockets := []int{0, 1, 2, 3, 4} + + mSysInfo.On("allSockets").Return(allSockets, nil).Once() + result, err := mConfigParser.parseSockets(nil) + require.NoError(t, err) + require.Equal(t, allSockets, result) + mSysInfo.AssertExpectations(t) + }) + }) +} + +func TestConfigParser_parseEvents(t *testing.T) { + mConfigParser := &configParser{log: testutil.Logger{}} + e := ia.CustomizableEvent{} + + tests := []struct { + name string + input []string + result []*eventWithQuals + }{ + {"no events", nil, nil}, + {"single string", []string{"mock string"}, []*eventWithQuals{{"mock string", nil, e}}}, + {"two events", []string{"EVENT.FIRST", "EVENT.SECOND"}, []*eventWithQuals{{"EVENT.FIRST", nil, e}, {"EVENT.SECOND", nil, e}}}, + {"event with configs", []string{"EVENT.SECOND:config1=0x404300k:config2=0x404300k"}, + []*eventWithQuals{{"EVENT.SECOND", []string{"config1=0x404300k", "config2=0x404300k"}, e}}}, + {"two events with modifiers", []string{"EVENT.FIRST:config1=0x200300:config2=0x231100:u:H", "EVENT.SECOND:K:p"}, + []*eventWithQuals{{"EVENT.FIRST", []string{"config1=0x200300", "config2=0x231100", "u", "H"}, e},
{"EVENT.SECOND", []string{"K", "p"}, e}}}, + {"duplicates", []string{"EVENT1", "EVENT1", "EVENT2"}, []*eventWithQuals{{"EVENT1", nil, e}, {"EVENT2", nil, e}}}, + {"duplicates with different configs", []string{"EVENT1:config1", "EVENT1:config2"}, + []*eventWithQuals{{"EVENT1", []string{"config1"}, e}, {"EVENT1", []string{"config2"}, e}}}, + {"duplicates with the same modifiers", []string{"EVENT1:config1", "EVENT1:config1"}, + []*eventWithQuals{{"EVENT1", []string{"config1"}, e}}}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := mConfigParser.parseEvents(test.input) + require.Equal(t, test.result, result) + }) + } +} + +func TestConfigParser_parseIntRanges(t *testing.T) { + mConfigParser := &configParser{log: testutil.Logger{}} + tests := []struct { + name string + input []string + result []int + failMsg string + }{ + {"coma separated", []string{"0,1,2,3,4"}, []int{0, 1, 2, 3, 4}, ""}, + {"range", []string{"0-10"}, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, ""}, + {"mixed", []string{"0-3", "4", "12-16"}, []int{0, 1, 2, 3, 4, 12, 13, 14, 15, 16}, ""}, + {"min and max values", []string{"-2147483648", "2147483647"}, []int{math.MinInt32, math.MaxInt32}, ""}, + {"should remove duplicates", []string{"1-5", "2-6"}, []int{1, 2, 3, 4, 5, 6}, ""}, + {"wrong format", []string{"1,2,3%$S,-100"}, nil, "wrong format for id"}, + {"start is greater than end", []string{"10-3"}, nil, "`10` is equal or greater than `3"}, + {"too big value", []string{"18446744073709551615"}, nil, "wrong format for id"}, + {"too much numbers", []string{fmt.Sprintf("0-%d", maxIDsSize)}, nil, + fmt.Sprintf("requested number of IDs exceeds max size `%d`", maxIDsSize)}, + {"too much numbers mixed", []string{fmt.Sprintf("1-%d", maxIDsSize), "0"}, nil, + fmt.Sprintf("requested number of IDs exceeds max size `%d`", maxIDsSize)}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result, err := mConfigParser.parseIntRanges(test.input) + require.Equal(t, test.result, result) + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + return + } + require.NoError(t, err) + }) + } +} diff --git a/plugins/inputs/intel_pmu/intel_pmu.go b/plugins/inputs/intel_pmu/intel_pmu.go new file mode 100644 index 0000000000000..6728a7c9439ee --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu.go @@ -0,0 +1,434 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + _ "embed" + "fmt" + "io/ioutil" + "math" + "math/big" + "os" + "strconv" + "strings" + "syscall" + + ia "github.com/intel/iaevents" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +// Linux availability: https://www.kernel.org/doc/Documentation/sysctl/fs.txt +const fileMaxPath = "/proc/sys/fs/file-max" + +type fileInfoProvider interface { + readFile(string) ([]byte, error) + lstat(string) (os.FileInfo, error) + fileLimit() (uint64, error) +} + +type fileHelper struct{} + +func (fileHelper) readFile(path string) ([]byte, error) { + return ioutil.ReadFile(path) +} + +func (fileHelper) lstat(path string) (os.FileInfo, error) { + return os.Lstat(path) +} + +func (fileHelper) fileLimit() (uint64, error) { + var rLimit syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) + return rLimit.Cur, err +} + +type sysInfoProvider interface { + allCPUs() ([]int, error) + allSockets() ([]int, error) +} + +type iaSysInfo struct{} + +func (iaSysInfo) allCPUs() ([]int, error) { + return ia.AllCPUs() +} + +func (iaSysInfo) allSockets() ([]int, error) { + return ia.AllSockets() +} + +// IntelPMU is the plugin type. +type IntelPMU struct { + EventListPaths []string `toml:"event_definitions"` + CoreEntities []*CoreEventEntity `toml:"core_events"` + UncoreEntities []*UncoreEventEntity `toml:"uncore_events"` + + Log telegraf.Logger `toml:"-"` + + fileInfo fileInfoProvider + entitiesReader entitiesValuesReader +} + +// CoreEventEntity represents config section for core events. +type CoreEventEntity struct { + Events []string `toml:"events"` + Cores []string `toml:"cores"` + EventsTag string `toml:"events_tag"` + PerfGroup bool `toml:"perf_group"` + + parsedEvents []*eventWithQuals + parsedCores []int + allEvents bool + + activeEvents []*ia.ActiveEvent +} + +// UncoreEventEntity represents config section for uncore events. +type UncoreEventEntity struct { + Events []string `toml:"events"` + Sockets []string `toml:"sockets"` + Aggregate bool `toml:"aggregate_uncore_units"` + EventsTag string `toml:"events_tag"` + + parsedEvents []*eventWithQuals + parsedSockets []int + allEvents bool + + activeMultiEvents []multiEvent +} + +type multiEvent struct { + activeEvents []*ia.ActiveEvent + perfEvent *ia.PerfEvent + socket int +} + +type eventWithQuals struct { + name string + qualifiers []string + + custom ia.CustomizableEvent +} + +// Start is required for IntelPMU to implement the telegraf.ServiceInput interface. +// Necessary initialization and config checking are done in Init. 
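+// Event deactivation is handled in Stop.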
+func (IntelPMU) Start(_ telegraf.Accumulator) error { + return nil +} + +func (*IntelPMU) SampleConfig() string { + return sampleConfig +} + +func (i *IntelPMU) Init() error { + err := checkFiles(i.EventListPaths, i.fileInfo) + if err != nil { + return fmt.Errorf("error during event definitions paths validation: %v", err) + } + + reader, err := newReader(i.EventListPaths) + if err != nil { + return err + } + transformer := ia.NewPerfTransformer() + resolver := &iaEntitiesResolver{reader: reader, transformer: transformer, log: i.Log} + parser := &configParser{log: i.Log, sys: &iaSysInfo{}} + activator := &iaEntitiesActivator{perfActivator: &iaEventsActivator{}, placementMaker: &iaPlacementMaker{}} + + i.entitiesReader = &iaEntitiesValuesReader{eventReader: &iaValuesReader{}, timer: &realClock{}} + + return i.initialization(parser, resolver, activator) +} + +func (i *IntelPMU) initialization(parser entitiesParser, resolver entitiesResolver, activator entitiesActivator) error { + if parser == nil || resolver == nil || activator == nil { + return fmt.Errorf("entities parser and/or resolver and/or activator is nil") + } + + err := parser.parseEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during parsing configuration sections: %v", err) + } + + err = resolver.resolveEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during events resolving: %v", err) + } + + err = i.checkFileDescriptors() + if err != nil { + return fmt.Errorf("error during file descriptors checking: %v", err) + } + + err = activator.activateEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("error during events activation: %v", err) + } + return nil +} + +func (i *IntelPMU) checkFileDescriptors() error { + coreFd, err := estimateCoresFd(i.CoreEntities) + if err != nil { + return fmt.Errorf("failed to estimate number of core events file descriptors: %v", err) + } + uncoreFd, err := estimateUncoreFd(i.UncoreEntities) + if err != nil { + return fmt.Errorf("failed to estimate number of uncore events file descriptors: %v", err) + } + // guard against uint64 overflow when summing both estimates + if coreFd > math.MaxUint64-uncoreFd { + return fmt.Errorf("requested number of file descriptors exceeds uint64") + } + allFd := coreFd + uncoreFd + + // maximum file descriptors enforced on a kernel level + maxFd, err := readMaxFD(i.fileInfo) + if err != nil { + i.Log.Warnf("cannot obtain number of available file descriptors: %v", err) + } else if allFd > maxFd { + return fmt.Errorf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the maximum number", allFd, maxFd) + } + + // soft limit for current process + limit, err := i.fileInfo.fileLimit() + if err != nil { + i.Log.Warnf("cannot obtain limit value of open files: %v", err) + } else if allFd > limit { + return fmt.Errorf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+ + ": consider increasing the limit", allFd, limit) + } + + return nil +} + +func (i *IntelPMU) Gather(acc telegraf.Accumulator) error { + if i.entitiesReader == nil { + return fmt.Errorf("entities reader is nil") + } + coreMetrics, uncoreMetrics, err := i.entitiesReader.readEntities(i.CoreEntities, i.UncoreEntities) + if err != nil { + return fmt.Errorf("failed to read entities events values: %v", err) + } + + for id, m := range coreMetrics { + scaled := ia.EventScaledValue(m.values) + if !scaled.IsUint64() { + return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds
uint64", m.name, scaled.String()) + } + coreMetrics[id].scaled = scaled.Uint64() + } + for id, m := range uncoreMetrics { + scaled := ia.EventScaledValue(m.values) + if !scaled.IsUint64() { + return fmt.Errorf("cannot process `%s` scaled value `%s`: exceeds uint64", m.name, scaled.String()) + } + uncoreMetrics[id].scaled = scaled.Uint64() + } + + publishCoreMeasurements(coreMetrics, acc) + publishUncoreMeasurements(uncoreMetrics, acc) + + return nil +} + +func (i *IntelPMU) Stop() { + for _, entity := range i.CoreEntities { + if entity == nil { + continue + } + for _, event := range entity.activeEvents { + if event == nil { + continue + } + err := event.Deactivate() + if err != nil { + i.Log.Warnf("failed to deactivate core event `%s`: %v", event, err) + } + } + } + for _, entity := range i.UncoreEntities { + if entity == nil { + continue + } + for _, multi := range entity.activeMultiEvents { + for _, event := range multi.activeEvents { + if event == nil { + continue + } + err := event.Deactivate() + if err != nil { + i.Log.Warnf("failed to deactivate uncore event `%s`: %v", event, err) + } + } + } + } +} + +func newReader(files []string) (*ia.JSONFilesReader, error) { + reader := ia.NewFilesReader() + for _, file := range files { + err := reader.AddFiles(file) + if err != nil { + return nil, fmt.Errorf("failed to add files to reader: %v", err) + } + } + return reader, nil +} + +func estimateCoresFd(entities []*CoreEventEntity) (uint64, error) { + var err error + number := uint64(0) + for _, entity := range entities { + if entity == nil { + continue + } + events := uint64(len(entity.parsedEvents)) + cores := uint64(len(entity.parsedCores)) + number, err = multiplyAndAdd(events, cores, number) + if err != nil { + return 0, err + } + } + return number, nil +} + +func estimateUncoreFd(entities []*UncoreEventEntity) (uint64, error) { + var err error + number := uint64(0) + for _, entity := range entities { + if entity == nil { + continue + } + for _, e := range entity.parsedEvents { + if e.custom.Event == nil { + continue + } + pmus := uint64(len(e.custom.Event.PMUTypes)) + sockets := uint64(len(entity.parsedSockets)) + number, err = multiplyAndAdd(pmus, sockets, number) + if err != nil { + return 0, err + } + } + } + return number, nil +} + +func multiplyAndAdd(factorA uint64, factorB uint64, sum uint64) (uint64, error) { + bigA := new(big.Int).SetUint64(factorA) + bigB := new(big.Int).SetUint64(factorB) + activeEvents := new(big.Int).Mul(bigA, bigB) + if !activeEvents.IsUint64() { + return 0, fmt.Errorf("value `%s` cannot be represented as uint64", activeEvents.String()) + } + if sum > math.MaxUint64-activeEvents.Uint64() { + return 0, fmt.Errorf("value `%s` exceeds uint64", new(big.Int).Add(activeEvents, new(big.Int).SetUint64(sum))) + } + sum += activeEvents.Uint64() + return sum, nil +} + +func readMaxFD(reader fileInfoProvider) (uint64, error) { + if reader == nil { + return 0, fmt.Errorf("file reader is nil") + } + buf, err := reader.readFile(fileMaxPath) + if err != nil { + return 0, fmt.Errorf("cannot open `%s` file: %v", fileMaxPath, err) + } + max, err := strconv.ParseUint(strings.Trim(string(buf), "\n "), 10, 64) + if err != nil { + return 0, fmt.Errorf("cannot parse file content of `%s`: %v", fileMaxPath, err) + } + return max, nil +} + +func checkFiles(paths []string, fileInfo fileInfoProvider) error { + // No event definition JSON locations present + if len(paths) == 0 { + return fmt.Errorf("no paths were given") + } + if fileInfo == nil { + return fmt.Errorf("file info provider 
is nil") + } + // Wrong files + for _, path := range paths { + lInfo, err := fileInfo.lstat(path) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("file `%s` doesn't exist", path) + } + return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err) + } + mode := lInfo.Mode() + if mode&os.ModeSymlink != 0 { + return fmt.Errorf("file %s is a symlink", path) + } + if !mode.IsRegular() { + return fmt.Errorf("file `%s` doesn't point to a reagular file", path) + } + } + return nil +} + +func publishCoreMeasurements(metrics []coreMetric, acc telegraf.Accumulator) { + for _, m := range metrics { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["raw"] = m.values.Raw + fields["enabled"] = m.values.Enabled + fields["running"] = m.values.Running + fields["scaled"] = m.scaled + + tags["event"] = m.name + tags["cpu"] = strconv.Itoa(m.cpu) + + if len(m.tag) > 0 { + tags["events_tag"] = m.tag + } + acc.AddFields("pmu_metric", fields, tags, m.time) + } +} + +func publishUncoreMeasurements(metrics []uncoreMetric, acc telegraf.Accumulator) { + for _, m := range metrics { + fields := make(map[string]interface{}) + tags := make(map[string]string) + + fields["raw"] = m.values.Raw + fields["enabled"] = m.values.Enabled + fields["running"] = m.values.Running + fields["scaled"] = m.scaled + + tags["event"] = m.name + + tags["socket"] = strconv.Itoa(m.socket) + tags["unit_type"] = m.unitType + if !m.agg { + tags["unit"] = m.unit + } + if len(m.tag) > 0 { + tags["events_tag"] = m.tag + } + acc.AddFields("pmu_metric", fields, tags, m.time) + } +} + +func init() { + inputs.Add("intel_pmu", func() telegraf.Input { + pmu := IntelPMU{ + fileInfo: &fileHelper{}, + } + return &pmu + }) +} diff --git a/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go b/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go new file mode 100644 index 0000000000000..64c7f5bbf1ce1 --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu_notamd64linux.go @@ -0,0 +1,4 @@ +//go:build !linux || !amd64 +// +build !linux !amd64 + +package intel_pmu diff --git a/plugins/inputs/intel_pmu/intel_pmu_test.go b/plugins/inputs/intel_pmu/intel_pmu_test.go new file mode 100644 index 0000000000000..5b3cd2ca7201e --- /dev/null +++ b/plugins/inputs/intel_pmu/intel_pmu_test.go @@ -0,0 +1,555 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "math" + "os" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestInitialization(t *testing.T) { + mError := errors.New("mock error") + mParser := &mockEntitiesParser{} + mResolver := &mockEntitiesResolver{} + mActivator := &mockEntitiesActivator{} + mFileInfo := &mockFileInfoProvider{} + + file := "path/to/file" + paths := []string{file} + + t.Run("missing parser, resolver or activator", func(t *testing.T) { + err := (&IntelPMU{}).initialization(mParser, nil, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + err = (&IntelPMU{}).initialization(nil, mResolver, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + err = (&IntelPMU{}).initialization(nil, nil, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "entities parser and/or resolver and/or activator is nil") + }) + + t.Run("parse entities error", func(t *testing.T) { + mIntelPMU := 
&IntelPMU{EventListPaths: paths, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during parsing configuration sections") + mParser.AssertExpectations(t) + }) + + t.Run("resolver error", func(t *testing.T) { + mIntelPMU := &IntelPMU{EventListPaths: paths, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during events resolving") + mParser.AssertExpectations(t) + }) + + t.Run("exceeded file descriptors", func(t *testing.T) { + limit := []byte("10") + uncoreEntities := []*UncoreEventEntity{{parsedEvents: makeEvents(10, 21), parsedSockets: makeIDs(5)}} + estimation := 1050 + + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo, UncoreEntities: uncoreEntities} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mFileInfo.On("readFile", fileMaxPath).Return(limit, nil).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the maximum number", estimation, 10)) + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + }) + + t.Run("failed to activate entities", func(t *testing.T) { + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mActivator.On("activateEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(mError).Once() + mFileInfo.On("readFile", fileMaxPath).Return(nil, mError). + On("fileLimit").Return(uint64(0), mError).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.Error(t, err) + require.Contains(t, err.Error(), "error during events activation") + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) + + t.Run("everything all right", func(t *testing.T) { + mIntelPMU := IntelPMU{EventListPaths: paths, Log: testutil.Logger{}, fileInfo: mFileInfo} + + mParser.On("parseEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mResolver.On("resolveEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + mFileInfo.On("readFile", fileMaxPath).Return(nil, mError). 
+ On("fileLimit").Return(uint64(0), mError).Once() + mActivator.On("activateEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities).Return(nil).Once() + + err := mIntelPMU.initialization(mParser, mResolver, mActivator) + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + mParser.AssertExpectations(t) + mResolver.AssertExpectations(t) + mActivator.AssertExpectations(t) + }) +} + +func TestGather(t *testing.T) { + mEntitiesValuesReader := &mockEntitiesValuesReader{} + mAcc := &testutil.Accumulator{} + + mIntelPMU := &IntelPMU{entitiesReader: mEntitiesValuesReader} + + type fieldWithTags struct { + fields map[string]interface{} + tags map[string]string + } + + t.Run("entities reader is nil", func(t *testing.T) { + err := (&IntelPMU{entitiesReader: nil}).Gather(mAcc) + + require.Error(t, err) + require.Contains(t, err.Error(), "entities reader is nil") + }) + + t.Run("error while reading entities", func(t *testing.T) { + errMock := fmt.Errorf("houston we have a problem") + mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities). + Return(nil, nil, errMock).Once() + + err := mIntelPMU.Gather(mAcc) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read entities events values: %v", errMock)) + mEntitiesValuesReader.AssertExpectations(t) + }) + + tests := []struct { + name string + coreMetrics []coreMetric + uncoreMetrics []uncoreMetric + results []fieldWithTags + errMSg string + }{ + { + name: "successful readings", + coreMetrics: []coreMetric{ + { + values: ia.CounterValue{Raw: 100, Enabled: 200, Running: 200}, + name: "CORE_EVENT_1", + tag: "DOGES", + cpu: 1, + }, + { + values: ia.CounterValue{Raw: 2100, Enabled: 400, Running: 200}, + name: "CORE_EVENT_2", + cpu: 0, + }, + }, + uncoreMetrics: []uncoreMetric{ + { + values: ia.CounterValue{Raw: 2134562, Enabled: 1000000, Running: 1000000}, + name: "UNCORE_EVENT_1", + tag: "SHIBA", + unitType: "cbox", + unit: "cbox_1", + socket: 3, + agg: false, + }, + { + values: ia.CounterValue{Raw: 2134562, Enabled: 3222222, Running: 2100000}, + name: "UNCORE_EVENT_2", + unitType: "cbox", + socket: 0, + agg: true, + }, + }, + results: []fieldWithTags{ + { + fields: map[string]interface{}{ + "raw": uint64(100), + "enabled": uint64(200), + "running": uint64(200), + "scaled": uint64(100), + }, + tags: map[string]string{ + "event": "CORE_EVENT_1", + "cpu": "1", + "events_tag": "DOGES", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2100), + "enabled": uint64(400), + "running": uint64(200), + "scaled": uint64(4200), + }, + tags: map[string]string{ + "event": "CORE_EVENT_2", + "cpu": "0", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2134562), + "enabled": uint64(1000000), + "running": uint64(1000000), + "scaled": uint64(2134562), + }, + tags: map[string]string{ + "event": "UNCORE_EVENT_1", + "events_tag": "SHIBA", + "socket": "3", + "unit_type": "cbox", + "unit": "cbox_1", + }, + }, + { + fields: map[string]interface{}{ + "raw": uint64(2134562), + "enabled": uint64(3222222), + "running": uint64(2100000), + "scaled": uint64(3275253), + }, + tags: map[string]string{ + "event": "UNCORE_EVENT_2", + "socket": "0", + "unit_type": "cbox", + }, + }, + }, + }, + { + name: "core scaled value greater then max uint64", + coreMetrics: []coreMetric{ + { + values: ia.CounterValue{Raw: math.MaxUint64, Enabled: 400000, Running: 200000}, + name: "I_AM_TOO_BIG", + tag: "BIG_FISH", + }, + }, + errMSg: "cannot process `I_AM_TOO_BIG` scaled value `36893488147419103230`: 
exceeds uint64", + }, + { + name: "uncore scaled value greater then max uint64", + uncoreMetrics: []uncoreMetric{ + { + values: ia.CounterValue{Raw: math.MaxUint64, Enabled: 400000, Running: 200000}, + name: "I_AM_TOO_BIG_UNCORE", + tag: "BIG_FISH", + }, + }, + errMSg: "cannot process `I_AM_TOO_BIG_UNCORE` scaled value `36893488147419103230`: exceeds uint64", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mEntitiesValuesReader.On("readEntities", mIntelPMU.CoreEntities, mIntelPMU.UncoreEntities). + Return(test.coreMetrics, test.uncoreMetrics, nil).Once() + + err := mIntelPMU.Gather(mAcc) + + mEntitiesValuesReader.AssertExpectations(t) + if len(test.errMSg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMSg) + return + } + require.NoError(t, err) + for _, result := range test.results { + mAcc.AssertContainsTaggedFields(t, "pmu_metric", result.fields, result.tags) + } + }) + } +} + +func TestCheckFileDescriptors(t *testing.T) { + tests := []struct { + name string + uncores []*UncoreEventEntity + cores []*CoreEventEntity + estimation uint64 + maxFD []byte + fileLimit uint64 + errMsg string + }{ + {"exceed maximum file descriptors number", []*UncoreEventEntity{ + {parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}, + {parsedEvents: makeEvents(25, 3), parsedSockets: makeIDs(7)}, + {parsedEvents: makeEvents(2, 7), parsedSockets: makeIDs(20)}}, + []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}, + {parsedEvents: makeEvents(25, 1), parsedCores: makeIDs(7)}, + {parsedEvents: makeEvents(2, 1), parsedCores: makeIDs(20)}}, + 12020, []byte("11000"), 8000, fmt.Sprintf("required file descriptors number `%d` exceeds maximum number of available file descriptors `%d`"+ + ": consider increasing the maximum number", 12020, 11000), + }, + {"exceed soft file limit", []*UncoreEventEntity{{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}}, []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}}, + 11000, []byte("2515357"), 800, fmt.Sprintf("required file descriptors number `%d` exceeds soft limit of open files `%d`"+ + ": consider increasing the limit", 11000, 800), + }, + {"no exceeds", []*UncoreEventEntity{{parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}}, + []*CoreEventEntity{{parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}}, + 11000, []byte("2515357"), 13000, "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mFileInfo := &mockFileInfoProvider{} + mIntelPMU := IntelPMU{ + CoreEntities: test.cores, + UncoreEntities: test.uncores, + fileInfo: mFileInfo, + Log: testutil.Logger{}, + } + mFileInfo.On("readFile", fileMaxPath).Return(test.maxFD, nil). 
+ On("fileLimit").Return(test.fileLimit, nil).Once() + + err := mIntelPMU.checkFileDescriptors() + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMsg) + return + } + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + }) + } +} + +func TestEstimateUncoreFd(t *testing.T) { + tests := []struct { + name string + entities []*UncoreEventEntity + result uint64 + }{ + {"nil entities", nil, 0}, + {"nil perf event", []*UncoreEventEntity{{parsedEvents: []*eventWithQuals{{"", nil, ia.CustomizableEvent{}}}, parsedSockets: makeIDs(0)}}, 0}, + {"one uncore entity", []*UncoreEventEntity{{parsedEvents: makeEvents(10, 10), parsedSockets: makeIDs(20)}}, 2000}, + {"nil entity", []*UncoreEventEntity{nil, {parsedEvents: makeEvents(1, 8), parsedSockets: makeIDs(1)}}, 8}, + {"many core entities", []*UncoreEventEntity{ + {parsedEvents: makeEvents(100, 21), parsedSockets: makeIDs(5)}, + {parsedEvents: makeEvents(25, 3), parsedSockets: makeIDs(7)}, + {parsedEvents: makeEvents(2, 7), parsedSockets: makeIDs(20)}, + }, 11305}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mIntelPMU := IntelPMU{UncoreEntities: test.entities} + result, err := estimateUncoreFd(mIntelPMU.UncoreEntities) + require.Equal(t, test.result, result) + require.NoError(t, err) + }) + } +} + +func TestEstimateCoresFd(t *testing.T) { + tests := []struct { + name string + entities []*CoreEventEntity + result uint64 + }{ + {"nil entities", nil, 0}, + {"one core entity", []*CoreEventEntity{{parsedEvents: makeEvents(10, 1), parsedCores: makeIDs(20)}}, 200}, + {"nil entity", []*CoreEventEntity{nil, {parsedEvents: makeEvents(10, 1), parsedCores: makeIDs(20)}}, 200}, + {"many core entities", []*CoreEventEntity{ + {parsedEvents: makeEvents(100, 1), parsedCores: makeIDs(5)}, + {parsedEvents: makeEvents(25, 1), parsedCores: makeIDs(7)}, + {parsedEvents: makeEvents(2, 1), parsedCores: makeIDs(20)}, + }, 715}, + {"1024 events", []*CoreEventEntity{{parsedEvents: makeEvents(1024, 1), parsedCores: makeIDs(12)}}, 12288}, + {"big number", []*CoreEventEntity{{parsedEvents: makeEvents(1024, 1), parsedCores: makeIDs(1048576)}}, 1073741824}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mIntelPMU := IntelPMU{CoreEntities: test.entities} + result, err := estimateCoresFd(mIntelPMU.CoreEntities) + require.NoError(t, err) + require.Equal(t, test.result, result) + }) + } +} + +func makeEvents(number int, pmusNumber int) []*eventWithQuals { + a := make([]*eventWithQuals, number) + for i := range a { + b := make([]ia.NamedPMUType, pmusNumber) + for j := range b { + b[j] = ia.NamedPMUType{} + } + a[i] = &eventWithQuals{fmt.Sprintf("EVENT.%d", i), nil, + ia.CustomizableEvent{Event: &ia.PerfEvent{PMUTypes: b}}, + } + } + return a +} + +func makeIDs(number int) []int { + a := make([]int, number) + for i := range a { + a[i] = i + } + return a +} + +func TestReadMaxFD(t *testing.T) { + mFileReader := &mockFileInfoProvider{} + + t.Run("reader is nil", func(t *testing.T) { + result, err := readMaxFD(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "file reader is nil") + require.Zero(t, result) + }) + + openErrorMsg := fmt.Sprintf("cannot open `%s` file", fileMaxPath) + parseErrorMsg := fmt.Sprintf("cannot parse file content of `%s`", fileMaxPath) + + tests := []struct { + name string + err error + content []byte + maxFD uint64 + failMsg string + }{ + {"read file error", fmt.Errorf("mock error"), nil, 0, openErrorMsg}, + {"file content parse error", 
nil, []byte("wrong format"), 0, parseErrorMsg}, + {"negative value reading", nil, []byte("-10000"), 0, parseErrorMsg}, + {"max uint exceeded", nil, []byte("18446744073709551616"), 0, parseErrorMsg}, + {"reading succeeded", nil, []byte("12343122"), 12343122, ""}, + {"min value reading", nil, []byte("0"), 0, ""}, + {"max uint 64 reading", nil, []byte("18446744073709551615"), math.MaxUint64, ""}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + mFileReader.On("readFile", fileMaxPath).Return(test.content, test.err).Once() + result, err := readMaxFD(mFileReader) + + if len(test.failMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.failMsg) + } else { + require.NoError(t, err) + } + require.Equal(t, test.maxFD, result) + mFileReader.AssertExpectations(t) + }) + } +} + +func TestAddFiles(t *testing.T) { + mFileInfo := &mockFileInfoProvider{} + mError := errors.New("mock error") + + t.Run("no paths", func(t *testing.T) { + err := checkFiles([]string{}, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), "no paths were given") + }) + + t.Run("no file info provider", func(t *testing.T) { + err := checkFiles([]string{"path/1, path/2"}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "file info provider is nil") + }) + + t.Run("stat error", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + mFileInfo.On("lstat", file).Return(nil, mError).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("cannot obtain file info of `%s`", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file does not exist", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + mFileInfo.On("lstat", file).Return(nil, os.ErrNotExist).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't exist", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file is symlink", func(t *testing.T) { + file := "path/to/symlink" + paths := []string{file} + fileInfo := fakeFileInfo{fileMode: os.ModeSymlink} + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file %s is a symlink", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("file doesn't point to a regular file", func(t *testing.T) { + file := "path/to/file" + paths := []string{file} + fileInfo := fakeFileInfo{fileMode: os.ModeDir} + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + + err := checkFiles(paths, mFileInfo) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("file `%s` doesn't point to a reagular file", file)) + mFileInfo.AssertExpectations(t) + }) + + t.Run("checking succeeded", func(t *testing.T) { + paths := []string{"path/to/file1", "path/to/file2", "path/to/file3"} + fileInfo := fakeFileInfo{} + + for _, file := range paths { + mFileInfo.On("lstat", file).Return(fileInfo, nil).Once() + } + + err := checkFiles(paths, mFileInfo) + require.NoError(t, err) + mFileInfo.AssertExpectations(t) + }) +} + +type fakeFileInfo struct { + fileMode os.FileMode +} + +func (f fakeFileInfo) Name() string { return "" } +func (f fakeFileInfo) Size() int64 { return 0 } +func (f fakeFileInfo) Mode() os.FileMode { return f.fileMode } +func (f fakeFileInfo) ModTime() time.Time { return time.Time{} } +func (f fakeFileInfo) IsDir() bool { return false } +func (f 
fakeFileInfo) Sys() interface{} { return nil } diff --git a/plugins/inputs/intel_pmu/mocks.go b/plugins/inputs/intel_pmu/mocks.go new file mode 100644 index 0000000000000..82799b26f2b04 --- /dev/null +++ b/plugins/inputs/intel_pmu/mocks.go @@ -0,0 +1,407 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "os" + + "github.com/intel/iaevents" + "github.com/stretchr/testify/mock" +) + +// mockValuesReader is an autogenerated mock type for the valuesReader type +type mockValuesReader struct { + mock.Mock +} + +// readValue provides a mock function with given fields: event +func (_m *mockValuesReader) readValue(event *iaevents.ActiveEvent) (iaevents.CounterValue, error) { + ret := _m.Called(event) + + var r0 iaevents.CounterValue + if rf, ok := ret.Get(0).(func(*iaevents.ActiveEvent) iaevents.CounterValue); ok { + r0 = rf(event) + } else { + r0 = ret.Get(0).(iaevents.CounterValue) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*iaevents.ActiveEvent) error); ok { + r1 = rf(event) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockEntitiesValuesReader is an autogenerated mock type for the entitiesValuesReader type +type mockEntitiesValuesReader struct { + mock.Mock +} + +// readEntities provides a mock function with given fields: _a0, _a1 +func (_m *mockEntitiesValuesReader) readEntities(_a0 []*CoreEventEntity, _a1 []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) { + ret := _m.Called(_a0, _a1) + + var r0 []coreMetric + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) []coreMetric); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]coreMetric) + } + } + + var r1 []uncoreMetric + if rf, ok := ret.Get(1).(func([]*CoreEventEntity, []*UncoreEventEntity) []uncoreMetric); ok { + r1 = rf(_a0, _a1) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).([]uncoreMetric) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r2 = rf(_a0, _a1) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// mockEntitiesActivator is an autogenerated mock type for the entitiesActivator type +type mockEntitiesActivator struct { + mock.Mock +} + +// activateEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m *mockEntitiesActivator) activateEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEntitiesParser is an autogenerated mock type for the entitiesParser type +type mockEntitiesParser struct { + mock.Mock +} + +// parseEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m *mockEntitiesParser) parseEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEntitiesResolver is an autogenerated mock type for the entitiesResolver type +type mockEntitiesResolver struct { + mock.Mock +} + +// resolveEntities provides a mock function with given fields: coreEntities, uncoreEntities +func (_m 
*mockEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + ret := _m.Called(coreEntities, uncoreEntities) + + var r0 error + if rf, ok := ret.Get(0).(func([]*CoreEventEntity, []*UncoreEventEntity) error); ok { + r0 = rf(coreEntities, uncoreEntities) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// mockEventsActivator is an autogenerated mock type for the eventsActivator type +type mockEventsActivator struct { + mock.Mock +} + +// activateEvent provides a mock function with given fields: _a0, _a1, _a2 +func (_m *mockEventsActivator) activateEvent(_a0 iaevents.Activator, _a1 iaevents.PlacementProvider, _a2 iaevents.Options) (*iaevents.ActiveEvent, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 *iaevents.ActiveEvent + if rf, ok := ret.Get(0).(func(iaevents.Activator, iaevents.PlacementProvider, iaevents.Options) *iaevents.ActiveEvent); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.Activator, iaevents.PlacementProvider, iaevents.Options) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// activateGroup provides a mock function with given fields: _a0, _a1 +func (_m *mockEventsActivator) activateGroup(_a0 iaevents.PlacementProvider, _a1 []iaevents.CustomizableEvent) (*iaevents.ActiveEventGroup, error) { + ret := _m.Called(_a0, _a1) + + var r0 *iaevents.ActiveEventGroup + if rf, ok := ret.Get(0).(func(iaevents.PlacementProvider, []iaevents.CustomizableEvent) *iaevents.ActiveEventGroup); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveEventGroup) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.PlacementProvider, []iaevents.CustomizableEvent) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// activateMulti provides a mock function with given fields: _a0, _a1, _a2 +func (_m *mockEventsActivator) activateMulti(_a0 iaevents.MultiActivator, _a1 []iaevents.PlacementProvider, _a2 iaevents.Options) (*iaevents.ActiveMultiEvent, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 *iaevents.ActiveMultiEvent + if rf, ok := ret.Get(0).(func(iaevents.MultiActivator, []iaevents.PlacementProvider, iaevents.Options) *iaevents.ActiveMultiEvent); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iaevents.ActiveMultiEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.MultiActivator, []iaevents.PlacementProvider, iaevents.Options) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockFileInfoProvider is an autogenerated mock type for the fileInfoProvider type +type mockFileInfoProvider struct { + mock.Mock +} + +// fileLimit provides a mock function with given fields: +func (_m *mockFileInfoProvider) fileLimit() (uint64, error) { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFile provides a mock function with given fields: _a0 +func (_m *mockFileInfoProvider) readFile(_a0 string) ([]byte, error) { + ret := _m.Called(_a0) + + var r0 []byte + if rf, ok := ret.Get(0).(func(string) []byte); ok { + r0 = rf(_a0) + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// lstat provides a mock function with given fields: _a0 +func (_m *mockFileInfoProvider) lstat(_a0 string) (os.FileInfo, error) { + ret := _m.Called(_a0) + + var r0 os.FileInfo + if rf, ok := ret.Get(0).(func(string) os.FileInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(os.FileInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockPlacementMaker is an autogenerated mock type for the placementMaker type +type mockPlacementMaker struct { + mock.Mock +} + +// makeCorePlacements provides a mock function with given fields: cores, perfEvent +func (_m *mockPlacementMaker) makeCorePlacements(cores []int, factory iaevents.PlacementFactory) ([]iaevents.PlacementProvider, error) { + ret := _m.Called(cores, factory) + + var r0 []iaevents.PlacementProvider + if rf, ok := ret.Get(0).(func([]int, iaevents.PlacementFactory) []iaevents.PlacementProvider); ok { + r0 = rf(cores, factory) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]iaevents.PlacementProvider) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func([]int, iaevents.PlacementFactory) error); ok { + r1 = rf(cores, factory) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// makeUncorePlacements provides a mock function with given fields: factory, socket +func (_m *mockPlacementMaker) makeUncorePlacements(socket int, factory iaevents.PlacementFactory) ([]iaevents.PlacementProvider, error) { + ret := _m.Called(factory, socket) + + var r0 []iaevents.PlacementProvider + if rf, ok := ret.Get(0).(func(iaevents.PlacementFactory, int) []iaevents.PlacementProvider); ok { + r0 = rf(factory, socket) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]iaevents.PlacementProvider) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.PlacementFactory, int) error); ok { + r1 = rf(factory, socket) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// mockSysInfoProvider is an autogenerated mock type for the sysInfoProvider type +type mockSysInfoProvider struct { + mock.Mock +} + +// allCPUs provides a mock function with given fields: +func (_m *mockSysInfoProvider) allCPUs() ([]int, error) { + ret := _m.Called() + + var r0 []int + if rf, ok := ret.Get(0).(func() []int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// allSockets provides a mock function with given fields: +func (_m *mockSysInfoProvider) allSockets() ([]int, error) { + ret := _m.Called() + + var r0 []int + if rf, ok := ret.Get(0).(func() []int); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockTransformer is an autogenerated mock type for the Transformer type +type MockTransformer struct { + mock.Mock +} + +// Transform provides a mock function with given fields: reader, matcher +func (_m *MockTransformer) Transform(reader iaevents.Reader, matcher iaevents.Matcher) ([]*iaevents.PerfEvent, error) { + ret := _m.Called(reader, matcher) + + var r0 
[]*iaevents.PerfEvent + if rf, ok := ret.Get(0).(func(iaevents.Reader, iaevents.Matcher) []*iaevents.PerfEvent); ok { + r0 = rf(reader, matcher) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*iaevents.PerfEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(iaevents.Reader, iaevents.Matcher) error); ok { + r1 = rf(reader, matcher) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/plugins/inputs/intel_pmu/reader.go b/plugins/inputs/intel_pmu/reader.go new file mode 100644 index 0000000000000..2df72a96618df --- /dev/null +++ b/plugins/inputs/intel_pmu/reader.go @@ -0,0 +1,249 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "time" + + ia "github.com/intel/iaevents" + "golang.org/x/sync/errgroup" +) + +type coreMetric struct { + values ia.CounterValue + scaled uint64 + + name string + tag string + cpu int + + time time.Time +} + +type uncoreMetric struct { + values ia.CounterValue + scaled uint64 + + name string + unitType string + unit string + tag string + socket int + + agg bool + + time time.Time +} + +type valuesReader interface { + readValue(event *ia.ActiveEvent) (ia.CounterValue, error) +} + +type iaValuesReader struct{} + +func (iaValuesReader) readValue(event *ia.ActiveEvent) (ia.CounterValue, error) { + return event.ReadValue() +} + +type entitiesValuesReader interface { + readEntities([]*CoreEventEntity, []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) +} + +type iaEntitiesValuesReader struct { + eventReader valuesReader + timer clock +} + +type clock interface { + now() time.Time +} + +type realClock struct{} + +func (realClock) now() time.Time { + return time.Now() +} + +func (ie *iaEntitiesValuesReader) readEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) ([]coreMetric, []uncoreMetric, error) { + var coreMetrics []coreMetric + var uncoreMetrics []uncoreMetric + + for _, entity := range coreEntities { + newMetrics, err := ie.readCoreEvents(entity) + if err != nil { + return nil, nil, err + } + coreMetrics = append(coreMetrics, newMetrics...) + } + for _, entity := range uncoreEntities { + newMetrics, err := ie.readUncoreEvents(entity) + if err != nil { + return nil, nil, err + } + uncoreMetrics = append(uncoreMetrics, newMetrics...) 
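+ // NOTE: core and uncore entities are read sequentially; the first read error aborts the whole collection cycle (fail-fast).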
+ } + return coreMetrics, uncoreMetrics, nil +} + +func (ie *iaEntitiesValuesReader) readCoreEvents(entity *CoreEventEntity) ([]coreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return nil, fmt.Errorf("event values reader or timer is nil") + } + if entity == nil { + return nil, fmt.Errorf("entity is nil") + } + metrics := make([]coreMetric, len(entity.activeEvents)) + errGroup := errgroup.Group{} + + for i, event := range entity.activeEvents { + id := i + actualEvent := event + + if event == nil || event.PerfEvent == nil { + return nil, fmt.Errorf("active event or corresponding perf event is nil") + } + + errGroup.Go(func() error { + values, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read core event `%s` values: %v", actualEvent, err) + } + cpu, _ := actualEvent.PMUPlacement() + newMetric := coreMetric{ + values: values, + tag: entity.EventsTag, + cpu: cpu, + name: actualEvent.PerfEvent.Name, + time: ie.timer.now(), + } + metrics[id] = newMetric + return nil + }) + } + err := errGroup.Wait() + if err != nil { + return nil, err + } + return metrics, nil +} + +func (ie *iaEntitiesValuesReader) readUncoreEvents(entity *UncoreEventEntity) ([]uncoreMetric, error) { + if entity == nil { + return nil, fmt.Errorf("entity is nil") + } + var uncoreMetrics []uncoreMetric + + for _, event := range entity.activeMultiEvents { + if entity.Aggregate { + newMetric, err := ie.readMultiEventAgg(event) + if err != nil { + return nil, err + } + newMetric.tag = entity.EventsTag + uncoreMetrics = append(uncoreMetrics, newMetric) + } else { + newMetrics, err := ie.readMultiEventSeparately(event) + if err != nil { + return nil, err + } + for i := range newMetrics { + newMetrics[i].tag = entity.EventsTag + } + uncoreMetrics = append(uncoreMetrics, newMetrics...) 
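+ // Non-aggregated mode emits one metric per uncore unit, in contrast to the single aggregated metric produced above.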
+ } + } + return uncoreMetrics, nil +} + +func (ie *iaEntitiesValuesReader) readMultiEventSeparately(multiEvent multiEvent) ([]uncoreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return nil, fmt.Errorf("event values reader or timer is nil") + } + if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil { + return nil, fmt.Errorf("no active events or perf event is nil") + } + activeEvents := multiEvent.activeEvents + perfEvent := multiEvent.perfEvent + + metrics := make([]uncoreMetric, len(activeEvents)) + group := errgroup.Group{} + + for i, event := range activeEvents { + id := i + actualEvent := event + + group.Go(func() error { + values, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) + } + newMetric := uncoreMetric{ + values: values, + socket: multiEvent.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + unit: actualEvent.PMUName(), + time: ie.timer.now(), + } + metrics[id] = newMetric + return nil + }) + err := group.Wait() + if err != nil { + return nil, err + } + } + return metrics, nil +} + +func (ie *iaEntitiesValuesReader) readMultiEventAgg(multiEvent multiEvent) (uncoreMetric, error) { + if ie.eventReader == nil || ie.timer == nil { + return uncoreMetric{}, fmt.Errorf("event values reader or timer is nil") + } + if len(multiEvent.activeEvents) < 1 || multiEvent.perfEvent == nil { + return uncoreMetric{}, fmt.Errorf("no active events or perf event is nil") + } + activeEvents := multiEvent.activeEvents + perfEvent := multiEvent.perfEvent + + values := make([]ia.CounterValue, len(activeEvents)) + group := errgroup.Group{} + + for i, event := range activeEvents { + id := i + actualEvent := event + + group.Go(func() error { + value, err := ie.eventReader.readValue(actualEvent) + if err != nil { + return fmt.Errorf("failed to read uncore event `%s` values: %v", actualEvent, err) + } + values[id] = value + return nil + }) + } + err := group.Wait() + if err != nil { + return uncoreMetric{}, err + } + + bRaw, bEnabled, bRunning := ia.AggregateValues(values) + if !bRaw.IsUint64() || !bEnabled.IsUint64() || !bRunning.IsUint64() { + return uncoreMetric{}, fmt.Errorf("cannot aggregate `%s` values, uint64 exceeding", perfEvent) + } + aggValues := ia.CounterValue{ + Raw: bRaw.Uint64(), + Enabled: bEnabled.Uint64(), + Running: bRunning.Uint64(), + } + newMetric := uncoreMetric{ + values: aggValues, + socket: multiEvent.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + time: ie.timer.now(), + } + return newMetric, nil +} diff --git a/plugins/inputs/intel_pmu/reader_test.go b/plugins/inputs/intel_pmu/reader_test.go new file mode 100644 index 0000000000000..409393383056f --- /dev/null +++ b/plugins/inputs/intel_pmu/reader_test.go @@ -0,0 +1,522 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "fmt" + "math" + "testing" + "time" + + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +type moonClock struct{} + +func (moonClock) now() time.Time { + return time.Date(1969, 7, 20, 20, 17, 0, 0, time.UTC) +} + +type eventWithValues struct { + activeEvent *ia.ActiveEvent + values ia.CounterValue +} + +func TestReadCoreEvents(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("event reader is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{timer: 
moonClock{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("timer is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("entity is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}, timer: moonClock{}}).readCoreEvents(nil) + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, metrics) + }) + + t.Run("nil events", func(t *testing.T) { + entity := &CoreEventEntity{} + + entity.activeEvents = append(entity.activeEvents, nil) + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.Error(t, err) + require.Contains(t, err.Error(), "active event or corresponding perf event is nil") + require.Nil(t, metrics) + }) + + t.Run("reading failed", func(t *testing.T) { + errMock := fmt.Errorf("mock error") + event := &ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}} + + entity := &CoreEventEntity{} + + entity.activeEvents = append(entity.activeEvents, event) + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read core event `%s` values: %v", event, errMock)) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + + t.Run("read active events values", func(t *testing.T) { + entity := &CoreEventEntity{} + var expected []coreMetric + + tEvents := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event1"}}, ia.CounterValue{Raw: 316, Enabled: 182060524, Running: 182060524}}, + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event2"}}, ia.CounterValue{Raw: 1238901, Enabled: 18234123, Running: 18234123}}, + {&ia.ActiveEvent{PerfEvent: &ia.PerfEvent{Name: "event3"}}, ia.CounterValue{Raw: 412323, Enabled: 1823132, Running: 1823180}}, + } + + for _, tc := range tEvents { + entity.activeEvents = append(entity.activeEvents, tc.activeEvent) + cpu, _ := tc.activeEvent.PMUPlacement() + newMetric := coreMetric{ + values: tc.values, + tag: entity.EventsTag, + cpu: cpu, + name: tc.activeEvent.PerfEvent.Name, + time: mTimer.now(), + } + expected = append(expected, newMetric) + mReader.On("readValue", tc.activeEvent).Return(tc.values, nil).Once() + } + metrics, err := mEntitiesReader.readCoreEvents(entity) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + }) +} + +func TestReadMultiEventSeparately(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("event reader is nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("timer is nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + require.Nil(t, metrics) + }) + + t.Run("multi event is 
nil", func(t *testing.T) { + event := multiEvent{} + metrics, err := (&iaEntitiesValuesReader{&iaValuesReader{}, moonClock{}}).readMultiEventSeparately(event) + require.Error(t, err) + require.Contains(t, err.Error(), "no active events or perf event is nil") + require.Nil(t, metrics) + }) + + t.Run("reading failed", func(t *testing.T) { + errMock := fmt.Errorf("mock error") + perfEvent := &ia.PerfEvent{Name: "event"} + + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + metrics, err := mEntitiesReader.readMultiEventSeparately(multi) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to read uncore event `%s` values: %v", event, errMock)) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + + t.Run("read active events values", func(t *testing.T) { + perfEvent := &ia.PerfEvent{Name: "event", PMUName: "pmu name"} + multi := multiEvent{perfEvent: perfEvent} + var expected []uncoreMetric + + tEvents := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 316, Enabled: 182060524, Running: 182060524}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1238901, Enabled: 18234123, Running: 18234123}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 412323, Enabled: 1823132, Running: 1823180}}, + } + + for _, tc := range tEvents { + multi.activeEvents = append(multi.activeEvents, tc.activeEvent) + newMetric := uncoreMetric{ + values: tc.values, + socket: multi.socket, + unitType: multi.perfEvent.PMUName, + name: multi.perfEvent.Name, + unit: tc.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + mReader.On("readValue", tc.activeEvent).Return(tc.values, nil).Once() + } + metrics, err := mEntitiesReader.readMultiEventSeparately(multi) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + }) +} + +func TestReadMultiEventAgg(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + errMock := fmt.Errorf("mock error") + + t.Run("event reader is nil", func(t *testing.T) { + event := multiEvent{} + _, err := (&iaEntitiesValuesReader{timer: moonClock{}}).readMultiEventAgg(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + }) + + t.Run("timer is nil", func(t *testing.T) { + event := multiEvent{} + _, err := (&iaEntitiesValuesReader{eventReader: &iaValuesReader{}}).readMultiEventAgg(event) + require.Error(t, err) + require.Contains(t, err.Error(), "event values reader or timer is nil") + }) + + perfEvent := &ia.PerfEvent{Name: "event", PMUName: "pmu name"} + + tests := []struct { + name string + multi multiEvent + events []eventWithValues + result ia.CounterValue + readFail bool + errMsg string + }{ + { + name: "no events", + multi: multiEvent{perfEvent: perfEvent}, + events: nil, + result: ia.CounterValue{}, + errMsg: "no active events or perf event is nil", + }, + { + name: "no perf event", + multi: multiEvent{perfEvent: nil, activeEvents: []*ia.ActiveEvent{{}, {}}}, + events: nil, + result: ia.CounterValue{}, + errMsg: "no active events or perf event is nil", + }, + { + name: "successful reading and aggregation", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, 
ia.CounterValue{Raw: 5123, Enabled: 1231242, Running: 41123}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4500, Enabled: 1823423, Running: 182343}}, + }, + result: ia.CounterValue{Raw: 9623, Enabled: 3054665, Running: 223466}, + errMsg: "", + }, + { + name: "too big numbers", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: math.MaxUint64, Enabled: 0, Running: 0}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 1, Enabled: 0, Running: 0}}, + }, + result: ia.CounterValue{}, + errMsg: fmt.Sprintf("cannot aggregate `%s` values, uint64 exceeding", perfEvent), + }, + { + name: "reading fail", + multi: multiEvent{perfEvent: perfEvent}, + events: []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 0, Enabled: 0, Running: 0}}, + }, + readFail: true, + result: ia.CounterValue{}, + errMsg: "failed to read uncore event", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + for _, eventWithValue := range test.events { + test.multi.activeEvents = append(test.multi.activeEvents, eventWithValue.activeEvent) + if test.readFail { + mReader.On("readValue", eventWithValue.activeEvent).Return(ia.CounterValue{}, errMock).Once() + continue + } + mReader.On("readValue", eventWithValue.activeEvent).Return(eventWithValue.values, nil).Once() + } + metric, err := mEntitiesReader.readMultiEventAgg(test.multi) + mReader.AssertExpectations(t) + + if len(test.errMsg) > 0 { + require.Error(t, err) + require.Contains(t, err.Error(), test.errMsg) + return + } + expected := uncoreMetric{ + values: test.result, + socket: test.multi.socket, + unitType: test.multi.perfEvent.PMUName, + name: test.multi.perfEvent.Name, + time: mTimer.now(), + } + require.NoError(t, err) + require.Equal(t, expected, metric) + }) + } +} + +func TestReadUncoreEvents(t *testing.T) { + errMock := fmt.Errorf("mock error") + + t.Run("entity is nil", func(t *testing.T) { + metrics, err := (&iaEntitiesValuesReader{}).readUncoreEvents(nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, metrics) + }) + + t.Run("read aggregated entities", func(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + perfEvent := &ia.PerfEvent{Name: "mock event", PMUName: "cbox", PMUTypes: []ia.NamedPMUType{{Name: "cbox"}}} + perfEvent2 := &ia.PerfEvent{Name: "mock event2", PMUName: "rad", PMUTypes: []ia.NamedPMUType{{Name: "rad2"}}} + + multi := multiEvent{perfEvent: perfEvent} + events := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4005}}, + } + multi2 := multiEvent{perfEvent: perfEvent2} + events2 := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 123005}}, + } + for _, event := range events { + multi.activeEvents = append(multi.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + } + for _, event := range events2 { + multi2.activeEvents = append(multi2.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + } + newMetric := uncoreMetric{ + values: ia.CounterValue{Raw: 6008, Enabled: 0, Running: 0}, + socket: multi.socket, + unitType:
perfEvent.PMUName, + name: perfEvent.Name, + time: mTimer.now(), + } + newMetric2 := uncoreMetric{ + values: ia.CounterValue{Raw: 125008, Enabled: 0, Running: 0}, + socket: multi2.socket, + unitType: perfEvent2.PMUName, + name: perfEvent2.Name, + time: mTimer.now(), + } + expected := []uncoreMetric{newMetric, newMetric2} + entityAgg := &UncoreEventEntity{Aggregate: true, activeMultiEvents: []multiEvent{multi, multi2}} + + metrics, err := mEntitiesReader.readUncoreEvents(entityAgg) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + + t.Run("reading error", func(t *testing.T) { + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + entityAgg := &UncoreEventEntity{Aggregate: true, activeMultiEvents: []multiEvent{multi}} + metrics, err = mEntitiesReader.readUncoreEvents(entityAgg) + + require.Error(t, err) + require.Nil(t, metrics) + mReader.AssertExpectations(t) + }) + }) + + t.Run("read distributed entities", func(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + perfEvent := &ia.PerfEvent{Name: "mock event", PMUName: "cbox", PMUTypes: []ia.NamedPMUType{{Name: "cbox"}}} + perfEvent2 := &ia.PerfEvent{Name: "mock event2", PMUName: "rad", PMUTypes: []ia.NamedPMUType{{Name: "rad2"}}} + + multi := multiEvent{perfEvent: perfEvent, socket: 2} + events := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent}, ia.CounterValue{Raw: 4005}}, + } + multi2 := multiEvent{perfEvent: perfEvent2, socket: 1} + events2 := []eventWithValues{ + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 2003}}, + {&ia.ActiveEvent{PerfEvent: perfEvent2}, ia.CounterValue{Raw: 123005}}, + } + var expected []uncoreMetric + for _, event := range events { + multi.activeEvents = append(multi.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + + newMetric := uncoreMetric{ + values: event.values, + socket: multi.socket, + unitType: perfEvent.PMUName, + name: perfEvent.Name, + unit: event.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + } + for _, event := range events2 { + multi2.activeEvents = append(multi2.activeEvents, event.activeEvent) + mReader.On("readValue", event.activeEvent).Return(event.values, nil).Once() + + newMetric := uncoreMetric{ + values: event.values, + socket: multi2.socket, + unitType: perfEvent2.PMUName, + name: perfEvent2.Name, + unit: event.activeEvent.PMUName(), + time: mTimer.now(), + } + expected = append(expected, newMetric) + } + entity := &UncoreEventEntity{activeMultiEvents: []multiEvent{multi, multi2}} + + metrics, err := mEntitiesReader.readUncoreEvents(entity) + + require.NoError(t, err) + require.Equal(t, expected, metrics) + mReader.AssertExpectations(t) + + t.Run("reading error", func(t *testing.T) { + event := &ia.ActiveEvent{PerfEvent: perfEvent} + multi := multiEvent{perfEvent: perfEvent, activeEvents: []*ia.ActiveEvent{event}} + + mReader.On("readValue", event).Return(ia.CounterValue{}, errMock).Once() + + entityAgg := &UncoreEventEntity{activeMultiEvents: []multiEvent{multi}} + metrics, err = mEntitiesReader.readUncoreEvents(entityAgg) + + require.Error(t, err) + require.Nil(t, metrics) + 
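+ // Confirm the mock consumed the single expected (and failing) readValue call.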
mReader.AssertExpectations(t) + }) + }) +} + +func TestReadEntities(t *testing.T) { + mReader := &mockValuesReader{} + mTimer := &moonClock{} + mEntitiesReader := &iaEntitiesValuesReader{mReader, mTimer} + + t.Run("read entities", func(t *testing.T) { + values := ia.CounterValue{} + socket := 0 + + corePerfEvent := &ia.PerfEvent{Name: "core event 1", PMUName: "cpu"} + activeCoreEvent := []*ia.ActiveEvent{{PerfEvent: corePerfEvent}} + coreMetric1 := coreMetric{values: values, name: corePerfEvent.Name, time: mTimer.now()} + + corePerfEvent2 := &ia.PerfEvent{Name: "core event 2", PMUName: "cpu"} + activeCoreEvent2 := []*ia.ActiveEvent{{PerfEvent: corePerfEvent2}} + coreMetric2 := coreMetric{values: values, name: corePerfEvent2.Name, time: mTimer.now()} + + uncorePerfEvent := &ia.PerfEvent{Name: "uncore event 1", PMUName: "cbox"} + activeUncoreEvent := []*ia.ActiveEvent{{PerfEvent: uncorePerfEvent}} + uncoreMetric1 := uncoreMetric{ + values: values, + name: uncorePerfEvent.Name, + unitType: uncorePerfEvent.PMUName, + socket: socket, + time: mTimer.now(), + } + + uncorePerfEvent2 := &ia.PerfEvent{Name: "uncore event 2", PMUName: "rig"} + activeUncoreEvent2 := []*ia.ActiveEvent{{PerfEvent: uncorePerfEvent2}} + uncoreMetric2 := uncoreMetric{ + values: values, + name: uncorePerfEvent2.Name, + unitType: uncorePerfEvent2.PMUName, + socket: socket, + time: mTimer.now(), + } + + coreEntities := []*CoreEventEntity{{activeEvents: activeCoreEvent}, {activeEvents: activeCoreEvent2}} + + uncoreEntities := []*UncoreEventEntity{ + {activeMultiEvents: []multiEvent{{activeEvents: activeUncoreEvent, perfEvent: uncorePerfEvent, socket: socket}}}, + {activeMultiEvents: []multiEvent{{activeEvents: activeUncoreEvent2, perfEvent: uncorePerfEvent2, socket: socket}}}, + } + + expectedCoreMetrics := []coreMetric{coreMetric1, coreMetric2} + expectedUncoreMetrics := []uncoreMetric{uncoreMetric1, uncoreMetric2} + + mReader.On("readValue", activeCoreEvent[0]).Return(values, nil).Once() + mReader.On("readValue", activeCoreEvent2[0]).Return(values, nil).Once() + mReader.On("readValue", activeUncoreEvent[0]).Return(values, nil).Once() + mReader.On("readValue", activeUncoreEvent2[0]).Return(values, nil).Once() + + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(coreEntities, uncoreEntities) + + require.NoError(t, err) + require.Equal(t, expectedCoreMetrics, coreMetrics) + require.Equal(t, expectedUncoreMetrics, uncoreMetrics) + mReader.AssertExpectations(t) + }) + + t.Run("core entity reading failed", func(t *testing.T) { + coreEntities := []*CoreEventEntity{nil} + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(coreEntities, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, coreMetrics) + require.Nil(t, uncoreMetrics) + }) + + t.Run("uncore entity reading failed", func(t *testing.T) { + uncoreEntities := []*UncoreEventEntity{nil} + coreMetrics, uncoreMetrics, err := mEntitiesReader.readEntities(nil, uncoreEntities) + + require.Error(t, err) + require.Contains(t, err.Error(), "entity is nil") + require.Nil(t, coreMetrics) + require.Nil(t, uncoreMetrics) + }) +} diff --git a/plugins/inputs/intel_pmu/resolver.go b/plugins/inputs/intel_pmu/resolver.go new file mode 100644 index 0000000000000..8457f48ca14db --- /dev/null +++ b/plugins/inputs/intel_pmu/resolver.go @@ -0,0 +1,150 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "strings" + + "github.com/influxdata/telegraf" + ia
"github.com/intel/iaevents" +) + +type entitiesResolver interface { + resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error +} + +type iaEntitiesResolver struct { + reader ia.Reader + transformer ia.Transformer + log telegraf.Logger +} + +func (e *iaEntitiesResolver) resolveEntities(coreEntities []*CoreEventEntity, uncoreEntities []*UncoreEventEntity) error { + for _, entity := range coreEntities { + if entity == nil { + return fmt.Errorf("core entity is nil") + } + if entity.allEvents { + newEvents, _, err := e.resolveAllEvents() + if err != nil { + return fmt.Errorf("failed to resolve all events: %v", err) + } + entity.parsedEvents = newEvents + continue + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("parsed core event is nil") + } + customEvent, err := e.resolveEvent(event.name, event.qualifiers) + if err != nil { + return fmt.Errorf("failed to resolve core event `%s`: %v", event.name, err) + } + if customEvent.Event.Uncore { + return fmt.Errorf("uncore event `%s` found in core entity", event.name) + } + event.custom = customEvent + } + } + for _, entity := range uncoreEntities { + if entity == nil { + return fmt.Errorf("uncore entity is nil") + } + if entity.allEvents { + _, newEvents, err := e.resolveAllEvents() + if err != nil { + return fmt.Errorf("failed to resolve all events: %v", err) + } + entity.parsedEvents = newEvents + continue + } + for _, event := range entity.parsedEvents { + if event == nil { + return fmt.Errorf("parsed uncore event is nil") + } + customEvent, err := e.resolveEvent(event.name, event.qualifiers) + if err != nil { + return fmt.Errorf("failed to resolve uncore event `%s`: %v", event.name, err) + } + if !customEvent.Event.Uncore { + return fmt.Errorf("core event `%s` found in uncore entity", event.name) + } + event.custom = customEvent + } + } + return nil +} + +func (e *iaEntitiesResolver) resolveAllEvents() (coreEvents []*eventWithQuals, uncoreEvents []*eventWithQuals, err error) { + if e.transformer == nil { + return nil, nil, errors.New("transformer is nil") + } + + perfEvents, err := e.transformer.Transform(e.reader, ia.NewNameMatcher()) + if err != nil { + re, ok := err.(*ia.TransformationError) + if !ok { + return nil, nil, err + } + if e.log != nil && re != nil { + var eventErrs []string + for _, eventErr := range re.Errors() { + if eventErr == nil { + continue + } + eventErrs = append(eventErrs, eventErr.Error()) + } + errorsStr := strings.Join(eventErrs, ",\n") + e.log.Warnf("Cannot resolve all of the events from provided files:\n%s.\nSome events may be omitted.", errorsStr) + } + } + + for _, perfEvent := range perfEvents { + newEvent := &eventWithQuals{ + name: perfEvent.Name, + custom: ia.CustomizableEvent{Event: perfEvent}, + } + // build options for event + newEvent.custom.Options, err = ia.NewOptions().Build() + if err != nil { + return nil, nil, fmt.Errorf("failed to build options for event `%s`: %v", perfEvent.Name, err) + } + if perfEvent.Uncore { + uncoreEvents = append(uncoreEvents, newEvent) + continue + } + coreEvents = append(coreEvents, newEvent) + } + return coreEvents, uncoreEvents, nil +} + +func (e *iaEntitiesResolver) resolveEvent(name string, qualifiers []string) (ia.CustomizableEvent, error) { + var custom ia.CustomizableEvent + if e.transformer == nil { + return custom, errors.New("events transformer is nil") + } + if name == "" { + return custom, errors.New("event name is empty") + } + matcher := ia.NewNameMatcher(name) + perfEvents, err := 
e.transformer.Transform(e.reader, matcher) + if err != nil { + return custom, fmt.Errorf("failed to transform perf events: %v", err) + } + if len(perfEvents) < 1 { + return custom, fmt.Errorf("failed to resolve unknown event `%s`", name) + } + // build options for event + options, err := ia.NewOptions().SetAttrModifiers(qualifiers).Build() + if err != nil { + return custom, fmt.Errorf("failed to build options for event `%s`: %v", name, err) + } + custom = ia.CustomizableEvent{ + Event: perfEvents[0], + Options: options, + } + return custom, nil +} diff --git a/plugins/inputs/intel_pmu/resolver_test.go b/plugins/inputs/intel_pmu/resolver_test.go new file mode 100644 index 0000000000000..176b6d133772c --- /dev/null +++ b/plugins/inputs/intel_pmu/resolver_test.go @@ -0,0 +1,376 @@ +//go:build linux && amd64 +// +build linux,amd64 + +package intel_pmu + +import ( + "errors" + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + ia "github.com/intel/iaevents" + "github.com/stretchr/testify/require" +) + +func TestResolveEntities(t *testing.T) { + errMock := errors.New("mock error") + mLog := testutil.Logger{} + mTransformer := &MockTransformer{} + mResolver := &iaEntitiesResolver{transformer: mTransformer, log: mLog} + + type test struct { + perfEvent *ia.PerfEvent + options ia.Options + event *eventWithQuals + } + + t.Run("nil entities", func(t *testing.T) { + err := mResolver.resolveEntities([]*CoreEventEntity{nil}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "core entity is nil") + + err = mResolver.resolveEntities(nil, []*UncoreEventEntity{nil}) + + require.Error(t, err) + require.Contains(t, err.Error(), "uncore entity is nil") + }) + + t.Run("nil parsed events", func(t *testing.T) { + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{nil, nil}} + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), "parsed core event is nil") + + err = mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), "parsed uncore event is nil") + }) + + t.Run("fail to resolve core events", func(t *testing.T) { + name := "mock event 1" + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{{name: name}}, allEvents: false} + matcher := ia.NewNameMatcher(name) + + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve core event `%s`", name)) + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve uncore events", func(t *testing.T) { + name := "mock event 1" + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{{name: name}}, allEvents: false} + matcher := ia.NewNameMatcher(name) + + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to resolve uncore event `%s`", name)) + mTransformer.AssertExpectations(t) + }) + + t.Run("resolve all core and uncore events", func(t *testing.T) { + mCoreEntity := &CoreEventEntity{allEvents: true} + mUncoreEntity := &UncoreEventEntity{allEvents: true} + corePerfEvents := []*ia.PerfEvent{ + {Name: "core event1"}, + 
{Name: "core event2"}, + {Name: "core event3"}, + } + uncorePerfEvents := []*ia.PerfEvent{ + {Name: "uncore event1", Uncore: true}, + {Name: "uncore event2", Uncore: true}, + {Name: "uncore event3", Uncore: true}, + } + matcher := ia.NewNameMatcher() + + t.Run("fail to resolve all core events", func(t *testing.T) { + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to resolve all events") + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve all uncore events", func(t *testing.T) { + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errMock) + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to resolve all events") + mTransformer.AssertExpectations(t) + }) + + t.Run("fail to resolve all events with transformationError", func(t *testing.T) { + transformErr := &ia.TransformationError{} + + mTransformer.On("Transform", nil, matcher).Once().Return(corePerfEvents, transformErr).Once() + mTransformer.On("Transform", nil, matcher).Once().Return(uncorePerfEvents, transformErr).Once() + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + require.NoError(t, err) + require.Len(t, mCoreEntity.parsedEvents, len(corePerfEvents)) + require.Len(t, mUncoreEntity.parsedEvents, len(uncorePerfEvents)) + for _, coreEvent := range mCoreEntity.parsedEvents { + require.Contains(t, corePerfEvents, coreEvent.custom.Event) + } + for _, uncoreEvent := range mUncoreEntity.parsedEvents { + require.Contains(t, uncorePerfEvents, uncoreEvent.custom.Event) + } + mTransformer.AssertExpectations(t) + }) + + mTransformer.On("Transform", nil, matcher).Once().Return(corePerfEvents, nil).Once() + mTransformer.On("Transform", nil, matcher).Once().Return(uncorePerfEvents, nil).Once() + + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + require.NoError(t, err) + require.Len(t, mCoreEntity.parsedEvents, len(corePerfEvents)) + require.Len(t, mUncoreEntity.parsedEvents, len(uncorePerfEvents)) + for _, coreEvent := range mCoreEntity.parsedEvents { + require.Contains(t, corePerfEvents, coreEvent.custom.Event) + } + for _, uncoreEvent := range mUncoreEntity.parsedEvents { + require.Contains(t, uncorePerfEvents, uncoreEvent.custom.Event) + } + mTransformer.AssertExpectations(t) + }) + + t.Run("uncore event found in core entity", func(t *testing.T) { + mQuals := []string{"config1=0x23h"} + mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build() + eventName := "uncore event 1" + + testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: eventName, Uncore: true}} + + matcher := ia.NewNameMatcher(eventName) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once() + + mCoreEntity := &CoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false} + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, nil) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("uncore event `%s` found in core entity", eventName)) + mTransformer.AssertExpectations(t) + }) + + t.Run("core event found in uncore entity", func(t *testing.T) { + mQuals := []string{"config1=0x23h"} + mOptions, _ := 
ia.NewOptions().SetAttrModifiers(mQuals).Build() + eventName := "core event 1" + + testCase := test{event: &eventWithQuals{name: eventName, qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: eventName, Uncore: false}} + + matcher := ia.NewNameMatcher(eventName) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{testCase.perfEvent}, nil).Once() + + mUncoreEntity := &UncoreEventEntity{parsedEvents: []*eventWithQuals{testCase.event}, allEvents: false} + err := mResolver.resolveEntities(nil, []*UncoreEventEntity{mUncoreEntity}) + + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("core event `%s` found in uncore entity", eventName)) + mTransformer.AssertExpectations(t) + }) + + t.Run("resolve core and uncore events", func(t *testing.T) { + var mCoreEvents []*eventWithQuals + var nUncoreEvents []*eventWithQuals + + mQuals := []string{"config1=0x23h"} + mOptions, _ := ia.NewOptions().SetAttrModifiers(mQuals).Build() + emptyOptions, _ := ia.NewOptions().Build() + + coreTestCases := []test{ + {event: &eventWithQuals{name: "core1", qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: "core1"}}, + {event: &eventWithQuals{name: "core2", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "core2"}}, + {event: &eventWithQuals{name: "core3", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "core3"}}, + } + uncoreTestCases := []test{ + {event: &eventWithQuals{name: "uncore1", qualifiers: mQuals}, + options: mOptions, + perfEvent: &ia.PerfEvent{Name: "uncore1", Uncore: true}}, + {event: &eventWithQuals{name: "uncore2", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "uncore2", Uncore: true}}, + {event: &eventWithQuals{name: "uncore3", qualifiers: nil}, + options: emptyOptions, + perfEvent: &ia.PerfEvent{Name: "uncore3", Uncore: true}}, + } + + for _, test := range coreTestCases { + matcher := ia.NewNameMatcher(test.event.name) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{test.perfEvent}, nil).Once() + mCoreEvents = append(mCoreEvents, test.event) + } + + for _, test := range uncoreTestCases { + matcher := ia.NewNameMatcher(test.event.name) + mTransformer.On("Transform", nil, matcher).Return([]*ia.PerfEvent{test.perfEvent}, nil).Once() + nUncoreEvents = append(nUncoreEvents, test.event) + } + + mCoreEntity := &CoreEventEntity{parsedEvents: mCoreEvents, allEvents: false} + mUncoreEntity := &UncoreEventEntity{parsedEvents: nUncoreEvents, allEvents: false} + err := mResolver.resolveEntities([]*CoreEventEntity{mCoreEntity}, []*UncoreEventEntity{mUncoreEntity}) + + require.NoError(t, err) + for _, test := range append(coreTestCases, uncoreTestCases...) 
{ + require.Equal(t, test.perfEvent, test.event.custom.Event) + require.Equal(t, test.options, test.event.custom.Options) + } + mTransformer.AssertExpectations(t) + }) +} + +func TestResolveAllEvents(t *testing.T) { + mTransformer := &MockTransformer{} + + mResolver := &iaEntitiesResolver{transformer: mTransformer} + + t.Run("transformer is nil", func(t *testing.T) { + mResolver := &iaEntitiesResolver{transformer: nil} + _, _, err := mResolver.resolveAllEvents() + require.Error(t, err) + }) + + t.Run("transformer returns error", func(t *testing.T) { + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errors.New("mock error")) + + _, _, err := mResolver.resolveAllEvents() + require.Error(t, err) + mTransformer.AssertExpectations(t) + }) + + t.Run("no events", func(t *testing.T) { + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(nil, nil) + + _, _, err := mResolver.resolveAllEvents() + require.NoError(t, err) + mTransformer.AssertExpectations(t) + }) + + t.Run("successfully resolved events", func(t *testing.T) { + perfEvent1 := &ia.PerfEvent{Name: "mock1"} + perfEvent2 := &ia.PerfEvent{Name: "mock2"} + uncorePerfEvent1 := &ia.PerfEvent{Name: "mock3", Uncore: true} + uncorePerfEvent2 := &ia.PerfEvent{Name: "mock4", Uncore: true} + + options, _ := ia.NewOptions().Build() + perfEvents := []*ia.PerfEvent{perfEvent1, perfEvent2, uncorePerfEvent1, uncorePerfEvent2} + + expectedCore := []*eventWithQuals{ + {name: perfEvent1.Name, custom: ia.CustomizableEvent{Event: perfEvent1, Options: options}}, + {name: perfEvent2.Name, custom: ia.CustomizableEvent{Event: perfEvent2, Options: options}}, + } + + expectedUncore := []*eventWithQuals{ + {name: uncorePerfEvent1.Name, custom: ia.CustomizableEvent{Event: uncorePerfEvent1, Options: options}}, + {name: uncorePerfEvent2.Name, custom: ia.CustomizableEvent{Event: uncorePerfEvent2, Options: options}}, + } + + matcher := ia.NewNameMatcher() + mTransformer.On("Transform", nil, matcher).Once().Return(perfEvents, nil) + + coreEvents, uncoreEvents, err := mResolver.resolveAllEvents() + require.NoError(t, err) + require.Equal(t, expectedCore, coreEvents) + require.Equal(t, expectedUncore, uncoreEvents) + + mTransformer.AssertExpectations(t) + }) +} + +func TestResolveEvent(t *testing.T) { + mTransformer := &MockTransformer{} + mEvent := "mock event" + + mResolver := &iaEntitiesResolver{transformer: mTransformer} + + t.Run("transformer is nil", func(t *testing.T) { + mResolver := &iaEntitiesResolver{transformer: nil} + _, err := mResolver.resolveEvent("event", nil) + require.Error(t, err) + require.Contains(t, err.Error(), "events transformer is nil") + }) + + t.Run("event is empty", func(t *testing.T) { + _, err := mResolver.resolveEvent("", nil) + require.Error(t, err) + require.Contains(t, err.Error(), "event name is empty") + }) + + t.Run("transformer returns error", func(t *testing.T) { + matcher := ia.NewNameMatcher(mEvent) + mTransformer.On("Transform", nil, matcher).Once().Return(nil, errors.New("mock error")) + + _, err := mResolver.resolveEvent(mEvent, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to transform perf events") + mTransformer.AssertExpectations(t) + }) + + t.Run("no events transformed", func(t *testing.T) { + matcher := ia.NewNameMatcher(mEvent) + mTransformer.On("Transform", nil, matcher).Once().Return(nil, nil) + + _, err := mResolver.resolveEvent(mEvent, nil) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to 
resolve unknown event") + mTransformer.AssertExpectations(t) + }) + + t.Run("not valid qualifiers", func(t *testing.T) { + event := "mock event 1" + qualifiers := []string{"wrong modifiers"} + + matcher := ia.NewNameMatcher(event) + mPerfEvent := &ia.PerfEvent{Name: event} + mPerfEvents := []*ia.PerfEvent{mPerfEvent} + mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil) + + _, err := mResolver.resolveEvent(event, qualifiers) + require.Error(t, err) + require.Contains(t, err.Error(), fmt.Sprintf("failed to build options for event `%s`", event)) + mTransformer.AssertExpectations(t) + }) + + t.Run("successfully transformed", func(t *testing.T) { + event := "mock event 1" + qualifiers := []string{"config1=0x012h", "config2=0x034k"} + + matcher := ia.NewNameMatcher(event) + + mPerfEvent := &ia.PerfEvent{Name: event} + mPerfEvents := []*ia.PerfEvent{mPerfEvent} + + expectedOptions, _ := ia.NewOptions().SetAttrModifiers(qualifiers).Build() + + mTransformer.On("Transform", nil, matcher).Once().Return(mPerfEvents, nil) + + customEvent, err := mResolver.resolveEvent(event, qualifiers) + require.NoError(t, err) + require.Equal(t, mPerfEvent, customEvent.Event) + require.Equal(t, expectedOptions, customEvent.Options) + mTransformer.AssertExpectations(t) + }) +} diff --git a/plugins/inputs/intel_pmu/sample.conf b/plugins/inputs/intel_pmu/sample.conf new file mode 100644 index 0000000000000..2ef4ba35c4387 --- /dev/null +++ b/plugins/inputs/intel_pmu/sample.conf @@ -0,0 +1,47 @@ +# Intel Performance Monitoring Unit plugin exposes Intel PMU metrics available through Linux Perf subsystem +[[inputs.intel_pmu]] + ## List of filesystem locations of JSON files that contain PMU event definitions. + event_definitions = ["/var/cache/pmu/GenuineIntel-6-55-4-core.json", "/var/cache/pmu/GenuineIntel-6-55-4-uncore.json"] + + ## List of core events measurement entities. There can be more than one core_events sections. + [[inputs.intel_pmu.core_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. + ## Single entry can contain name of the event (case insensitive) augmented with config options and perf modifiers. + ## If absent, all core events from provided event_definitions are counted skipping unresolvable ones. + events = ["INST_RETIRED.ANY", "CPU_CLK_UNHALTED.THREAD_ANY:config1=0x4043200000000k"] + + ## Limits the counting of events to core numbers specified. + ## If absent, events are counted on all cores. + ## Single "0", multiple "0,1,2" and range "0-2" notation is supported for each array element. + ## example: cores = ["0,2", "4", "12-16"] + cores = ["0"] + + ## Indicator that plugin shall attempt to run core_events.events as a single perf group. + ## If absent or set to false, each event is counted individually. Defaults to false. + ## This limits the number of events that can be measured to a maximum of available hardware counters per core. + ## Could vary depending on type of event, use of fixed counters. + # perf_group = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + ## Can be applied to any group of events, unrelated to perf_group setting. + # events_tag = "" + + ## List of uncore event measurement entities. There can be more than one uncore_events sections. + [[inputs.intel_pmu.uncore_events]] + ## List of events to be counted. Event names shall match names from event_definitions files. 
+ ## A single entry can contain the name of the event (case insensitive), augmented with config options and perf modifiers. + ## If absent, all uncore events from provided event_definitions are counted, skipping unresolvable ones. + events = ["UNC_CHA_CLOCKTICKS", "UNC_CHA_TOR_OCCUPANCY.IA_MISS"] + + ## Limits the counting of events to the specified sockets. + ## If absent, events are counted on all sockets. + ## Single "0", multiple "0,1" and range "0-1" notation is supported for each array element. + ## example: sockets = ["0-2"] + sockets = ["0"] + + ## Indicator that the plugin shall provide an aggregated value for multiple units of the same type distributed in an uncore. + ## If absent or set to false, events for each unit are exposed as a separate metric. Defaults to false. + # aggregate_uncore_units = false + + ## Optionally set a custom tag value that will be added to every measurement within this events group. + # events_tag = "" diff --git a/plugins/inputs/intel_powerstat/README.md b/plugins/inputs/intel_powerstat/README.md new file mode 100644 index 0000000000000..1688fc9677231 --- /dev/null +++ b/plugins/inputs/intel_powerstat/README.md @@ -0,0 +1,300 @@ +# Intel PowerStat Input Plugin + +This input plugin monitors power statistics on Intel-based platforms and assumes +the presence of a Linux-based OS. + +Main use cases are power saving and workload migration. Telemetry frameworks +allow users to monitor critical platform level metrics. A key source of platform +telemetry is the power domain, which enables MANO Monitoring & Analytics +systems to take preventive/corrective actions based on platform busyness, CPU +temperature, actual CPU utilization and power statistics. + +## Configuration + +```toml @sample.conf +# Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization. +[[inputs.intel_powerstat]] + ## The user can choose which package metrics are monitored by the plugin with the package_metrics setting: + ## - The default will collect "current_power_consumption", "current_dram_power_consumption" and "thermal_design_power" + ## - Setting this value to an empty array means no package metrics will be collected + ## - Finally, a user can specify individual metrics to capture from the supported options list + ## Supported options: + ## "current_power_consumption", "current_dram_power_consumption", "thermal_design_power", "max_turbo_frequency", "uncore_frequency" + # package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power"] + + ## The user can choose which per-CPU metrics are monitored by the plugin in the cpu_metrics array. + ## An empty or missing array means no per-CPU specific metrics will be collected by the plugin.
+ ## Supported options: + ## "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", "cpu_busy_frequency" + ## ATTENTION: cpu_busy_cycles option is DEPRECATED - superseded by cpu_c0_state_residency + # cpu_metrics = [] +``` + +## Example: Configuration with no per-CPU telemetry + +This configuration collects the default processor package metrics; no +per-CPU metrics are collected: + +```toml +[[inputs.intel_powerstat]] + cpu_metrics = [] +``` + +## Example: Configuration with no per-CPU telemetry - equivalent case + +This configuration collects the default processor package metrics; no +per-CPU metrics are collected: + +```toml +[[inputs.intel_powerstat]] +``` + +## Example: Configuration for CPU Temperature and CPU Frequency + +This configuration collects the default processor package metrics, +plus a subset of per-CPU metrics (CPU Temperature and CPU Frequency): + +```toml +[[inputs.intel_powerstat]] + cpu_metrics = ["cpu_frequency", "cpu_temperature"] +``` + +## Example: Configuration for CPU Temperature and CPU Frequency without default package metrics + +This configuration collects only a subset of per-CPU metrics (CPU +Temperature and CPU Frequency): + +```toml +[[inputs.intel_powerstat]] + package_metrics = [] + cpu_metrics = ["cpu_frequency", "cpu_temperature"] +``` + +## Example: Configuration with all available metrics + +This configuration collects all processor package metrics and all +per-CPU metrics: + +```toml +[[inputs.intel_powerstat]] + package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power", "max_turbo_frequency", "uncore_frequency"] + cpu_metrics = ["cpu_frequency", "cpu_busy_frequency", "cpu_temperature", "cpu_c0_state_residency", "cpu_c1_state_residency", "cpu_c6_state_residency"] +``` + +## SW Dependencies + +The plugin is based on Linux kernel modules that expose specific metrics over +`sysfs` or `devfs` interfaces. The plugin expects the following +dependencies: + +- _intel-rapl_ module which exposes Intel Runtime Power Limiting metrics over `sysfs` (`/sys/devices/virtual/powercap/intel-rapl`), +- _msr_ kernel module that provides access to processor model specific registers over `devfs` (`/dev/cpu/cpu%d/msr`), +- _cpufreq_ kernel module which exposes per-CPU frequency over `sysfs` (`/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq`), +- _intel-uncore-frequency_ module which exposes Intel uncore frequency metrics over `sysfs` (`/sys/devices/system/cpu/intel_uncore_frequency`). + +The minimum kernel version required is 3.13, which satisfies most of the +requirements; the `uncore_frequency` metrics additionally require the +`intel-uncore-frequency` module (available since kernel 5.6). + +Please make sure that the kernel modules are loaded and running (cpufreq is +integrated in the kernel). Modules might have to be manually enabled by using +`modprobe`.
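+ +To check which of these interfaces are already exposed, a minimal Go sketch along these lines (illustrative only, not part of the plugin; paths taken from the dependency list above, CPU index 0 assumed) can probe for them: + +```go +package main + +import ( + "fmt" + "os" +) + +func main() { + // Paths exposed by the intel-rapl, msr and cpufreq modules respectively. + paths := []string{ + "/sys/devices/virtual/powercap/intel-rapl", + "/dev/cpu/0/msr", + "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", + } + for _, p := range paths { + if _, err := os.Stat(p); err != nil { + fmt.Printf("missing dependency: %s (%v)\n", p, err) + } + } +} +```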
Depending on the kernel version, run commands: + +```sh +# kernel 5.x.x: +sudo modprobe rapl +sudo modprobe msr +sudo modprobe intel_rapl_common +sudo modprobe intel_rapl_msr + +# also for kernel >= 5.6.0 +sudo modprobe intel-uncore-frequency + +# kernel 4.x.x: +sudo modprobe msr +sudo modprobe intel_rapl +``` + +**Telegraf with the Intel PowerStat plugin enabled may require root access to read +model specific registers (MSRs)** to retrieve data for the calculation of the most +critical per-CPU metrics: + +- `cpu_busy_frequency_mhz` +- `cpu_temperature_celsius` +- `cpu_c0_state_residency_percent` +- `cpu_c1_state_residency_percent` +- `cpu_c6_state_residency_percent` + +and to retrieve data for the calculation of per-package specific metrics: + +- `max_turbo_frequency_mhz` +- `uncore_frequency_mhz_cur` + +To expose other Intel PowerStat metrics, root access may or may not be required +(depending on OS type or configuration). + +## HW Dependencies + +Specific metrics require certain processor features to be present; otherwise, +the Intel PowerStat plugin won't be able to read them. On a Linux kernel based +OS, a user can detect supported processor features by reading the `/proc/cpuinfo` +file. The plugin assumes these crucial properties are the same for all CPU cores +in the system. The following processor properties are examined in more detail in +this section: processor _cpu family_, _model_ and _flags_. The following +processor properties are required by the plugin: + +- Processor _cpu family_ must be Intel (0x6), since the plugin relies on Intel-specific +model specific registers for all features +- The following processor flags shall be present: + - "_msr_" shall be present for the plugin to read platform data from processor model specific registers and collect + the following metrics: _powerstat\_core.cpu\_temperature_, _powerstat\_core.cpu\_busy\_frequency_, + _powerstat\_core.cpu\_c0\_state\_residency_, _powerstat\_core.cpu\_c1\_state\_residency_, _powerstat\_core.cpu\_c6\_state\_residency_ + - "_aperfmperf_" shall be present to collect the following metrics: _powerstat\_core.cpu\_busy\_frequency_, + _powerstat\_core.cpu\_c0\_state\_residency_, _powerstat\_core.cpu\_c1\_state\_residency_ + - "_dts_" shall be present to collect _powerstat\_core.cpu\_temperature_ +- Processor _Model number_ must be one of the following values for the plugin to read _powerstat\_core.cpu\_c1\_state\_residency_ +and _powerstat\_core.cpu\_c6\_state\_residency_ metrics: + +| Model number | Processor name | +|-----|-------------| +| 0x37 | Intel Atom® Bay Trail | +| 0x4D | Intel Atom® Avaton | +| 0x5C | Intel Atom® Apollo Lake | +| 0x5F | Intel Atom® Denverton | +| 0x7A | Intel Atom® Goldmont | +| 0x4C | Intel Atom® Airmont | +| 0x86 | Intel Atom® Jacobsville | +| 0x96 | Intel Atom® Elkhart Lake | +| 0x9C | Intel Atom® Jasper Lake | +| 0x1A | Intel Nehalem-EP | +| 0x1E | Intel Nehalem | +| 0x1F | Intel Nehalem-G | +| 0x2E | Intel Nehalem-EX | +| 0x25 | Intel Westmere | +| 0x2C | Intel Westmere-EP | +| 0x2F | Intel Westmere-EX | +| 0x2A | Intel Sandybridge | +| 0x2D | Intel Sandybridge-X | +| 0x3A | Intel Ivybridge | +| 0x3E | Intel Ivybridge-X | +| 0x4E | Intel Atom® Silvermont-MID | +| 0x5E | Intel Skylake | +| 0x55 | Intel Skylake-X | +| 0x8E | Intel Kabylake-L | +| 0x9E | Intel Kabylake | +| 0x6A | Intel Icelake-X | +| 0x6C | Intel Icelake-D | +| 0x7D | Intel Icelake | +| 0x7E | Intel Icelake-L | +| 0x9D | Intel Icelake-NNPI | +| 0x3C | Intel Haswell | +| 0x3F | Intel Haswell-X | +| 0x45 | Intel Haswell-L | +| 0x46 | Intel
+| 0x46 | Intel Haswell-G |
+| 0x3D | Intel Broadwell |
+| 0x47 | Intel Broadwell-G |
+| 0x4F | Intel Broadwell-X |
+| 0x56 | Intel Broadwell-D |
+| 0x66 | Intel Cannonlake-L |
+| 0x57 | Intel Xeon® PHI Knights Landing |
+| 0x85 | Intel Xeon® PHI Knights Mill |
+| 0xA5 | Intel CometLake |
+| 0xA6 | Intel CometLake-L |
+| 0x8F | Intel Sapphire Rapids X |
+| 0x8C | Intel TigerLake-L |
+| 0x8D | Intel TigerLake |
+
+## Metrics
+
+All metrics collected by the Intel PowerStat plugin are collected at fixed
+intervals. Metrics that report processor C-state residency or power are
+calculated over elapsed intervals. When measurement starts, the plugin skips
+the first iteration of any metrics that are based on deltas from a previous
+value.
+
+**The following measurements are supported by Intel PowerStat plugin:**
+
+- powerstat_core
+
+  - The following tags are returned by the plugin with powerstat_core measurements:
+
+    | Tag | Description |
+    |--------------|-------------------------------|
+    | `package_id` | ID of platform package/socket |
+    | `core_id` | ID of physical processor core |
+    | `cpu_id` | ID of logical processor core |
+
+    Measurement powerstat_core metrics are collected per CPU (cpu_id is the key),
+    while the core_id and package_id tags are additional topology information.
+
+  - Available metrics for the powerstat_core measurement
+
+    | Metric name (field) | Description | Units |
+    |---------------------|-------------|-------|
+    | `cpu_frequency_mhz` | Current operational frequency of CPU Core | MHz |
+    | `cpu_busy_frequency_mhz` | CPU Core Busy Frequency measured as frequency adjusted to CPU Core busy cycles | MHz |
+    | `cpu_temperature_celsius` | Current temperature of CPU Core | Celsius degrees |
+    | `cpu_c0_state_residency_percent` | Percentage of time that CPU Core spent in C0 Core residency state | % |
+    | `cpu_c1_state_residency_percent` | Percentage of time that CPU Core spent in C1 Core residency state | % |
+    | `cpu_c6_state_residency_percent` | Percentage of time that CPU Core spent in C6 Core residency state | % |
+    | `cpu_busy_cycles_percent` | (**DEPRECATED** - superseded by cpu_c0_state_residency_percent) CPU Core Busy cycles as a ratio of Cycles spent in C0 state residency to all cycles executed by CPU Core | % |
+
+- powerstat_package
+
+  - The following tags are returned by the plugin with powerstat_package measurements:
+
+    | Tag | Description |
+    |-----|-------------|
+    | `package_id` | ID of platform package/socket |
+    | `active_cores` | Specific tag for the `max_turbo_frequency_mhz` metric. The maximum number of activated cores for the reachable turbo frequency |
+    | `die` | Specific tag for all `uncore_frequency` metrics. ID of the die |
+    | `type` | Specific tag for all `uncore_frequency` metrics. Type of uncore frequency (current or initial) |
+
+    Measurement powerstat_package metrics are collected per processor package;
+    the _package_id_ tag indicates which package the metric refers to.
+
+  - Available metrics for the powerstat_package measurement
+
+    | Metric name (field) | Description | Units |
+    |-----|-------------|-----|
+    | `thermal_design_power_watts` | Maximum Thermal Design Power (TDP) available for processor package | Watts |
+    | `current_power_consumption_watts` | Current power consumption of processor package | Watts |
+    | `current_dram_power_consumption_watts` | Current power consumption of processor package DRAM subsystem | Watts |
+    | `max_turbo_frequency_mhz` | Maximum reachable turbo frequency for number of cores active | MHz |
+    | `uncore_frequency_limit_mhz_min` | Minimum uncore frequency limit for die in processor package | MHz |
+    | `uncore_frequency_limit_mhz_max` | Maximum uncore frequency limit for die in processor package | MHz |
+    | `uncore_frequency_mhz_cur` | Current uncore frequency for die in processor package. Available only with the tag `current`. Since this value is not yet available from the `intel-uncore-frequency` module, it needs to be accessed via MSR. If the msr module is not loaded, only the `uncore_frequency_limit_mhz_min` and `uncore_frequency_limit_mhz_max` metrics will be collected | MHz |
+
+### Known issues
+
+Starting with Linux kernel version v5.4.77, due to [this kernel change][19f6d91b],
+resources like `/sys/class/powercap/intel-rapl*/*/energy_uj` are readable only
+by root for security reasons, so this plugin requires root privileges to work
+properly.
+
+If such strict security restrictions are not relevant, read permissions on
+files in the `/sys/devices/virtual/powercap/intel-rapl/` directory can be
+changed manually, for example with the `chmod` command. For example, to give
+all users read and execute permissions on all files in the `intel-rapl`
+directory:
+
+```bash
+sudo chmod -R a+rx /sys/devices/virtual/powercap/intel-rapl/
+```
+
+[19f6d91b]: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v5.4.77&id=19f6d91bdad42200aac557a683c17b1f65ee6c94
+
+## Example Output
+
+```shell
+powerstat_package,host=ubuntu,package_id=0 thermal_design_power_watts=160 1606494744000000000
+powerstat_package,host=ubuntu,package_id=0 current_power_consumption_watts=35 1606494744000000000
+powerstat_package,host=ubuntu,package_id=0 current_dram_power_consumption_watts=13.94 1606494744000000000
+powerstat_package,host=ubuntu,package_id=0,active_cores=0 max_turbo_frequency_mhz=3000i 1606494744000000000
+powerstat_package,host=ubuntu,package_id=0,active_cores=1 max_turbo_frequency_mhz=2800i 1606494744000000000
+powerstat_package,die=0,host=ubuntu,package_id=0,type=initial uncore_frequency_limit_mhz_min=800,uncore_frequency_limit_mhz_max=2400 1606494744000000000
+powerstat_package,die=0,host=ubuntu,package_id=0,type=current uncore_frequency_mhz_cur=800i,uncore_frequency_limit_mhz_min=800,uncore_frequency_limit_mhz_max=2400 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_frequency_mhz=1200.29 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_temperature_celsius=34i 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c6_state_residency_percent=92.52 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c1_state_residency_percent=6.68 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_c0_state_residency_percent=0.8 1606494744000000000
+powerstat_core,core_id=0,cpu_id=0,host=ubuntu,package_id=0 cpu_busy_frequency_mhz=1213.24 1606494744000000000
+```
diff --git a/plugins/inputs/intel_powerstat/dto.go
b/plugins/inputs/intel_powerstat/dto.go new file mode 100644 index 0000000000000..71fc10f50a3d1 --- /dev/null +++ b/plugins/inputs/intel_powerstat/dto.go @@ -0,0 +1,37 @@ +package intel_powerstat + +type msrData struct { + mperf uint64 + aperf uint64 + timeStampCounter uint64 + c3 uint64 + c6 uint64 + c7 uint64 + throttleTemp int64 + temp int64 + mperfDelta uint64 + aperfDelta uint64 + timeStampCounterDelta uint64 + c3Delta uint64 + c6Delta uint64 + c7Delta uint64 + readDate int64 +} + +type raplData struct { + dramCurrentEnergy float64 + socketCurrentEnergy float64 + socketEnergy float64 + dramEnergy float64 + readDate int64 +} + +type cpuInfo struct { + physicalID string + coreID string + cpuID string + vendorID string + cpuFamily string + model string + flags string +} diff --git a/plugins/inputs/intel_powerstat/file.go b/plugins/inputs/intel_powerstat/file.go new file mode 100644 index 0000000000000..349228bd8ff71 --- /dev/null +++ b/plugins/inputs/intel_powerstat/file.go @@ -0,0 +1,173 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" +) + +// fileService is responsible for handling operations on files. +type fileService interface { + getCPUInfoStats() (map[string]*cpuInfo, error) + getStringsMatchingPatternOnPath(path string) ([]string, error) + readFile(path string) ([]byte, error) + readFileToFloat64(reader io.Reader) (float64, int64, error) + readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error) +} + +type fileServiceImpl struct { +} + +// getCPUInfoStats retrieves basic information about CPU from /proc/cpuinfo. +func (fs *fileServiceImpl) getCPUInfoStats() (map[string]*cpuInfo, error) { + path := "/proc/cpuinfo" + cpuInfoFile, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("error while reading %s, err: %v", path, err) + } + defer cpuInfoFile.Close() + + scanner := bufio.NewScanner(cpuInfoFile) + + processorRegexp := regexp.MustCompile(`^processor\t+:\s([0-9]+)\n*$`) + physicalIDRegexp := regexp.MustCompile(`^physical id\t+:\s([0-9]+)\n*$`) + coreIDRegexp := regexp.MustCompile(`^core id\t+:\s([0-9]+)\n*$`) + vendorIDRegexp := regexp.MustCompile(`^vendor_id\t+:\s([a-zA-Z]+)\n*$`) + cpuFamilyRegexp := regexp.MustCompile(`^cpu\sfamily\t+:\s([0-9]+)\n*$`) + modelRegexp := regexp.MustCompile(`^model\t+:\s([0-9]+)\n*$`) + flagsRegexp := regexp.MustCompile(`^flags\t+:\s(.+)\n*$`) + + stats := make(map[string]*cpuInfo) + currentInfo := &cpuInfo{} + + for scanner.Scan() { + line := scanner.Text() + + processorRes := processorRegexp.FindStringSubmatch(line) + if len(processorRes) > 1 { + currentInfo = &cpuInfo{ + cpuID: processorRes[1], + } + } + + vendorIDRes := vendorIDRegexp.FindStringSubmatch(line) + if len(vendorIDRes) > 1 { + currentInfo.vendorID = vendorIDRes[1] + } + + physicalIDRes := physicalIDRegexp.FindStringSubmatch(line) + if len(physicalIDRes) > 1 { + currentInfo.physicalID = physicalIDRes[1] + } + + coreIDRes := coreIDRegexp.FindStringSubmatch(line) + if len(coreIDRes) > 1 { + currentInfo.coreID = coreIDRes[1] + } + + cpuFamilyRes := cpuFamilyRegexp.FindStringSubmatch(line) + if len(cpuFamilyRes) > 1 { + currentInfo.cpuFamily = cpuFamilyRes[1] + } + + modelRes := modelRegexp.FindStringSubmatch(line) + if len(modelRes) > 1 { + currentInfo.model = modelRes[1] + } + + flagsRes := flagsRegexp.FindStringSubmatch(line) + if len(flagsRes) > 1 { + currentInfo.flags = flagsRes[1] + + // Flags is the last 
value we have to acquire, so currentInfo is added to the map.
+			stats[currentInfo.cpuID] = currentInfo
+		}
+	}
+
+	return stats, nil
+}
+
+// getStringsMatchingPatternOnPath looks for file and directory names on path matching the given glob pattern.
+// It ignores file system errors such as I/O errors reading directories. The only possible returned error
+// is ErrBadPattern, when the pattern is malformed.
+func (fs *fileServiceImpl) getStringsMatchingPatternOnPath(path string) ([]string, error) {
+	return filepath.Glob(path)
+}
+
+// readFile reads the file at the given path and returns its content as bytes.
+func (fs *fileServiceImpl) readFile(path string) ([]byte, error) {
+	out, err := os.ReadFile(path)
+	if err != nil {
+		return make([]byte, 0), err
+	}
+	return out, nil
+}
+
+// readFileToFloat64 reads from the given reader and tries to parse the content to float64,
+// returning the parsed value and the read timestamp in nanoseconds.
+func (fs *fileServiceImpl) readFileToFloat64(reader io.Reader) (float64, int64, error) {
+	read, err := io.ReadAll(reader)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	readDate := time.Now().UnixNano()
+
+	// Remove the trailing newline character
+	trimmedString := strings.TrimRight(string(read), "\n")
+	// Parse the result to float64
+	parsedValue, err := strconv.ParseFloat(trimmedString, 64)
+	if err != nil {
+		return 0, 0, fmt.Errorf("error parsing string to float for %s", trimmedString)
+	}
+
+	return parsedValue, readDate, nil
+}
+
+// readFileAtOffsetToUint64 reads 8 bytes from the passed reader at the given offset
+// and decodes them as a little-endian uint64.
+func (fs *fileServiceImpl) readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error) {
+	buffer := make([]byte, 8)
+
+	if offset == 0 {
+		return 0, fmt.Errorf("file offset %d should not be 0", offset)
+	}
+
+	_, err := reader.ReadAt(buffer, offset)
+	if err != nil {
+		return 0, fmt.Errorf("error on reading file at offset %d, err: %v", offset, err)
+	}
+
+	return binary.LittleEndian.Uint64(buffer), nil
+}
+
+func newFileService() *fileServiceImpl {
+	return &fileServiceImpl{}
+}
+
+func checkFile(path string) error {
+	if path == "" {
+		return fmt.Errorf("empty path given")
+	}
+
+	lInfo, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return fmt.Errorf("file `%s` doesn't exist", path)
+		}
+		return fmt.Errorf("cannot obtain file info of `%s`: %v", path, err)
+	}
+	mode := lInfo.Mode()
+	if mode&os.ModeSymlink != 0 {
+		return fmt.Errorf("file `%s` is a symlink", path)
+	}
+	return nil
+}
diff --git a/plugins/inputs/intel_powerstat/file_mock_test.go b/plugins/inputs/intel_powerstat/file_mock_test.go
new file mode 100644
index 0000000000000..102142c2b3930
--- /dev/null
+++ b/plugins/inputs/intel_powerstat/file_mock_test.go
@@ -0,0 +1,147 @@
+// Code generated by mockery v2.12.3. DO NOT EDIT.
+ +package intel_powerstat + +import ( + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// mockFileService is an autogenerated mock type for the mockFileService type +type mockFileService struct { + mock.Mock +} + +// getCPUInfoStats provides a mock function with given fields: +func (_m *mockFileService) getCPUInfoStats() (map[string]*cpuInfo, error) { + ret := _m.Called() + + var r0 map[string]*cpuInfo + if rf, ok := ret.Get(0).(func() map[string]*cpuInfo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*cpuInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// getStringsMatchingPatternOnPath provides a mock function with given fields: path +func (_m *mockFileService) getStringsMatchingPatternOnPath(path string) ([]string, error) { + ret := _m.Called(path) + + var r0 []string + if rf, ok := ret.Get(0).(func(string) []string); ok { + r0 = rf(path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFile provides a mock function with given fields: path +func (_m *mockFileService) readFile(path string) ([]byte, error) { + ret := _m.Called(path) + + var r0 []byte + if rf, ok := ret.Get(0).(func(string) []byte); ok { + r0 = rf(path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFileAtOffsetToUint64 provides a mock function with given fields: reader, offset +func (_m *mockFileService) readFileAtOffsetToUint64(reader io.ReaderAt, offset int64) (uint64, error) { + ret := _m.Called(reader, offset) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(io.ReaderAt, int64) uint64); ok { + r0 = rf(reader, offset) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(io.ReaderAt, int64) error); ok { + r1 = rf(reader, offset) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// readFileToFloat64 provides a mock function with given fields: reader +func (_m *mockFileService) readFileToFloat64(reader io.Reader) (float64, int64, error) { + ret := _m.Called(reader) + + var r0 float64 + if rf, ok := ret.Get(0).(func(io.Reader) float64); ok { + r0 = rf(reader) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 int64 + if rf, ok := ret.Get(1).(func(io.Reader) int64); ok { + r1 = rf(reader) + } else { + r1 = ret.Get(1).(int64) + } + + var r2 error + if rf, ok := ret.Get(2).(func(io.Reader) error); ok { + r2 = rf(reader) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type newmockFileServiceT interface { + mock.TestingT + Cleanup(func()) +} + +// newmockFileService creates a new instance of mockFileService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func newmockFileService(t newmockFileServiceT) *mockFileService { + mock := &mockFileService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/plugins/inputs/intel_powerstat/intel_powerstat.go b/plugins/inputs/intel_powerstat/intel_powerstat.go new file mode 100644 index 0000000000000..3868356697e7e --- /dev/null +++ b/plugins/inputs/intel_powerstat/intel_powerstat.go @@ -0,0 +1,812 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux +// +build linux + +package intel_powerstat + +import ( + _ "embed" + "errors" + "fmt" + "math/big" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +const ( + cpuFrequency = "cpu_frequency" + cpuBusyFrequency = "cpu_busy_frequency" + cpuTemperature = "cpu_temperature" + cpuC0StateResidency = "cpu_c0_state_residency" + cpuC1StateResidency = "cpu_c1_state_residency" + cpuC6StateResidency = "cpu_c6_state_residency" + cpuBusyCycles = "cpu_busy_cycles" + packageCurrentPowerConsumption = "current_power_consumption" + packageCurrentDramPowerConsumption = "current_dram_power_consumption" + packageThermalDesignPower = "thermal_design_power" + packageTurboLimit = "max_turbo_frequency" + packageUncoreFrequency = "uncore_frequency" + percentageMultiplier = 100 +) + +// PowerStat plugin enables monitoring of platform metrics (power, TDP) and Core metrics like temperature, power and utilization. +type PowerStat struct { + CPUMetrics []string `toml:"cpu_metrics"` + PackageMetrics []string `toml:"package_metrics"` + Log telegraf.Logger `toml:"-"` + + fs fileService + rapl raplService + msr msrService + + cpuFrequency bool + cpuBusyFrequency bool + cpuTemperature bool + cpuC0StateResidency bool + cpuC1StateResidency bool + cpuC6StateResidency bool + cpuBusyCycles bool + packageTurboLimit bool + packageCurrentPowerConsumption bool + packageCurrentDramPowerConsumption bool + packageThermalDesignPower bool + packageUncoreFrequency bool + cpuInfo map[string]*cpuInfo + skipFirstIteration bool + logOnce map[string]error +} + +func (*PowerStat) SampleConfig() string { + return sampleConfig +} + +// Init performs one time setup of the plugin +func (p *PowerStat) Init() error { + p.parsePackageMetricsConfig() + p.parseCPUMetricsConfig() + err := p.verifyProcessor() + if err != nil { + return err + } + // Initialize MSR service only when there is at least one metric enabled + if p.cpuFrequency || p.cpuBusyFrequency || p.cpuTemperature || p.cpuC0StateResidency || p.cpuC1StateResidency || + p.cpuC6StateResidency || p.cpuBusyCycles || p.packageTurboLimit || p.packageUncoreFrequency { + p.msr = newMsrServiceWithFs(p.Log, p.fs) + } + if p.packageCurrentPowerConsumption || p.packageCurrentDramPowerConsumption || p.packageThermalDesignPower || p.packageTurboLimit || p.packageUncoreFrequency { + p.rapl = newRaplServiceWithFs(p.Log, p.fs) + } + + if !p.areCoreMetricsEnabled() && !p.areGlobalMetricsEnabled() { + return fmt.Errorf("all configuration options are empty or invalid. 
Did not find anything to gather")
+	}
+
+	return nil
+}
+
+// Gather takes in an accumulator and adds the metrics that the Input gathers
+func (p *PowerStat) Gather(acc telegraf.Accumulator) error {
+	if p.areGlobalMetricsEnabled() {
+		p.addGlobalMetrics(acc)
+	}
+
+	if p.areCoreMetricsEnabled() {
+		if p.msr.isMsrLoaded() {
+			p.logOnce["msr"] = nil
+			p.addPerCoreMetrics(acc)
+		} else {
+			err := errors.New("error while trying to read MSR (probably msr module was not loaded)")
+			if val := p.logOnce["msr"]; val == nil || val.Error() != err.Error() {
+				p.Log.Errorf("%v", err)
+				// Remember that this specific error occurred, to omit logging it next time
+				p.logOnce["msr"] = err
+			}
+		}
+	}
+
+	// The first gather iteration is skipped for most metrics because they are based on delta calculations
+	p.skipFirstIteration = false
+
+	return nil
+}
+
+func (p *PowerStat) addGlobalMetrics(acc telegraf.Accumulator) {
+	// Prepare RAPL data on each gather, because the rapl kernel module can be disabled at any time
+	p.rapl.initializeRaplData()
+	for socketID := range p.rapl.getRaplData() {
+		if p.packageTurboLimit {
+			p.addTurboRatioLimit(socketID, acc)
+		}
+
+		if p.packageUncoreFrequency {
+			die := maxDiePerSocket(socketID)
+			for actualDie := 0; actualDie < die; actualDie++ {
+				p.addUncoreFreq(socketID, strconv.Itoa(actualDie), acc)
+			}
+		}
+
+		err := p.rapl.retrieveAndCalculateData(socketID)
+		if err != nil {
+			// In case of an error, skip calculating metrics for this socket
+			if val := p.logOnce[socketID+"rapl"]; val == nil || val.Error() != err.Error() {
+				p.Log.Errorf("error fetching rapl data for socket %s, err: %v", socketID, err)
+				// Remember that this specific error occurred for socketID, to omit logging it next time
+				p.logOnce[socketID+"rapl"] = err
+			}
+			continue
+		}
+
+		// If the error stops occurring, clear the logOnce indicator
+		p.logOnce[socketID+"rapl"] = nil
+		if p.packageThermalDesignPower {
+			p.addThermalDesignPowerMetric(socketID, acc)
+		}
+
+		if p.skipFirstIteration {
+			continue
+		}
+		if p.packageCurrentPowerConsumption {
+			p.addCurrentSocketPowerConsumption(socketID, acc)
+		}
+		if p.packageCurrentDramPowerConsumption {
+			p.addCurrentDramPowerConsumption(socketID, acc)
+		}
+	}
+}
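+// maxDiePerSocket returns the number of dies per socket; for now this is
+// hard-coded to 1, since the kernel does not expose more (see the TODO below).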
+func maxDiePerSocket(_ string) int {
+	/*
+		TODO:
+		At the moment, Linux does not distinguish between multiple dies per socket.
+		This piece of code will need to be upgraded in the future.
+		https://github.com/torvalds/linux/blob/v5.17/arch/x86/include/asm/topology.h#L153
+	*/
+	return 1
+}
+
+func (p *PowerStat) addUncoreFreq(socketID string, die string, acc telegraf.Accumulator) {
+	err := checkFile("/sys/devices/system/cpu/intel_uncore_frequency")
+	if err != nil {
+		err := fmt.Errorf("error while checking existing intel_uncore_frequency (probably intel-uncore-frequency module was not loaded)")
+		if val := p.logOnce["intel_uncore_frequency"]; val == nil || val.Error() != err.Error() {
+			p.Log.Errorf("%v", err)
+			// Remember that this specific error occurred, to omit logging it next time
+			p.logOnce["intel_uncore_frequency"] = err
+		}
+		return
+	}
+	p.logOnce["intel_uncore_frequency"] = nil
+	p.readUncoreFreq("initial", socketID, die, acc)
+	p.readUncoreFreq("current", socketID, die, acc)
+}
+
+func (p *PowerStat) readUncoreFreq(typeFreq string, socketID string, die string, acc telegraf.Accumulator) {
+	fields := map[string]interface{}{}
+	cpuID := ""
+	if typeFreq == "current" {
+		if p.areCoreMetricsEnabled() && p.msr.isMsrLoaded() {
+			p.logOnce[socketID+"msr"] = nil
+			for _, v := range p.cpuInfo {
+				if v.physicalID == socketID {
+					cpuID = v.cpuID
+				}
+			}
+			if cpuID == "" {
+				p.Log.Debugf("error while reading socket ID")
+				return
+			}
+			actualUncoreFreq, err := p.msr.readSingleMsr(cpuID, "MSR_UNCORE_PERF_STATUS")
+			if err != nil {
+				p.Log.Debugf("error while reading MSR_UNCORE_PERF_STATUS: %v", err)
+				return
+			}
+			// The current ratio is stored in the low 6 bits; multiply by 100 to convert it to MHz
+			actualUncoreFreq = (actualUncoreFreq & 0x3F) * 100
+			fields["uncore_frequency_mhz_cur"] = actualUncoreFreq
+		} else {
+			err := errors.New("error while trying to read MSR (probably msr module was not loaded), uncore_frequency_mhz_cur metric will not be collected")
+			if val := p.logOnce[socketID+"msr"]; val == nil || val.Error() != err.Error() {
+				p.Log.Errorf("%v", err)
+				// Remember that this specific error occurred for socketID, to omit logging it next time
+				p.logOnce[socketID+"msr"] = err
+			}
+		}
+	}
+	initMinFreq, err := p.msr.retrieveUncoreFrequency(socketID, typeFreq, "min", die)
+	if err != nil {
+		p.Log.Errorf("error while retrieving minimum uncore frequency of the socket %s, err: %v", socketID, err)
+		return
+	}
+	initMaxFreq, err := p.msr.retrieveUncoreFrequency(socketID, typeFreq, "max", die)
+	if err != nil {
+		p.Log.Errorf("error while retrieving maximum uncore frequency of the socket %s, err: %v", socketID, err)
+		return
+	}
+
+	tags := map[string]string{
+		"package_id": socketID,
+		"type":       typeFreq,
+		"die":        die,
+	}
+	fields["uncore_frequency_limit_mhz_min"] = initMinFreq
+	fields["uncore_frequency_limit_mhz_max"] = initMaxFreq
+
+	acc.AddGauge("powerstat_package", fields, tags)
+}
+
+func (p *PowerStat) addThermalDesignPowerMetric(socketID string, acc telegraf.Accumulator) {
+	maxPower, err := p.rapl.getConstraintMaxPowerWatts(socketID)
+	if err != nil {
+		p.Log.Errorf("error while retrieving TDP of the socket %s, err: %v", socketID, err)
+		return
+	}
+
+	tags := map[string]string{
+		"package_id": socketID,
+	}
+
+	fields := map[string]interface{}{
+		"thermal_design_power_watts": roundFloatToNearestTwoDecimalPlaces(maxPower),
+	}
+
+	acc.AddGauge("powerstat_package", fields, tags)
+}
+
+func (p *PowerStat) addCurrentSocketPowerConsumption(socketID string, acc telegraf.Accumulator) {
+	tags := map[string]string{
+		"package_id": socketID,
+	}
+
+	fields := map[string]interface{}{
+		"current_power_consumption_watts": roundFloatToNearestTwoDecimalPlaces(p.rapl.getRaplData()[socketID].socketCurrentEnergy),
+	}
+
+	acc.AddGauge("powerstat_package", fields, tags)
+}
+func (p *PowerStat) addCurrentDramPowerConsumption(socketID string, acc telegraf.Accumulator) {
+	tags := map[string]string{
+		"package_id": socketID,
+	}
+
+	fields := map[string]interface{}{
+		"current_dram_power_consumption_watts": roundFloatToNearestTwoDecimalPlaces(p.rapl.getRaplData()[socketID].dramCurrentEnergy),
+	}
+
+	acc.AddGauge("powerstat_package", fields, tags)
+}
+
+func (p *PowerStat) addPerCoreMetrics(acc telegraf.Accumulator) {
+	var wg sync.WaitGroup
+	wg.Add(len(p.msr.getCPUCoresData()))
+
+	for cpuID := range p.msr.getCPUCoresData() {
+		go p.addMetricsForSingleCore(cpuID, acc, &wg)
+	}
+
+	wg.Wait()
+}
+
+func (p *PowerStat) addMetricsForSingleCore(cpuID string, acc telegraf.Accumulator, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	if p.cpuFrequency {
+		p.addCPUFrequencyMetric(cpuID, acc)
+	}
+
+	// Read data from the MSR only if required
+	if p.cpuC0StateResidency || p.cpuC1StateResidency || p.cpuC6StateResidency || p.cpuBusyCycles || p.cpuTemperature || p.cpuBusyFrequency {
+		err := p.msr.openAndReadMsr(cpuID)
+		if err != nil {
+			// In case of an error, exit the function. All metrics past this point depend on MSR data
+			p.Log.Debugf("error while reading msr: %v", err)
+			return
+		}
+	}
+
+	if p.cpuTemperature {
+		p.addCPUTemperatureMetric(cpuID, acc)
+	}
+
+	// The cpuBusyFrequency metric does internal calculations that are also required by the next plugin cycle
+	if p.cpuBusyFrequency {
+		p.addCPUBusyFrequencyMetric(cpuID, acc)
+	}
+
+	if !p.skipFirstIteration {
+		if p.cpuC0StateResidency || p.cpuBusyCycles {
+			p.addCPUC0StateResidencyMetric(cpuID, acc)
+		}
+
+		if p.cpuC1StateResidency {
+			p.addCPUC1StateResidencyMetric(cpuID, acc)
+		}
+
+		if p.cpuC6StateResidency {
+			p.addCPUC6StateResidencyMetric(cpuID, acc)
+		}
+	}
+}
+
+func (p *PowerStat) addCPUFrequencyMetric(cpuID string, acc telegraf.Accumulator) {
+	frequency, err := p.msr.retrieveCPUFrequencyForCore(cpuID)
+
+	// In case of an error, leave the function
+	if err != nil {
+		p.Log.Debugf("error while reading file: %v", err)
+		return
+	}
+
+	cpu := p.cpuInfo[cpuID]
+	tags := map[string]string{
+		"package_id": cpu.physicalID,
+		"core_id":    cpu.coreID,
+		"cpu_id":     cpu.cpuID,
+	}
+
+	fields := map[string]interface{}{
+		"cpu_frequency_mhz": roundFloatToNearestTwoDecimalPlaces(frequency),
+	}
+
+	acc.AddGauge("powerstat_core", fields, tags)
+}
+
+func (p *PowerStat) addCPUTemperatureMetric(cpuID string, acc telegraf.Accumulator) {
+	coresData := p.msr.getCPUCoresData()
+	temp := coresData[cpuID].throttleTemp - coresData[cpuID].temp
+
+	cpu := p.cpuInfo[cpuID]
+	tags := map[string]string{
+		"package_id": cpu.physicalID,
+		"core_id":    cpu.coreID,
+		"cpu_id":     cpu.cpuID,
+	}
+	fields := map[string]interface{}{
+		"cpu_temperature_celsius": temp,
+	}
+
+	acc.AddGauge("powerstat_core", fields, tags)
+}
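+// calculateTurboRatioGroup builds a mapping from the number of active cores to
+// the maximum reachable turbo frequency in MHz. coreCounts packs up to eight
+// bucket boundaries (numbers of active cores) into consecutive 8-bit fields,
+// and msr packs the matching frequency ratios into the same 8-bit layout; each
+// ratio is multiplied by 100 to convert it to MHz (see the decoding below and
+// TestCalculateTurboRatioGroup for worked examples).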
+func calculateTurboRatioGroup(coreCounts uint64, msr uint64, group map[int]uint64) {
+	// The number of active cores for bucket 1 is stored in the first 8 bits;
+	// the values for the following buckets are stored in the subsequent 8-bit fields
+	from := coreCounts & 0xFF
+	for i := 0; i < 8; i++ {
+		to := (coreCounts >> (i * 8)) & 0xFF
+		if to == 0 {
+			break
+		}
+		value := (msr >> (i * 8)) & 0xFF
+		// The freq ratio values are stored in 8-bit blocks; their real value is obtained after multiplication by 100
+		if value != 0 && to != 0 {
+			for ; from <= to; from++ {
+				group[int(from)] = value * 100
+			}
+		}
+		from = to + 1
+	}
+}
+
+func (p *PowerStat) addTurboRatioLimit(socketID string, acc telegraf.Accumulator) {
+	var err error
+	turboRatioLimitGroups := make(map[int]uint64)
+
+	var cpuID = ""
+	var model = ""
+	for _, v := range p.cpuInfo {
+		if v.physicalID == socketID {
+			cpuID = v.cpuID
+			model = v.model
+		}
+	}
+	if cpuID == "" || model == "" {
+		p.Log.Debugf("error while reading socket ID")
+		return
+	}
+	// dump_hsw_turbo_ratio_limit
+	if model == strconv.FormatInt(0x3F, 10) { // INTEL_FAM6_HASWELL_X
+		coreCounts := uint64(0x1211) // counting the number of active cores 17 and 18
+		msrTurboRatioLimit2, err := p.msr.readSingleMsr(cpuID, "MSR_TURBO_RATIO_LIMIT2")
+		if err != nil {
+			p.Log.Debugf("error while reading MSR_TURBO_RATIO_LIMIT2: %v", err)
+			return
+		}
+
+		calculateTurboRatioGroup(coreCounts, msrTurboRatioLimit2, turboRatioLimitGroups)
+	}
+
+	// dump_ivt_turbo_ratio_limit
+	if (model == strconv.FormatInt(0x3E, 10)) || // INTEL_FAM6_IVYBRIDGE_X
+		(model == strconv.FormatInt(0x3F, 10)) { // INTEL_FAM6_HASWELL_X
+		coreCounts := uint64(0x100F0E0D0C0B0A09) // counting the number of active cores 9 to 16
+		msrTurboRatioLimit1, err := p.msr.readSingleMsr(cpuID, "MSR_TURBO_RATIO_LIMIT1")
+		if err != nil {
+			p.Log.Debugf("error while reading MSR_TURBO_RATIO_LIMIT1: %v", err)
+			return
+		}
+		calculateTurboRatioGroup(coreCounts, msrTurboRatioLimit1, turboRatioLimitGroups)
+	}
+
+	if (model != strconv.FormatInt(0x37, 10)) && // INTEL_FAM6_ATOM_SILVERMONT
+		(model != strconv.FormatInt(0x4A, 10)) && // INTEL_FAM6_ATOM_SILVERMONT_MID
+		(model != strconv.FormatInt(0x5A, 10)) && // INTEL_FAM6_ATOM_AIRMONT_MID
+		(model != strconv.FormatInt(0x2E, 10)) && // INTEL_FAM6_NEHALEM_EX
+		(model != strconv.FormatInt(0x2F, 10)) && // INTEL_FAM6_WESTMERE_EX
+		(model != strconv.FormatInt(0x57, 10)) && // INTEL_FAM6_XEON_PHI_KNL
+		(model != strconv.FormatInt(0x85, 10)) { // INTEL_FAM6_XEON_PHI_KNM
+		// default value (counting the number of active cores 1 to 8); may be changed in the "if" block below
+		coreCounts := uint64(0x0807060504030201)
+		if (model == strconv.FormatInt(0x5C, 10)) || // INTEL_FAM6_ATOM_GOLDMONT
+			(model == strconv.FormatInt(0x55, 10)) || // INTEL_FAM6_SKYLAKE_X
+			(model == strconv.FormatInt(0x6C, 10) || model == strconv.FormatInt(0x8F, 10) || model == strconv.FormatInt(0x6A, 10)) || // INTEL_FAM6_ICELAKE_X
+			(model == strconv.FormatInt(0x5F, 10)) || // INTEL_FAM6_ATOM_GOLDMONT_D
+			(model == strconv.FormatInt(0x86, 10)) { // INTEL_FAM6_ATOM_TREMONT_D
+			coreCounts, err = p.msr.readSingleMsr(cpuID, "MSR_TURBO_RATIO_LIMIT1")
+
+			if err != nil {
+				p.Log.Debugf("error while reading MSR_TURBO_RATIO_LIMIT1: %v", err)
+				return
+			}
+		}
+
+		msrTurboRatioLimit, err := p.msr.readSingleMsr(cpuID, "MSR_TURBO_RATIO_LIMIT")
+		if err != nil {
+			p.Log.Debugf("error while reading MSR_TURBO_RATIO_LIMIT: %v", err)
+			return
+		}
+		calculateTurboRatioGroup(coreCounts, msrTurboRatioLimit, turboRatioLimitGroups)
+	}
+	// dump_atom_turbo_ratio_limits
+	if model == strconv.FormatInt(0x37, 10) || // INTEL_FAM6_ATOM_SILVERMONT
+		model == strconv.FormatInt(0x4A, 10) || // INTEL_FAM6_ATOM_SILVERMONT_MID
+		model == strconv.FormatInt(0x5A, 10) { // INTEL_FAM6_ATOM_AIRMONT_MID
+		coreCounts := uint64(0x04030201) // counting the number of active cores 1 to 4
+		msrTurboRatioLimit, err := p.msr.readSingleMsr(cpuID, "MSR_ATOM_CORE_TURBO_RATIOS")
+
+		if err != nil {
+			p.Log.Debugf("error while reading MSR_ATOM_CORE_TURBO_RATIOS: %v", err)
+			return
+		}
+		value := uint64(0)
+		newValue := uint64(0)
+
+		for i := 0; i < 4; i++ { // the value "4" is specific to this group of processors
+			newValue = (msrTurboRatioLimit >> (8 * i)) & 0x3F // the freq ratio values are stored in 6-bit blocks, saved every 8 bits
+			value = value + (newValue << (i * 8))             // now the freq ratio values are stored in 8-bit blocks, saved every 8 bits
+		}
+
+		calculateTurboRatioGroup(coreCounts, value, turboRatioLimitGroups)
+	}
+	// dump_knl_turbo_ratio_limits
+	if model == strconv.FormatInt(0x57, 10) { // INTEL_FAM6_XEON_PHI_KNL
+		msrTurboRatioLimit, err := p.msr.readSingleMsr(cpuID, "MSR_TURBO_RATIO_LIMIT")
+		if err != nil {
+			p.Log.Debugf("error while reading MSR_TURBO_RATIO_LIMIT: %v", err)
+			return
+		}
+
+		// The freq ratio of bucket 1 is stored in bits 15 to 8.
+		// Each following value is calculated as the previous value minus a delta;
+		// the deltas are stored in 3-bit blocks every 8 bits (starting at bit 21 (2*8+5))
+		value := (msrTurboRatioLimit >> 8) & 0xFF
+		newValue := value
+		for i := 2; i < 8; i++ {
+			newValue = newValue - (msrTurboRatioLimit>>(8*i+5))&0x7
+			value = value + (newValue << ((i - 1) * 8))
+		}
+
+		// The number of active cores for bucket 1 is stored in bits 1 to 7.
+		// Each following value is calculated as the previous value plus a delta;
+		// the deltas are stored in 5-bit blocks every 8 bits (starting at bit 16 (2*8))
+		coreCounts := (msrTurboRatioLimit & 0xFF) >> 1
+		newBucket := coreCounts
+		for i := 2; i < 8; i++ {
+			newBucket = newBucket + (msrTurboRatioLimit>>(8*i))&0x1F
+			coreCounts = coreCounts + (newBucket << ((i - 1) * 8))
+		}
+		calculateTurboRatioGroup(coreCounts, value, turboRatioLimitGroups)
+	}
+
+	for key, val := range turboRatioLimitGroups {
+		tags := map[string]string{
+			"package_id":   socketID,
+			"active_cores": strconv.Itoa(key),
+		}
+		fields := map[string]interface{}{
+			"max_turbo_frequency_mhz": val,
+		}
+		acc.AddGauge("powerstat_package", fields, tags)
+	}
+}
+
+func (p *PowerStat) addCPUBusyFrequencyMetric(cpuID string, acc telegraf.Accumulator) {
+	coresData := p.msr.getCPUCoresData()
+	mperfDelta := coresData[cpuID].mperfDelta
+	// Avoid division by 0
+	if mperfDelta == 0 {
+		p.Log.Errorf("mperf delta should not equal 0 on core %s", cpuID)
+		return
+	}
+	aperfMperf := float64(coresData[cpuID].aperfDelta) / float64(mperfDelta)
+	tsc := convertProcessorCyclesToHertz(coresData[cpuID].timeStampCounterDelta)
+	timeNow := time.Now().UnixNano()
+	interval := convertNanoSecondsToSeconds(timeNow - coresData[cpuID].readDate)
+	coresData[cpuID].readDate = timeNow
+
+	if p.skipFirstIteration {
+		return
+	}
+
+	if interval == 0 {
+		p.Log.Errorf("interval between last two Telegraf cycles is 0")
+		return
+	}
+
+	busyMhzValue := roundFloatToNearestTwoDecimalPlaces(tsc * aperfMperf / interval)
+
+	cpu := p.cpuInfo[cpuID]
+	tags := map[string]string{
+		"package_id": cpu.physicalID,
+		"core_id":    cpu.coreID,
+		"cpu_id":     cpu.cpuID,
+	}
+	fields := map[string]interface{}{
+		"cpu_busy_frequency_mhz": busyMhzValue,
+	}
+
+	acc.AddGauge("powerstat_core", fields, tags)
+}
+
+func (p *PowerStat) addCPUC1StateResidencyMetric(cpuID string, acc telegraf.Accumulator) {
+	coresData := p.msr.getCPUCoresData()
+	timestampDeltaBig := new(big.Int).SetUint64(coresData[cpuID].timeStampCounterDelta)
+	// Avoid division by 0
+	if timestampDeltaBig.Sign() < 1 {
+		p.Log.Errorf("timestamp delta value %v should not be lower than 1", timestampDeltaBig)
+		return
+	}
+
+	// Since counter collection is not atomic, it may happen that the sum of C0, C1, C3, C6 and C7
+	// is bigger than the TSC; in such a case the C1 residency shall be set to 0.
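+	// (C1 is derived by subtraction: C1 = TSC - mperf - C3 - C6 - C7.)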
+ // Operating on big.Int to avoid overflow + mperfDeltaBig := new(big.Int).SetUint64(coresData[cpuID].mperfDelta) + c3DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c3Delta) + c6DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c6Delta) + c7DeltaBig := new(big.Int).SetUint64(coresData[cpuID].c7Delta) + + c1Big := new(big.Int).Sub(timestampDeltaBig, mperfDeltaBig) + c1Big.Sub(c1Big, c3DeltaBig) + c1Big.Sub(c1Big, c6DeltaBig) + c1Big.Sub(c1Big, c7DeltaBig) + + if c1Big.Sign() < 0 { + c1Big = c1Big.SetInt64(0) + } + c1Value := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(c1Big.Uint64()) / float64(timestampDeltaBig.Uint64())) + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_c1_state_residency_percent": c1Value, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUC6StateResidencyMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + // Avoid division by 0 + if coresData[cpuID].timeStampCounterDelta == 0 { + p.Log.Errorf("timestamp counter on offset %s should not equal 0 on cpuID %s", + timestampCounterLocation, cpuID) + return + } + c6Value := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * + float64(coresData[cpuID].c6Delta) / float64(coresData[cpuID].timeStampCounterDelta)) + + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + fields := map[string]interface{}{ + "cpu_c6_state_residency_percent": c6Value, + } + + acc.AddGauge("powerstat_core", fields, tags) +} + +func (p *PowerStat) addCPUC0StateResidencyMetric(cpuID string, acc telegraf.Accumulator) { + coresData := p.msr.getCPUCoresData() + // Avoid division by 0 + if coresData[cpuID].timeStampCounterDelta == 0 { + p.Log.Errorf("timestamp counter on offset %s should not equal 0 on cpuID %s", + timestampCounterLocation, cpuID) + return + } + c0Value := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * + float64(coresData[cpuID].mperfDelta) / float64(coresData[cpuID].timeStampCounterDelta)) + cpu := p.cpuInfo[cpuID] + tags := map[string]string{ + "package_id": cpu.physicalID, + "core_id": cpu.coreID, + "cpu_id": cpu.cpuID, + } + if p.cpuC0StateResidency { + fields := map[string]interface{}{ + "cpu_c0_state_residency_percent": c0Value, + } + acc.AddGauge("powerstat_core", fields, tags) + } + if p.cpuBusyCycles { + deprecatedFields := map[string]interface{}{ + "cpu_busy_cycles_percent": c0Value, + } + acc.AddGauge("powerstat_core", deprecatedFields, tags) + } +} + +func (p *PowerStat) parsePackageMetricsConfig() { + if p.PackageMetrics == nil { + // if Package Metric config is empty, use the default settings. 
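+		// The defaults enable current_power_consumption, current_dram_power_consumption
+		// and thermal_design_power; max_turbo_frequency and uncore_frequency stay opt-in.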
+ p.packageCurrentPowerConsumption = true + p.packageCurrentDramPowerConsumption = true + p.packageThermalDesignPower = true + return + } + + if contains(p.PackageMetrics, packageTurboLimit) { + p.packageTurboLimit = true + } + if contains(p.PackageMetrics, packageCurrentPowerConsumption) { + p.packageCurrentPowerConsumption = true + } + + if contains(p.PackageMetrics, packageCurrentDramPowerConsumption) { + p.packageCurrentDramPowerConsumption = true + } + if contains(p.PackageMetrics, packageThermalDesignPower) { + p.packageThermalDesignPower = true + } + if contains(p.PackageMetrics, packageUncoreFrequency) { + p.packageUncoreFrequency = true + } +} + +func (p *PowerStat) parseCPUMetricsConfig() { + if len(p.CPUMetrics) == 0 { + return + } + + if contains(p.CPUMetrics, cpuFrequency) { + p.cpuFrequency = true + } + + if contains(p.CPUMetrics, cpuC0StateResidency) { + p.cpuC0StateResidency = true + } + + if contains(p.CPUMetrics, cpuC1StateResidency) { + p.cpuC1StateResidency = true + } + + if contains(p.CPUMetrics, cpuC6StateResidency) { + p.cpuC6StateResidency = true + } + + if contains(p.CPUMetrics, cpuBusyCycles) { + p.cpuBusyCycles = true + } + + if contains(p.CPUMetrics, cpuBusyFrequency) { + p.cpuBusyFrequency = true + } + + if contains(p.CPUMetrics, cpuTemperature) { + p.cpuTemperature = true + } +} + +func (p *PowerStat) verifyProcessor() error { + allowedProcessorModelsForC1C6 := []int64{0x37, 0x4D, 0x5C, 0x5F, 0x7A, 0x4C, 0x86, 0x96, 0x9C, + 0x1A, 0x1E, 0x1F, 0x2E, 0x25, 0x2C, 0x2F, 0x2A, 0x2D, 0x3A, 0x3E, 0x4E, 0x5E, 0x55, 0x8E, + 0x9E, 0x6A, 0x6C, 0x7D, 0x7E, 0x9D, 0x3C, 0x3F, 0x45, 0x46, 0x3D, 0x47, 0x4F, 0x56, + 0x66, 0x57, 0x85, 0xA5, 0xA6, 0x8F, 0x8C, 0x8D} + stats, err := p.fs.getCPUInfoStats() + if err != nil { + return err + } + + p.cpuInfo = stats + + // First CPU is sufficient for verification + firstCPU := p.cpuInfo["0"] + if firstCPU == nil { + return fmt.Errorf("first core not found while parsing /proc/cpuinfo") + } + + if firstCPU.vendorID != "GenuineIntel" || firstCPU.cpuFamily != "6" { + return fmt.Errorf("Intel processor not found, vendorId: %s", firstCPU.vendorID) + } + + if !contains(convertIntegerArrayToStringArray(allowedProcessorModelsForC1C6), firstCPU.model) { + p.cpuC1StateResidency = false + p.cpuC6StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "msr") { + p.cpuTemperature = false + p.cpuC6StateResidency = false + p.cpuC0StateResidency = false + p.cpuBusyCycles = false + p.cpuBusyFrequency = false + p.cpuC1StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "aperfmperf") { + p.cpuBusyCycles = false + p.cpuBusyFrequency = false + p.cpuC0StateResidency = false + p.cpuC1StateResidency = false + } + + if !strings.Contains(firstCPU.flags, "dts") { + p.cpuTemperature = false + } + + return nil +} + +func contains(slice []string, str string) bool { + for _, v := range slice { + if v == str { + return true + } + } + return false +} + +func (p *PowerStat) areCoreMetricsEnabled() bool { + return p.msr != nil && len(p.msr.getCPUCoresData()) > 0 +} + +func (p *PowerStat) areGlobalMetricsEnabled() bool { + return p.rapl != nil +} + +// newPowerStat creates and returns PowerStat struct +func newPowerStat(fs fileService) *PowerStat { + p := &PowerStat{ + cpuFrequency: false, + cpuC0StateResidency: false, + cpuC1StateResidency: false, + cpuC6StateResidency: false, + cpuBusyCycles: false, + cpuTemperature: false, + cpuBusyFrequency: false, + packageTurboLimit: false, + packageUncoreFrequency: false, + packageCurrentPowerConsumption: 
false, + packageCurrentDramPowerConsumption: false, + packageThermalDesignPower: false, + skipFirstIteration: true, + fs: fs, + logOnce: make(map[string]error), + } + + return p +} + +func init() { + inputs.Add("intel_powerstat", func() telegraf.Input { + return newPowerStat(newFileService()) + }) +} diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go new file mode 100644 index 0000000000000..256e64970094e --- /dev/null +++ b/plugins/inputs/intel_powerstat/intel_powerstat_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux +// +build !linux + +package intel_powerstat diff --git a/plugins/inputs/intel_powerstat/intel_powerstat_test.go b/plugins/inputs/intel_powerstat/intel_powerstat_test.go new file mode 100644 index 0000000000000..14a7f59b1b2bf --- /dev/null +++ b/plugins/inputs/intel_powerstat/intel_powerstat_test.go @@ -0,0 +1,644 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "errors" + "strconv" + "sync" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +type MockServices struct { + fs *mockFileService + msr *mockMsrService + rapl *mockRaplService +} + +func TestInitPlugin(t *testing.T) { + cores := []string{"cpu0", "cpu1", "cpu2", "cpu3"} + power, mockServices := getPowerWithMockedServices() + + mockServices.fs.On("getCPUInfoStats", mock.Anything). + Return(nil, errors.New("error getting cpu stats")).Once() + require.Error(t, power.Init()) + + mockServices.fs.On("getCPUInfoStats", mock.Anything). + Return(make(map[string]*cpuInfo), nil).Once() + require.Error(t, power.Init()) + + mockServices.fs.On("getCPUInfoStats", mock.Anything). + Return(map[string]*cpuInfo{"0": { + vendorID: "GenuineIntel", + cpuFamily: "test", + }}, nil).Once() + require.Error(t, power.Init()) + + mockServices.fs.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(cores, nil).Once(). + On("getCPUInfoStats", mock.Anything). + Return(map[string]*cpuInfo{"0": { + vendorID: "GenuineIntel", + cpuFamily: "6", + }}, nil) + // Verify MSR service initialization. + power.cpuFrequency = true + require.NoError(t, power.Init()) + mockServices.fs.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(cores), len(power.msr.getCPUCoresData())) + + mockServices.fs.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(nil, errors.New("error during getStringsMatchingPatternOnPath")).Once() + + // In case of an error when fetching cpu cores plugin should proceed with execution. 
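+	// Package metrics can still be gathered in that case; only the per-core MSR data is missing.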
+ require.NoError(t, power.Init()) + mockServices.fs.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, 0, len(power.msr.getCPUCoresData())) +} + +func TestParseCPUMetricsConfig(t *testing.T) { + power, _ := getPowerWithMockedServices() + disableCoreMetrics(power) + + power.CPUMetrics = []string{ + "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", + "cpu_busy_frequency", + } + power.parseCPUMetricsConfig() + verifyCoreMetrics(t, power, true) + disableCoreMetrics(power) + verifyCoreMetrics(t, power, false) + + power.CPUMetrics = []string{} + power.parseCPUMetricsConfig() + + power.CPUMetrics = []string{"cpu_c6_state_residency", "#@$sdkjdfsdf3@", "1pu_c1_state_residency"} + power.parseCPUMetricsConfig() + require.Equal(t, false, power.cpuC1StateResidency) + require.Equal(t, true, power.cpuC6StateResidency) + disableCoreMetrics(power) + verifyCoreMetrics(t, power, false) + + power.CPUMetrics = []string{"#@$sdkjdfsdf3@", "1pu_c1_state_residency", "123"} + power.parseCPUMetricsConfig() + verifyCoreMetrics(t, power, false) +} + +func verifyCoreMetrics(t *testing.T, power *PowerStat, enabled bool) { + require.Equal(t, enabled, power.cpuFrequency) + require.Equal(t, enabled, power.cpuC1StateResidency) + require.Equal(t, enabled, power.cpuC6StateResidency) + require.Equal(t, enabled, power.cpuC0StateResidency) + require.Equal(t, enabled, power.cpuBusyCycles) + require.Equal(t, enabled, power.cpuBusyFrequency) + require.Equal(t, enabled, power.cpuTemperature) +} + +func TestGather(t *testing.T) { + var acc testutil.Accumulator + packageIDs := []string{"0", "1"} + coreIDs := []string{"0", "1", "2", "3"} + socketCurrentEnergy := 13213852.2 + dramCurrentEnergy := 784552.0 + preparedCPUData := getPreparedCPUData(coreIDs) + raplDataMap := prepareRaplDataMap(packageIDs, socketCurrentEnergy, dramCurrentEnergy) + + power, mockServices := getPowerWithMockedServices() + prepareCPUInfo(power, coreIDs, packageIDs) + enableCoreMetrics(power) + power.skipFirstIteration = false + + mockServices.rapl.On("initializeRaplData", mock.Anything). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Times(len(raplDataMap)). + On("getConstraintMaxPowerWatts", mock.Anything).Return(546783852.3, nil) + mockServices.msr.On("getCPUCoresData").Return(preparedCPUData). + On("isMsrLoaded", mock.Anything).Return(true). + On("openAndReadMsr", mock.Anything).Return(nil). + On("retrieveCPUFrequencyForCore", mock.Anything).Return(1200000.2, nil) + + require.NoError(t, power.Gather(&acc)) + // Number of global metrics : 3 + // Number of per core metrics : 7 + require.Equal(t, 3*len(packageIDs)+7*len(coreIDs), len(acc.GetTelegrafMetrics())) +} + +func TestAddGlobalMetricsNegative(t *testing.T) { + var acc testutil.Accumulator + socketCurrentEnergy := 13213852.2 + dramCurrentEnergy := 784552.0 + raplDataMap := prepareRaplDataMap([]string{"0", "1"}, socketCurrentEnergy, dramCurrentEnergy) + power, mockServices := getPowerWithMockedServices() + power.skipFirstIteration = false + mockServices.rapl.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(raplDataMap).Once(). 
+ On("retrieveAndCalculateData", mock.Anything).Return(errors.New("error while calculating data")).Times(len(raplDataMap)) + + power.addGlobalMetrics(&acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + mockServices.rapl.AssertNumberOfCalls(t, "retrieveAndCalculateData", len(raplDataMap)) + + mockServices.rapl.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(make(map[string]*raplData)).Once() + + power.addGlobalMetrics(&acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + mockServices.rapl.AssertNotCalled(t, "retrieveAndCalculateData") + + mockServices.rapl.On("initializeRaplData", mock.Anything).Once(). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Once(). + On("retrieveAndCalculateData", mock.Anything).Return(errors.New("error while calculating data")).Once(). + On("getConstraintMaxPowerWatts", mock.Anything).Return(12313851.5, nil).Twice() + + power.addGlobalMetrics(&acc) + require.Equal(t, 3, len(acc.GetTelegrafMetrics())) +} + +func TestAddGlobalMetricsPositive(t *testing.T) { + var acc testutil.Accumulator + socketCurrentEnergy := 3644574.4 + dramCurrentEnergy := 124234872.5 + raplDataMap := prepareRaplDataMap([]string{"0", "1"}, socketCurrentEnergy, dramCurrentEnergy) + maxPower := 546783852.9 + power, mockServices := getPowerWithMockedServices() + power.skipFirstIteration = false + + mockServices.rapl.On("initializeRaplData", mock.Anything). + On("getRaplData").Return(raplDataMap). + On("retrieveAndCalculateData", mock.Anything).Return(nil).Times(len(raplDataMap)). + On("getConstraintMaxPowerWatts", mock.Anything).Return(maxPower, nil).Twice(). + On("getCurrentDramPowerConsumption", mock.Anything).Return(dramCurrentEnergy) + + power.addGlobalMetrics(&acc) + require.Equal(t, 6, len(acc.GetTelegrafMetrics())) + + expectedResults := getGlobalMetrics(maxPower, socketCurrentEnergy, dramCurrentEnergy) + for _, test := range expectedResults { + acc.AssertContainsTaggedFields(t, "powerstat_package", test.fields, test.tags) + } +} + +func TestAddMetricsForSingleCoreNegative(t *testing.T) { + var wg sync.WaitGroup + var acc testutil.Accumulator + core := "0" + power, mockServices := getPowerWithMockedServices() + + mockServices.msr.On("openAndReadMsr", core).Return(errors.New("error reading MSR file")).Once() + + // Skip generating metric for CPU frequency. + power.cpuFrequency = false + + wg.Add(1) + power.addMetricsForSingleCore(core, &acc, &wg) + wg.Wait() + + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddCPUFrequencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "1" + coreID := "3" + packageID := "0" + frequency := 1200000.2 + power, mockServices := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + + mockServices.msr.On("retrieveCPUFrequencyForCore", mock.Anything). 
+ Return(float64(0), errors.New("error on reading file")).Once() + + power.addCPUFrequencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + + mockServices.msr.On("retrieveCPUFrequencyForCore", mock.Anything).Return(frequency, nil).Once() + + power.addCPUFrequencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedFrequency := roundFloatToNearestTwoDecimalPlaces(frequency) + expectedMetric := getPowerCoreMetric("cpu_frequency_mhz", expectedFrequency, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) +} + +func TestReadUncoreFreq(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "0" + packageID := "0" + die := "0" + power, mockServices := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + + mockServices.msr.On("getCPUCoresData").Return(preparedData) + + mockServices.msr.On("isMsrLoaded").Return(true) + + mockServices.msr.On("readSingleMsr", "0", "MSR_UNCORE_PERF_STATUS").Return(uint64(10), nil) + + mockServices.msr.On("retrieveUncoreFrequency", "0", "initial", "min", "0"). + Return(float64(500), nil) + mockServices.msr.On("retrieveUncoreFrequency", "0", "initial", "max", "0"). + Return(float64(1200), nil) + mockServices.msr.On("retrieveUncoreFrequency", "0", "current", "min", "0"). + Return(float64(600), nil) + mockServices.msr.On("retrieveUncoreFrequency", "0", "current", "max", "0"). + Return(float64(1100), nil) + + power.readUncoreFreq("current", packageID, die, &acc) + power.readUncoreFreq("initial", packageID, die, &acc) + + require.Equal(t, 2, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerUncoreFreqMetric("initial", float64(500), float64(1200), nil, packageID, die) + acc.AssertContainsTaggedFields(t, "powerstat_package", expectedMetric.fields, expectedMetric.tags) + + expectedMetric = getPowerUncoreFreqMetric("current", float64(600), float64(1100), uint64(1000), packageID, die) + acc.AssertContainsTaggedFields(t, "powerstat_package", expectedMetric.fields, expectedMetric.tags) +} + +func TestAddCoreCPUTemperatureMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, mockServices := getPowerWithMockedServices() + preparedData := getPreparedCPUData([]string{cpuID}) + expectedTemp := preparedData[cpuID].throttleTemp - preparedData[cpuID].temp + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + + mockServices.msr.On("getCPUCoresData").Return(preparedData).Once() + power.addCPUTemperatureMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_temperature_celsius", expectedTemp, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) +} + +func TestAddC6StateResidencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, mockServices := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + expectedC6 := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * + float64(preparedData[cpuID].c6Delta) / float64(preparedData[cpuID].timeStampCounterDelta)) + + mockServices.msr.On("getCPUCoresData").Return(preparedData).Twice() + power.addCPUC6StateResidencyMetric(cpuID, &acc) + 
require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_c6_state_residency_percent", expectedC6, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + + power.addCPUC6StateResidencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddC0StateResidencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, mockServices := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + expectedBusyCycles := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(preparedData[cpuID].mperfDelta) / + float64(preparedData[cpuID].timeStampCounterDelta)) + + mockServices.msr.On("getCPUCoresData").Return(preparedData).Twice() + power.cpuBusyCycles, power.cpuC0StateResidency = true, true + power.addCPUC0StateResidencyMetric(cpuID, &acc) + require.Equal(t, 2, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_c0_state_residency_percent", expectedBusyCycles, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + // Deprecated + expectedMetric = getPowerCoreMetric("cpu_busy_cycles_percent", expectedBusyCycles, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + power.addCPUC0StateResidencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddProcessorBusyFrequencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, mockServices := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + power.skipFirstIteration = false + + mockServices.msr.On("getCPUCoresData").Return(preparedData).Twice() + power.addCPUBusyFrequencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + acc.ClearMetrics() + preparedData[cpuID].mperfDelta = 0 + power.addCPUBusyFrequencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddC1StateResidencyMetric(t *testing.T) { + var acc testutil.Accumulator + cpuID := "0" + coreID := "2" + packageID := "1" + power, mockServices := getPowerWithMockedServices() + prepareCPUInfoForSingleCPU(power, cpuID, coreID, packageID) + preparedData := getPreparedCPUData([]string{cpuID}) + c1 := preparedData[cpuID].timeStampCounterDelta - preparedData[cpuID].mperfDelta - preparedData[cpuID].c3Delta - + preparedData[cpuID].c6Delta - preparedData[cpuID].c7Delta + expectedC1 := roundFloatToNearestTwoDecimalPlaces(percentageMultiplier * float64(c1) / float64(preparedData[cpuID].timeStampCounterDelta)) + + mockServices.msr.On("getCPUCoresData").Return(preparedData).Twice() + + power.addCPUC1StateResidencyMetric(cpuID, &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedMetric := getPowerCoreMetric("cpu_c1_state_residency_percent", expectedC1, coreID, packageID, cpuID) + acc.AssertContainsTaggedFields(t, "powerstat_core", expectedMetric.fields, expectedMetric.tags) + + acc.ClearMetrics() + preparedData[cpuID].timeStampCounterDelta = 0 + 
power.addCPUC1StateResidencyMetric(cpuID, &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) +} + +func TestAddThermalDesignPowerMetric(t *testing.T) { + var acc testutil.Accumulator + sockets := []string{"0"} + maxPower := 195720672.1 + power, mockServices := getPowerWithMockedServices() + + mockServices.rapl.On("getConstraintMaxPowerWatts", mock.Anything). + Return(float64(0), errors.New("getConstraintMaxPowerWatts error")).Once(). + On("getConstraintMaxPowerWatts", mock.Anything).Return(maxPower, nil).Once() + + power.addThermalDesignPowerMetric(sockets[0], &acc) + require.Equal(t, 0, len(acc.GetTelegrafMetrics())) + + power.addThermalDesignPowerMetric(sockets[0], &acc) + require.Equal(t, 1, len(acc.GetTelegrafMetrics())) + + expectedTDP := roundFloatToNearestTwoDecimalPlaces(maxPower) + expectedMetric := getPowerGlobalMetric("thermal_design_power_watts", expectedTDP, sockets[0]) + acc.AssertContainsTaggedFields(t, "powerstat_package", expectedMetric.fields, expectedMetric.tags) +} + +func TestCalculateTurboRatioGroup(t *testing.T) { + coreCounts := uint64(0x0807060504030201) + msr := uint64(0x0807060504030201) + turboRatioLimitGroups := make(map[int]uint64) + + calculateTurboRatioGroup(coreCounts, msr, turboRatioLimitGroups) + require.Equal(t, 8, len(turboRatioLimitGroups)) + require.Equal(t, uint64(100), turboRatioLimitGroups[1]) + require.Equal(t, uint64(200), turboRatioLimitGroups[2]) + require.Equal(t, uint64(300), turboRatioLimitGroups[3]) + require.Equal(t, uint64(400), turboRatioLimitGroups[4]) + require.Equal(t, uint64(500), turboRatioLimitGroups[5]) + require.Equal(t, uint64(600), turboRatioLimitGroups[6]) + require.Equal(t, uint64(700), turboRatioLimitGroups[7]) + require.Equal(t, uint64(800), turboRatioLimitGroups[8]) + + coreCounts = uint64(0x100e0c0a08060402) + calculateTurboRatioGroup(coreCounts, msr, turboRatioLimitGroups) + require.Equal(t, 16, len(turboRatioLimitGroups)) + require.Equal(t, uint64(100), turboRatioLimitGroups[1]) + require.Equal(t, uint64(100), turboRatioLimitGroups[2]) + require.Equal(t, uint64(200), turboRatioLimitGroups[3]) + require.Equal(t, uint64(200), turboRatioLimitGroups[4]) + require.Equal(t, uint64(300), turboRatioLimitGroups[5]) + require.Equal(t, uint64(300), turboRatioLimitGroups[6]) + require.Equal(t, uint64(400), turboRatioLimitGroups[7]) + require.Equal(t, uint64(400), turboRatioLimitGroups[8]) + require.Equal(t, uint64(500), turboRatioLimitGroups[9]) + require.Equal(t, uint64(500), turboRatioLimitGroups[10]) + require.Equal(t, uint64(600), turboRatioLimitGroups[11]) + require.Equal(t, uint64(600), turboRatioLimitGroups[12]) + require.Equal(t, uint64(700), turboRatioLimitGroups[13]) + require.Equal(t, uint64(700), turboRatioLimitGroups[14]) + require.Equal(t, uint64(800), turboRatioLimitGroups[15]) + require.Equal(t, uint64(800), turboRatioLimitGroups[16]) + coreCounts = uint64(0x1211) + msr = uint64(0xfffe) + calculateTurboRatioGroup(coreCounts, msr, turboRatioLimitGroups) + require.Equal(t, 18, len(turboRatioLimitGroups)) + require.Equal(t, uint64(25400), turboRatioLimitGroups[17]) + require.Equal(t, uint64(25500), turboRatioLimitGroups[18]) + + coreCounts = uint64(0x1201) + msr = uint64(0x0202) + calculateTurboRatioGroup(coreCounts, msr, turboRatioLimitGroups) + require.Equal(t, 18, len(turboRatioLimitGroups)) + require.Equal(t, uint64(200), turboRatioLimitGroups[1]) + require.Equal(t, uint64(200), turboRatioLimitGroups[2]) + require.Equal(t, uint64(200), turboRatioLimitGroups[3]) + require.Equal(t, uint64(200), 
turboRatioLimitGroups[4]) + require.Equal(t, uint64(200), turboRatioLimitGroups[5]) + require.Equal(t, uint64(200), turboRatioLimitGroups[6]) + require.Equal(t, uint64(200), turboRatioLimitGroups[7]) + require.Equal(t, uint64(200), turboRatioLimitGroups[8]) + require.Equal(t, uint64(200), turboRatioLimitGroups[9]) + require.Equal(t, uint64(200), turboRatioLimitGroups[10]) + require.Equal(t, uint64(200), turboRatioLimitGroups[11]) + require.Equal(t, uint64(200), turboRatioLimitGroups[12]) + require.Equal(t, uint64(200), turboRatioLimitGroups[13]) + require.Equal(t, uint64(200), turboRatioLimitGroups[14]) + require.Equal(t, uint64(200), turboRatioLimitGroups[15]) + require.Equal(t, uint64(200), turboRatioLimitGroups[16]) + require.Equal(t, uint64(200), turboRatioLimitGroups[17]) + require.Equal(t, uint64(200), turboRatioLimitGroups[18]) + + coreCounts = uint64(0x1211) + msr = uint64(0xfffe) + turboRatioLimitGroups = make(map[int]uint64) + calculateTurboRatioGroup(coreCounts, msr, turboRatioLimitGroups) + require.Equal(t, 2, len(turboRatioLimitGroups)) + require.Equal(t, uint64(25400), turboRatioLimitGroups[17]) + require.Equal(t, uint64(25500), turboRatioLimitGroups[18]) +} + +func getPreparedCPUData(cores []string) map[string]*msrData { + msrDataMap := make(map[string]*msrData) + + for _, core := range cores { + msrDataMap[core] = &msrData{ + mperf: 43079, + aperf: 82001, + timeStampCounter: 15514, + c3: 52829, + c6: 86930, + c7: 25340, + throttleTemp: 88150, + temp: 40827, + mperfDelta: 23515, + aperfDelta: 33866, + timeStampCounterDelta: 13686000, + c3Delta: 20003, + c6Delta: 44518, + c7Delta: 20979, + } + } + + return msrDataMap +} + +func getGlobalMetrics(maxPower float64, socketCurrentEnergy float64, dramCurrentEnergy float64) []struct { + fields map[string]interface{} + tags map[string]string +} { + return []struct { + fields map[string]interface{} + tags map[string]string + }{ + getPowerGlobalMetric("thermal_design_power_watts", roundFloatToNearestTwoDecimalPlaces(maxPower), "0"), + getPowerGlobalMetric("thermal_design_power_watts", roundFloatToNearestTwoDecimalPlaces(maxPower), "1"), + getPowerGlobalMetric("current_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(socketCurrentEnergy), "0"), + getPowerGlobalMetric("current_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(socketCurrentEnergy), "1"), + getPowerGlobalMetric("current_dram_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(dramCurrentEnergy), "0"), + getPowerGlobalMetric("current_dram_power_consumption_watts", roundFloatToNearestTwoDecimalPlaces(dramCurrentEnergy), "1"), + } +} + +func getPowerCoreMetric(name string, value interface{}, coreID string, packageID string, cpuID string) struct { + fields map[string]interface{} + tags map[string]string +} { + return getPowerMetric(name, value, map[string]string{"package_id": packageID, "core_id": coreID, "cpu_id": cpuID}) +} + +func getPowerGlobalMetric(name string, value interface{}, socketID string) struct { + fields map[string]interface{} + tags map[string]string +} { + return getPowerMetric(name, value, map[string]string{"package_id": socketID}) +} + +func getPowerUncoreFreqMetric(typeFreq string, limitMin interface{}, limitMax interface{}, current interface{}, socketID string, die string) struct { + fields map[string]interface{} + tags map[string]string +} { + var ret struct { + fields map[string]interface{} + tags map[string]string + } + ret.tags = make(map[string]string) + ret.fields = make(map[string]interface{}) + 
ret.tags["package_id"] = socketID + ret.tags["die"] = die + ret.tags["type"] = typeFreq + ret.fields["uncore_frequency_limit_mhz_min"] = limitMin + ret.fields["uncore_frequency_limit_mhz_max"] = limitMax + if typeFreq == "current" { + ret.fields["uncore_frequency_mhz_cur"] = current + } + return ret +} + +func getPowerMetric(name string, value interface{}, tags map[string]string) struct { + fields map[string]interface{} + tags map[string]string +} { + return struct { + fields map[string]interface{} + tags map[string]string + }{ + map[string]interface{}{ + name: value, + }, + tags, + } +} + +func prepareCPUInfoForSingleCPU(power *PowerStat, cpuID string, coreID string, packageID string) { + power.cpuInfo = make(map[string]*cpuInfo) + power.cpuInfo[cpuID] = &cpuInfo{ + physicalID: packageID, + coreID: coreID, + cpuID: cpuID, + } +} + +func prepareCPUInfo(power *PowerStat, coreIDs []string, packageIDs []string) { + power.cpuInfo = make(map[string]*cpuInfo) + currentCPU := 0 + for _, packageID := range packageIDs { + for _, coreID := range coreIDs { + cpuID := strconv.Itoa(currentCPU) + power.cpuInfo[cpuID] = &cpuInfo{ + physicalID: packageID, + cpuID: cpuID, + coreID: coreID, + } + currentCPU++ + } + } +} + +func enableCoreMetrics(power *PowerStat) { + power.cpuC0StateResidency = true + power.cpuC1StateResidency = true + power.cpuC6StateResidency = true + power.cpuTemperature = true + power.cpuBusyFrequency = true + power.cpuFrequency = true + power.cpuBusyCycles = true +} + +func disableCoreMetrics(power *PowerStat) { + power.cpuC0StateResidency = false + power.cpuC1StateResidency = false + power.cpuC6StateResidency = false + power.cpuBusyCycles = false + power.cpuTemperature = false + power.cpuBusyFrequency = false + power.cpuFrequency = false +} + +func prepareRaplDataMap(socketIDs []string, socketCurrentEnergy float64, dramCurrentEnergy float64) map[string]*raplData { + raplDataMap := make(map[string]*raplData, len(socketIDs)) + for _, socketID := range socketIDs { + raplDataMap[socketID] = &raplData{ + socketCurrentEnergy: socketCurrentEnergy, + dramCurrentEnergy: dramCurrentEnergy, + } + } + + return raplDataMap +} + +func getPowerWithMockedServices() (*PowerStat, *MockServices) { + var mockServices MockServices + mockServices.fs = &mockFileService{} + mockServices.msr = &mockMsrService{} + mockServices.rapl = &mockRaplService{} + p := newPowerStat(mockServices.fs) + p.Log = testutil.Logger{Name: "PowerPluginTest"} + p.rapl = mockServices.rapl + p.msr = mockServices.msr + p.packageCurrentPowerConsumption = true + p.packageCurrentDramPowerConsumption = true + p.packageThermalDesignPower = true + + return p, &mockServices +} diff --git a/plugins/inputs/intel_powerstat/msr.go b/plugins/inputs/intel_powerstat/msr.go new file mode 100644 index 0000000000000..4a2ee1be611d5 --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr.go @@ -0,0 +1,312 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "golang.org/x/sync/errgroup" + + "github.com/influxdata/telegraf" +) + +const ( + systemCPUPath = "/sys/devices/system/cpu/" + cpuCurrentFreqPartialPath = "/sys/devices/system/cpu/cpu%s/cpufreq/scaling_cur_freq" + msrPartialPath = "/dev/cpu/%s/msr" + uncoreFreqPath = "/sys/devices/system/cpu/intel_uncore_frequency/package_%s_die_%s/%s%s_freq_khz" + c3StateResidencyLocation = 0x3FC + c6StateResidencyLocation = 0x3FD + c7StateResidencyLocation = 0x3FE + maximumFrequencyClockCountLocation = 0xE7 + 
actualFrequencyClockCountLocation = 0xE8 + throttleTemperatureLocation = 0x1A2 + temperatureLocation = 0x19C + timestampCounterLocation = 0x10 + turboRatioLimitLocation = 0x1AD + turboRatioLimit1Location = 0x1AE + turboRatioLimit2Location = 0x1AF + atomCoreTurboRatiosLocation = 0x66C + uncorePerfStatusLocation = 0x621 +) + +// msrService is responsible for interactions with MSR. +type msrService interface { + getCPUCoresData() map[string]*msrData + retrieveCPUFrequencyForCore(core string) (float64, error) + retrieveUncoreFrequency(socketID string, typeFreq string, kind string, die string) (float64, error) + openAndReadMsr(core string) error + readSingleMsr(core string, msr string) (uint64, error) + isMsrLoaded() bool +} + +type msrServiceImpl struct { + cpuCoresData map[string]*msrData + msrOffsets []int64 + fs fileService + log telegraf.Logger +} + +func (m *msrServiceImpl) getCPUCoresData() map[string]*msrData { + return m.cpuCoresData +} + +func (m *msrServiceImpl) isMsrLoaded() bool { + for cpuID := range m.getCPUCoresData() { + err := m.openAndReadMsr(cpuID) + if err == nil { + return true + } + } + return false +} +func (m *msrServiceImpl) retrieveCPUFrequencyForCore(core string) (float64, error) { + cpuFreqPath := fmt.Sprintf(cpuCurrentFreqPartialPath, core) + err := checkFile(cpuFreqPath) + if err != nil { + return 0, err + } + cpuFreqFile, err := os.Open(cpuFreqPath) + if err != nil { + return 0, fmt.Errorf("error opening scaling_cur_freq file on path %s, err: %v", cpuFreqPath, err) + } + defer cpuFreqFile.Close() + + cpuFreq, _, err := m.fs.readFileToFloat64(cpuFreqFile) + return convertKiloHertzToMegaHertz(cpuFreq), err +} + +func (m *msrServiceImpl) retrieveUncoreFrequency(socketID string, typeFreq string, kind string, die string) (float64, error) { + uncoreFreqPath, err := createUncoreFreqPath(socketID, typeFreq, kind, die) + if err != nil { + return 0, fmt.Errorf("unable to create uncore freq read path for socketID %s and frequency type %s, err: %v", socketID, typeFreq, err) + } + err = checkFile(uncoreFreqPath) + if err != nil { + return 0, err + } + uncoreFreqFile, err := os.Open(uncoreFreqPath) + if err != nil { + return 0, fmt.Errorf("error opening uncore frequency file on %s, err: %v", uncoreFreqPath, err) + } + defer uncoreFreqFile.Close() + + uncoreFreq, _, err := m.fs.readFileToFloat64(uncoreFreqFile) + return convertKiloHertzToMegaHertz(uncoreFreq), err +} + +func createUncoreFreqPath(socketID string, typeFreq string, kind string, die string) (string, error) { + if socketID >= "0" && socketID <= "9" { + socketID = fmt.Sprintf("0%s", socketID) + } + if die >= "0" && die <= "9" { + die = fmt.Sprintf("0%s", die) + } + var prefix string + + switch typeFreq { + case "initial": + prefix = "initial_" + case "current": + prefix = "" + default: + return "", fmt.Errorf("unknown frequency type %s, only 'initial' and 'current' are supported", typeFreq) + } + + if kind != "min" && kind != "max" { + return "", fmt.Errorf("unknown frequency type %s, only 'min' and 'max' are supported", kind) + } + return fmt.Sprintf(uncoreFreqPath, socketID, die, prefix, kind), nil +} + +func (m *msrServiceImpl) openAndReadMsr(core string) error { + path := fmt.Sprintf(msrPartialPath, core) + err := checkFile(path) + if err != nil { + return err + } + msrFile, err := os.Open(path) + if err != nil { + return fmt.Errorf("error opening MSR file on path %s, err: %v", path, err) + } + defer msrFile.Close() + + err = m.readDataFromMsr(core, msrFile) + if err != nil { + return fmt.Errorf("error reading data from MSR for core %s, err: %v", core, err)
+ } + return nil +} + +func (m *msrServiceImpl) readSingleMsr(core string, msr string) (uint64, error) { + path := fmt.Sprintf(msrPartialPath, core) + err := checkFile(path) + if err != nil { + return 0, err + } + msrFile, err := os.Open(path) + if err != nil { + return 0, fmt.Errorf("error opening MSR file on path %s, err: %v", path, err) + } + defer msrFile.Close() + + var msrAddress int64 + switch msr { + case "MSR_TURBO_RATIO_LIMIT": + msrAddress = turboRatioLimitLocation + case "MSR_TURBO_RATIO_LIMIT1": + msrAddress = turboRatioLimit1Location + case "MSR_TURBO_RATIO_LIMIT2": + msrAddress = turboRatioLimit2Location + case "MSR_ATOM_CORE_TURBO_RATIOS": + msrAddress = atomCoreTurboRatiosLocation + case "MSR_UNCORE_PERF_STATUS": + msrAddress = uncorePerfStatusLocation + default: + return 0, fmt.Errorf("incorrect name of MSR %s", msr) + } + + value, err := m.fs.readFileAtOffsetToUint64(msrFile, msrAddress) + if err != nil { + return 0, err + } + + return value, nil +} + +func (m *msrServiceImpl) readDataFromMsr(core string, reader io.ReaderAt) error { + g, ctx := errgroup.WithContext(context.Background()) + + // Create and populate a map that contains msr offsets along with their respective channels + msrOffsetsWithChannels := make(map[int64]chan uint64) + for _, offset := range m.msrOffsets { + msrOffsetsWithChannels[offset] = make(chan uint64) + } + + // Start a goroutine for each msr offset + for offset, channel := range msrOffsetsWithChannels { + // Wrap in a function to avoid a race on the loop variables + func(off int64, ch chan uint64) { + g.Go(func() error { + defer close(ch) + + err := m.readValueFromFileAtOffset(ctx, ch, reader, off) + if err != nil { + return fmt.Errorf("error reading MSR file, err: %v", err) + } + + return nil + }) + }(offset, channel) + } + + newC3 := <-msrOffsetsWithChannels[c3StateResidencyLocation] + newC6 := <-msrOffsetsWithChannels[c6StateResidencyLocation] + newC7 := <-msrOffsetsWithChannels[c7StateResidencyLocation] + newMperf := <-msrOffsetsWithChannels[maximumFrequencyClockCountLocation] + newAperf := <-msrOffsetsWithChannels[actualFrequencyClockCountLocation] + newTsc := <-msrOffsetsWithChannels[timestampCounterLocation] + newThrottleTemp := <-msrOffsetsWithChannels[throttleTemperatureLocation] + newTemp := <-msrOffsetsWithChannels[temperatureLocation] + + if err := g.Wait(); err != nil { + return fmt.Errorf("error while reading MSR values in goroutines: %v", err) + } + + m.cpuCoresData[core].c3Delta = newC3 - m.cpuCoresData[core].c3 + m.cpuCoresData[core].c6Delta = newC6 - m.cpuCoresData[core].c6 + m.cpuCoresData[core].c7Delta = newC7 - m.cpuCoresData[core].c7 + m.cpuCoresData[core].mperfDelta = newMperf - m.cpuCoresData[core].mperf + m.cpuCoresData[core].aperfDelta = newAperf - m.cpuCoresData[core].aperf + m.cpuCoresData[core].timeStampCounterDelta = newTsc - m.cpuCoresData[core].timeStampCounter + + m.cpuCoresData[core].c3 = newC3 + m.cpuCoresData[core].c6 = newC6 + m.cpuCoresData[core].c7 = newC7 + m.cpuCoresData[core].mperf = newMperf + m.cpuCoresData[core].aperf = newAperf + m.cpuCoresData[core].timeStampCounter = newTsc + // MSR (1A2h) IA32_TEMPERATURE_TARGET bits 23:16. + m.cpuCoresData[core].throttleTemp = int64((newThrottleTemp >> 16) & 0xFF) + // MSR (19Ch) IA32_THERM_STATUS bits 22:16.
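+ // Shift right by 16 bits and mask the low 7 bits to isolate the digital temperature readout.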
+ m.cpuCoresData[core].temp = int64((newTemp >> 16) & 0x7F) + + return nil +} + +func (m *msrServiceImpl) readValueFromFileAtOffset(ctx context.Context, ch chan uint64, reader io.ReaderAt, offset int64) error { + value, err := m.fs.readFileAtOffsetToUint64(reader, offset) + if err != nil { + return err + } + + // Detect context cancellation and return an error if another goroutine fails + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- value: + } + + return nil +} + +// setCPUCores initializes the cpuCoresData map. +func (m *msrServiceImpl) setCPUCores() error { + m.cpuCoresData = make(map[string]*msrData) + cpuPrefix := "cpu" + cpuCore := fmt.Sprintf("%s%s", cpuPrefix, "[0-9]*") + cpuCorePattern := fmt.Sprintf("%s/%s", systemCPUPath, cpuCore) + cpuPaths, err := m.fs.getStringsMatchingPatternOnPath(cpuCorePattern) + if err != nil { + return err + } + if len(cpuPaths) == 0 { + m.log.Debugf("CPU core data wasn't found using pattern: %s", cpuCorePattern) + return nil + } + + for _, cpuPath := range cpuPaths { + core := strings.TrimPrefix(filepath.Base(cpuPath), cpuPrefix) + m.cpuCoresData[core] = &msrData{ + mperf: 0, + aperf: 0, + timeStampCounter: 0, + c3: 0, + c6: 0, + c7: 0, + throttleTemp: 0, + temp: 0, + mperfDelta: 0, + aperfDelta: 0, + timeStampCounterDelta: 0, + c3Delta: 0, + c6Delta: 0, + c7Delta: 0, + } + } + + return nil +} + +func newMsrServiceWithFs(logger telegraf.Logger, fs fileService) *msrServiceImpl { + msrService := &msrServiceImpl{ + fs: fs, + log: logger, + } + err := msrService.setCPUCores() + if err != nil { + // This error does not prevent the plugin from working, thus it is not returned. + msrService.log.Error(err) + } + + msrService.msrOffsets = []int64{c3StateResidencyLocation, c6StateResidencyLocation, c7StateResidencyLocation, + maximumFrequencyClockCountLocation, actualFrequencyClockCountLocation, timestampCounterLocation, + throttleTemperatureLocation, temperatureLocation} + return msrService +} diff --git a/plugins/inputs/intel_powerstat/msr_mock_test.go b/plugins/inputs/intel_powerstat/msr_mock_test.go new file mode 100644 index 0000000000000..3381132fa9fb9 --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr_mock_test.go @@ -0,0 +1,132 @@ +// Code generated by mockery v2.12.3. DO NOT EDIT.
+ +package intel_powerstat + +import mock "github.com/stretchr/testify/mock" + +// mockMsrService is an autogenerated mock type for the mockMsrService type +type mockMsrService struct { + mock.Mock +} + +// isMsrLoaded provides a mock function with given fields: +func (_m *mockMsrService) isMsrLoaded() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// getCPUCoresData provides a mock function with given fields: +func (_m *mockMsrService) getCPUCoresData() map[string]*msrData { + ret := _m.Called() + + var r0 map[string]*msrData + if rf, ok := ret.Get(0).(func() map[string]*msrData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*msrData) + } + } + + return r0 +} + +// openAndReadMsr provides a mock function with given fields: core +func (_m *mockMsrService) openAndReadMsr(core string) error { + ret := _m.Called(core) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(core) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// readSingleMsr provides a mock function with given fields: core, msr +func (_m *mockMsrService) readSingleMsr(core string, msr string) (uint64, error) { + ret := _m.Called(core, msr) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(string, string) uint64); ok { + r0 = rf(core, msr) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(core, msr) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// retrieveCPUFrequencyForCore provides a mock function with given fields: core +func (_m *mockMsrService) retrieveCPUFrequencyForCore(core string) (float64, error) { + ret := _m.Called(core) + + var r0 float64 + if rf, ok := ret.Get(0).(func(string) float64); ok { + r0 = rf(core) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(core) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// retrieveUncoreFrequency provides a mock function with given fields: socketID, typeFreq, kind, die +func (_m *mockMsrService) retrieveUncoreFrequency(socketID string, typeFreq string, kind string, die string) (float64, error) { + ret := _m.Called(socketID, typeFreq, kind, die) + + var r0 float64 + if rf, ok := ret.Get(0).(func(string, string, string, string) float64); ok { + r0 = rf(socketID, typeFreq, kind, die) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string, string, string) error); ok { + r1 = rf(socketID, typeFreq, kind, die) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type newmockMsrServiceT interface { + mock.TestingT + Cleanup(func()) +} + +// newmockMsrService creates a new instance of mockMsrService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func newmockMsrService(t newmockMsrServiceT) *mockMsrService { + mock := &mockMsrService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/plugins/inputs/intel_powerstat/msr_test.go b/plugins/inputs/intel_powerstat/msr_test.go new file mode 100644 index 0000000000000..921e7f90e8f12 --- /dev/null +++ b/plugins/inputs/intel_powerstat/msr_test.go @@ -0,0 +1,189 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "context" + "errors" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestReadDataFromMsrPositive(t *testing.T) { + firstValue := uint64(1000000) + secondValue := uint64(5000000) + delta := secondValue - firstValue + cpuCores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + prepareTestData(fsMock, cpuCores, msr, t) + cores := trimCPUFromCores(cpuCores) + + methodCallNumberForFirstValue := len(msr.msrOffsets) * len(cores) + methodCallNumberForSecondValue := methodCallNumberForFirstValue * 2 + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(firstValue, nil).Times(methodCallNumberForFirstValue) + for _, core := range cores { + require.NoError(t, msr.readDataFromMsr(core, nil)) + } + fsMock.AssertNumberOfCalls(t, "readFileAtOffsetToUint64", methodCallNumberForFirstValue) + verifyCPUCoresData(cores, t, msr, firstValue, false, 0) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(secondValue, nil).Times(methodCallNumberForFirstValue) + for _, core := range cores { + require.NoError(t, msr.readDataFromMsr(core, nil)) + } + fsMock.AssertNumberOfCalls(t, "readFileAtOffsetToUint64", methodCallNumberForSecondValue) + verifyCPUCoresData(cores, t, msr, secondValue, true, delta) +} + +func trimCPUFromCores(cpuCores []string) []string { + cores := make([]string, 0) + for _, core := range cpuCores { + cores = append(cores, strings.TrimPrefix(core, "cpu")) + } + return cores +} + +func TestReadDataFromMsrNegative(t *testing.T) { + firstValue := uint64(1000000) + cpuCores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + + prepareTestData(fsMock, cpuCores, msr, t) + cores := trimCPUFromCores(cpuCores) + + methodCallNumberPerCore := len(msr.msrOffsets) + + // Normal execution for first core. + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(firstValue, nil).Times(methodCallNumberPerCore). + // Fail to read file for second core. + On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(uint64(0), errors.New("error reading file")).Times(methodCallNumberPerCore) + + require.NoError(t, msr.readDataFromMsr(cores[0], nil)) + require.Error(t, msr.readDataFromMsr(cores[1], nil)) +} + +func TestReadValueFromFileAtOffset(t *testing.T) { + cores := []string{"cpu0", "cpu1"} + msr, fsMock := getMsrServiceWithMockedFs() + ctx := context.Background() + testChannel := make(chan uint64, 1) + defer close(testChannel) + zero := uint64(0) + + prepareTestData(fsMock, cores, msr, t) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). + Return(zero, errors.New("error reading file")).Once() + require.Error(t, msr.readValueFromFileAtOffset(ctx, testChannel, nil, 0)) + + fsMock.On("readFileAtOffsetToUint64", mock.Anything, mock.Anything). 
+ Return(zero, nil).Once() + require.Equal(t, nil, msr.readValueFromFileAtOffset(ctx, testChannel, nil, 0)) + require.Equal(t, zero, <-testChannel) +} + +func TestCreateUncoreFreqPath(t *testing.T) { + path, err := createUncoreFreqPath("0", "initial", "min", "0") + expectedPath := "/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/initial_min_freq_khz" + require.Equal(t, nil, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("0", "initial", "max", "0") + expectedPath = "/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/initial_max_freq_khz" + require.Equal(t, nil, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("0", "current", "min", "0") + expectedPath = "/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/min_freq_khz" + require.Equal(t, nil, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("0", "current", "max", "0") + expectedPath = "/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/max_freq_khz" + require.Equal(t, nil, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("9", "current", "max", "0") + expectedPath = "/sys/devices/system/cpu/intel_uncore_frequency/package_09_die_00/max_freq_khz" + require.Equal(t, nil, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("99", "current", "max", "0") + expectedPath = "/sys/devices/system/cpu/intel_uncore_frequency/package_99_die_00/max_freq_khz" + require.Equal(t, nil, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("0", "current", "max", "9") + expectedPath = "/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_09/max_freq_khz" + require.Equal(t, nil, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("0", "current", "max", "99") + expectedPath = "/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_99/max_freq_khz" + require.Equal(t, nil, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("0", "foo", "max", "0") + expectedPath = "" + expectedError := errors.New("unknown frequency type foo, only 'initial' and 'current' are supported") + require.Equal(t, expectedError, err) + require.Equal(t, expectedPath, path) + + path, err = createUncoreFreqPath("0", "current", "bar", "0") + expectedPath = "" + expectedError = errors.New("unknown frequency type bar, only 'min' and 'max' are supported") + require.Equal(t, expectedError, err) + require.Equal(t, expectedPath, path) +} + +func prepareTestData(fsMock *mockFileService, cores []string, msr *msrServiceImpl, t *testing.T) { + // Prepare MSR offsets and CPUCoresData for test. + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). 
+ Return(cores, nil).Once() + require.NoError(t, msr.setCPUCores()) + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) +} + +func verifyCPUCoresData(cores []string, t *testing.T, msr *msrServiceImpl, expectedValue uint64, verifyDelta bool, delta uint64) { + for _, core := range cores { + require.Equal(t, expectedValue, msr.cpuCoresData[core].c3) + require.Equal(t, expectedValue, msr.cpuCoresData[core].c6) + require.Equal(t, expectedValue, msr.cpuCoresData[core].c7) + require.Equal(t, expectedValue, msr.cpuCoresData[core].mperf) + require.Equal(t, expectedValue, msr.cpuCoresData[core].aperf) + require.Equal(t, expectedValue, msr.cpuCoresData[core].timeStampCounter) + require.Equal(t, int64((expectedValue>>16)&0xFF), msr.cpuCoresData[core].throttleTemp) + require.Equal(t, int64((expectedValue>>16)&0x7F), msr.cpuCoresData[core].temp) + + if verifyDelta { + require.Equal(t, delta, msr.cpuCoresData[core].c3Delta) + require.Equal(t, delta, msr.cpuCoresData[core].c6Delta) + require.Equal(t, delta, msr.cpuCoresData[core].c7Delta) + require.Equal(t, delta, msr.cpuCoresData[core].mperfDelta) + require.Equal(t, delta, msr.cpuCoresData[core].aperfDelta) + require.Equal(t, delta, msr.cpuCoresData[core].timeStampCounterDelta) + } + } +} + +func getMsrServiceWithMockedFs() (*msrServiceImpl, *mockFileService) { + cores := []string{"cpu0", "cpu1", "cpu2", "cpu3"} + logger := testutil.Logger{Name: "PowerPluginTest"} + fsMock := &mockFileService{} + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(cores, nil).Once() + msr := newMsrServiceWithFs(logger, fsMock) + + return msr, fsMock +} diff --git a/plugins/inputs/intel_powerstat/rapl.go b/plugins/inputs/intel_powerstat/rapl.go new file mode 100644 index 0000000000000..e89b04d501a91 --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl.go @@ -0,0 +1,266 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/influxdata/telegraf" +) + +const ( + intelRaplPath = "/sys/devices/virtual/powercap/intel-rapl" + intelRaplSocketPartialPath = "%s/intel-rapl:%s" + energyUjPartialPath = "%s/energy_uj" + maxEnergyRangeUjPartialPath = "%s/max_energy_range_uj" + maxPowerUwPartialPath = "%s/constraint_0_max_power_uw" + intelRaplDramPartialPath = "%s/intel-rapl:%s/%s" + intelRaplDramNamePartialPath = "%s/name" +) + +// raplService is responsible for interactions with RAPL. +type raplService interface { + initializeRaplData() + getRaplData() map[string]*raplData + retrieveAndCalculateData(socketID string) error + getConstraintMaxPowerWatts(socketID string) (float64, error) +} + +type raplServiceImpl struct { + log telegraf.Logger + data map[string]*raplData + dramFolders map[string]string + fs fileService + logOnce map[string]error +} + +// initializeRaplData looks for RAPL folders and initializes data map with fetched information. 
+func (r *raplServiceImpl) initializeRaplData() { + r.prepareData() + r.findDramFolders() +} + +func (r *raplServiceImpl) getRaplData() map[string]*raplData { + return r.data +} + +func (r *raplServiceImpl) retrieveAndCalculateData(socketID string) error { + socketRaplPath := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + socketEnergyUjPath := fmt.Sprintf(energyUjPartialPath, socketRaplPath) + err := checkFile(socketEnergyUjPath) + if err != nil { + return err + } + socketEnergyUjFile, err := os.Open(socketEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening socket energy_uj file on path %s, err: %v", socketEnergyUjPath, err) + } + defer socketEnergyUjFile.Close() + + dramRaplPath := fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, socketID, r.dramFolders[socketID]) + dramEnergyUjPath := fmt.Sprintf(energyUjPartialPath, dramRaplPath) + err = checkFile(dramEnergyUjPath) + if err != nil { + return err + } + dramEnergyUjFile, err := os.Open(dramEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening dram energy_uj file on path %s, err: %v", dramEnergyUjPath, err) + } + defer dramEnergyUjFile.Close() + + socketMaxEnergyUjPath := fmt.Sprintf(maxEnergyRangeUjPartialPath, socketRaplPath) + err = checkFile(socketMaxEnergyUjPath) + if err != nil { + return err + } + socketMaxEnergyUjFile, err := os.Open(socketMaxEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening socket max_energy_range_uj file on path %s, err: %v", socketMaxEnergyUjPath, err) + } + defer socketMaxEnergyUjFile.Close() + + dramMaxEnergyUjPath := fmt.Sprintf(maxEnergyRangeUjPartialPath, dramRaplPath) + err = checkFile(dramMaxEnergyUjPath) + if err != nil { + return err + } + dramMaxEnergyUjFile, err := os.Open(dramMaxEnergyUjPath) + if err != nil { + return fmt.Errorf("error opening dram max_energy_range_uj file on path %s, err: %v", dramMaxEnergyUjPath, err) + } + defer dramMaxEnergyUjFile.Close() + + return r.calculateData(socketID, socketEnergyUjFile, dramEnergyUjFile, socketMaxEnergyUjFile, dramMaxEnergyUjFile) +} + +func (r *raplServiceImpl) getConstraintMaxPowerWatts(socketID string) (float64, error) { + socketRaplPath := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + socketMaxPowerPath := fmt.Sprintf(maxPowerUwPartialPath, socketRaplPath) + err := checkFile(socketMaxPowerPath) + if err != nil { + return 0, err + } + socketMaxPowerFile, err := os.Open(socketMaxPowerPath) + if err != nil { + return 0, fmt.Errorf("error opening constraint_0_max_power_uw file on path %s, err: %v", socketMaxPowerPath, err) + } + defer socketMaxPowerFile.Close() + + socketMaxPower, _, err := r.fs.readFileToFloat64(socketMaxPowerFile) + return convertMicroWattToWatt(socketMaxPower), err +} + +func (r *raplServiceImpl) prepareData() { + intelRaplPrefix := "intel-rapl:" + intelRapl := fmt.Sprintf("%s%s", intelRaplPrefix, "[0-9]*") + raplPattern := fmt.Sprintf("%s/%s", intelRaplPath, intelRapl) + + raplPaths, err := r.fs.getStringsMatchingPatternOnPath(raplPattern) + if err != nil { + r.log.Errorf("error while preparing RAPL data: %v", err) + r.data = make(map[string]*raplData) + return + } + if len(raplPaths) == 0 { + r.log.Debugf("RAPL data wasn't found using pattern: %s", raplPattern) + r.data = make(map[string]*raplData) + return + } + + // If RAPL exists initialize data map (if it wasn't initialized before). 
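+ // Re-initializing existing entries would reset the stored socket and DRAM energy counters and corrupt the delta calculation in calculateData, so they are kept as-is.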
+ if len(r.data) == 0 { + for _, raplPath := range raplPaths { + socketID := strings.TrimPrefix(filepath.Base(raplPath), intelRaplPrefix) + r.data[socketID] = &raplData{ + socketCurrentEnergy: 0, + dramCurrentEnergy: 0, + socketEnergy: 0, + dramEnergy: 0, + readDate: 0, + } + } + } +} + +func (r *raplServiceImpl) findDramFolders() { + intelRaplPrefix := "intel-rapl:" + intelRaplDram := fmt.Sprintf("%s%s", intelRaplPrefix, "[0-9]*[0-9]*") + // Clean existing map + r.dramFolders = make(map[string]string) + + for socketID := range r.data { + path := fmt.Sprintf(intelRaplSocketPartialPath, intelRaplPath, socketID) + raplFoldersPattern := fmt.Sprintf("%s/%s", path, intelRaplDram) + pathsToRaplFolders, err := r.fs.getStringsMatchingPatternOnPath(raplFoldersPattern) + if err != nil { + r.log.Errorf("error during lookup for rapl dram: %v", err) + continue + } + if len(pathsToRaplFolders) == 0 { + r.log.Debugf("RAPL folders weren't found using pattern: %s", raplFoldersPattern) + continue + } + + raplFolders := make([]string, 0) + for _, folderPath := range pathsToRaplFolders { + raplFolders = append(raplFolders, filepath.Base(folderPath)) + } + + r.findDramFolder(raplFolders, socketID) + } +} + +func (r *raplServiceImpl) findDramFolder(raplFolders []string, socketID string) { + if r.logOnce == nil { + r.logOnce = make(map[string]error) + } + + for _, raplFolder := range raplFolders { + potentialDramPath := fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, socketID, raplFolder) + nameFilePath := fmt.Sprintf(intelRaplDramNamePartialPath, potentialDramPath) + read, err := r.fs.readFile(nameFilePath) + if err != nil { + if val := r.logOnce[nameFilePath]; val == nil || val.Error() != err.Error() { + r.log.Errorf("error reading file on path: %s, err: %v", nameFilePath, err) + r.logOnce[nameFilePath] = err + } + continue + } + r.logOnce[nameFilePath] = nil + // Remove new line character + trimmedString := strings.TrimRight(string(read), "\n") + if trimmedString == "dram" { + // There should be only one DRAM folder per socket + r.dramFolders[socketID] = raplFolder + return + } + } +} + +func (r *raplServiceImpl) calculateData(socketID string, socketEnergyUjFile io.Reader, dramEnergyUjFile io.Reader, + socketMaxEnergyUjFile io.Reader, dramMaxEnergyUjFile io.Reader, +) error { + newSocketEnergy, _, err := r.readEnergyInJoules(socketEnergyUjFile) + if err != nil { + return err + } + + newDramEnergy, readDate, err := r.readEnergyInJoules(dramEnergyUjFile) + if err != nil { + return err + } + + interval := convertNanoSecondsToSeconds(readDate - r.data[socketID].readDate) + r.data[socketID].readDate = readDate + if interval == 0 { + return fmt.Errorf("interval between last two Telegraf cycles is 0") + } + + if newSocketEnergy >= r.data[socketID].socketEnergy { + r.data[socketID].socketCurrentEnergy = (newSocketEnergy - r.data[socketID].socketEnergy) / interval + } else { + socketMaxEnergy, _, err := r.readEnergyInJoules(socketMaxEnergyUjFile) + if err != nil { + return err + } + // When socket energy_uj counter reaches maximum value defined in max_energy_range_uj file it + // starts counting from 0. 
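+ // Example (hypothetical numbers): with a max energy range of 1000 J, a previous reading of 990 J and a new reading of 15 J, the energy consumed over the interval is (1000 - 990) + 15 = 25 J, which is what the wraparound formula below computes.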
+ r.data[socketID].socketCurrentEnergy = (socketMaxEnergy - r.data[socketID].socketEnergy + newSocketEnergy) / interval + } + + if newDramEnergy >= r.data[socketID].dramEnergy { + r.data[socketID].dramCurrentEnergy = (newDramEnergy - r.data[socketID].dramEnergy) / interval + } else { + dramMaxEnergy, _, err := r.readEnergyInJoules(dramMaxEnergyUjFile) + if err != nil { + return err + } + // When dram energy_uj counter reaches maximum value defined in max_energy_range_uj file it + // starts counting from 0. + r.data[socketID].dramCurrentEnergy = (dramMaxEnergy - r.data[socketID].dramEnergy + newDramEnergy) / interval + } + r.data[socketID].socketEnergy = newSocketEnergy + r.data[socketID].dramEnergy = newDramEnergy + + return nil +} + +func (r *raplServiceImpl) readEnergyInJoules(reader io.Reader) (float64, int64, error) { + currentEnergy, readDate, err := r.fs.readFileToFloat64(reader) + return convertMicroJoulesToJoules(currentEnergy), readDate, err +} + +func newRaplServiceWithFs(logger telegraf.Logger, fs fileService) *raplServiceImpl { + return &raplServiceImpl{ + log: logger, + data: make(map[string]*raplData), + dramFolders: make(map[string]string), + fs: fs, + } +} diff --git a/plugins/inputs/intel_powerstat/rapl_mock_test.go b/plugins/inputs/intel_powerstat/rapl_mock_test.go new file mode 100644 index 0000000000000..fde381aa6c49a --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl_mock_test.go @@ -0,0 +1,81 @@ +// Code generated by mockery v2.12.3. DO NOT EDIT. + +package intel_powerstat + +import mock "github.com/stretchr/testify/mock" + +// mockRaplService is an autogenerated mock type for the mockRaplService type +type mockRaplService struct { + mock.Mock +} + +// getConstraintMaxPowerWatts provides a mock function with given fields: socketID +func (_m *mockRaplService) getConstraintMaxPowerWatts(socketID string) (float64, error) { + ret := _m.Called(socketID) + + var r0 float64 + if rf, ok := ret.Get(0).(func(string) float64); ok { + r0 = rf(socketID) + } else { + r0 = ret.Get(0).(float64) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(socketID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// getRaplData provides a mock function with given fields: +func (_m *mockRaplService) getRaplData() map[string]*raplData { + ret := _m.Called() + + var r0 map[string]*raplData + if rf, ok := ret.Get(0).(func() map[string]*raplData); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]*raplData) + } + } + + return r0 +} + +// initializeRaplData provides a mock function with given fields: +func (_m *mockRaplService) initializeRaplData() { + _m.Called() +} + +// retrieveAndCalculateData provides a mock function with given fields: socketID +func (_m *mockRaplService) retrieveAndCalculateData(socketID string) error { + ret := _m.Called(socketID) + + var r0 error + if rf, ok := ret.Get(0).(func(string) error); ok { + r0 = rf(socketID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type newmockRaplServiceT interface { + mock.TestingT + Cleanup(func()) +} + +// newmockRaplService creates a new instance of mockRaplService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+func newmockRaplService(t newmockRaplServiceT) *mockRaplService { + mock := &mockRaplService{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/plugins/inputs/intel_powerstat/rapl_test.go b/plugins/inputs/intel_powerstat/rapl_test.go new file mode 100644 index 0000000000000..5333ec13aaa79 --- /dev/null +++ b/plugins/inputs/intel_powerstat/rapl_test.go @@ -0,0 +1,116 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestPrepareData(t *testing.T) { + sockets := []string{"intel-rapl:0", "intel-rapl:1"} + rapl, fsMock := getRaplWithMockedFs() + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything).Return(sockets, nil).Twice() + rapl.prepareData() + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(sockets), len(rapl.getRaplData())) + + // Verify no data is wiped in the next calls + socketEnergy := 74563813417.0 + socketID := "0" + rapl.data[socketID].socketEnergy = socketEnergy + + rapl.prepareData() + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, len(sockets), len(rapl.getRaplData())) + require.Equal(t, socketEnergy, rapl.data[socketID].socketEnergy) + + // Verify data is wiped once there are no RAPL folders + fsMock.On("getStringsMatchingPatternOnPath", mock.Anything). + Return(nil, errors.New("missing RAPL")).Once() + rapl.prepareData() + fsMock.AssertCalled(t, "getStringsMatchingPatternOnPath", mock.Anything) + require.Equal(t, 0, len(rapl.getRaplData())) +} + +func TestFindDramFolders(t *testing.T) { + sockets := []string{"0", "1"} + raplFolders := []string{"intel-rapl:0:1", "intel-rapl:0:2", "intel-rapl:0:3"} + rapl, fsMock := getRaplWithMockedFs() + + for _, socketID := range sockets { + rapl.data[socketID] = &raplData{} + } + + firstPath := fmt.Sprintf(intelRaplDramNamePartialPath, + fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, "0", raplFolders[2])) + secondPath := fmt.Sprintf(intelRaplDramNamePartialPath, + fmt.Sprintf(intelRaplDramPartialPath, intelRaplPath, "1", raplFolders[1])) + + fsMock. + On("getStringsMatchingPatternOnPath", mock.Anything).Return(raplFolders, nil).Twice(). + On("readFile", firstPath).Return([]byte("dram"), nil).Once(). + On("readFile", secondPath).Return([]byte("dram"), nil).Once().
+ On("readFile", mock.Anything).Return([]byte("random"), nil) + + rapl.findDramFolders() + + require.Equal(t, len(sockets), len(rapl.dramFolders)) + require.Equal(t, raplFolders[2], rapl.dramFolders["0"]) + require.Equal(t, raplFolders[1], rapl.dramFolders["1"]) + fsMock.AssertNumberOfCalls(t, "readFile", 5) +} + +func TestCalculateDataOverflowCases(t *testing.T) { + socketID := "1" + rapl, fsMock := getRaplWithMockedFs() + + rapl.data[socketID] = &raplData{} + rapl.data[socketID].socketEnergy = convertMicroJoulesToJoules(23424123.1) + rapl.data[socketID].dramEnergy = convertMicroJoulesToJoules(345611233.2) + rapl.data[socketID].readDate = 54123 + + interval := int64(54343) + convertedInterval := convertNanoSecondsToSeconds(interval - rapl.data[socketID].readDate) + + newEnergy := 3343443.4 + maxEnergy := 234324546456.6 + convertedNewEnergy := convertMicroJoulesToJoules(newEnergy) + convertedMaxNewEnergy := convertMicroJoulesToJoules(maxEnergy) + + maxDramEnergy := 981230834098.3 + newDramEnergy := 4533311.1 + convertedMaxDramEnergy := convertMicroJoulesToJoules(maxDramEnergy) + convertedDramEnergy := convertMicroJoulesToJoules(newDramEnergy) + + expectedCurrentEnergy := (convertedMaxNewEnergy - rapl.data[socketID].socketEnergy + convertedNewEnergy) / convertedInterval + expectedDramCurrentEnergy := (convertedMaxDramEnergy - rapl.data[socketID].dramEnergy + convertedDramEnergy) / convertedInterval + + fsMock. + On("readFileToFloat64", mock.Anything).Return(newEnergy, int64(12321), nil).Once(). + On("readFileToFloat64", mock.Anything).Return(newDramEnergy, interval, nil).Once(). + On("readFileToFloat64", mock.Anything).Return(maxEnergy, int64(64534), nil).Once(). + On("readFileToFloat64", mock.Anything).Return(maxDramEnergy, int64(98342), nil).Once() + + require.NoError(t, rapl.calculateData(socketID, strings.NewReader(mock.Anything), strings.NewReader(mock.Anything), + strings.NewReader(mock.Anything), strings.NewReader(mock.Anything))) + + require.Equal(t, expectedCurrentEnergy, rapl.data[socketID].socketCurrentEnergy) + require.Equal(t, expectedDramCurrentEnergy, rapl.data[socketID].dramCurrentEnergy) +} + +func getRaplWithMockedFs() (*raplServiceImpl, *mockFileService) { + logger := testutil.Logger{Name: "PowerPluginTest"} + fsMock := &mockFileService{} + rapl := newRaplServiceWithFs(logger, fsMock) + + return rapl, fsMock +} diff --git a/plugins/inputs/intel_powerstat/sample.conf b/plugins/inputs/intel_powerstat/sample.conf new file mode 100644 index 0000000000000..09a5d19cdbf00 --- /dev/null +++ b/plugins/inputs/intel_powerstat/sample.conf @@ -0,0 +1,16 @@ +# Intel PowerStat plugin enables monitoring of platform metrics (power, TDP) and per-CPU metrics like temperature, power and utilization. 
+[[inputs.intel_powerstat]] + ## The user can choose which package metrics are monitored by the plugin with the package_metrics setting: + ## - The default will collect "current_power_consumption", "current_dram_power_consumption" and "thermal_design_power" + ## - Setting this value to an empty array means no package metrics will be collected + ## - Finally, a user can specify individual metrics to capture from the supported options list + ## Supported options: + ## "current_power_consumption", "current_dram_power_consumption", "thermal_design_power", "max_turbo_frequency", "uncore_frequency" + # package_metrics = ["current_power_consumption", "current_dram_power_consumption", "thermal_design_power"] + + ## The user can choose which per-CPU metrics are monitored by the plugin in the cpu_metrics array. + ## An empty or missing array means no per-CPU specific metrics will be collected by the plugin. + ## Supported options: + ## "cpu_frequency", "cpu_c0_state_residency", "cpu_c1_state_residency", "cpu_c6_state_residency", "cpu_busy_cycles", "cpu_temperature", "cpu_busy_frequency" + ## ATTENTION: cpu_busy_cycles option is DEPRECATED - superseded by cpu_c0_state_residency + # cpu_metrics = [] diff --git a/plugins/inputs/intel_powerstat/unit_converter.go b/plugins/inputs/intel_powerstat/unit_converter.go new file mode 100644 index 0000000000000..7dd8c0d0d1aa0 --- /dev/null +++ b/plugins/inputs/intel_powerstat/unit_converter.go @@ -0,0 +1,50 @@ +//go:build linux +// +build linux + +package intel_powerstat + +import ( + "math" + "strconv" +) + +const ( + microJouleToJoule = 1.0 / 1000000 + microWattToWatt = 1.0 / 1000000 + kiloHertzToMegaHertz = 1.0 / 1000 + nanoSecondsToSeconds = 1.0 / 1000000000 + cyclesToHertz = 1.0 / 1000000 +) + +func convertMicroJoulesToJoules(mJ float64) float64 { + return mJ * microJouleToJoule +} + +func convertMicroWattToWatt(mW float64) float64 { + return mW * microWattToWatt +} + +func convertKiloHertzToMegaHertz(kiloHertz float64) float64 { + return kiloHertz * kiloHertzToMegaHertz +} + +func convertNanoSecondsToSeconds(ns int64) float64 { + return float64(ns) * nanoSecondsToSeconds +} + +func convertProcessorCyclesToHertz(pc uint64) float64 { + return float64(pc) * cyclesToHertz +} + +func roundFloatToNearestTwoDecimalPlaces(n float64) float64 { + return math.Round(n*100) / 100 +} + +func convertIntegerArrayToStringArray(array []int64) []string { + stringArray := make([]string, 0) + for _, value := range array { + stringArray = append(stringArray, strconv.FormatInt(value, 10)) + } + + return stringArray +} diff --git a/plugins/inputs/intel_rdt/README.md b/plugins/inputs/intel_rdt/README.md index 8a0f0a1ea6e75..d8d2cbe510673 100644 --- a/plugins/inputs/intel_rdt/README.md +++ b/plugins/inputs/intel_rdt/README.md @@ -1,64 +1,106 @@ # Intel RDT Input Plugin -The `intel_rdt` plugin collects information provided by monitoring features of -the Intel Resource Director Technology (Intel(R) RDT). Intel RDT provides the hardware framework to monitor -and control the utilization of shared resources (ex: last level cache, memory bandwidth). -### About Intel RDT +The `intel_rdt` plugin collects information provided by monitoring features of +the Intel Resource Director Technology (Intel(R) RDT). Intel RDT provides the +hardware framework to monitor and control the utilization of shared resources +(ex: last level cache, memory bandwidth).
+ +## About Intel RDT + Intel’s Resource Director Technology (RDT) framework consists of: -- Cache Monitoring Technology (CMT) + +- Cache Monitoring Technology (CMT) - Memory Bandwidth Monitoring (MBM) -- Cache Allocation Technology (CAT) -- Code and Data Prioritization (CDP) +- Cache Allocation Technology (CAT) +- Code and Data Prioritization (CDP) + +As multithreaded and multicore platform architectures emerge, the last level +cache and memory bandwidth are key resources to manage for running workloads in +single-threaded, multithreaded, or complex virtual machine environments. Intel +introduces CMT, MBM, CAT and CDP to manage these workloads across shared +resources. + +## Prerequisites - PQoS Tool + +To gather Intel RDT metrics, the `intel_rdt` plugin uses the _pqos_ CLI tool, +which is part of the [Intel(R) RDT Software +Package](https://github.com/intel/intel-cmt-cat). Before using this plugin, +make sure _pqos_ is properly installed and configured, as the plugin runs +_pqos_ in `OS Interface` mode. This plugin supports _pqos_ version 4.0.0 and +above. Note: the pqos tool needs root privileges to work properly. + +Metrics will be constantly reported from the following `pqos` commands within +the given interval: -As multithreaded and multicore platform architectures emerge, the last level cache and -memory bandwidth are key resources to manage for running workloads in single-threaded, -multithreaded, or complex virtual machine environments. Intel introduces CMT, MBM, CAT -and CDP to manage these workloads across shared resources. +### If telegraf does not run as the root user -### Prerequsities - PQoS Tool -To gather Intel RDT metrics, the `intel_rdt` plugin uses _pqos_ cli tool which is a -part of [Intel(R) RDT Software Package](https://github.com/intel/intel-cmt-cat). -Before using this plugin please be sure _pqos_ is properly installed and configured regarding that the plugin -run _pqos_ to work with `OS Interface` mode. This plugin supports _pqos_ version 4.0.0 and above. -Note: pqos tool needs root privileges to work properly. +The `pqos` binary needs to run as root. There are two options to overcome this +if you run telegraf as a non-root user. -Metrics will be constantly reported from the following `pqos` commands within the given interval: +It is possible to update the pqos binary with setuid using `chmod u+s +/path/to/pqos`. This approach is simple and requires no modification to the +Telegraf configuration; however, pqos is not a read-only tool and there are +security implications of making such a command setuid root. -#### In case of cores monitoring: +Alternatively, you may enable sudo to allow `pqos` to run correctly, as follows: + +Add the following to your sudoers file (assumes telegraf runs as a user named +`telegraf`): + +```sh +telegraf ALL=(ALL) NOPASSWD:/usr/sbin/pqos -r --iface-os --mon-file-type=csv --mon-interval=* ``` + +If you wish to use sudo, you must also add `use_sudo = true` to the Telegraf +configuration (see below). + +### In case of cores monitoring + +```sh pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-core=all:[CORES]\;mbt:[CORES] ``` -where `CORES` is equal to group of cores provided in config. User can provide many groups. -#### In case of process monitoring: -``` +where `CORES` is a group of cores provided in the config. Users can provide +many groups.
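+ +For example, `cores = ["0-3", "4,5"]` would define two monitoring groups: one +covering cores 0 through 3 and another covering cores 4 and 5.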
+ +### In case of process monitoring + +```sh pqos -r --iface-os --mon-file-type=csv --mon-interval=INTERVAL --mon-pid=all:[PIDS]\;mbt:[PIDS] ``` -where `PIDS` is group of processes IDs which name are equal to provided process name in a config. -User can provide many process names which lead to create many processes groups. + +where `PIDS` is a group of process IDs whose names match a process name +provided in the config. Users can provide many process names, each of which +creates its own process group. In both cases `INTERVAL` is equal to sampling_interval from config. -Because PIDs association within system could change in every moment, Intel RDT plugin provides a -functionality to check on every interval if desired processes change their PIDs association. -If some change is reported, plugin will restart _pqos_ tool with new arguments. If provided by user -process name is not equal to any of available processes, will be omitted and plugin will constantly -check for process availability. +Because the association of PIDs within the system can change at any moment, the +Intel RDT plugin checks on every interval whether the desired processes have +changed their PID association. If a change is detected, the plugin restarts the +_pqos_ tool with new arguments. If a process name provided by the user does not +match any available process, it is omitted and the plugin keeps checking for +its availability. + +## Useful links -### Useful links -Pqos installation process: https://github.com/intel/intel-cmt-cat/blob/master/INSTALL -Enabling OS interface: https://github.com/intel/intel-cmt-cat/wiki, https://github.com/intel/intel-cmt-cat/wiki/resctrl -More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-technology/resource-director-technology.html +- Pqos installation process: <https://github.com/intel/intel-cmt-cat/blob/master/INSTALL> +- Enabling OS interface: <https://github.com/intel/intel-cmt-cat/wiki>, <https://github.com/intel/intel-cmt-cat/wiki/resctrl> +- More about Intel RDT: <https://www.intel.com/content/www/us/en/architecture-and-technology/resource-director-technology.html> -### Configuration -```toml +## Configuration + +```toml @sample.conf # Read Intel RDT metrics [[inputs.intel_rdt]] ## Optionally set sampling interval to Nx100ms. ## This value is propagated to pqos tool. Interval format is defined by pqos itself. ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. # sampling_interval = "10" - + ## Optionally specify the path to pqos executable. ## If not provided, auto discovery will be performed. # pqos_path = "/usr/local/bin/pqos" @@ -76,9 +118,14 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t ## Mandatory if cores aren't set and forbidden if cores are specified. ## e.g. ["qemu", "pmd"] # processes = ["process"] + + ## Specify if the pqos process should be called with sudo. + ## Mandatory if the telegraf process does not run as root. + # use_sudo = false ``` -### Exposed metrics +## Exposed metrics + | Name | Full name | Description | |---------------|-----------------------------------------------|-------------| | MBL | Memory Bandwidth on Local NUMA Node | Memory bandwidth utilization by the relevant CPU core/process on the local NUMA memory channel | @@ -90,22 +137,29 @@ More about Intel RDT: https://www.intel.com/content/www/us/en/architecture-and-t *optional -### Troubleshooting -Pointing to non-existing cores will lead to throwing an error by _pqos_ and the plugin will not work properly. -Be sure to check provided core number exists within desired system. +## Troubleshooting -Be aware, reading Intel RDT metrics by _pqos_ cannot be done simultaneously on the same resource.
-Do not use any other _pqos_ instance that is monitoring the same cores or PIDs within the working system.
-It is not possible to monitor same cores or PIDs on different groups.
+Pointing to non-existent cores will cause _pqos_ to throw an error and the
+plugin will not work properly. Be sure the provided core numbers exist within
+the desired system.
 
-PIDs associated for the given process could be manually checked by `pidof` command. E.g:
-```
+Be aware that reading Intel RDT metrics with _pqos_ cannot be done
+simultaneously on the same resource. Do not use any other _pqos_ instance that
+is monitoring the same cores or PIDs within the working system. It is not
+possible to monitor the same cores or PIDs in different groups.
+
+The PIDs associated with a given process can be checked manually with the
+`pidof` command, e.g.:
+
+```sh
 pidof PROCESS
 ```
+
 where `PROCESS` is the process name.
 
-### Example Output
-```
+## Example Output
+
+```shell
 > rdt_metric,cores=12\,19,host=r2-compute-20,name=IPC,process=top value=0 1598962030000000000
 > rdt_metric,cores=12\,19,host=r2-compute-20,name=LLC_Misses,process=top value=0 1598962030000000000
 > rdt_metric,cores=12\,19,host=r2-compute-20,name=LLC,process=top value=0 1598962030000000000
diff --git a/plugins/inputs/intel_rdt/intel_rdt.go b/plugins/inputs/intel_rdt/intel_rdt.go
index bcbc1c72a9597..aeb39b8ef4d62 100644
--- a/plugins/inputs/intel_rdt/intel_rdt.go
+++ b/plugins/inputs/intel_rdt/intel_rdt.go
@@ -1,3 +1,5 @@
+//go:generate ../../../tools/readme_config_includer/generator
+//go:build !windows
 // +build !windows
 
 package intel_rdt
@@ -5,6 +7,7 @@ package intel_rdt
 import (
 	"bufio"
 	"context"
+	_ "embed"
 	"fmt"
 	"io"
 	"os"
@@ -13,6 +16,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"syscall"
 	"time"
 
 	"github.com/google/go-cmp/cmp"
@@ -22,6 +26,10 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
 const (
 	timestampFormat         = "2006-01-02 15:04:05"
 	defaultSamplingInterval = 10
@@ -45,6 +53,7 @@ type IntelRDT struct {
 	Processes        []string `toml:"processes"`
 	SamplingInterval int32    `toml:"sampling_interval"`
 	ShortenedMetrics bool     `toml:"shortened_metrics"`
+	UseSudo          bool     `toml:"use_sudo"`
 
 	Log       telegraf.Logger `toml:"-"`
 	Publisher Publisher       `toml:"-"`
@@ -63,42 +72,21 @@ type processMeasurement struct {
 	measurement string
 }
 
+type splitCSVLine struct {
+	timeValue        string
+	metricsValues    []string
+	coreOrPIDsValues []string
+}
+
+func (*IntelRDT) SampleConfig() string {
+	return sampleConfig
+}
+
 // All gathering is done in the Start function
 func (r *IntelRDT) Gather(_ telegraf.Accumulator) error {
 	return nil
 }
 
-func (r *IntelRDT) Description() string {
-	return "Intel Resource Director Technology plugin"
-}
-
-func (r *IntelRDT) SampleConfig() string {
-	return `
-	## Optionally set sampling interval to Nx100ms.
-	## This value is propagated to pqos tool. Interval format is defined by pqos itself.
-	## If not provided or provided 0, will be set to 10 = 10x100ms = 1s.
-	# sampling_interval = "10"
-
-	## Optionally specify the path to pqos executable.
-	## If not provided, auto discovery will be performed.
-	# pqos_path = "/usr/local/bin/pqos"
-
-	## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated.
-	## If not provided, default value is false.
-	# shortened_metrics = false
-
-	## Specify the list of groups of CPU core(s) to be provided as pqos input.
- ## Mandatory if processes aren't set and forbidden if processes are specified. - ## e.g. ["0-3", "4,5,6"] or ["1-3,4"] - # cores = ["0-3"] - - ## Specify the list of processes for which Metrics will be collected. - ## Mandatory if cores aren't set and forbidden if cores are specified. - ## e.g. ["qemu", "pmd"] - # processes = ["process"] -` -} - func (r *IntelRDT) Start(acc telegraf.Accumulator) error { ctx, cancel := context.WithCancel(context.Background()) r.cancel = cancel @@ -223,8 +211,8 @@ func (r *IntelRDT) associateProcessesWithPIDs(providedProcesses []string) (map[s } for _, availableProcess := range availableProcesses { if choice.Contains(availableProcess.Name, providedProcesses) { - PID := availableProcess.PID - mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", PID) + "," + pid := availableProcess.PID + mapProcessPIDs[availableProcess.Name] = mapProcessPIDs[availableProcess.Name] + fmt.Sprintf("%d", pid) + "," } } for key := range mapProcessPIDs { @@ -239,21 +227,25 @@ func (r *IntelRDT) createArgsAndStartPQOS(ctx context.Context) { if len(r.parsedCores) != 0 { coresArg := createArgCores(r.parsedCores) args = append(args, coresArg) - go r.readData(args, nil, ctx) - + go r.readData(ctx, args, nil) } else if len(r.processesPIDsMap) != 0 { processArg := createArgProcess(r.processesPIDsMap) args = append(args, processArg) - go r.readData(args, r.processesPIDsMap, ctx) + go r.readData(ctx, args, r.processesPIDsMap) } - return } -func (r *IntelRDT) readData(args []string, processesPIDsAssociation map[string]string, ctx context.Context) { +func (r *IntelRDT) readData(ctx context.Context, args []string, processesPIDsAssociation map[string]string) { r.wg.Add(1) defer r.wg.Done() - cmd := exec.Command(r.PqosPath, append(args)...) + cmd := exec.Command(r.PqosPath, args...) + + if r.UseSudo { + // run pqos with `/bin/sh -c "sudo /path/to/pqos ..."` + args = []string{"-c", fmt.Sprintf("sudo %s %s", r.PqosPath, strings.ReplaceAll(strings.Join(args, " "), ";", "\\;"))} + cmd = exec.Command("/bin/sh", args...) 
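+		// NOTE: the ";" separating pqos monitoring groups is escaped above so
+		// that /bin/sh does not treat it as a command separator.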
+ } cmdReader, err := cmd.StdoutPipe() if err != nil { @@ -279,12 +271,12 @@ func (r *IntelRDT) readData(args []string, processesPIDsAssociation map[string]s }() err = cmd.Start() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) return } err = cmd.Wait() if err != nil { - r.errorChan <- fmt.Errorf("pqos: %v", err) + r.Log.Errorf("pqos: %v", err) } } @@ -299,11 +291,9 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati */ toOmit := pqosInitOutputLinesNumber - // omit first measurements which are zeroes - if len(r.parsedCores) != 0 { + if len(r.parsedCores) != 0 { // omit first measurements which are zeroes toOmit = toOmit + len(r.parsedCores) - // specify how many lines should pass before stopping - } else if len(processesPIDsAssociation) != 0 { + } else if len(processesPIDsAssociation) != 0 { // specify how many lines should pass before stopping toOmit = toOmit + len(processesPIDsAssociation) } for omitCounter := 0; omitCounter < toOmit; omitCounter++ { @@ -318,13 +308,13 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati if len(r.Processes) != 0 { newMetric := processMeasurement{} - PIDs, err := findPIDsInMeasurement(out) + pids, err := findPIDsInMeasurement(out) if err != nil { r.errorChan <- err break } for processName, PIDsProcess := range processesPIDsAssociation { - if PIDs == PIDsProcess { + if pids == PIDsProcess { newMetric.name = processName newMetric.measurement = out } @@ -337,14 +327,30 @@ func (r *IntelRDT) processOutput(cmdReader io.ReadCloser, processesPIDsAssociati } func shutDownPqos(pqos *exec.Cmd) error { + timeout := time.Second * 2 + if pqos.Process != nil { - err := pqos.Process.Signal(os.Interrupt) - if err != nil { - err = pqos.Process.Kill() - if err != nil { - return fmt.Errorf("failed to shut down pqos: %v", err) + // try to send interrupt signal, ignore err for now + _ = pqos.Process.Signal(os.Interrupt) + + // wait and constantly check if pqos is still running + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { + if err := pqos.Process.Signal(syscall.Signal(0)); err == os.ErrProcessDone { + return nil + } else if ctx.Err() != nil { + break } } + + // if pqos is still running after some period, try to kill it + // this will send SIGTERM to pqos, and leave garbage in `/sys/fs/resctrl/mon_groups` + // fixed in https://github.com/intel/intel-cmt-cat/issues/197 + err := pqos.Process.Kill() + if err != nil { + return fmt.Errorf("failed to shut down pqos: %v", err) + } } return nil } @@ -457,29 +463,29 @@ func validateAndParseCores(coreStr string) ([]int, error) { func findPIDsInMeasurement(measurements string) (string, error) { // to distinguish PIDs from Cores (PIDs should be in quotes) var insideQuoteRegex = regexp.MustCompile(`"(.*?)"`) - PIDsMatch := insideQuoteRegex.FindStringSubmatch(measurements) - if len(PIDsMatch) < 2 { + pidsMatch := insideQuoteRegex.FindStringSubmatch(measurements) + if len(pidsMatch) < 2 { return "", fmt.Errorf("cannot find PIDs in measurement line") } - PIDs := PIDsMatch[1] - return PIDs, nil + pids := pidsMatch[1] + return pids, nil } -func splitCSVLineIntoValues(line string) (timeValue string, metricsValues, coreOrPIDsValues []string, err error) { +func splitCSVLineIntoValues(line string) (splitCSVLine, error) { values, err := splitMeasurementLine(line) if err != nil { - return "", nil, nil, err + return splitCSVLine{}, err } - timeValue = values[0] + timeValue := values[0] // Because 
pqos csv format is broken when many cores are involved in PID or // group of PIDs, there is need to work around it. E.g.: // Time,PID,Core,IPC,LLC Misses,LLC[KB],MBL[MB/s],MBR[MB/s],MBT[MB/s] // 2020-08-12 13:34:36,"45417,29170,",37,44,0.00,0,0.0,0.0,0.0,0.0 - metricsValues = values[len(values)-numberOfMetrics:] - coreOrPIDsValues = values[1 : len(values)-numberOfMetrics] + metricsValues := values[len(values)-numberOfMetrics:] + coreOrPIDsValues := values[1 : len(values)-numberOfMetrics] - return timeValue, metricsValues, coreOrPIDsValues, nil + return splitCSVLine{timeValue, metricsValues, coreOrPIDsValues}, nil } func validateInterval(interval int32) error { @@ -498,7 +504,7 @@ func splitMeasurementLine(line string) ([]string, error) { } func parseTime(value string) (time.Time, error) { - timestamp, err := time.Parse(timestampFormat, value) + timestamp, err := time.ParseInLocation(timestampFormat, value, time.Local) if err != nil { return time.Time{}, err } diff --git a/plugins/inputs/intel_rdt/intel_rdt_test.go b/plugins/inputs/intel_rdt/intel_rdt_test.go index 7e876425724ec..18dd2e93aa1c1 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_test.go +++ b/plugins/inputs/intel_rdt/intel_rdt_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt @@ -51,18 +52,18 @@ func TestSplitCSVLineIntoValues(t *testing.T) { expectedMetricsValue := []string{"0.00", "0", "0.0", "0.0", "0.0", "0.0"} expectedCoreOrPidsValue := []string{"\"45417", "29170\"", "37", "44"} - timeValue, metricsValue, coreOrPidsValue, err := splitCSVLineIntoValues(line) + splitCSV, err := splitCSVLineIntoValues(line) assert.Nil(t, err) - assert.Equal(t, expectedTimeValue, timeValue) - assert.Equal(t, expectedMetricsValue, metricsValue) - assert.Equal(t, expectedCoreOrPidsValue, coreOrPidsValue) + assert.Equal(t, expectedTimeValue, splitCSV.timeValue) + assert.Equal(t, expectedMetricsValue, splitCSV.metricsValues) + assert.Equal(t, expectedCoreOrPidsValue, splitCSV.coreOrPIDsValues) wrongLine := "2020-08-12 13:34:36,37,44,0.00,0,0.0" - timeValue, metricsValue, coreOrPidsValue, err = splitCSVLineIntoValues(wrongLine) + splitCSV, err = splitCSVLineIntoValues(wrongLine) assert.NotNil(t, err) - assert.Equal(t, "", timeValue) - assert.Nil(t, nil, metricsValue) - assert.Nil(t, nil, coreOrPidsValue) + assert.Equal(t, "", splitCSV.timeValue) + assert.Nil(t, nil, splitCSV.metricsValues) + assert.Nil(t, nil, splitCSV.coreOrPIDsValues) } func TestFindPIDsInMeasurement(t *testing.T) { @@ -106,7 +107,6 @@ func TestCreateArgsCores(t *testing.T) { assert.EqualValues(t, expected, result) cores = []string{"1,2,3", "4,5,6"} - expected = "--mon-core=" expectedPrefix := "--mon-core=" expectedSubstring := "all:[1,2,3];mbt:[1,2,3];" expectedSubstring2 := "all:[4,5,6];mbt:[4,5,6];" diff --git a/plugins/inputs/intel_rdt/intel_rdt_windows.go b/plugins/inputs/intel_rdt/intel_rdt_windows.go index e3ab0978fb374..64f9ebbe94b68 100644 --- a/plugins/inputs/intel_rdt/intel_rdt_windows.go +++ b/plugins/inputs/intel_rdt/intel_rdt_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/processes.go b/plugins/inputs/intel_rdt/processes.go index ff86a4e6b745c..dd172b6d92dd2 100644 --- a/plugins/inputs/intel_rdt/processes.go +++ b/plugins/inputs/intel_rdt/processes.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt diff --git a/plugins/inputs/intel_rdt/publisher.go b/plugins/inputs/intel_rdt/publisher.go index 5ca9890472b27..4fdb91dc7b128 100644 --- 
a/plugins/inputs/intel_rdt/publisher.go +++ b/plugins/inputs/intel_rdt/publisher.go @@ -1,15 +1,30 @@ +//go:build !windows // +build !windows package intel_rdt import ( "context" + "errors" "strings" "time" "github.com/influxdata/telegraf" ) +type parsedCoresMeasurement struct { + cores string + values []float64 + time time.Time +} + +type parsedProcessMeasurement struct { + process string + cores string + values []float64 + time time.Time +} + // Publisher for publish new RDT metrics to telegraf accumulator type Publisher struct { acc telegraf.Accumulator @@ -18,7 +33,6 @@ type Publisher struct { BufferChanProcess chan processMeasurement BufferChanCores chan string errChan chan error - stopChan chan bool } func NewPublisher(acc telegraf.Accumulator, log telegraf.Logger, shortenedMetrics bool) Publisher { @@ -50,50 +64,48 @@ func (p *Publisher) publish(ctx context.Context) { } func (p *Publisher) publishCores(measurement string) { - coresString, values, timestamp, err := parseCoresMeasurement(measurement) + parsedCoresMeasurement, err := parseCoresMeasurement(measurement) if err != nil { p.errChan <- err } - p.addToAccumulatorCores(coresString, values, timestamp) - return + p.addToAccumulatorCores(parsedCoresMeasurement) } func (p *Publisher) publishProcess(measurement processMeasurement) { - process, coresString, values, timestamp, err := parseProcessesMeasurement(measurement) + parsedProcessMeasurement, err := parseProcessesMeasurement(measurement) if err != nil { p.errChan <- err } - p.addToAccumulatorProcesses(process, coresString, values, timestamp) - return + p.addToAccumulatorProcesses(parsedProcessMeasurement) } -func parseCoresMeasurement(measurements string) (string, []float64, time.Time, error) { +func parseCoresMeasurement(measurements string) (parsedCoresMeasurement, error) { var values []float64 - timeValue, metricsValues, cores, err := splitCSVLineIntoValues(measurements) + splitCSV, err := splitCSVLineIntoValues(measurements) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } // change string slice to one string and separate it by coma - coresString := strings.Join(cores, ",") + coresString := strings.Join(splitCSV.coreOrPIDsValues, ",") // trim unwanted quotes coresString = strings.Trim(coresString, "\"") - for _, metric := range metricsValues { + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", nil, time.Time{}, err + return parsedCoresMeasurement{}, err } values = append(values, parsedValue) } - return coresString, values, timestamp, nil + return parsedCoresMeasurement{coresString, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorCores(measurement parsedCoresMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -104,41 +116,47 @@ func (p *Publisher) addToAccumulatorCores(cores string, metricsValues []float64, tags := map[string]string{} fields := make(map[string]interface{}) - tags["cores"] = cores + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) + 
p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } } -func parseProcessesMeasurement(measurement processMeasurement) (string, string, []float64, time.Time, error) { - var values []float64 - timeValue, metricsValues, coreOrPidsValues, pids, err := parseProcessMeasurement(measurement.measurement) +func parseProcessesMeasurement(measurement processMeasurement) (parsedProcessMeasurement, error) { + splitCSV, err := splitCSVLineIntoValues(measurement.measurement) + if err != nil { + return parsedProcessMeasurement{}, err + } + pids, err := findPIDsInMeasurement(measurement.measurement) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err + } + lenOfPIDs := len(strings.Split(pids, ",")) + if lenOfPIDs > len(splitCSV.coreOrPIDsValues) { + return parsedProcessMeasurement{}, errors.New("detected more pids (quoted) than actual number of pids in csv line") } - timestamp, err := parseTime(timeValue) + timestamp, err := parseTime(splitCSV.timeValue) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } actualProcess := measurement.name - lenOfPids := len(strings.Split(pids, ",")) - cores := coreOrPidsValues[lenOfPids:] - coresString := strings.Trim(strings.Join(cores, ","), `"`) + cores := strings.Trim(strings.Join(splitCSV.coreOrPIDsValues[lenOfPIDs:], ","), `"`) - for _, metric := range metricsValues { + var values []float64 + for _, metric := range splitCSV.metricsValues { parsedValue, err := parseFloat(metric) if err != nil { - return "", "", nil, time.Time{}, err + return parsedProcessMeasurement{}, err } values = append(values, parsedValue) } - return actualProcess, coresString, values, timestamp, nil + return parsedProcessMeasurement{actualProcess, cores, values, timestamp}, nil } -func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metricsValues []float64, timestamp time.Time) { - for i, value := range metricsValues { +func (p *Publisher) addToAccumulatorProcesses(measurement parsedProcessMeasurement) { + for i, value := range measurement.values { if p.shortenedMetrics { //0: "IPC" //1: "LLC_Misses" @@ -149,23 +167,11 @@ func (p *Publisher) addToAccumulatorProcesses(process string, cores string, metr tags := map[string]string{} fields := make(map[string]interface{}) - tags["process"] = process - tags["cores"] = cores + tags["process"] = measurement.process + tags["cores"] = measurement.cores tags["name"] = pqosMetricOrder[i] fields["value"] = value - p.acc.AddFields("rdt_metric", fields, tags, timestamp) - } -} - -func parseProcessMeasurement(measurements string) (string, []string, []string, string, error) { - timeValue, metricsValues, coreOrPidsValues, err := splitCSVLineIntoValues(measurements) - if err != nil { - return "", nil, nil, "", err - } - pids, err := findPIDsInMeasurement(measurements) - if err != nil { - return "", nil, nil, "", err + p.acc.AddFields("rdt_metric", fields, tags, measurement.time) } - return timeValue, metricsValues, coreOrPidsValues, pids, nil } diff --git a/plugins/inputs/intel_rdt/publisher_test.go b/plugins/inputs/intel_rdt/publisher_test.go index 5248ede7a16db..2529a2235a1b9 100644 --- a/plugins/inputs/intel_rdt/publisher_test.go +++ b/plugins/inputs/intel_rdt/publisher_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package intel_rdt @@ -36,29 +37,29 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + 
expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.Nil(t, err) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) t.Run("not valid measurement string", func(t *testing.T) { measurement := "not, valid, measurement" - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid values string", func(t *testing.T) { measurement := fmt.Sprintf("%s,%s,%s,%s,%f,%f,%f,%f", @@ -71,12 +72,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) t.Run("not valid timestamp format", func(t *testing.T) { invalidTimestamp := "2020-08-12-21 13:34:" @@ -90,12 +91,12 @@ func TestParseCoresMeasurement(t *testing.T) { metricsValues["MBR"], metricsValues["MBT"]) - resultCoresString, resultValues, resultTimestamp, err := parseCoresMeasurement(measurement) + result, err := parseCoresMeasurement(measurement) assert.NotNil(t, err) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) }) } @@ -118,44 +119,36 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["MBT"]) expectedCores := "37,44" - expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + expectedTimestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) newMeasurement := processMeasurement{ name: processName, measurement: measurement, } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + result, err := parseProcessesMeasurement(newMeasurement) assert.Nil(t, err) - assert.Equal(t, processName, actualProcess) - assert.Equal(t, expectedCores, resultCoresString) - assert.Equal(t, 
expectedTimestamp, resultTimestamp) - assert.Equal(t, resultValues[0], metricsValues["IPC"]) - assert.Equal(t, resultValues[1], metricsValues["LLC_Misses"]) - assert.Equal(t, resultValues[2], metricsValues["LLC"]) - assert.Equal(t, resultValues[3], metricsValues["MBL"]) - assert.Equal(t, resultValues[4], metricsValues["MBR"]) - assert.Equal(t, resultValues[5], metricsValues["MBT"]) + assert.Equal(t, processName, result.process) + assert.Equal(t, expectedCores, result.cores) + assert.Equal(t, expectedTimestamp, result.time) + assert.Equal(t, result.values[0], metricsValues["IPC"]) + assert.Equal(t, result.values[1], metricsValues["LLC_Misses"]) + assert.Equal(t, result.values[2], metricsValues["LLC"]) + assert.Equal(t, result.values[3], metricsValues["MBL"]) + assert.Equal(t, result.values[4], metricsValues["MBR"]) + assert.Equal(t, result.values[5], metricsValues["MBT"]) }) - t.Run("not valid measurement string", func(t *testing.T) { - processName := "process_name" - measurement := "invalid,measurement,format" - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid timestamp format", func(t *testing.T) { - invalidTimestamp := "2020-20-20-31" - measurement := fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", + invalidTimestamp := "2020-20-20-31" + negativeTests := []struct { + name string + measurement string + }{{ + name: "not valid measurement string", + measurement: "invalid,measurement,format", + }, { + name: "not valid timestamp format", + measurement: fmt.Sprintf("%s,%s,%s,%f,%f,%f,%f,%f,%f", invalidTimestamp, pids, cores, @@ -164,44 +157,42 @@ func TestParseProcessesMeasurement(t *testing.T) { metricsValues["LLC"], metricsValues["MBL"], metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) - - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) - t.Run("not valid values string", func(t *testing.T) { - measurement := fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", - timestamp, - pids, - cores, - "1##", - "da", - metricsValues["LLC"], - metricsValues["MBL"], - metricsValues["MBR"], - metricsValues["MBT"]) - - newMeasurement := processMeasurement{ - name: processName, - measurement: measurement, - } - actualProcess, resultCoresString, resultValues, resultTimestamp, err := parseProcessesMeasurement(newMeasurement) + metricsValues["MBT"]), + }, + { + name: "not valid values string", + measurement: fmt.Sprintf("%s,%s,%s,%s,%s,%f,%f,%f,%f", + timestamp, + pids, + cores, + "1##", + "da", + metricsValues["LLC"], + metricsValues["MBL"], + metricsValues["MBR"], + metricsValues["MBT"]), + }, + { + name: "not valid csv line with quotes", + measurement: "0000-08-02 0:00:00,,\",,,,,,,,,,,,,,,,,,,,,,,,\",,", + }, + } - assert.NotNil(t, err) - assert.Equal(t, "", actualProcess) - assert.Equal(t, "", resultCoresString) - assert.Nil(t, resultValues) - assert.Equal(t, time.Time{}, resultTimestamp) - }) + for _, test := range 
negativeTests { + t.Run(test.name, func(t *testing.T) { + newMeasurement := processMeasurement{ + name: processName, + measurement: test.measurement, + } + result, err := parseProcessesMeasurement(newMeasurement) + + assert.NotNil(t, err) + assert.Equal(t, "", result.process) + assert.Equal(t, "", result.cores) + assert.Nil(t, result.values) + assert.Equal(t, time.Time{}, result.time) + }) + } } func TestAddToAccumulatorCores(t *testing.T) { @@ -211,9 +202,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetrics { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -225,9 +216,9 @@ func TestAddToAccumulatorCores(t *testing.T) { cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorCores(cores, metricsValues, timestamp) + publisher.addToAccumulatorCores(parsedCoresMeasurement{cores, metricsValues, timestamp}) for _, test := range testCoreMetricsShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -243,9 +234,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcesses { acc.AssertContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) @@ -258,9 +249,9 @@ func TestAddToAccumulatorProcesses(t *testing.T) { process := "process_name" cores := "1,2,3" metricsValues := []float64{1, 2, 3, 4, 5, 6} - timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.UTC) + timestamp := time.Date(2020, 8, 12, 13, 34, 36, 0, time.Local) - publisher.addToAccumulatorProcesses(process, cores, metricsValues, timestamp) + publisher.addToAccumulatorProcesses(parsedProcessMeasurement{process, cores, metricsValues, timestamp}) for _, test := range testCoreProcessesShortened { acc.AssertDoesNotContainsTaggedFields(t, "rdt_metric", test.fields, test.tags) diff --git a/plugins/inputs/intel_rdt/sample.conf b/plugins/inputs/intel_rdt/sample.conf new file mode 100644 index 0000000000000..9843b40f073ef --- /dev/null +++ b/plugins/inputs/intel_rdt/sample.conf @@ -0,0 +1,28 @@ +# Read Intel RDT metrics +[[inputs.intel_rdt]] + ## Optionally set sampling interval to Nx100ms. + ## This value is propagated to pqos tool. Interval format is defined by pqos itself. + ## If not provided or provided 0, will be set to 10 = 10x100ms = 1s. + # sampling_interval = "10" + + ## Optionally specify the path to pqos executable. + ## If not provided, auto discovery will be performed. + # pqos_path = "/usr/local/bin/pqos" + + ## Optionally specify if IPC and LLC_Misses metrics shouldn't be propagated. + ## If not provided, default value is false. 
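+  ## (when true, only the LLC, MBL, MBR and MBT values are reported)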
+  # shortened_metrics = false
+
+  ## Specify the list of groups of CPU core(s) to be provided as pqos input.
+  ## Mandatory if processes aren't set and forbidden if processes are specified.
+  ## e.g. ["0-3", "4,5,6"] or ["1-3,4"]
+  # cores = ["0-3"]
+
+  ## Specify the list of processes for which metrics will be collected.
+  ## Mandatory if cores aren't set and forbidden if cores are specified.
+  ## e.g. ["qemu", "pmd"]
+  # processes = ["process"]
+
+  ## Specify if the pqos process should be called with sudo.
+  ## Mandatory if the telegraf process does not run as root.
+  # use_sudo = false
diff --git a/plugins/inputs/internal/README.md b/plugins/inputs/internal/README.md
index 35e14c77d0fbb..40e0482f8bac1 100644
--- a/plugins/inputs/internal/README.md
+++ b/plugins/inputs/internal/README.md
@@ -5,80 +5,79 @@
 The `internal` plugin collects metrics about the telegraf agent itself.
 
 Note that some metrics are aggregates across all instances of one type of
 plugin.
 
-### Configuration:
+## Configuration
 
-```toml
+```toml @sample.conf
 # Collect statistics about itself
 [[inputs.internal]]
   ## If true, collect telegraf memory stats.
   # collect_memstats = true
 ```
 
-### Measurements & Fields:
+## Measurements & Fields
 
-memstats are taken from the Go runtime: https://golang.org/pkg/runtime/#MemStats
+memstats are taken from the Go runtime:
+<https://golang.org/pkg/runtime/#MemStats>
 
 - internal_memstats
-    - alloc_bytes
-    - frees
-    - heap_alloc_bytes
-    - heap_idle_bytes
-    - heap_in_use_bytes
-    - heap_objects_bytes
-    - heap_released_bytes
-    - heap_sys_bytes
-    - mallocs
-    - num_gc
-    - pointer_lookups
-    - sys_bytes
-    - total_alloc_bytes
+  - alloc_bytes
+  - frees
+  - heap_alloc_bytes
+  - heap_idle_bytes
+  - heap_in_use_bytes
+  - heap_objects_bytes
+  - heap_released_bytes
+  - heap_sys_bytes
+  - mallocs
+  - num_gc
+  - pointer_lookups
+  - sys_bytes
+  - total_alloc_bytes
 
 agent stats collect aggregate stats on all telegraf plugins.
 
 - internal_agent
-    - gather_errors
-    - metrics_dropped
-    - metrics_gathered
-    - metrics_written
+  - gather_errors
+  - metrics_dropped
+  - metrics_gathered
+  - metrics_written
 
 internal_gather stats collect aggregate stats on all input plugins
 that are of the same input type. They are tagged with `input=<plugin_name>`
 `version=<telegraf_version>` and `go_version=<go_version>`.
 
 - internal_gather
-    - gather_time_ns
-    - metrics_gathered
+  - gather_time_ns
+  - metrics_gathered
 
 internal_write stats collect aggregate stats on all output plugins
 that are of the same input type. They are tagged with `output=<plugin_name>`
 and `version=<telegraf_version>`.
 
-
 - internal_write
-    - buffer_limit
-    - buffer_size
-    - metrics_added
-    - metrics_written
-    - metrics_dropped
-    - metrics_filtered
-    - write_time_ns
+  - buffer_limit
+  - buffer_size
+  - metrics_added
+  - metrics_written
+  - metrics_dropped
+  - metrics_filtered
+  - write_time_ns
 
 internal_<plugin_name> are metrics which are defined on a per-plugin basis,
 and usually contain tags which differentiate each instance of a particular
 type of plugin and `version=<telegraf_version>`.
 
 - internal_<plugin_name>
-    - individual plugin-specific fields, such as requests counts.
+  - individual plugin-specific fields, such as requests counts.
 
-### Tags:
+## Tags
 
 All measurements for specific plugins are tagged with information relevant
 to each particular plugin and with `version=<telegraf_version>`.
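+
+As a sketch of where the per-plugin `internal_<plugin_name>` metrics come from
+(the names below are illustrative only, not part of this plugin's
+configuration), a plugin registers its own counters through the `selfstat`
+package:
+
+```go
+package myplugin
+
+import "github.com/influxdata/telegraf/selfstat"
+
+// reported by the internal plugin as internal_mytest,test=foo test=<count>
+var requests = selfstat.Register("mytest", "test", map[string]string{"test": "foo"})
+
+func recordRequest() {
+	requests.Incr(1) // increment whenever the tracked event occurs
+}
+```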
+## Example Output -### Example Output: - -``` +```shell internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000 internal_agent,host=tyrion,go_version=1.12.7,version=1.99.0 metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000 internal_write,output=file,host=tyrion,version=1.99.0 buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000 diff --git a/plugins/inputs/internal/internal.go b/plugins/inputs/internal/internal.go index 2eb8b91c9e39d..664d8b98acf1d 100644 --- a/plugins/inputs/internal/internal.go +++ b/plugins/inputs/internal/internal.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package internal import ( + _ "embed" "runtime" "strings" @@ -10,6 +12,10 @@ import ( "github.com/influxdata/telegraf/selfstat" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Self struct { CollectMemstats bool } @@ -20,16 +26,7 @@ func NewSelf() telegraf.Input { } } -var sampleConfig = ` - ## If true, collect telegraf memory stats. - # collect_memstats = true -` - -func (s *Self) Description() string { - return "Collect statistics about itself" -} - -func (s *Self) SampleConfig() string { +func (*Self) SampleConfig() string { return sampleConfig } diff --git a/plugins/inputs/internal/internal_test.go b/plugins/inputs/internal/internal_test.go index 4cdba9099edf0..0b89a974a0a74 100644 --- a/plugins/inputs/internal/internal_test.go +++ b/plugins/inputs/internal/internal_test.go @@ -6,21 +6,21 @@ import ( "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSelfPlugin(t *testing.T) { s := NewSelf() acc := &testutil.Accumulator{} - s.Gather(acc) - assert.True(t, acc.HasMeasurement("internal_memstats")) + require.NoError(t, s.Gather(acc)) + require.True(t, acc.HasMeasurement("internal_memstats")) // test that a registered stat is incremented stat := selfstat.Register("mytest", "test", map[string]string{"test": "foo"}) stat.Incr(1) stat.Incr(2) - s.Gather(acc) + require.NoError(t, s.Gather(acc)) acc.AssertContainsTaggedFields(t, "internal_mytest", map[string]interface{}{ "test": int64(3), @@ -34,7 +34,7 @@ func TestSelfPlugin(t *testing.T) { // test that a registered stat is set properly stat.Set(101) - s.Gather(acc) + require.NoError(t, s.Gather(acc)) acc.AssertContainsTaggedFields(t, "internal_mytest", map[string]interface{}{ "test": int64(101), @@ -51,7 +51,7 @@ func TestSelfPlugin(t *testing.T) { timing := selfstat.RegisterTiming("mytest", "test_ns", map[string]string{"test": "foo"}) timing.Incr(100) timing.Incr(200) - s.Gather(acc) + require.NoError(t, s.Gather(acc)) acc.AssertContainsTaggedFields(t, "internal_mytest", map[string]interface{}{ "test": int64(101), diff --git a/plugins/inputs/internal/sample.conf b/plugins/inputs/internal/sample.conf new file mode 100644 index 0000000000000..4292b8e5e1e52 --- /dev/null +++ b/plugins/inputs/internal/sample.conf @@ -0,0 +1,4 @@ +# Collect statistics about itself +[[inputs.internal]] + ## If true, collect telegraf memory stats. 
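+  ## (memstats are read from the Go runtime)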
+  # collect_memstats = true
diff --git a/plugins/inputs/internet_speed/README.md b/plugins/inputs/internet_speed/README.md
new file mode 100644
index 0000000000000..ae985acb3a040
--- /dev/null
+++ b/plugins/inputs/internet_speed/README.md
@@ -0,0 +1,37 @@
+# Internet Speed Monitor Input Plugin
+
+The `Internet Speed Monitor` collects data about the internet speed on the
+system.
+
+## Configuration
+
+```toml @sample.conf
+# Monitors internet speed using speedtest.net service
+[[inputs.internet_speed]]
+  ## This plugin downloads many MB of data each time it is run. As such
+  ## consider setting a higher interval for this plugin to reduce the
+  ## demand on your internet connection.
+  # interval = "60m"
+
+  ## Sets if runs file download test
+  # enable_file_download = false
+
+  ## Caches the closest server location
+  # cache = false
+```
+
+## Metrics
+
+It collects latency, download speed and upload speed.
+
+| Name           | field name | type    | Unit |
+| -------------- | ---------- | ------- | ---- |
+| Download Speed | download   | float64 | Mbps |
+| Upload Speed   | upload     | float64 | Mbps |
+| Latency        | latency    | float64 | ms   |
+
+## Example Output
+
+```sh
+internet_speed,host=Sanyam-Ubuntu download=41.791,latency=28.518,upload=59.798 1631031183000000000
+```
diff --git a/plugins/inputs/internet_speed/internet_speed.go b/plugins/inputs/internet_speed/internet_speed.go
new file mode 100644
index 0000000000000..b236df6992377
--- /dev/null
+++ b/plugins/inputs/internet_speed/internet_speed.go
@@ -0,0 +1,94 @@
+//go:generate ../../../tools/readme_config_includer/generator
+package internet_speed
+
+import (
+	_ "embed"
+	"fmt"
+	"time"
+
+	"github.com/showwin/speedtest-go/speedtest"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
+// InternetSpeed is used to store configuration values.
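+// The exported fields mirror the options in sample.conf; serverCache keeps the
+// selected speedtest server between Gather calls when Cache is enabled.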
+type InternetSpeed struct {
+	EnableFileDownload bool            `toml:"enable_file_download"`
+	Cache              bool            `toml:"cache"`
+	Log                telegraf.Logger `toml:"-"`
+	serverCache        *speedtest.Server
+}
+
+const measurement = "internet_speed"
+
+func (*InternetSpeed) SampleConfig() string {
+	return sampleConfig
+}
+
+func (is *InternetSpeed) Gather(acc telegraf.Accumulator) error {
+
+	// Get closest server
+	s := is.serverCache
+	if s == nil {
+		user, err := speedtest.FetchUserInfo()
+		if err != nil {
+			return fmt.Errorf("fetching user info failed: %v", err)
+		}
+		serverList, err := speedtest.FetchServers(user)
+		if err != nil {
+			return fmt.Errorf("fetching server list failed: %v", err)
+		}
+		if len(serverList) < 1 {
+			return fmt.Errorf("no servers found")
+		}
+		s = serverList[0]
+		is.Log.Debugf("Found server: %v", s)
+		if is.Cache {
+			is.serverCache = s
+		}
+	}
+
+	is.Log.Debug("Starting Speed Test")
+	is.Log.Debug("Running Ping...")
+	err := s.PingTest()
+	if err != nil {
+		return fmt.Errorf("ping test failed: %v", err)
+	}
+	is.Log.Debug("Running Download...")
+	err = s.DownloadTest(is.EnableFileDownload)
+	if err != nil {
+		return fmt.Errorf("download test failed: %v", err)
+	}
+	is.Log.Debug("Running Upload...")
+	err = s.UploadTest(is.EnableFileDownload)
+	if err != nil {
+		return fmt.Errorf("upload test failed: %v", err)
+	}
+
+	is.Log.Debug("Test finished.")
+
+	fields := make(map[string]interface{})
+	fields["download"] = s.DLSpeed
+	fields["upload"] = s.ULSpeed
+	fields["latency"] = timeDurationMillisecondToFloat64(s.Latency)
+
+	tags := make(map[string]string)
+
+	acc.AddFields(measurement, fields, tags)
+	return nil
+}
+
+func init() {
+	inputs.Add("internet_speed", func() telegraf.Input {
+		return &InternetSpeed{}
+	})
+}
+
+func timeDurationMillisecondToFloat64(d time.Duration) float64 {
+	return float64(d) / float64(time.Millisecond)
+}
diff --git a/plugins/inputs/internet_speed/internet_speed_test.go b/plugins/inputs/internet_speed/internet_speed_test.go
new file mode 100644
index 0000000000000..ccc887d6c286a
--- /dev/null
+++ b/plugins/inputs/internet_speed/internet_speed_test.go
@@ -0,0 +1,44 @@
+package internet_speed
+
+import (
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestGathering(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping network-dependent test in short mode.")
+	}
+	internetSpeed := &InternetSpeed{
+		EnableFileDownload: true,
+		Log:                testutil.Logger{},
+	}
+
+	acc := &testutil.Accumulator{}
+
+	require.NoError(t, internetSpeed.Gather(acc))
+}
+
+func TestDataGen(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping network-dependent test in short mode.")
+	}
+	internetSpeed := &InternetSpeed{
+		EnableFileDownload: true,
+		Log:                testutil.Logger{},
+	}
+
+	acc := &testutil.Accumulator{}
+	require.NoError(t, internetSpeed.Gather(acc))
+
+	metric, ok := acc.Get("internet_speed")
+	require.True(t, ok)
+
+	tags := metric.Tags
+
+	fields := metric.Fields
+
+	acc.AssertContainsTaggedFields(t, "internet_speed", fields, tags)
+}
diff --git a/plugins/inputs/internet_speed/sample.conf b/plugins/inputs/internet_speed/sample.conf
new file mode 100644
index 0000000000000..a51a88843d275
--- /dev/null
+++ b/plugins/inputs/internet_speed/sample.conf
@@ -0,0 +1,12 @@
+# Monitors internet speed using speedtest.net service
+[[inputs.internet_speed]]
+  ## This plugin downloads many MB of data each time it is run.
As such + ## consider setting a higher interval for this plugin to reduce the + ## demand on your internet connection. + # interval = "60m" + + ## Sets if runs file download test + # enable_file_download = false + + ## Caches the closest server location + # cache = false diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md index 5da647f47793f..cbebed9773b61 100644 --- a/plugins/inputs/interrupts/README.md +++ b/plugins/inputs/interrupts/README.md @@ -1,9 +1,12 @@ # Interrupts Input Plugin -The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/proc/softirqs`. +The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and +`/proc/softirqs`. -### Configuration -```toml +## Configuration + +```toml @sample.conf +# This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. [[inputs.interrupts]] ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is ## stored as a field. @@ -18,7 +21,7 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/p # irq = [ "NET_RX", "TASKLET" ] ``` -### Metrics +## Metrics There are two styles depending on the value of `cpu_as_tag`. @@ -64,10 +67,11 @@ With `cpu_as_tag = true`: - fields: - count (int, number of interrupts) -### Example Output +## Example Output With `cpu_as_tag = false`: -``` + +```shell interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,cpu=cpu0 count=23i 1489346531000000000 interrupts,irq=1,type=IO-APIC,device=1-edge\ i8042,cpu=cpu0 count=9i 1489346531000000000 interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,cpu=cpu1 count=1i 1489346531000000000 @@ -75,7 +79,8 @@ soft_interrupts,irq=NET_RX,cpu=cpu0 count=280879i 1489346531000000000 ``` With `cpu_as_tag = true`: -``` + +```shell interrupts,cpu=cpu6,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000 interrupts,cpu=cpu7,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000 soft_interrupts,cpu=cpu0,irq=HI count=246441i 1543539773000000000 diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 39b3020ddbd39..a51e570617cae 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package interrupts import ( "bufio" + _ "embed" "fmt" "io" "os" @@ -12,8 +14,12 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Interrupts struct { - CpuAsTag bool `toml:"cpu_as_tag"` + CPUAsTag bool `toml:"cpu_as_tag"` } type IRQ struct { @@ -28,28 +34,6 @@ func NewIRQ(id string) *IRQ { return &IRQ{ID: id, Cpus: []int64{}} } -const sampleConfig = ` - ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is - ## stored as a field. - ## - ## The default is false for backwards compatibility, and will be changed to - ## true in a future version. It is recommended to set to true on new - ## deployments. - # cpu_as_tag = false - - ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. - # [inputs.interrupts.tagdrop] - # irq = [ "NET_RX", "TASKLET" ] -` - -func (s *Interrupts) Description() string { - return "This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs." 
-} - -func (s *Interrupts) SampleConfig() string { - return sampleConfig -} - func parseInterrupts(r io.Reader) ([]IRQ, error) { var irqs []IRQ var cpucount int @@ -57,7 +41,7 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { if scanner.Scan() { cpus := strings.Fields(scanner.Text()) if cpus[0] != "CPU0" { - return nil, fmt.Errorf("Expected first line to start with CPU0, but was %s", scanner.Text()) + return nil, fmt.Errorf("expected first line to start with CPU0, but was %s", scanner.Text()) } cpucount = len(cpus) } @@ -93,7 +77,7 @@ scan: irqs = append(irqs, *irq) } if scanner.Err() != nil { - return nil, fmt.Errorf("Error scanning file: %s", scanner.Err()) + return nil, fmt.Errorf("error scanning file: %s", scanner.Err()) } return irqs, nil } @@ -108,24 +92,36 @@ func gatherTagsFields(irq IRQ) (map[string]string, map[string]interface{}) { return tags, fields } +func (*Interrupts) SampleConfig() string { + return sampleConfig +} + func (s *Interrupts) Gather(acc telegraf.Accumulator) error { for measurement, file := range map[string]string{"interrupts": "/proc/interrupts", "soft_interrupts": "/proc/softirqs"} { - f, err := os.Open(file) + irqs, err := parseFile(file) if err != nil { - acc.AddError(fmt.Errorf("Could not open file: %s", file)) + acc.AddError(err) continue } - defer f.Close() - irqs, err := parseInterrupts(f) - if err != nil { - acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) - continue - } - reportMetrics(measurement, irqs, acc, s.CpuAsTag) + reportMetrics(measurement, irqs, acc, s.CPUAsTag) } return nil } +func parseFile(file string) ([]IRQ, error) { + f, err := os.Open(file) + if err != nil { + return nil, fmt.Errorf("could not open file: %s", file) + } + defer f.Close() + + irqs, err := parseInterrupts(f) + if err != nil { + return nil, fmt.Errorf("parsing %s: %s", file, err) + } + return irqs, nil +} + func reportMetrics(measurement string, irqs []IRQ, acc telegraf.Accumulator, cpusAsTags bool) { for _, irq := range irqs { tags, fields := gatherTagsFields(irq) diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index 63ff765b678dd..3ed0cd394cfdc 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -13,13 +13,13 @@ import ( // Setup and helper functions // ===================================================================================== -func expectCpuAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { +func expectCPUAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { for idx, value := range irq.Cpus { m.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"count": value}, map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device, "cpu": fmt.Sprintf("cpu%d", idx)}) } } -func expectCpuAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { +func expectCPUAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { fields := map[string]interface{}{} total := int64(0) for idx, count := range irq.Cpus { @@ -70,7 +70,7 @@ func TestCpuAsTagsSoftIrqs(t *testing.T) { reportMetrics("soft_interrupts", irqs, acc, true) for _, irq := range softIrqsExpectedArgs { - expectCpuAsTags(acc, t, "soft_interrupts", irq) + expectCPUAsTags(acc, t, "soft_interrupts", irq) } } @@ -79,7 +79,7 @@ func TestCpuAsFieldsSoftIrqs(t *testing.T) { reportMetrics("soft_interrupts", irqs, acc, false) for _, irq := range softIrqsExpectedArgs { - expectCpuAsFields(acc, 
t, "soft_interrupts", irq) + expectCPUAsFields(acc, t, "soft_interrupts", irq) } } @@ -142,7 +142,7 @@ func TestCpuAsTagsHwIrqs(t *testing.T) { reportMetrics("interrupts", irqs, acc, true) for _, irq := range hwIrqsExpectedArgs { - expectCpuAsTags(acc, t, "interrupts", irq) + expectCPUAsTags(acc, t, "interrupts", irq) } } @@ -151,6 +151,6 @@ func TestCpuAsFieldsHwIrqs(t *testing.T) { reportMetrics("interrupts", irqs, acc, false) for _, irq := range hwIrqsExpectedArgs { - expectCpuAsFields(acc, t, "interrupts", irq) + expectCPUAsFields(acc, t, "interrupts", irq) } } diff --git a/plugins/inputs/interrupts/sample.conf b/plugins/inputs/interrupts/sample.conf new file mode 100644 index 0000000000000..126e7bfa19173 --- /dev/null +++ b/plugins/inputs/interrupts/sample.conf @@ -0,0 +1,13 @@ +# This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. +[[inputs.interrupts]] + ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is + ## stored as a field. + ## + ## The default is false for backwards compatibility, and will be changed to + ## true in a future version. It is recommended to set to true on new + ## deployments. + # cpu_as_tag = false + + ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. + # [inputs.interrupts.tagdrop] + # irq = [ "NET_RX", "TASKLET" ] diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 0f9faa97f1f3d..64361950d676a 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -3,25 +3,36 @@ Get bare metal metrics using the command line utility [`ipmitool`](https://github.com/ipmitool/ipmitool). -If no servers are specified, the plugin will query the local machine sensor stats via the following command: +If no servers are specified, the plugin will query the local machine sensor +stats via the following command: -``` +```sh ipmitool sdr ``` + or with the version 2 schema: -``` + +```sh ipmitool sdr elist ``` -When one or more servers are specified, the plugin will use the following command to collect remote host sensor stats: +When one or more servers are specified, the plugin will use the following +command to collect remote host sensor stats: -``` +```sh ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ``` -### Configuration +Any of the following parameters will be added to the aformentioned query if +they're configured: -```toml +```sh +-y hex_key -L privilege +``` + +## Configuration + +```toml @sample.conf # Read metrics from the bare metal servers via IPMI [[inputs.ipmi_sensor]] ## optionally specify the path to the ipmitool executable @@ -53,11 +64,24 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## Schema Version: (Optional, defaults to version 1) metric_version = 2 + + ## Optionally provide the hex key for the IMPI connection. 
+  # hex_key = ""
+
+  ## If ipmitool should use a cache
+  ## In testing, ipmitool runs about 2 to 10 times faster with the cache enabled on HP G10 servers (when using Ubuntu 20.04)
+  ## the cache file may not work well for you if some sensors come up late
+  # use_cache = false
+
+  ## Path to the ipmitool cache file (defaults to OS temp dir)
+  ## The provided path must exist and must be writable
+  # cache_path = ""
 ```
 
-### Measurements
+## Measurements
 
 Version 1 schema:
+
 - ipmi_sensor:
   - tags:
     - name
@@ -69,6 +93,7 @@ Version 1 schema:
     - value (float)
 
 Version 2 schema:
+
 - ipmi_sensor:
   - tags:
     - name
@@ -81,17 +106,20 @@ Version 2 schema:
   - fields:
     - value (float)
 
-#### Permissions
+### Permissions
 
 When gathering from the local system, Telegraf will need permission to the
 ipmi device node.  When using udev you can create the device node giving
 `rw` permissions to the `telegraf` user by adding the following rule to
 `/etc/udev/rules.d/52-telegraf-ipmi.rules`:
 
-```
+```sh
 KERNEL=="ipmi*", MODE="660", GROUP="telegraf"
 ```
+
-Alternatively, it is possible to use sudo. You will need the following in your telegraf config:
+Alternatively, it is possible to use sudo. You will need the following in your
+telegraf config:
+
 ```toml
 [[inputs.ipmi_sensor]]
   use_sudo = true
@@ -107,11 +135,13 @@ telegraf ALL=(root) NOPASSWD: IPMITOOL
 Defaults!IPMITOOL !logfile, !syslog, !pam_session
 ```
 
-### Example Output
+## Example Output
+
+### Version 1 Schema
 
-#### Version 1 Schema
 When retrieving stats from a remote server:
-```
+
+```shell
 ipmi_sensor,server=10.20.2.203,name=uid_light value=0,status=1i 1517125513000000000
 ipmi_sensor,server=10.20.2.203,name=sys._health_led status=1i,value=0 1517125513000000000
 ipmi_sensor,server=10.20.2.203,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000
@@ -120,9 +150,9 @@ ipmi_sensor,server=10.20.2.203,name=power_supplies value=0,status=1i 15171255130
 ipmi_sensor,server=10.20.2.203,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000
 ```
 
-
 When retrieving stats from the local machine (no server specified):
-```
+
+```shell
 ipmi_sensor,name=uid_light value=0,status=1i 1517125513000000000
 ipmi_sensor,name=sys._health_led status=1i,value=0 1517125513000000000
 ipmi_sensor,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000
@@ -134,7 +164,8 @@ ipmi_sensor,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000
 #### Version 2 Schema
 
 When retrieving stats from the local machine (no server specified):
-```
+
+```shell
 ipmi_sensor,name=uid_light,entity_id=23.1,status_code=ok,status_desc=ok value=0 1517125474000000000
 ipmi_sensor,name=sys._health_led,entity_id=23.2,status_code=ok,status_desc=ok value=0 1517125474000000000
 ipmi_sensor,entity_id=10.1,name=power_supply_1,status_code=ok,status_desc=presence_detected,unit=watts value=110 1517125474000000000
diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go
index 7f6a4c3594f61..b67ba06b9a619 100644
--- a/plugins/inputs/ipmi_sensor/connection.go
+++ b/plugins/inputs/ipmi_sensor/connection.go
@@ -15,11 +15,14 @@ type Connection struct {
 	Port      int
 	Interface string
 	Privilege string
+	HexKey    string
 }
 
-func NewConnection(server string, privilege string) *Connection {
-	conn := &Connection{}
-	conn.Privilege = privilege
+func NewConnection(server, privilege, hexKey string) *Connection {
+	conn := &Connection{
+		Privilege: privilege,
+		HexKey:    hexKey,
+	}
 
 	inx1 := strings.LastIndex(server, "@")
 	inx2 := strings.Index(server, "(")
@@ -29,8 +32,10 @@ func
NewConnection(server string, privilege string) *Connection { security := server[0:inx1] connstr = server[inx1+1:] up := strings.SplitN(security, ":", 2) - conn.Username = up[0] - conn.Password = up[1] + if len(up) == 2 { + conn.Username = up[0] + conn.Password = up[1] + } } if inx2 > 0 { @@ -44,24 +49,27 @@ func NewConnection(server string, privilege string) *Connection { return conn } -func (t *Connection) options() []string { - intf := t.Interface +func (c *Connection) options() []string { + intf := c.Interface if intf == "" { intf = "lan" } options := []string{ - "-H", t.Hostname, - "-U", t.Username, - "-P", t.Password, + "-H", c.Hostname, + "-U", c.Username, + "-P", c.Password, "-I", intf, } - if t.Port != 0 { - options = append(options, "-p", strconv.Itoa(t.Port)) + if c.HexKey != "" { + options = append(options, "-y", c.HexKey) + } + if c.Port != 0 { + options = append(options, "-p", strconv.Itoa(c.Port)) } - if t.Privilege != "" { - options = append(options, "-L", t.Privilege) + if c.Privilege != "" { + options = append(options, "-L", c.Privilege) } return options } diff --git a/plugins/inputs/ipmi_sensor/connection_test.go b/plugins/inputs/ipmi_sensor/connection_test.go index 74944890f7a0c..3be902e3264bc 100644 --- a/plugins/inputs/ipmi_sensor/connection_test.go +++ b/plugins/inputs/ipmi_sensor/connection_test.go @@ -3,14 +3,9 @@ package ipmi_sensor import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -type conTest struct { - Got string - Want *Connection -} - func TestNewConnection(t *testing.T) { testData := []struct { addr string @@ -24,6 +19,7 @@ func TestNewConnection(t *testing.T) { Password: "PASSW0RD", Interface: "lan", Privilege: "USER", + HexKey: "0001", }, }, { @@ -34,11 +30,58 @@ func TestNewConnection(t *testing.T) { Password: "PASS:!@#$%^&*(234)_+W0RD", Interface: "lan", Privilege: "USER", + HexKey: "0001", + }, + }, + // test connection doesn't panic if incorrect symbol used + { + "USERID@PASSW0RD@lan(192.168.1.1)", + &Connection{ + Hostname: "192.168.1.1", + Username: "", + Password: "", + Interface: "lan", + Privilege: "USER", + HexKey: "0001", }, }, } for _, v := range testData { - assert.Equal(t, v.con, NewConnection(v.addr, "USER")) + require.EqualValues(t, v.con, NewConnection(v.addr, "USER", "0001")) + } +} + +func TestGetCommandOptions(t *testing.T) { + testData := []struct { + connection *Connection + options []string + }{ + { + &Connection{ + Hostname: "192.168.1.1", + Username: "user", + Password: "password", + Interface: "lan", + Privilege: "USER", + HexKey: "0001", + }, + []string{"-H", "192.168.1.1", "-U", "user", "-P", "password", "-I", "lan", "-y", "0001", "-L", "USER"}, + }, + { + &Connection{ + Hostname: "192.168.1.1", + Username: "user", + Password: "password", + Interface: "lan", + Privilege: "USER", + HexKey: "", + }, + []string{"-H", "192.168.1.1", "-U", "user", "-P", "password", "-I", "lan", "-L", "USER"}, + }, + } + + for _, data := range testData { + require.EqualValues(t, data.options, data.connection.options()) } } diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi_sensor.go similarity index 60% rename from plugins/inputs/ipmi_sensor/ipmi.go rename to plugins/inputs/ipmi_sensor/ipmi_sensor.go index fb53e1bc746fe..ade0a7f19af14 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor.go @@ -1,11 +1,14 @@ +//go:generate ../../../tools/readme_config_includer/generator package ipmi_sensor import ( "bufio" "bytes" + _ "embed" "fmt" - 
"log" + "os" "os/exec" + "path/filepath" "regexp" "strconv" "strings" @@ -13,68 +16,63 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( - execCommand = exec.Command // execCommand is used to mock commands in tests. - re_v1_parse_line = regexp.MustCompile(`^(?P[^|]*)\|(?P[^|]*)\|(?P.*)`) - re_v2_parse_line = regexp.MustCompile(`^(?P[^|]*)\|[^|]+\|(?P[^|]*)\|(?P[^|]*)\|(?:(?P[^|]+))?`) - re_v2_parse_description = regexp.MustCompile(`^(?P-?[0-9.]+)\s(?P.*)|(?P.+)|^$`) - re_v2_parse_unit = regexp.MustCompile(`^(?P[^,]+)(?:,\s*(?P.*))?`) + execCommand = exec.Command // execCommand is used to mock commands in tests. + reV1ParseLine = regexp.MustCompile(`^(?P[^|]*)\|(?P[^|]*)\|(?P.*)`) + reV2ParseLine = regexp.MustCompile(`^(?P[^|]*)\|[^|]+\|(?P[^|]*)\|(?P[^|]*)\|(?:(?P[^|]+))?`) + reV2ParseDescription = regexp.MustCompile(`^(?P-?[0-9.]+)\s(?P.*)|(?P.+)|^$`) + reV2ParseUnit = regexp.MustCompile(`^(?P[^,]+)(?:,\s*(?P.*))?`) ) // Ipmi stores the configuration values for the ipmi_sensor input plugin type Ipmi struct { Path string Privilege string + HexKey string `toml:"hex_key"` Servers []string - Timeout internal.Duration + Timeout config.Duration MetricVersion int UseSudo bool + UseCache bool + CachePath string + + Log telegraf.Logger `toml:"-"` } -var sampleConfig = ` - ## optionally specify the path to the ipmitool executable - # path = "/usr/bin/ipmitool" - ## - ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. - ## Sudo must be configured to allow the telegraf user to run ipmitool - ## without a password. - # use_sudo = false - ## - ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR - # privilege = "ADMINISTRATOR" - ## - ## optionally specify one or more servers via a url matching - ## [username[:password]@][protocol[(address)]] - ## e.g. 
- ## root:passwd@lan(127.0.0.1) - ## - ## if no servers are specified, local machine sensor stats will be queried - ## - # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] - - ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid - ## gaps or overlap in pulled data - interval = "30s" - - ## Timeout for the ipmitool command to complete - timeout = "20s" - - ## Schema Version: (Optional, defaults to version 1) - metric_version = 2 -` - -// SampleConfig returns the documentation about the sample configuration -func (m *Ipmi) SampleConfig() string { +const cmd = "ipmitool" + +func (*Ipmi) SampleConfig() string { return sampleConfig } -// Description returns a basic description for the plugin functions -func (m *Ipmi) Description() string { - return "Read metrics from the bare metal servers via IPMI" +func (m *Ipmi) Init() error { + // Set defaults + if m.Path == "" { + path, err := exec.LookPath(cmd) + if err != nil { + return fmt.Errorf("looking up %q failed: %v", cmd, err) + } + m.Path = path + } + if m.CachePath == "" { + m.CachePath = os.TempDir() + } + + // Check parameters + if m.Path == "" { + return fmt.Errorf("no path for %q specified", cmd) + } + + return nil } // Gather is the main execution function for the plugin @@ -110,11 +108,34 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { opts := make([]string, 0) hostname := "" if server != "" { - conn := NewConnection(server, m.Privilege) + conn := NewConnection(server, m.Privilege, m.HexKey) hostname = conn.Hostname opts = conn.options() } opts = append(opts, "sdr") + if m.UseCache { + cacheFile := filepath.Join(m.CachePath, server+"_ipmi_cache") + _, err := os.Stat(cacheFile) + if os.IsNotExist(err) { + dumpOpts := opts + // init cache file + dumpOpts = append(dumpOpts, "dump") + dumpOpts = append(dumpOpts, cacheFile) + name := m.Path + if m.UseSudo { + // -n - avoid prompting the user for input of any kind + dumpOpts = append([]string{"-n", name}, dumpOpts...) + name = "sudo" + } + cmd := execCommand(name, dumpOpts...) + out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) + if err != nil { + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) + } + } + opts = append(opts, "-S") + opts = append(opts, cacheFile) + } if m.MetricVersion == 2 { opts = append(opts, "elist") } @@ -125,23 +146,23 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { name = "sudo" } cmd := execCommand(name, opts...) 
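	// The command assembled above looks roughly like (illustrative; "-y <hex key>",
	// "-p <port>", "-L <privilege>" and a "sudo -n" prefix are added only when configured):
	//   ipmitool -H <host> -U <user> -P <pass> -I lan sdr dump <cache file>      (first run with use_cache, builds the cache)
	//   ipmitool -H <host> -U <user> -P <pass> -I lan sdr -S <cache file> elist  (elist only with metric_version = 2)
	// With use_cache enabled, later collections read sensor records from the
	// cache file instead of querying the BMC on every interval.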
- out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration) + out, err := internal.CombinedOutputTimeout(cmd, time.Duration(m.Timeout)) timestamp := time.Now() if err != nil { - return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(sanitizeIPMICmd(cmd.Args), " "), err, string(out)) } if m.MetricVersion == 2 { - return parseV2(acc, hostname, out, timestamp) + return m.parseV2(acc, hostname, out, timestamp) } - return parseV1(acc, hostname, out, timestamp) + return m.parseV1(acc, hostname, out, timestamp) } -func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { +func (m *Ipmi) parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // Planar VBAT | 3.05 Volts | ok scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := extractFieldsFromRegex(re_v1_parse_line, scanner.Text()) + ipmiFields := m.extractFieldsFromRegex(reV1ParseLine, scanner.Text()) if len(ipmiFields) != 3 { continue } @@ -187,20 +208,20 @@ func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ fields["value"] = 0.0 } - acc.AddFields("ipmi_sensor", fields, tags, measured_at) + acc.AddFields("ipmi_sensor", fields, tags, measuredAt) } return scanner.Err() } -func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { +func (m *Ipmi) parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measuredAt time.Time) error { // each line will look something like // CMOS Battery | 65h | ok | 7.1 | // Temp | 0Eh | ok | 3.1 | 55 degrees C // Drive 0 | A0h | ok | 7.1 | Drive Present scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) for scanner.Scan() { - ipmiFields := extractFieldsFromRegex(re_v2_parse_line, scanner.Text()) + ipmiFields := m.extractFieldsFromRegex(reV2ParseLine, scanner.Text()) if len(ipmiFields) < 3 || len(ipmiFields) > 4 { continue } @@ -216,7 +237,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ tags["entity_id"] = transform(ipmiFields["entity_id"]) tags["status_code"] = trim(ipmiFields["status_code"]) fields := make(map[string]interface{}) - descriptionResults := extractFieldsFromRegex(re_v2_parse_description, trim(ipmiFields["description"])) + descriptionResults := m.extractFieldsFromRegex(reV2ParseDescription, trim(ipmiFields["description"])) // This is an analog value with a unit if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 { var err error @@ -225,7 +246,7 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ continue } // Some implementations add an extra status to their analog units - unitResults := extractFieldsFromRegex(re_v2_parse_unit, descriptionResults["analogUnit"]) + unitResults := m.extractFieldsFromRegex(reV2ParseUnit, descriptionResults["analogUnit"]) tags["unit"] = transform(unitResults["realAnalogUnit"]) if unitResults["statusDesc"] != "" { tags["status_desc"] = transform(unitResults["statusDesc"]) @@ -241,19 +262,19 @@ func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_ } } - acc.AddFields("ipmi_sensor", fields, tags, measured_at) + acc.AddFields("ipmi_sensor", fields, tags, measuredAt) } return scanner.Err() } // extractFieldsFromRegex consumes a regex with named capture groups and returns a kvp map of 
strings with the results -func extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { +func (m *Ipmi) extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { submatches := re.FindStringSubmatch(input) results := make(map[string]string) subexpNames := re.SubexpNames() if len(subexpNames) > len(submatches) { - log.Printf("D! No matches found in '%s'", input) + m.Log.Debugf("No matches found in '%s'", input) return results } for i, name := range subexpNames { @@ -273,6 +294,16 @@ func aToFloat(val string) (float64, error) { return f, nil } +func sanitizeIPMICmd(args []string) []string { + for i, v := range args { + if v == "-P" { + args[i+1] = "REDACTED" + } + } + + return args +} + func trim(s string) string { return strings.TrimSpace(s) } @@ -280,18 +311,11 @@ func trim(s string) string { func transform(s string) string { s = trim(s) s = strings.ToLower(s) - return strings.Replace(s, " ", "_", -1) + return strings.ReplaceAll(s, " ", "_") } func init() { - m := Ipmi{} - path, _ := exec.LookPath("ipmitool") - if len(path) > 0 { - m.Path = path - } - m.Timeout = internal.Duration{Duration: time.Second * 20} inputs.Add("ipmi_sensor", func() telegraf.Input { - m := m - return &m + return &Ipmi{Timeout: config.Duration(20 * time.Second)} }) } diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go similarity index 84% rename from plugins/inputs/ipmi_sensor/ipmi_test.go rename to plugins/inputs/ipmi_sensor/ipmi_sensor_test.go index bd5e02c196e76..a65dc5a1561bf 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_sensor_test.go @@ -7,11 +7,11 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { @@ -19,21 +19,23 @@ func TestGather(t *testing.T) { Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, Path: "ipmitool", Privilege: "USER", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + HexKey: "1234567F", + Log: testutil.Logger{}, } + // overwriting exec commands with mock commands execCommand = fakeExecCommand var acc testutil.Accumulator - err := acc.GatherError(i.Gather) - - require.NoError(t, err) + require.NoError(t, i.Init()) + require.NoError(t, acc.GatherError(i.Gather)) + require.EqualValues(t, acc.NFields(), 262, "non-numeric measurements should be ignored") - assert.Equal(t, acc.NFields(), 262, "non-numeric measurements should be ignored") - - conn := NewConnection(i.Servers[0], i.Privilege) - assert.Equal(t, "USERID", conn.Username) - assert.Equal(t, "lan", conn.Interface) + conn := NewConnection(i.Servers[0], i.Privilege, i.HexKey) + require.EqualValues(t, "USERID", conn.Username) + require.EqualValues(t, "lan", conn.Interface) + require.EqualValues(t, "1234567F", conn.HexKey) var testsWithServer = []struct { fields map[string]interface{} @@ -42,7 +44,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(20), - "status": int(1), + "status": 1, }, map[string]string{ "name": "ambient_temp", @@ -53,7 +55,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(80), - "status": int(1), + "status": 1, }, map[string]string{ "name": "altitude", @@ -64,7 +66,7 @@ func TestGather(t 
*testing.T) { { map[string]interface{}{ "value": float64(210), - "status": int(1), + "status": 1, }, map[string]string{ "name": "avg_power", @@ -75,7 +77,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(4.9), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_5v", @@ -86,7 +88,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(3.05), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_vbat", @@ -97,7 +99,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(2610), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1a_tach", @@ -108,7 +110,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(1775), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1b_tach", @@ -124,11 +126,12 @@ func TestGather(t *testing.T) { i = &Ipmi{ Path: "ipmitool", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), + Log: testutil.Logger{}, } - err = acc.GatherError(i.Gather) - require.NoError(t, err) + require.NoError(t, i.Init()) + require.NoError(t, acc.GatherError(i.Gather)) var testsWithoutServer = []struct { fields map[string]interface{} @@ -137,7 +140,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(20), - "status": int(1), + "status": 1, }, map[string]string{ "name": "ambient_temp", @@ -147,7 +150,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(80), - "status": int(1), + "status": 1, }, map[string]string{ "name": "altitude", @@ -157,7 +160,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(210), - "status": int(1), + "status": 1, }, map[string]string{ "name": "avg_power", @@ -167,7 +170,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(4.9), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_5v", @@ -177,7 +180,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(3.05), - "status": int(1), + "status": 1, }, map[string]string{ "name": "planar_vbat", @@ -187,7 +190,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(2610), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1a_tach", @@ -197,7 +200,7 @@ func TestGather(t *testing.T) { { map[string]interface{}{ "value": float64(1775), - "status": int(1), + "status": 1, }, map[string]string{ "name": "fan_1b_tach", @@ -225,7 +228,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. 
-func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -369,15 +372,19 @@ OS RealTime Mod | 0x00 | ok // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd := args[3] + // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) - } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -386,20 +393,22 @@ func TestGatherV2(t *testing.T) { Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, Path: "ipmitool", Privilege: "USER", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), MetricVersion: 2, + HexKey: "0000000F", + Log: testutil.Logger{}, } // overwriting exec commands with mock commands execCommand = fakeExecCommandV2 var acc testutil.Accumulator - err := acc.GatherError(i.Gather) + require.NoError(t, i.Init()) + require.NoError(t, acc.GatherError(i.Gather)) - require.NoError(t, err) - - conn := NewConnection(i.Servers[0], i.Privilege) - assert.Equal(t, "USERID", conn.Username) - assert.Equal(t, "lan", conn.Interface) + conn := NewConnection(i.Servers[0], i.Privilege, i.HexKey) + require.EqualValues(t, "USERID", conn.Username) + require.EqualValues(t, "lan", conn.Interface) + require.EqualValues(t, "0000000F", conn.HexKey) var testsWithServer = []struct { fields map[string]interface{} @@ -426,12 +435,13 @@ func TestGatherV2(t *testing.T) { i = &Ipmi{ Path: "ipmitool", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), MetricVersion: 2, + Log: testutil.Logger{}, } - err = acc.GatherError(i.Gather) - require.NoError(t, err) + require.NoError(t, i.Init()) + require.NoError(t, acc.GatherError(i.Gather)) var testsWithoutServer = []struct { fields map[string]interface{} @@ -543,7 +553,7 @@ func fakeExecCommandV2(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcessV2 -- chrony tracking // it returns below mockData. 
-func TestHelperProcessV2(t *testing.T) { +func TestHelperProcessV2(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -562,15 +572,19 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd := args[3] + // Ignore the returned errors for the mocked interface as tests will fail anyway if cmd == "ipmitool" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) - } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -605,10 +619,14 @@ Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected v2Data, } + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for i := range tests { t.Logf("Checking v%d data...", i+1) - extractFieldsFromRegex(re_v1_parse_line, tests[i]) - extractFieldsFromRegex(re_v2_parse_line, tests[i]) + ipmi.extractFieldsFromRegex(reV1ParseLine, tests[i]) + ipmi.extractFieldsFromRegex(reV2ParseLine, tests[i]) } } @@ -645,11 +663,16 @@ func Test_parseV1(t *testing.T) { wantErr: false, }, } + + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - if err := parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + if err := ipmi.parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV1() error = %v, wantErr %v", err, tt.wantErr) } @@ -738,13 +761,66 @@ func Test_parseV2(t *testing.T) { wantErr: false, }, } + + ipmi := &Ipmi{ + Log: testutil.Logger{}, + } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - if err := parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + if err := ipmi.parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { t.Errorf("parseV2() error = %v, wantErr %v", err, tt.wantErr) } testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } + +func TestSanitizeIPMICmd(t *testing.T) { + tests := []struct { + name string + args []string + expected []string + }{ + { + name: "default args", + args: []string{ + "-H", "localhost", + "-U", "username", + "-P", "password", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-P", "REDACTED", + "-I", "lan", + }, + }, + { + name: "no password", + args: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + expected: []string{ + "-H", "localhost", + "-U", "username", + "-I", "lan", + }, + }, + { + name: "empty args", + args: []string{}, + expected: []string{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var sanitizedArgs []string = sanitizeIPMICmd(tt.args) + require.Equal(t, tt.expected, sanitizedArgs) + }) + } +} diff --git a/plugins/inputs/ipmi_sensor/sample.conf b/plugins/inputs/ipmi_sensor/sample.conf new file mode 100644 index 0000000000000..3cabeb204d9f9 --- /dev/null +++ b/plugins/inputs/ipmi_sensor/sample.conf @@ -0,0 +1,43 @@ +# Read metrics from the bare metal servers via IPMI +[[inputs.ipmi_sensor]] + ## optionally specify the path to the ipmitool executable + # path = 
"/usr/bin/ipmitool" + ## + ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. + ## Sudo must be configured to allow the telegraf user to run ipmitool + ## without a password. + # use_sudo = false + ## + ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR + # privilege = "ADMINISTRATOR" + ## + ## optionally specify one or more servers via a url matching + ## [username[:password]@][protocol[(address)]] + ## e.g. + ## root:passwd@lan(127.0.0.1) + ## + ## if no servers are specified, local machine sensor stats will be queried + ## + # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] + + ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid + ## gaps or overlap in pulled data + interval = "30s" + + ## Timeout for the ipmitool command to complete. Default is 20 seconds. + timeout = "20s" + + ## Schema Version: (Optional, defaults to version 1) + metric_version = 2 + + ## Optionally provide the hex key for the IMPI connection. + # hex_key = "" + + ## If ipmitool should use a cache + ## for me ipmitool runs about 2 to 10 times faster with cache enabled on HP G10 servers (when using ubuntu20.04) + ## the cache file may not work well for you if some sensors come up late + # use_cache = false + + ## Path to the ipmitools cache file (defaults to OS temp dir) + ## The provided path must exist and must be writable + # cache_path = "" diff --git a/plugins/inputs/ipset/README.md b/plugins/inputs/ipset/README.md index f4477254f117d..4fba5a292522a 100644 --- a/plugins/inputs/ipset/README.md +++ b/plugins/inputs/ipset/README.md @@ -5,33 +5,37 @@ It uses the output of the command "ipset save". Ipsets created without the "counters" option are ignored. Results are tagged with: + - ipset name - ipset entry There are 3 ways to grant telegraf the right to run ipset: -* Run as root (strongly discouraged) -* Use sudo -* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW capabilities. -### Using systemd capabilities +- Run as root (strongly discouraged) +- Use sudo +- Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW capabilities. + +## Using systemd capabilities You may run `systemctl edit telegraf.service` and add the following: -``` +```text [Service] CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN ``` -### Using sudo +## Using sudo You will need the following in your telegraf config: + ```toml [[inputs.ipset]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -40,9 +44,10 @@ telegraf ALL=(root) NOPASSWD: IPSETSAVE Defaults!IPSETSAVE !logfile, !syslog, !pam_session ``` -### Configuration +## Configuration -```toml +```toml @sample.conf +# Gather packets and bytes counters from Linux ipsets [[inputs.ipset]] ## By default, we only show sets which have already matched at least 1 packet. ## set include_unmatched_sets = true to gather them all. 
@@ -56,15 +61,15 @@ Defaults!IPSETSAVE !logfile, !syslog, !pam_session ``` -### Example Output +## Example Output -``` +```sh $ sudo ipset save create myset hash:net family inet hashsize 1024 maxelem 65536 counters comment add myset 10.69.152.1 packets 8 bytes 672 comment "machine A" ``` -``` +```sh $ telegraf --config telegraf.conf --input-filter ipset --test --debug * Plugin: inputs.ipset, Collection 1 > ipset,rule=10.69.152.1,host=trashme,set=myset bytes_total=8i,packets_total=672i 1507615028000000000 diff --git a/plugins/inputs/ipset/ipset.go b/plugins/inputs/ipset/ipset.go index c459ebf4cfe26..bbcbf1f4e92b3 100644 --- a/plugins/inputs/ipset/ipset.go +++ b/plugins/inputs/ipset/ipset.go @@ -1,8 +1,10 @@ +//go:generate ../../../tools/readme_config_includer/generator package ipset import ( "bufio" "bytes" + _ "embed" "fmt" "os/exec" "strconv" @@ -10,44 +12,44 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Ipsets is a telegraf plugin to gather packets and bytes counters from ipset type Ipset struct { IncludeUnmatchedSets bool UseSudo bool - Timeout internal.Duration + Timeout config.Duration lister setLister } -type setLister func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +type setLister func(Timeout config.Duration, UseSudo bool) (*bytes.Buffer, error) const measurement = "ipset" -var defaultTimeout = internal.Duration{Duration: time.Second} +var defaultTimeout = config.Duration(time.Second) -// Description returns a short description of the plugin -func (ipset *Ipset) Description() string { - return "Gather packets and bytes counters from Linux ipsets" +func (*Ipset) SampleConfig() string { + return sampleConfig } -// SampleConfig returns sample configuration options. -func (ipset *Ipset) SampleConfig() string { - return ` - ## By default, we only show sets which have already matched at least 1 packet. - ## set include_unmatched_sets = true to gather them all. 
- include_unmatched_sets = false - ## Adjust your sudo settings appropriately if using this option ("sudo ipset save") - use_sudo = false - ## The default timeout of 1s for ipset execution can be overridden here: - # timeout = "1s" -` +func (i *Ipset) Init() error { + _, err := exec.LookPath("ipset") + if err != nil { + return err + } + + return nil } -func (ips *Ipset) Gather(acc telegraf.Accumulator) error { - out, e := ips.lister(ips.Timeout, ips.UseSudo) +func (i *Ipset) Gather(acc telegraf.Accumulator) error { + out, e := i.lister(i.Timeout, i.UseSudo) if e != nil { acc.AddError(e) } @@ -64,25 +66,25 @@ func (ips *Ipset) Gather(acc telegraf.Accumulator) error { data := strings.Fields(line) if len(data) < 7 { - acc.AddError(fmt.Errorf("Error parsing line (expected at least 7 fields): %s", line)) + acc.AddError(fmt.Errorf("error parsing line (expected at least 7 fields): %s", line)) continue } - if data[0] == "add" && (data[4] != "0" || ips.IncludeUnmatchedSets) { + if data[0] == "add" && (data[4] != "0" || i.IncludeUnmatchedSets) { tags := map[string]string{ "set": data[1], "rule": data[2], } - packets_total, err := strconv.ParseUint(data[4], 10, 64) + packetsTotal, err := strconv.ParseUint(data[4], 10, 64) if err != nil { acc.AddError(err) } - bytes_total, err := strconv.ParseUint(data[6], 10, 64) + bytesTotal, err := strconv.ParseUint(data[6], 10, 64) if err != nil { acc.AddError(err) } fields := map[string]interface{}{ - "packets_total": packets_total, - "bytes_total": bytes_total, + "packets_total": packetsTotal, + "bytes_total": bytesTotal, } acc.AddCounter(measurement, fields, tags) } @@ -90,7 +92,7 @@ func (ips *Ipset) Gather(acc telegraf.Accumulator) error { return nil } -func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func setList(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { // Is ipset installed ? 
ipsetPath, err := exec.LookPath("ipset")
 	if err != nil {
@@ -98,7 +100,7 @@ func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) {
 	}
 	var args []string
 	cmdName := ipsetPath
-	if UseSudo {
+	if useSudo {
 		cmdName = "sudo"
 		args = append(args, ipsetPath)
 	}
@@ -108,7 +110,7 @@ func setList(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) {
 	var out bytes.Buffer
 	cmd.Stdout = &out
-	err = internal.RunTimeout(cmd, Timeout.Duration)
+	err = internal.RunTimeout(cmd, time.Duration(timeout))
 	if err != nil {
 		return &out, fmt.Errorf("error running ipset save: %s", err)
 	}
diff --git a/plugins/inputs/ipset/ipset_test.go b/plugins/inputs/ipset/ipset_test.go
index 31a9f3cfc113d..f205728c0dbad 100644
--- a/plugins/inputs/ipset/ipset_test.go
+++ b/plugins/inputs/ipset/ipset_test.go
@@ -7,7 +7,7 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/config"
 	"github.com/influxdata/telegraf/testutil"
 )
 
@@ -40,7 +40,7 @@ func TestIpset(t *testing.T) {
 			value: `create hash:net family inet hashsize 1024 maxelem 65536 counters
 				add myset 4.5.6.7 packets 123 bytes `,
-			err: fmt.Errorf("Error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"),
+			err: fmt.Errorf("error parsing line (expected at least 7 fields): \t\t\t\tadd myset 4.5.6.7 packets 123 bytes"),
 		},
 		{
 			name: "Non-empty sets, counters, no comment",
@@ -80,7 +80,7 @@ func TestIpset(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			i++
 			ips := &Ipset{
-				lister: func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) {
+				lister: func(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) {
 					return bytes.NewBufferString(tt.value), nil
 				},
 			}
@@ -123,7 +123,7 @@ func TestIpset(t *testing.T) {
 func TestIpset_Gather_listerError(t *testing.T) {
 	errFoo := errors.New("error foobar")
 	ips := &Ipset{
-		lister: func(Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) {
+		lister: func(timeout config.Duration, useSudo bool) (*bytes.Buffer, error) {
 			return new(bytes.Buffer), errFoo
 		},
 	}
diff --git a/plugins/inputs/ipset/sample.conf b/plugins/inputs/ipset/sample.conf
new file mode 100644
index 0000000000000..a873eb79227f1
--- /dev/null
+++ b/plugins/inputs/ipset/sample.conf
@@ -0,0 +1,12 @@
+# Gather packets and bytes counters from Linux ipsets
+[[inputs.ipset]]
+  ## By default, we only show sets which have already matched at least 1 packet.
+  ## set include_unmatched_sets = true to gather them all.
+  include_unmatched_sets = false
+  ## Adjust your sudo settings appropriately if using this option ("sudo ipset save")
+  ## You can avoid using sudo or root by setting appropriate privileges for
+  ## the telegraf.service systemd service.
+  use_sudo = false
+  ## The default timeout of 1s for ipset execution can be overridden here:
+  # timeout = "1s"
+
diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md
index db730c88178ff..153162e586dfe 100644
--- a/plugins/inputs/iptables/README.md
+++ b/plugins/inputs/iptables/README.md
@@ -1,24 +1,33 @@
 # Iptables Input Plugin
 
-The iptables plugin gathers packets and bytes counters for rules within a set of table and chain from the Linux's iptables firewall.
+The iptables plugin gathers packets and bytes counters for rules within a set of
+table and chain from the Linux iptables firewall.
 
-Rules are identified through associated comment. **Rules without comment are ignored**.
-Indeed we need a unique ID for the rule and the rule number is not a constant: it may vary when rules are inserted/deleted at start-up or by automatic tools (interactive firewalls, fail2ban, ...).
-Also when the rule set is becoming big (hundreds of lines) most people are interested in monitoring only a small part of the rule set.
+Rules are identified through their associated comment. **Rules without a comment
+are ignored**. Indeed we need a unique ID for the rule and the rule number is
+not a constant: it may vary when rules are inserted/deleted at start-up or by
+automatic tools (interactive firewalls, fail2ban, ...). Also when the rule set
+is becoming big (hundreds of lines) most people are interested in monitoring
+only a small part of the rule set.
 
-Before using this plugin **you must ensure that the rules you want to monitor are named with a unique comment**. Comments are added using the `-m comment --comment "my comment"` iptables options.
+Before using this plugin **you must ensure that the rules you want to monitor
+are named with a unique comment**. Comments are added using the `-m comment
+--comment "my comment"` iptables options.
 
-The iptables command requires CAP_NET_ADMIN and CAP_NET_RAW capabilities. You have several options to grant telegraf to run iptables:
+The iptables command requires CAP_NET_ADMIN and CAP_NET_RAW capabilities. You
+have several options to allow telegraf to run iptables:
 
 * Run telegraf as root. This is strongly discouraged.
-* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW. This is the simplest and recommended option.
-* Configure sudo to grant telegraf to run iptables. This is the most restrictive option, but require sudo setup.
+* Configure systemd to run telegraf with CAP_NET_ADMIN and CAP_NET_RAW. This is
+  the simplest and recommended option.
+* Configure sudo to allow telegraf to run iptables. This is the most restrictive
+  option, but requires sudo setup.
 
-### Using systemd capabilities
+## Using systemd capabilities
 
 You may run `systemctl edit telegraf.service` and add the following:
 
-```
+```shell
 [Service]
 CapabilityBoundingSet=CAP_NET_RAW CAP_NET_ADMIN
 AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN
@@ -26,9 +35,10 @@ AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN
 Since telegraf will fork a process to run iptables, `AmbientCapabilities` is
 required to transmit the capabilities bounding set to the forked process.
 
-### Using sudo
+## Using sudo
 
 You will need the following in your telegraf config:
+
 ```toml
 [[inputs.iptables]]
   use_sudo = true
@@ -44,44 +54,55 @@ telegraf ALL=(root) NOPASSWD: IPTABLESSHOW
 Defaults!IPTABLESSHOW !logfile, !syslog, !pam_session
 ```
 
-### Using IPtables lock feature
+## Using IPtables lock feature
 
-Defining multiple instances of this plugin in telegraf.conf can lead to concurrent IPtables access resulting in "ERROR in input [inputs.iptables]: exit status 4" messages in telegraf.log and missing metrics. Setting 'use_lock = true' in the plugin configuration will run IPtables with the '-w' switch, allowing a lock usage to prevent this error.
+Defining multiple instances of this plugin in telegraf.conf can lead to
+concurrent IPtables access resulting in "ERROR in input [inputs.iptables]: exit
+status 4" messages in telegraf.log and missing metrics. Setting 'use_lock =
+true' in the plugin configuration will run IPtables with the '-w' switch,
+making iptables wait for the lock and preventing this error.
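To make the comment requirement concrete, here is a minimal, self-contained Go
sketch (illustrative only; the regular expression and names are not the
plugin's actual parser) showing how a counter line carrying a `/* ... */`
comment can be reduced to the `ruleid` tag and the packet/byte counters this
plugin reports:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Match the pkts and bytes columns of `iptables -nvL` output plus the
// trailing /* comment */ that serves as the stable rule identifier.
var ruleRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*/\*\s*(.*?)\s*\*/`)

func main() {
	line := `42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */`

	m := ruleRe.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("rule without a comment: skipped")
		return
	}

	pkts, _ := strconv.ParseUint(m[1], 10, 64)
	bytes, _ := strconv.ParseUint(m[2], 10, 64)
	// Prints: ruleid="httpd" pkts=42 bytes=2048
	fmt.Printf("ruleid=%q pkts=%d bytes=%d\n", m[3], pkts, bytes)
}
```

A rule without a comment offers nothing stable to tag by (rule numbers shift as
rules are inserted or deleted), which is why such rules are skipped.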
-### Configuration:
+## Configuration
 
-```toml
- # use sudo to run iptables
+```toml @sample.conf
+# Gather packets and bytes throughput from iptables
+[[inputs.iptables]]
+  ## iptables requires root access on most systems.
+  ## Setting 'use_sudo' to true will make use of sudo to run iptables.
+  ## Users must configure sudo to allow the telegraf user to run iptables with no password.
+  ## iptables can be restricted to only the list command "iptables -nvL".
   use_sudo = false
- # run iptables with the lock option
+  ## Setting 'use_lock' to true runs iptables with the "-w" option.
+  ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvL")
   use_lock = false
- # Define an alternate executable, such as "ip6tables". Default is "iptables".
+  ## Define an alternate executable, such as "ip6tables". Default is "iptables".
   # binary = "ip6tables"
- # defines the table to monitor:
+  ## defines the table to monitor:
   table = "filter"
- # defines the chains to monitor:
+  ## defines the chains to monitor.
+  ## NOTE: iptables rules without a comment will not be monitored.
+  ## Read the plugin documentation for more information.
   chains = [ "INPUT" ]
 ```
 
-### Measurements & Fields:
+## Measurements & Fields
 
+* iptables
+  * pkts (integer, count)
+  * bytes (integer, bytes)
 
-- iptables
-  - pkts (integer, count)
-  - bytes (integer, bytes)
+## Tags
 
-### Tags:
-
-- All measurements have the following tags:
-  - table
-  - chain
-  - ruleid
+* All measurements have the following tags:
+  * table
+  * chain
+  * ruleid
 
 The `ruleid` is the comment associated to the rule.
 
-### Example Output:
+## Example Output
 
-```
+```text
 $ iptables -nvL INPUT
 Chain INPUT (policy DROP 0 packets, 0 bytes)
 pkts bytes target prot opt in out source destination
@@ -89,7 +110,7 @@ pkts bytes target prot opt in out source destination
 42 2048 ACCEPT tcp -- * * 192.168.0.0/24 0.0.0.0/0 tcp dpt:80 /* httpd */
 ```
 
-```
+```shell
 $ ./telegraf --config telegraf.conf --input-filter iptables --test
 iptables,table=filter,chain=INPUT,ruleid=ssh pkts=100i,bytes=1024i 1453831884664956455
 iptables,table=filter,chain=INPUT,ruleid=httpd pkts=42i,bytes=2048i 1453831884664956455
diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go
index e56f8b31d5725..eb29c510709cb 100644
--- a/plugins/inputs/iptables/iptables.go
+++ b/plugins/inputs/iptables/iptables.go
@@ -1,8 +1,11 @@
+//go:generate ../../../tools/readme_config_includer/generator
+//go:build linux
 // +build linux
 
 package iptables
 
 import (
+	_ "embed"
 	"errors"
 	"os/exec"
 	"regexp"
@@ -13,6 +16,10 @@ import (
 	"github.com/influxdata/telegraf/plugins/inputs"
 )
 
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
 // Iptables is a telegraf plugin to gather packets and bytes throughput from Linux's iptables packet filter.
 type Iptables struct {
 	UseSudo bool
@@ -23,31 +30,8 @@ type Iptables struct {
 	lister chainLister
 }
 
-// Description returns a short description of the plugin.
-func (ipt *Iptables) Description() string {
-	return "Gather packets and bytes throughput from iptables"
-}
-
-// SampleConfig returns sample configuration options.
-func (ipt *Iptables) SampleConfig() string {
-	return `
-  ## iptables require root access on most systems.
-  ## Setting 'use_sudo' to true will make use of sudo to run iptables.
-  ## Users must configure sudo to allow telegraf user to run iptables with no password.
-  ## iptables can be restricted to only list command "iptables -nvL".
-  use_sudo = false
-  ## Setting 'use_lock' to true runs iptables with the "-w" option.
-  ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl")
-  use_lock = false
-  ## Define an alternate executable, such as "ip6tables". Default is "iptables".
-  # binary = "ip6tables"
-  ## defines the table to monitor:
-  table = "filter"
-  ## defines the chains to monitor.
-  ## NOTE: iptables rules without a comment will not be monitored.
-  ## Read the plugin documentation for more information.
-  chains = [ "INPUT" ]
-`
+func (*Iptables) SampleConfig() string {
+	return sampleConfig
 }
 
 // Gather gathers iptables packets and bytes throughput from the configured tables and chains.
@@ -149,7 +133,7 @@ type chainLister func(table, chain string) (string, error)
 
 func init() {
 	inputs.Add("iptables", func() telegraf.Input {
-		ipt := new(Iptables)
+		ipt := &Iptables{}
 		ipt.lister = ipt.chainList
 		return ipt
 	})
diff --git a/plugins/inputs/iptables/iptables_nocompile.go b/plugins/inputs/iptables/iptables_nocompile.go
index f71b4208e62fb..17c0eaced90e5 100644
--- a/plugins/inputs/iptables/iptables_nocompile.go
+++ b/plugins/inputs/iptables/iptables_nocompile.go
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package iptables
diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go
index 681d8bbfc130e..4c62ef6d6a86a 100644
--- a/plugins/inputs/iptables/iptables_test.go
+++ b/plugins/inputs/iptables/iptables_test.go
@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 package iptables
diff --git a/plugins/inputs/iptables/sample.conf b/plugins/inputs/iptables/sample.conf
new file mode 100644
index 0000000000000..3e039c669a6cb
--- /dev/null
+++ b/plugins/inputs/iptables/sample.conf
@@ -0,0 +1,18 @@
+# Gather packets and bytes throughput from iptables
+[[inputs.iptables]]
+  ## iptables requires root access on most systems.
+  ## Setting 'use_sudo' to true will make use of sudo to run iptables.
+  ## Users must configure sudo to allow the telegraf user to run iptables with no password.
+  ## iptables can be restricted to only the list command "iptables -nvL".
+  use_sudo = false
+  ## Setting 'use_lock' to true runs iptables with the "-w" option.
+  ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvL")
+  use_lock = false
+  ## Define an alternate executable, such as "ip6tables". Default is "iptables".
+  # binary = "ip6tables"
+  ## defines the table to monitor:
+  table = "filter"
+  ## defines the chains to monitor.
+  ## NOTE: iptables rules without a comment will not be monitored.
+  ## Read the plugin documentation for more information.
+  chains = [ "INPUT" ]
diff --git a/plugins/inputs/ipvs/README.md b/plugins/inputs/ipvs/README.md
index 75e5b51037085..e5779ac907711 100644
--- a/plugins/inputs/ipvs/README.md
+++ b/plugins/inputs/ipvs/README.md
@@ -5,14 +5,15 @@ metrics about ipvs virtual and real servers.
 
 **Supported Platforms:** Linux
 
-### Configuration
+## Configuration
 
-```toml
+```toml @sample.conf
+# Collect virtual and real server stats from Linux IPVS
 [[inputs.ipvs]]
   # no configuration
 ```
 
-#### Permissions
+### Permissions
 
 Assuming you installed the telegraf package via one of the published packages,
 the process will be running as the `telegraf` user. However, in order for this
@@ -20,7 +21,7 @@ plugin to communicate over netlink sockets it needs the telegraf process to be
 running as `root` (or some user with `CAP_NET_ADMIN` and `CAP_NET_RAW`). Be sure
 to ensure these permissions before running telegraf with this plugin included.
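As a quick sanity check, the following standalone Go snippet (illustrative, not
part of the plugin) reads the effective capability mask from
`/proc/self/status` and reports whether the current process holds
`CAP_NET_ADMIN` (bit 12) and `CAP_NET_RAW` (bit 13), the two capabilities
discussed above:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// Linux only: /proc/self/status carries the capability masks.
	f, err := os.Open("/proc/self/status")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "CapEff:") {
			continue
		}
		// The value is a hexadecimal bitmask of effective capabilities.
		hexMask := strings.TrimSpace(strings.TrimPrefix(line, "CapEff:"))
		mask, err := strconv.ParseUint(hexMask, 16, 64)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		const capNetAdmin, capNetRaw = 12, 13
		fmt.Printf("CAP_NET_ADMIN=%v CAP_NET_RAW=%v\n",
			mask&(1<<capNetAdmin) != 0, mask&(1<<capNetRaw) != 0)
		return
	}
}
```

Running it in the same context as the telegraf service shows whether the
capability configuration actually took effect.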
-### Metrics +## Metrics Server will contain tags identifying how it was configured, using one of `address` + `port` + `protocol` *OR* `fwmark`. This is how one would normally @@ -66,17 +67,20 @@ configure a virtual server using `ipvsadm`. - pps_out - cps -### Example Output +## Example Output Virtual server is configured using `fwmark` and backed by 2 real servers: -``` + +```shell ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=rr bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,connections=0i,pkts_in=0i,pkts_out=0i 1541019340000000000 ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pkts_in=0i,bytes_out=0i,pps_out=0i,connections=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,cps=0i 1541019340000000000 ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pps_in=0i,pps_out=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,cps=0i 1541019340000000000 ``` -Virtual server is configured using `proto+addr+port` and backed by 2 real servers: -``` +Virtual server is configured using `proto+addr+port` and backed by 2 real +servers: + +```shell ipvs_virtual_server,address_family=inet,fwmark=47,netmask=32,sched=rr cps=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i 1541019340000000000 ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_fwmark=47 inactive_connections=0i,pkts_out=0i,bytes_out=0i,pps_in=0i,cps=0i,active_connections=0i,pkts_in=0i,bytes_in=0i,pps_out=0i,connections=0i 1541019340000000000 ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_fwmark=47 cps=0i,active_connections=0i,inactive_connections=0i,connections=0i,pkts_in=0i,bytes_out=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,pps_out=0i 1541019340000000000 diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go index 5e3ae0d5637b0..9d737dc46284c 100644 --- a/plugins/inputs/ipvs/ipvs.go +++ b/plugins/inputs/ipvs/ipvs.go @@ -1,33 +1,35 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux // +build linux package ipvs import ( + _ "embed" "fmt" "math/bits" "strconv" "syscall" - "github.com/docker/libnetwork/ipvs" + "github.com/moby/ipvs" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/logrus" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf
+var sampleConfig string
+
 // IPVS holds the state for this input plugin
 type IPVS struct {
 	handle *ipvs.Handle
 	Log    telegraf.Logger
 }
 
-// Description returns a description string
-func (i *IPVS) Description() string {
-	return "Collect virtual and real server stats from Linux IPVS"
-}
-
-// SampleConfig returns a sample configuration for this input plugin
-func (i *IPVS) SampleConfig() string {
-	return ``
+func (*IPVS) SampleConfig() string {
+	return sampleConfig
 }
 
 // Gather gathers the stats
diff --git a/plugins/inputs/ipvs/ipvs_notlinux.go b/plugins/inputs/ipvs/ipvs_notlinux.go
index bbbb1240b62a8..b46035f2c2b3c 100644
--- a/plugins/inputs/ipvs/ipvs_notlinux.go
+++ b/plugins/inputs/ipvs/ipvs_notlinux.go
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package ipvs
diff --git a/plugins/inputs/ipvs/sample.conf b/plugins/inputs/ipvs/sample.conf
new file mode 100644
index 0000000000000..003be06325ba3
--- /dev/null
+++ b/plugins/inputs/ipvs/sample.conf
@@ -0,0 +1,3 @@
+# Collect virtual and real server stats from Linux IPVS
+[[inputs.ipvs]]
+  # no configuration
diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md
index f4e9f94ac22a7..c9aed567cbea6 100644
--- a/plugins/inputs/jenkins/README.md
+++ b/plugins/inputs/jenkins/README.md
@@ -1,12 +1,15 @@
 # Jenkins Input Plugin
 
-The jenkins plugin gathers information about the nodes and jobs running in a jenkins instance.
+The jenkins plugin gathers information about the nodes and jobs running in a
+jenkins instance.
 
-This plugin does not require a plugin on jenkins and it makes use of Jenkins API to retrieve all the information needed.
+This plugin does not require any plugin to be installed on Jenkins; it uses the
+Jenkins API to retrieve all the information needed.
 
-### Configuration:
+## Configuration
 
-```toml
+```toml @sample.conf
+# Read jobs and cluster metrics from Jenkins instances
 [[inputs.jenkins]]
   ## The Jenkins URL in the format "schema://host:port"
   url = "http://my-jenkins-instance:8080"
@@ -39,20 +42,25 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API
   ## empty will use default value 10
   # max_subjob_per_layer = 10
 
-  ## Jobs to exclude from gathering
-  # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"]
+  ## Jobs to include or exclude from gathering
+  ## When using both lists, job_exclude has priority.
+  ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"]
+  # job_include = [ "*" ]
+  # job_exclude = [ ]
 
-  ## Nodes to exclude from gathering
-  # node_exclude = [ "node1", "node2" ]
+  ## Nodes to include or exclude from gathering
+  ## When using both lists, node_exclude has priority.
+ # node_include = [ "*" ] + # node_exclude = [ ] ## Worker pool for jenkins plugin only ## Empty this field will use default value 5 # max_connections = 5 ``` -### Metrics: +## Metrics -- jenkins_node +- jenkins - tags: - source - port @@ -60,7 +68,7 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - busy_executors - total_executors -+ jenkins_node +- jenkins_node - tags: - arch - disk_path @@ -88,25 +96,25 @@ This plugin does not require a plugin on jenkins and it makes use of Jenkins API - port - fields: - duration (ms) + - number - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED) -### Sample Queries: +## Sample Queries -``` +```sql SELECT mean("memory_available") AS "mean_memory_available", mean("memory_total") AS "mean_memory_total", mean("temp_available") AS "mean_temp_available" FROM "jenkins_node" WHERE time > now() - 15m GROUP BY time(:interval:) FILL(null) ``` -``` +```sql SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() - 24h GROUP BY time(:interval:) FILL(null) ``` -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter jenkins --test jenkins,host=myhost,port=80,source=my-jenkins-instance busy_executors=4i,total_executors=8i 1580418261000000000 jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,source=my-jenkins-instance,port=8080 swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744,num_executors=2i 1516031535000000000 jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2831i,result_code=0i 1516026630000000000 jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2285i,result_code=0i 1516027230000000000 ``` - diff --git a/plugins/inputs/jenkins/client.go b/plugins/inputs/jenkins/client.go index 6c0a125aaaf56..00c9bb54251f4 100644 --- a/plugins/inputs/jenkins/client.go +++ b/plugins/inputs/jenkins/client.go @@ -47,11 +47,9 @@ func (c *client) init() error { break } } + // first api fetch - if err := c.doGet(context.Background(), jobPath, new(jobResponse)); err != nil { - return err - } - return nil + return c.doGet(context.Background(), jobPath, new(jobResponse)) } func (c *client) doGet(ctx context.Context, url string, v interface{}) error { @@ -71,6 +69,8 @@ func (c *client) doGet(ctx context.Context, url string, v interface{}) error { return err } defer func() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive resp.Body.Close() <-c.semaphore }() @@ -97,10 +97,8 @@ func (c *client) doGet(ctx context.Context, url string, v interface{}) error { Title: resp.Status, } } - if err = json.NewDecoder(resp.Body).Decode(v); err != nil { - return err - } - return nil + + return json.NewDecoder(resp.Body).Decode(v) } type APIError struct { diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index a2d3e3500bc30..b303595850b5a 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package jenkins import ( "context" + _ "embed" "fmt" "net/http" "net/url" @@ -11,12 +13,16 @@ import ( "time" "github.com/influxdata/telegraf" + 
"github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Jenkins plugin gathers information about the nodes and jobs running in a jenkins instance. type Jenkins struct { URL string @@ -25,69 +31,28 @@ type Jenkins struct { Source string Port string // HTTP Timeout specified as a string - 3s, 1m, 1h - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig client *client Log telegraf.Logger - MaxConnections int `toml:"max_connections"` - MaxBuildAge internal.Duration `toml:"max_build_age"` - MaxSubJobDepth int `toml:"max_subjob_depth"` - MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` - JobExclude []string `toml:"job_exclude"` + MaxConnections int `toml:"max_connections"` + MaxBuildAge config.Duration `toml:"max_build_age"` + MaxSubJobDepth int `toml:"max_subjob_depth"` + MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` + JobExclude []string `toml:"job_exclude"` + JobInclude []string `toml:"job_include"` jobFilter filter.Filter NodeExclude []string `toml:"node_exclude"` + NodeInclude []string `toml:"node_include"` nodeFilter filter.Filter semaphore chan struct{} } -const sampleConfig = ` - ## The Jenkins URL in the format "schema://host:port" - url = "http://my-jenkins-instance:8080" - # username = "admin" - # password = "admin" - - ## Set response_timeout - response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use SSL but skip chain & host verification - # insecure_skip_verify = false - - ## Optional Max Job Build Age filter - ## Default 1 hour, ignore builds older than max_build_age - # max_build_age = "1h" - - ## Optional Sub Job Depth filter - ## Jenkins can have unlimited layer of sub jobs - ## This config will limit the layers of pulling, default value 0 means - ## unlimited pulling until no more sub jobs - # max_subjob_depth = 0 - - ## Optional Sub Job Per Layer - ## In workflow-multibranch-plugin, each branch will be created as a sub job. 
- ## This config will limit to call only the lasted branches in each layer, - ## empty will use default value 10 - # max_subjob_per_layer = 10 - - ## Jobs to exclude from gathering - # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] - - ## Nodes to exclude from gathering - # node_exclude = [ "node1", "node2" ] - - ## Worker pool for jenkins plugin only - ## Empty this field will use default value 5 - # max_connections = 5 -` - // measurement const ( measurementJenkins = "jenkins" @@ -95,16 +60,10 @@ const ( measurementJob = "jenkins_job" ) -// SampleConfig implements telegraf.Input interface -func (j *Jenkins) SampleConfig() string { +func (*Jenkins) SampleConfig() string { return sampleConfig } -// Description implements telegraf.Input interface -func (j *Jenkins) Description() string { - return "Read jobs and cluster metrics from Jenkins instances" -} - // Gather implements telegraf.Input interface func (j *Jenkins) Gather(acc telegraf.Accumulator) error { if j.client == nil { @@ -133,7 +92,7 @@ func (j *Jenkins) newHTTPClient() (*http.Client, error) { TLSClientConfig: tlsCfg, MaxIdleConns: j.MaxConnections, }, - Timeout: j.ResponseTimeout.Duration, + Timeout: time.Duration(j.ResponseTimeout), }, nil } @@ -157,16 +116,14 @@ func (j *Jenkins) initialize(client *http.Client) error { } j.Source = u.Hostname() - // init job filter - j.jobFilter, err = filter.Compile(j.JobExclude) + // init filters + j.jobFilter, err = filter.NewIncludeExcludeFilter(j.JobInclude, j.JobExclude) if err != nil { - return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) + return fmt.Errorf("error compiling job filters[%s]: %v", j.URL, err) } - - // init node filter - j.nodeFilter, err = filter.Compile(j.NodeExclude) + j.nodeFilter, err = filter.NewIncludeExcludeFilter(j.NodeInclude, j.NodeExclude) if err != nil { - return fmt.Errorf("error compile node filters[%s]: %v", j.URL, err) + return fmt.Errorf("error compiling node filters[%s]: %v", j.URL, err) } // init tcp pool with default value @@ -187,15 +144,15 @@ func (j *Jenkins) initialize(client *http.Client) error { } func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { - tags := map[string]string{} if n.DisplayName == "" { return fmt.Errorf("error empty node name") } tags["node_name"] = n.DisplayName - // filter out excluded node_name - if j.nodeFilter != nil && j.nodeFilter.Match(tags["node_name"]) { + + // filter out excluded or not included node_name + if !j.nodeFilter.Match(tags["node_name"]) { return nil } @@ -239,7 +196,6 @@ func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { } func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) { - nodeResp, err := j.client.getAllNodes(context.Background()) if err != nil { acc.AddError(err) @@ -287,24 +243,13 @@ func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) { wg.Wait() } -// wrap the tcp request with doGet -// block tcp request if buffered channel is full -func (j *Jenkins) doGet(tcp func() error) error { - j.semaphore <- struct{}{} - if err := tcp(); err != nil { - <-j.semaphore - return err - } - <-j.semaphore - return nil -} - func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth { return nil } - // filter out excluded job. 
- if j.jobFilter != nil && j.jobFilter.Match(jr.hierarchyName()) { + + // filter out excluded or not included jobs + if !j.jobFilter.Match(jr.hierarchyName()) { return nil } @@ -351,7 +296,7 @@ func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { // stop if build is too old // Higher up in gatherJobs - cutoff := time.Now().Add(-1 * j.MaxBuildAge.Duration) + cutoff := time.Now().Add(-1 * time.Duration(j.MaxBuildAge)) // Here we just test if build.GetTimestamp().Before(cutoff) { @@ -419,12 +364,13 @@ type jobBuild struct { type buildResponse struct { Building bool `json:"building"` Duration int64 `json:"duration"` + Number int64 `json:"number"` Result string `json:"result"` Timestamp int64 `json:"timestamp"` } func (b *buildResponse) GetTimestamp() time.Time { - return time.Unix(0, int64(b.Timestamp)*int64(time.Millisecond)) + return time.Unix(0, b.Timestamp*int64(time.Millisecond)) } const ( @@ -473,6 +419,7 @@ func (j *Jenkins) gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.A fields := make(map[string]interface{}) fields["duration"] = b.Duration fields["result_code"] = mapResultCode(b.Result) + fields["number"] = b.Number acc.AddFields(measurementJob, fields, tags, b.GetTimestamp()) } @@ -497,7 +444,7 @@ func mapResultCode(s string) int { func init() { inputs.Add("jenkins", func() telegraf.Input { return &Jenkins{ - MaxBuildAge: internal.Duration{Duration: time.Duration(time.Hour)}, + MaxBuildAge: config.Duration(time.Hour), MaxConnections: 5, MaxSubJobPerLayer: 10, } diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index be899476d8595..e5f09ad66d1ca 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -44,13 +44,13 @@ func TestJobRequest(t *testing.T) { } for _, test := range tests { hierarchyName := test.input.hierarchyName() - URL := test.input.URL() + address := test.input.URL() if hierarchyName != test.hierarchyName { t.Errorf("Expected %s, got %s\n", test.hierarchyName, hierarchyName) } - if test.URL != "" && URL != test.URL { - t.Errorf("Expected %s, got %s\n", test.URL, URL) + if test.URL != "" && address != test.URL { + t.Errorf("Expected %s, got %s\n", test.URL, address) } } } @@ -97,6 +97,8 @@ func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) return } + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive w.Write(b) } @@ -154,7 +156,7 @@ func TestGatherNodeData(t *testing.T) { }, }, { - name: "filtered nodes", + name: "filtered nodes (excluded)", input: mockHandler{ responseMap: map[string]interface{}{ "/api/json": struct{}{}, @@ -182,6 +184,35 @@ func TestGatherNodeData(t *testing.T) { }, }, }, + { + name: "filtered nodes (included)", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, + Computers: []node{ + {DisplayName: "filtered-1"}, + {DisplayName: "filtered-1"}, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 4, + "total_executors": 8, + }, + }, + }, + }, + }, { name: "normal data collection", input: mockHandler{ @@ 
-302,8 +333,9 @@ func TestGatherNodeData(t *testing.T) { j := &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), NodeExclude: []string{"ignore-1", "ignore-2"}, + NodeInclude: []string{"master", "slave"}, } te := j.initialize(&http.Client{Transport: &http.Transport{}}) acc := new(testutil.Accumulator) @@ -358,7 +390,7 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: "http://a bad url", - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), }, wantErr: true, }, @@ -367,7 +399,8 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), + JobInclude: []string{"jobA", "jobB"}, JobExclude: []string{"job1", "job2"}, NodeExclude: []string{"node1", "node2"}, }, @@ -377,7 +410,7 @@ func TestInitialize(t *testing.T) { input: &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + ResponseTimeout: config.Duration(time.Microsecond), }, output: &Jenkins{ Log: testutil.Logger{}, @@ -396,7 +429,7 @@ func TestInitialize(t *testing.T) { } if test.output != nil { if test.input.client == nil { - t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + t.Fatalf("%s: failed %v, jenkins instance shouldn't be nil", test.name, te) } if test.input.MaxConnections != test.output.MaxConnections { t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections) @@ -530,12 +563,14 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "SUCCESS", Duration: 25558, + Number: 3, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/job2/1/api/json": &buildResponse{ Building: false, Result: "FAILURE", Duration: 1558, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -549,6 +584,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(25558), + "number": int64(3), "result_code": 0, }, }, @@ -559,6 +595,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(1558), + "number": int64(1), "result_code": 1, }, }, @@ -583,6 +620,7 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "SUCCESS", Duration: 25558, + Number: 3, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -596,6 +634,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(25558), + "number": int64(3), "result_code": 0, }, }, @@ -711,24 +750,28 @@ func TestGatherJobs(t *testing.T) { Building: false, Result: "FAILURE", Duration: 1558, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 76558, + Number: 4, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 91558, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, "/job/apps/job/k8s-cloud/job/PR%201/1/api/json": &buildResponse{ Building: false, Result: "SUCCESS", Duration: 
87832, + Number: 1, Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, }, }, @@ -743,6 +786,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(87832), + "number": int64(1), "result_code": 0, }, }, @@ -754,6 +798,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(91558), + "number": int64(1), "result_code": 0, }, }, @@ -765,6 +810,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(76558), + "number": int64(4), "result_code": 0, }, }, @@ -776,6 +822,7 @@ func TestGatherJobs(t *testing.T) { }, Fields: map[string]interface{}{ "duration": int64(1558), + "number": int64(1), "result_code": 1, }, }, @@ -790,8 +837,11 @@ func TestGatherJobs(t *testing.T) { j := &Jenkins{ Log: testutil.Logger{}, URL: ts.URL, - MaxBuildAge: internal.Duration{Duration: time.Hour}, - ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + MaxBuildAge: config.Duration(time.Hour), + ResponseTimeout: config.Duration(time.Microsecond), + JobInclude: []string{ + "*", + }, JobExclude: []string{ "ignore-1", "apps/ignore-all/*", @@ -828,7 +878,6 @@ func TestGatherJobs(t *testing.T) { } } } - } }) } diff --git a/plugins/inputs/jenkins/sample.conf b/plugins/inputs/jenkins/sample.conf new file mode 100644 index 0000000000000..1ea19ced45806 --- /dev/null +++ b/plugins/inputs/jenkins/sample.conf @@ -0,0 +1,47 @@ +# Read jobs and cluster metrics from Jenkins instances +[[inputs.jenkins]] + ## The Jenkins URL in the format "schema://host:port" + url = "http://my-jenkins-instance:8080" + # username = "admin" + # password = "admin" + + ## Set response_timeout + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Optional Max Job Build Age filter + ## Default 1 hour, ignore builds older than max_build_age + # max_build_age = "1h" + + ## Optional Sub Job Depth filter + ## Jenkins can have unlimited layers of sub jobs + ## This config will limit the layers of pulling; the default value 0 means + ## unlimited pulling until no more sub jobs + # max_subjob_depth = 0 + + ## Optional Sub Job Per Layer + ## In workflow-multibranch-plugin, each branch will be created as a sub job. + ## This config will limit the calls to only the latest branches in each layer, + ## empty will use the default value 10 + # max_subjob_per_layer = 10 + + ## Jobs to include or exclude from gathering + ## When using both lists, job_exclude has priority. + ## Wildcards are supported: [ "jobA/*", "jobB/subjob1/*"] + # job_include = [ "*" ] + # job_exclude = [ ] + + ## Nodes to include or exclude from gathering + ## When using both lists, node_exclude has priority. + # node_include = [ "*" ] + # node_exclude = [ ] + + ## Worker pool for jenkins plugin only + ## Leaving this field empty will use the default value 5 + # max_connections = 5 diff --git a/plugins/inputs/jolokia/README.md b/plugins/inputs/jolokia/README.md index 96ee48701b464..2d71870cc4bc7 100644 --- a/plugins/inputs/jolokia/README.md +++ b/plugins/inputs/jolokia/README.md @@ -1,10 +1,10 @@ # Jolokia Input Plugin -**Deprecated in version 1.5:** Please use the [jolokia2](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2) plugin.
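Throughout this patch, duration settings migrate from `internal.Duration` (a struct wrapping a `time.Duration` field) to `config.Duration` (a defined type over `time.Duration`), as the `MaxBuildAge` and `ResponseTimeout` changes above show. A small sketch of the pattern; the `pluginStub` type is hypothetical:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/config"
)

// pluginStub stands in for any plugin struct: a TOML value such as
// response_timeout = "5s" decodes straight into the field.
type pluginStub struct {
	ResponseTimeout config.Duration `toml:"response_timeout"`
}

func main() {
	p := pluginStub{ResponseTimeout: config.Duration(5 * time.Second)}

	// At the point of use, a plain type conversion replaces the old
	// .Duration field access.
	fmt.Println(time.Duration(p.ResponseTimeout)) // 5s
}
```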
+**Deprecated in version 1.5: Please use the [jolokia2][] plugin** -#### Configuration +## Configuration -```toml +```toml @sample.conf # Read JMX metrics through Jolokia [[inputs.jolokia]] ## This is the context root used to compose the jolokia url @@ -61,13 +61,15 @@ attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" ``` -#### Description +## Description The Jolokia plugin collects JVM metrics exposed as MBean's attributes through jolokia REST endpoint. All metrics are collected for each server configured. +See [official Jolokia website](https://jolokia.org/) for more information. -See: https://jolokia.org/ +## Measurements -# Measurements: Jolokia plugin produces one measure for each metric configured, adding Server's `jolokia_name`, `jolokia_host` and `jolokia_port` as tags. + +[jolokia2]: /plugins/inputs/jolokia2 diff --git a/plugins/inputs/jolokia/jolokia.go b/plugins/inputs/jolokia/jolokia.go index 317a47efbd115..625bfb55c7822 100644 --- a/plugins/inputs/jolokia/jolokia.go +++ b/plugins/inputs/jolokia/jolokia.go @@ -1,23 +1,28 @@ +//go:generate ../../../tools/readme_config_includer/generator package jolokia import ( "bytes" + _ "embed" "encoding/json" "fmt" - "io/ioutil" - "log" + "io" "net/http" "net/url" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Default http timeouts -var DefaultResponseHeaderTimeout = internal.Duration{Duration: 3 * time.Second} -var DefaultClientTimeout = internal.Duration{Duration: 4 * time.Second} +var DefaultResponseHeaderTimeout = config.Duration(3 * time.Second) +var DefaultClientTimeout = config.Duration(4 * time.Second) type Server struct { Name string @@ -55,83 +60,9 @@ type Jolokia struct { Proxy Server Delimiter string - ResponseHeaderTimeout internal.Duration `toml:"response_header_timeout"` - ClientTimeout internal.Duration `toml:"client_timeout"` -} - -const sampleConfig = ` - # DEPRECATED: the jolokia plugin has been deprecated in favor of the - # jolokia2 plugin - # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2 - - ## This is the context root used to compose the jolokia url - ## NOTE that Jolokia requires a trailing slash at the end of the context root - ## NOTE that your jolokia security policy must allow for POST requests. - context = "/jolokia/" - - ## This specifies the mode used - # mode = "proxy" - # - ## When in proxy mode this section is used to specify further - ## proxy address configurations. - ## Remember to change host address to fit your environment. - # [inputs.jolokia.proxy] - # host = "127.0.0.1" - # port = "8080" - - ## Optional http timeouts - ## - ## response_header_timeout, if non-zero, specifies the amount of time to wait - ## for a server's response headers after fully writing the request. - # response_header_timeout = "3s" - ## - ## client_timeout specifies a time limit for requests made by this client. - ## Includes connection time, any redirects, and reading the response body. - # client_timeout = "4s" - - ## Attribute delimiter - ## - ## When multiple attributes are returned for a single - ## [inputs.jolokia.metrics], the field name is a concatenation of the metric - ## name, and the attribute name, separated by the given delimiter. 
- # delimiter = "_" - - ## List of servers exposing jolokia read service - [[inputs.jolokia.servers]] - name = "as-server-01" - host = "127.0.0.1" - port = "8080" - # username = "myuser" - # password = "mypassword" - - ## List of metrics collected on above servers - ## Each metric consists in a name, a jmx path and either - ## a pass or drop slice attribute. - ## This collect all heap memory usage metrics. - [[inputs.jolokia.metrics]] - name = "heap_memory_usage" - mbean = "java.lang:type=Memory" - attribute = "HeapMemoryUsage" - - ## This collect thread counts metrics. - [[inputs.jolokia.metrics]] - name = "thread_count" - mbean = "java.lang:type=Threading" - attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" - - ## This collect number of class loaded/unloaded counts metrics. - [[inputs.jolokia.metrics]] - name = "class_count" - mbean = "java.lang:type=ClassLoading" - attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" -` - -func (j *Jolokia) SampleConfig() string { - return sampleConfig -} - -func (j *Jolokia) Description() string { - return "Read JMX metrics through Jolokia" + ResponseHeaderTimeout config.Duration `toml:"response_header_timeout"` + ClientTimeout config.Duration `toml:"client_timeout"` + Log telegraf.Logger `toml:"-"` } func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) { @@ -143,7 +74,7 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) // Process response if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + err = fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", req.RequestURI, resp.StatusCode, http.StatusText(resp.StatusCode), @@ -153,22 +84,22 @@ func (j *Jolokia) doRequest(req *http.Request) ([]map[string]interface{}, error) } // read body - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } // Unmarshal json var jsonOut []map[string]interface{} - if err = json.Unmarshal([]byte(body), &jsonOut); err != nil { - return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, body) + if err = json.Unmarshal(body, &jsonOut); err != nil { + return nil, fmt.Errorf("error decoding JSON response: %s: %s", err, body) } return jsonOut, nil } func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request, error) { - var jolokiaUrl *url.URL + var jolokiaURL *url.URL context := j.Context // Usually "/jolokia/" var bulkBodyContent []map[string]interface{} @@ -188,11 +119,11 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request // Add target, only in proxy mode if j.Mode == "proxy" { - serviceUrl := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi", + serviceURL := fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi", server.Host, server.Port) target := map[string]string{ - "url": serviceUrl, + "url": serviceURL, } if server.Username != "" { @@ -208,26 +139,25 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request proxy := j.Proxy // Prepare ProxyURL - proxyUrl, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context) + proxyURL, err := url.Parse("http://" + proxy.Host + ":" + proxy.Port + context) if err != nil { return nil, err } if proxy.Username != "" || proxy.Password != "" { - proxyUrl.User = url.UserPassword(proxy.Username, proxy.Password) + proxyURL.User = url.UserPassword(proxy.Username, 
proxy.Password) } - jolokiaUrl = proxyUrl - + jolokiaURL = proxyURL } else { - serverUrl, err := url.Parse("http://" + server.Host + ":" + server.Port + context) + serverURL, err := url.Parse("http://" + server.Host + ":" + server.Port + context) if err != nil { return nil, err } if server.Username != "" || server.Password != "" { - serverUrl.User = url.UserPassword(server.Username, server.Password) + serverURL.User = url.UserPassword(server.Username, server.Password) } - jolokiaUrl = serverUrl + jolokiaURL = serverURL } bulkBodyContent = append(bulkBodyContent, bodyContent) @@ -238,7 +168,7 @@ func (j *Jolokia) prepareRequest(server Server, metrics []Metric) (*http.Request return nil, err } - req, err := http.NewRequest("POST", jolokiaUrl.String(), bytes.NewBuffer(requestBody)) + req, err := http.NewRequest("POST", jolokiaURL.String(), bytes.NewBuffer(requestBody)) if err != nil { return nil, err } @@ -258,17 +188,20 @@ func (j *Jolokia) extractValues(measurement string, value interface{}, fields ma } } -func (j *Jolokia) Gather(acc telegraf.Accumulator) error { +func (*Jolokia) SampleConfig() string { + return sampleConfig +} +func (j *Jolokia) Gather(acc telegraf.Accumulator) error { if j.jClient == nil { - log.Println("W! DEPRECATED: the jolokia plugin has been deprecated " + + j.Log.Warn("DEPRECATED: the jolokia plugin has been deprecated " + "in favor of the jolokia2 plugin " + "(https://github.com/influxdata/telegraf/tree/master/plugins/inputs/jolokia2)") - tr := &http.Transport{ResponseHeaderTimeout: j.ResponseHeaderTimeout.Duration} + tr := &http.Transport{ResponseHeaderTimeout: time.Duration(j.ResponseHeaderTimeout)} j.jClient = &JolokiaClientImpl{&http.Client{ Transport: tr, - Timeout: j.ClientTimeout.Duration, + Timeout: time.Duration(j.ClientTimeout), }} } @@ -299,18 +232,18 @@ func (j *Jolokia) Gather(acc telegraf.Accumulator) error { } for i, resp := range out { if status, ok := resp["status"]; ok && status != float64(200) { - acc.AddError(fmt.Errorf("Not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f", + acc.AddError(fmt.Errorf("not expected status value in response body (%s:%s mbean=\"%s\" attribute=\"%s\"): %3.f", server.Host, server.Port, metrics[i].Mbean, metrics[i].Attribute, status)) continue } else if !ok { - acc.AddError(fmt.Errorf("Missing status in response body")) + acc.AddError(fmt.Errorf("missing status in response body")) continue } if values, ok := resp["value"]; ok { j.extractValues(metrics[i].Name, values, fields) } else { - acc.AddError(fmt.Errorf("Missing key 'value' in output response\n")) + acc.AddError(fmt.Errorf("missing key 'value' in output response")) } } diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index a1ca60604cf00..084a84577fdc9 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -2,14 +2,14 @@ package jolokia import ( _ "fmt" - "io/ioutil" + "io" "net/http" "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - _ "github.com/stretchr/testify/require" ) const validThreeLevelMultiValueJSON = ` @@ -98,25 +98,8 @@ const validMultiValueJSON = ` } ]` -const validSingleValueJSON = ` -[ - { - "request":{ - "path":"used", - "mbean":"java.lang:type=Memory", - "attribute":"HeapMemoryUsage", - "type":"read" - }, - "value":209274376, - "timestamp":1446129256, - "status":200 - } -]` - const invalidJSON = "I don't think this is JSON" 
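The reworked `Gather` above assembles its HTTP client from the two configurable timeouts: `response_header_timeout` bounds only the wait for response headers, while `client_timeout` bounds the entire request, including redirects and reading the body. A minimal sketch of that split, using the plugin defaults of 3s and 4s (`newJolokiaClient` is a hypothetical helper):

```go
package main

import (
	"net/http"
	"time"
)

// newJolokiaClient distills the client setup in Gather: the header timeout
// lives on the Transport, the overall timeout on the Client itself.
func newJolokiaClient(headerTimeout, clientTimeout time.Duration) *http.Client {
	return &http.Client{
		Transport: &http.Transport{ResponseHeaderTimeout: headerTimeout},
		Timeout:   clientTimeout,
	}
}

func main() {
	_ = newJolokiaClient(3*time.Second, 4*time.Second) // the plugin defaults
}
```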
-const empty = "" - var Servers = []Server{{Name: "as1", Host: "127.0.0.1", Port: "8080"}} var HeapMetric = Metric{Name: "heap_memory_usage", Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"} @@ -130,10 +113,10 @@ type jolokiaClientStub struct { statusCode int } -func (c jolokiaClientStub) MakeRequest(req *http.Request) (*http.Response, error) { +func (c jolokiaClientStub) MakeRequest(_ *http.Request) (*http.Response, error) { resp := http.Response{} resp.StatusCode = c.statusCode - resp.Body = ioutil.NopCloser(strings.NewReader(c.responseBody)) + resp.Body = io.NopCloser(strings.NewReader(c.responseBody)) return &resp, nil } @@ -160,8 +143,8 @@ func TestHttpJsonMultiValue(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_init": 67108864.0, @@ -184,8 +167,8 @@ func TestHttpJsonBulkResponse(t *testing.T) { var acc testutil.Accumulator err := jolokia.Gather(&acc) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_init": 67108864.0, @@ -212,8 +195,8 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) { var acc testutil.Accumulator err := acc.GatherError(jolokia.Gather) - assert.NoError(t, err) - assert.Equal(t, 1, len(acc.Metrics)) + require.NoError(t, err) + require.Equal(t, 1, len(acc.Metrics)) fields := map[string]interface{}{ "heap_memory_usage_java.lang:type=Memory_ObjectPendingFinalizationCount": 0.0, @@ -239,30 +222,26 @@ func TestHttpJsonThreeLevelMultiValue(t *testing.T) { // Test that the proper values are ignored or collected func TestHttp404(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, - []Metric{UsedHeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 404, Servers, []Metric{UsedHeapMetric}) var acc testutil.Accumulator acc.SetDebug(true) err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "has status code 404") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) + require.Contains(t, err.Error(), "has status code 404") } // Test that the proper values are ignored or collected func TestHttpInvalidJson(t *testing.T) { - - jolokia := genJolokiaClientStub(invalidJSON, 200, Servers, - []Metric{UsedHeapMetric}) + jolokia := genJolokiaClientStub(invalidJSON, 200, Servers, []Metric{UsedHeapMetric}) var acc testutil.Accumulator acc.SetDebug(true) err := acc.GatherError(jolokia.Gather) - assert.Error(t, err) - assert.Equal(t, 0, len(acc.Metrics)) - assert.Contains(t, err.Error(), "Error decoding JSON response") + require.Error(t, err) + require.Equal(t, 0, len(acc.Metrics)) + require.Contains(t, err.Error(), "error decoding JSON response") } diff --git a/plugins/inputs/jolokia/sample.conf b/plugins/inputs/jolokia/sample.conf new file mode 100644 index 0000000000000..cdbe6783ba426 --- /dev/null +++ b/plugins/inputs/jolokia/sample.conf @@ -0,0 +1,54 @@ +# Read JMX metrics through Jolokia +[[inputs.jolokia]] + ## This is the context root used to compose the jolokia url + ## NOTE that Jolokia requires a trailing slash at the end of the context root + context = "/jolokia/" + + ## This specifies the mode used + # mode = "proxy" + # + ## When in proxy mode this section is used to specify further + ## proxy 
address configurations. + ## Remember to change host address to fit your environment. + # [inputs.jolokia.proxy] + # host = "127.0.0.1" + # port = "8080" + + ## Optional http timeouts + ## + ## response_header_timeout, if non-zero, specifies the amount of time to wait + ## for a server's response headers after fully writing the request. + # response_header_timeout = "3s" + ## + ## client_timeout specifies a time limit for requests made by this client. + ## Includes connection time, any redirects, and reading the response body. + # client_timeout = "4s" + + ## List of servers exposing jolokia read service + [[inputs.jolokia.servers]] + name = "as-server-01" + host = "127.0.0.1" + port = "8080" + # username = "myuser" + # password = "mypassword" + + ## List of metrics collected on above servers + ## Each metric consists of a name, a jmx path and either + ## a pass or drop slice attribute. + ## This collects all heap memory usage metrics. + [[inputs.jolokia.metrics]] + name = "heap_memory_usage" + mbean = "java.lang:type=Memory" + attribute = "HeapMemoryUsage" + + ## This collects thread count metrics. + [[inputs.jolokia.metrics]] + name = "thread_count" + mbean = "java.lang:type=Threading" + attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" + + ## This collects class loaded/unloaded count metrics. + [[inputs.jolokia.metrics]] + name = "class_count" + mbean = "java.lang:type=ClassLoading" + attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" diff --git a/plugins/inputs/jolokia2/README.md b/plugins/inputs/jolokia2/README.md index 4a7b8f4200a42..65abc22761d2b 100644 --- a/plugins/inputs/jolokia2/README.md +++ b/plugins/inputs/jolokia2/README.md @@ -1,14 +1,20 @@ # Jolokia2 Input Plugin -The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP protocol](https://jolokia.org/reference/html/protocol.html). +The [Jolokia](http://jolokia.org) _agent_ and _proxy_ input plugins collect JMX +metrics from an HTTP endpoint using Jolokia's [JSON-over-HTTP +protocol](https://jolokia.org/reference/html/protocol.html). -### Configuration: +* [jolokia2_agent Configuration](jolokia2_agent/README.md) +* [jolokia2_proxy Configuration](jolokia2_proxy/README.md) -#### Jolokia Agent Configuration +## Configuration -The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints. +### Jolokia Agent Configuration -```toml +The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia +agent](https://jolokia.org/agent/jvm.html) REST endpoints. + +```toml @sample.conf [[inputs.jolokia2_agent]] urls = ["http://agent:8080/jolokia"] @@ -34,9 +40,11 @@ Optionally, specify TLS options for communicating with agents: paths = ["Uptime"] ``` -#### Jolokia Proxy Configuration +### Jolokia Proxy Configuration -The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint. +The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ +by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) +REST endpoint.
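Before the proxy configuration block that follows, a minimal sketch of the read request body that proxy mode sends, assuming the shape built by `prepareRequest` earlier in this patch; the host and port are placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Each read request names an mbean and, in proxy mode, carries a
	// "target" block with the JMX service URL the proxy should query.
	body := map[string]interface{}{
		"type":  "read",
		"mbean": "java.lang:type=Runtime",
		"target": map[string]string{
			"url": fmt.Sprintf("service:jmx:rmi:///jndi/rmi://%s:%s/jmxrmi", "target-host", "9010"),
		},
	}
	if err := json.NewEncoder(os.Stdout).Encode(body); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```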
```toml [[inputs.jolokia2_proxy]] @@ -79,9 +87,10 @@ Optionally, specify TLS options for communicating with proxies: paths = ["Uptime"] ``` -#### Jolokia Metric Configuration +### Jolokia Metric Configuration -Each `metric` declaration generates a Jolokia request to fetch telemetry from a JMX MBean. +Each `metric` declaration generates a Jolokia request to fetch telemetry from a +JMX MBean. | Key | Required | Description | |----------------|----------|-------------| @@ -103,11 +112,12 @@ Use `paths` to refine which fields to collect. The preceeding `jvm_memory` `metric` declaration produces the following output: -``` +```text jvm_memory HeapMemoryUsage.committed=4294967296,HeapMemoryUsage.init=4294967296,HeapMemoryUsage.max=4294967296,HeapMemoryUsage.used=1750658992,NonHeapMemoryUsage.committed=67350528,NonHeapMemoryUsage.init=2555904,NonHeapMemoryUsage.max=-1,NonHeapMemoryUsage.used=65821352,ObjectPendingFinalizationCount=0 1503762436000000000 ``` -Use `*` wildcards against `mbean` property-key values to create distinct series by capturing values into `tag_keys`. +Use `*` wildcards against `mbean` property-key values to create distinct series +by capturing values into `tag_keys`. ```toml [[inputs.jolokia2_agent.metric]] @@ -117,9 +127,11 @@ Use `*` wildcards against `mbean` property-key values to create distinct series tag_keys = ["name"] ``` -Since `name=*` matches both `G1 Old Generation` and `G1 Young Generation`, and `name` is used as a tag, the preceeding `jvm_garbage_collector` `metric` declaration produces two metrics. +Since `name=*` matches both `G1 Old Generation` and `G1 Young Generation`, and +`name` is used as a tag, the preceding `jvm_garbage_collector` `metric` +declaration produces two metrics. -``` +```shell jvm_garbage_collector,name=G1\ Old\ Generation CollectionCount=0,CollectionTime=0 1503762520000000000 jvm_garbage_collector,name=G1\ Young\ Generation CollectionTime=32,CollectionCount=2 1503762520000000000 ``` @@ -135,9 +147,10 @@ Use `tag_prefix` along with `tag_keys` to add detail to tag names. tag_prefix = "pool_" ``` -The preceeding `jvm_memory_pool` `metric` declaration produces six metrics, each with a distinct `pool_name` tag. +The preceding `jvm_memory_pool` `metric` declaration produces six metrics, each +with a distinct `pool_name` tag.
-``` +```text jvm_memory_pool,pool_name=Compressed\ Class\ Space PeakUsage.max=1073741824,PeakUsage.committed=3145728,PeakUsage.init=0,Usage.committed=3145728,Usage.init=0,PeakUsage.used=3017976,Usage.max=1073741824,Usage.used=3017976 1503764025000000000 jvm_memory_pool,pool_name=Code\ Cache PeakUsage.init=2555904,PeakUsage.committed=6291456,Usage.committed=6291456,PeakUsage.used=6202752,PeakUsage.max=251658240,Usage.used=6210368,Usage.max=251658240,Usage.init=2555904 1503764025000000000 jvm_memory_pool,pool_name=G1\ Eden\ Space CollectionUsage.max=-1,PeakUsage.committed=56623104,PeakUsage.init=56623104,PeakUsage.used=53477376,Usage.max=-1,Usage.committed=49283072,Usage.used=19922944,CollectionUsage.committed=49283072,CollectionUsage.init=56623104,CollectionUsage.used=0,PeakUsage.max=-1,Usage.init=56623104 1503764025000000000 @@ -146,7 +159,10 @@ jvm_memory_pool,pool_name=G1\ Survivor\ Space Usage.max=-1,Usage.init=0,Collecti jvm_memory_pool,pool_name=Metaspace PeakUsage.init=0,PeakUsage.used=21852224,PeakUsage.max=-1,Usage.max=-1,Usage.committed=22282240,Usage.init=0,Usage.used=21852224,PeakUsage.committed=22282240 1503764025000000000 ``` -Use substitutions to create fields and field prefixes with MBean property-keys captured by wildcards. In the following example, `$1` represents the value of the property-key `name`, and `$2` represents the value of the property-key `topic`. +Use substitutions to create fields and field prefixes with MBean property-keys +captured by wildcards. In the following example, `$1` represents the value of +the property-key `name`, and `$2` represents the value of the property-key +`topic`. ```toml [[inputs.jolokia2_agent.metric]] @@ -156,13 +172,16 @@ Use substitutions to create fields and field prefixes with MBean property-keys c tag_keys = ["topic"] ``` -The preceeding `kafka_topic` `metric` declaration produces a metric per Kafka topic. The `name` Mbean property-key is used as a field prefix to aid in gathering fields together into the single metric. +The preceding `kafka_topic` `metric` declaration produces a metric per Kafka +topic. The `name` Mbean property-key is used as a field prefix to aid in +gathering fields together into the single metric.
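A minimal sketch of how such a substitution resolves (cf. `applySubstitutions` in `point_builder.go` later in this patch); the captured values are illustrative, and the sample output for the `kafka_topic` declaration follows below:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Values captured for the wildcarded property keys of the mbean.
	captured := map[string]string{"name": "BytesOutPerSec", "topic": "my-topic"}

	fieldName := "$1.Count" // from field_prefix = "$1."
	fieldName = strings.ReplaceAll(fieldName, "$1", captured["name"])

	fmt.Println(fieldName) // BytesOutPerSec.Count
}
```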
-``` +```text kafka_topic,topic=my-topic BytesOutPerSec.MeanRate=0,FailedProduceRequestsPerSec.MeanRate=0,BytesOutPerSec.EventType="bytes",BytesRejectedPerSec.Count=0,FailedProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.EventType="requests",MessagesInPerSec.RateUnit="SECONDS",BytesInPerSec.EventType="bytes",BytesOutPerSec.RateUnit="SECONDS",BytesInPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.EventType="requests",TotalFetchRequestsPerSec.MeanRate=146.301533938701,BytesOutPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.MeanRate=0,BytesRejectedPerSec.FifteenMinuteRate=0,MessagesInPerSec.FiveMinuteRate=0,BytesInPerSec.Count=0,BytesRejectedPerSec.MeanRate=0,FailedFetchRequestsPerSec.MeanRate=0,FailedFetchRequestsPerSec.FiveMinuteRate=0,FailedFetchRequestsPerSec.FifteenMinuteRate=0,FailedProduceRequestsPerSec.Count=0,TotalFetchRequestsPerSec.FifteenMinuteRate=128.59314292334466,TotalFetchRequestsPerSec.OneMinuteRate=126.71551273850747,TotalFetchRequestsPerSec.Count=1353483,TotalProduceRequestsPerSec.FifteenMinuteRate=0,FailedFetchRequestsPerSec.OneMinuteRate=0,FailedFetchRequestsPerSec.Count=0,FailedProduceRequestsPerSec.FifteenMinuteRate=0,TotalFetchRequestsPerSec.FiveMinuteRate=130.8516148751592,TotalFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.RateUnit="SECONDS",BytesInPerSec.MeanRate=0,FailedFetchRequestsPerSec.RateUnit="SECONDS",BytesRejectedPerSec.OneMinuteRate=0,BytesOutPerSec.Count=0,BytesOutPerSec.OneMinuteRate=0,MessagesInPerSec.FifteenMinuteRate=0,MessagesInPerSec.MeanRate=0,BytesInPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.RateUnit="SECONDS",FailedProduceRequestsPerSec.OneMinuteRate=0,TotalProduceRequestsPerSec.EventType="requests",BytesRejectedPerSec.FiveMinuteRate=0,BytesRejectedPerSec.EventType="bytes",BytesOutPerSec.FiveMinuteRate=0,FailedProduceRequestsPerSec.FiveMinuteRate=0,MessagesInPerSec.Count=0,TotalProduceRequestsPerSec.FiveMinuteRate=0,TotalProduceRequestsPerSec.OneMinuteRate=0,MessagesInPerSec.EventType="messages",MessagesInPerSec.OneMinuteRate=0,TotalFetchRequestsPerSec.EventType="requests",BytesInPerSec.RateUnit="SECONDS",BytesInPerSec.FifteenMinuteRate=0,TotalProduceRequestsPerSec.Count=0 1503767532000000000 ``` -Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configurations that apply to every `metric` declaration. +Both `jolokia2_agent` and `jolokia2_proxy` plugins support default +configurations that apply to every `metric` declaration. | Key | Default Value | Description | |---------------------------|---------------|-------------| @@ -170,17 +189,19 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration | `default_field_prefix` | _None_ | A string to prepend to the field names produced by all `metric` declarations. | | `default_tag_prefix` | _None_ | A string to prepend to the tag names produced by all `metric` declarations. 
| -### Example Configurations: - -- [ActiveMQ](/plugins/inputs/jolokia2/examples/activemq.conf) -- [BitBucket](/plugins/inputs/jolokia2/examples/bitbucket.conf) -- [Cassandra](/plugins/inputs/jolokia2/examples/cassandra.conf) -- [Hadoop-HDFS](/plugins/inputs/jolokia2/examples/hadoop-hdfs.conf) -- [Java JVM](/plugins/inputs/jolokia2/examples/java.conf) -- [JBoss](/plugins/inputs/jolokia2/examples/jboss.conf) -- [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf) -- [Tomcat](/plugins/inputs/jolokia2/examples/tomcat.conf) -- [Weblogic](/plugins/inputs/jolokia2/examples/weblogic.conf) -- [ZooKeeper](/plugins/inputs/jolokia2/examples/zookeeper.conf) - -Please help improve this list and contribute new configuration files by opening an issue or pull request. +## Example Configurations + +* [ActiveMQ](/plugins/inputs/jolokia2/examples/activemq.conf) +* [BitBucket](/plugins/inputs/jolokia2/examples/bitbucket.conf) +* [Cassandra](/plugins/inputs/jolokia2/examples/cassandra.conf) +* [Hadoop-HDFS](/plugins/inputs/jolokia2/examples/hadoop-hdfs.conf) +* [Java JVM](/plugins/inputs/jolokia2/examples/java.conf) +* [JBoss](/plugins/inputs/jolokia2/examples/jboss.conf) +* [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf) +* [Kafka Connect](/plugins/inputs/jolokia2/examples/kafka-connect.conf) +* [Tomcat](/plugins/inputs/jolokia2/examples/tomcat.conf) +* [Weblogic](/plugins/inputs/jolokia2/examples/weblogic.conf) +* [ZooKeeper](/plugins/inputs/jolokia2/examples/zookeeper.conf) + +Please help improve this list and contribute new configuration files by opening +an issue or pull request. diff --git a/plugins/inputs/jolokia2/client_test.go b/plugins/inputs/jolokia2/client_test.go index 0c7cd4c010d50..e55daf6b1a0fe 100644 --- a/plugins/inputs/jolokia2/client_test.go +++ b/plugins/inputs/jolokia2/client_test.go @@ -1,14 +1,15 @@ -package jolokia2 +package jolokia2_test import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestJolokia2_ClientAuthRequest(t *testing.T) { @@ -19,17 +20,14 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) - err := json.Unmarshal(body, &requests) - if err != nil { - t.Error(err) - } + body, _ := io.ReadAll(r.Body) + require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) })) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(` + plugin := SetupPlugin(t, fmt.Sprintf(` [jolokia2_agent] urls = ["%s/jolokia"] username = "sally" @@ -40,22 +38,14 @@ func TestJolokia2_ClientAuthRequest(t *testing.T) { `, server.URL)) var acc testutil.Accumulator - plugin.Gather(&acc) - - if username != "sally" { - t.Errorf("Expected to post with username %s, but was %s", "sally", username) - } - if password != "seashore" { - t.Errorf("Expected to post with password %s, but was %s", "seashore", password) - } - if len(requests) == 0 { - t.Fatal("Expected to post a request body, but was empty.") - } + require.NoError(t, plugin.Gather(&acc)) - request := requests[0] - if expect := "hello:foo=bar"; request["mbean"] != expect { - t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"]) - } + require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username) + require.EqualValuesf(t, "seashore", password, "Expected to 
post with password %s, but was %s", "seashore", password) + require.NotZero(t, len(requests), "Expected to post a request body, but was empty.") + + request := requests[0]["mbean"] + require.EqualValuesf(t, "hello:foo=bar", request, "Expected to query mbean %s, but was %s", "hello:foo=bar", request) } func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { @@ -66,17 +56,15 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ = r.BasicAuth() - body, _ := ioutil.ReadAll(r.Body) - err := json.Unmarshal(body, &requests) - if err != nil { - t.Error(err) - } - + body, _ := io.ReadAll(r.Body) + require.NoError(t, json.Unmarshal(body, &requests)) w.WriteHeader(http.StatusOK) + _, err := fmt.Fprintf(w, "[]") + require.NoError(t, err) })) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(` + plugin := SetupPlugin(t, fmt.Sprintf(` [jolokia2_proxy] url = "%s/jolokia" username = "sally" @@ -93,37 +81,22 @@ func TestJolokia2_ClientProxyAuthRequest(t *testing.T) { `, server.URL)) var acc testutil.Accumulator - plugin.Gather(&acc) - - if username != "sally" { - t.Errorf("Expected to post with username %s, but was %s", "sally", username) - } - if password != "seashore" { - t.Errorf("Expected to post with password %s, but was %s", "seashore", password) - } - if len(requests) == 0 { - t.Fatal("Expected to post a request body, but was empty.") - } + require.NoError(t, plugin.Gather(&acc)) + require.EqualValuesf(t, "sally", username, "Expected to post with username %s, but was %s", "sally", username) + require.EqualValuesf(t, "seashore", password, "Expected to post with password %s, but was %s", "seashore", password) + require.NotZero(t, len(requests), "Expected to post a request body, but was empty.") request := requests[0] - if expect := "hello:foo=bar"; request["mbean"] != expect { - t.Errorf("Expected to query mbean %s, but was %s", expect, request["mbean"]) - } + expected := "hello:foo=bar" + require.EqualValuesf(t, expected, request["mbean"], "Expected to query mbean %s, but was %s", expected, request["mbean"]) target, ok := request["target"].(map[string]interface{}) - if !ok { - t.Fatal("Expected a proxy target, but was empty.") - } - - if expect := "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi"; target["url"] != expect { - t.Errorf("Expected proxy target url %s, but was %s", expect, target["url"]) - } - - if expect := "jack"; target["user"] != expect { - t.Errorf("Expected proxy target username %s, but was %s", expect, target["user"]) - } - - if expect := "benimble"; target["password"] != expect { - t.Errorf("Expected proxy target password %s, but was %s", expect, target["password"]) - } + require.True(t, ok, "Expected a proxy target, but was empty.") + + expected = "service:jmx:rmi:///jndi/rmi://target:9010/jmxrmi" + require.Equalf(t, expected, target["url"], "Expected proxy target url %s, but was %s", expected, target["url"]) + expected = "jack" + require.Equalf(t, expected, target["user"], "Expected proxy target username %s, but was %s", expected, target["user"]) + expected = "benimble" + require.Equalf(t, expected, target["password"], "Expected proxy target password %s, but was %s", expected, target["password"]) } diff --git a/plugins/inputs/jolokia2/client.go b/plugins/inputs/jolokia2/common/client.go similarity index 80% rename from plugins/inputs/jolokia2/client.go rename to plugins/inputs/jolokia2/common/client.go index 90aa9c0db7fce..04624eb26b6df 100644 ---
a/plugins/inputs/jolokia2/client.go +++ b/plugins/inputs/jolokia2/common/client.go @@ -1,10 +1,10 @@ -package jolokia2 +package common import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "path" @@ -95,7 +95,7 @@ type jolokiaResponse struct { Status int `json:"status"` } -func NewClient(url string, config *ClientConfig) (*Client, error) { +func NewClient(address string, config *ClientConfig) (*Client, error) { tlsConfig, err := config.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -112,27 +112,28 @@ func NewClient(url string, config *ClientConfig) (*Client, error) { } return &Client{ - URL: url, + URL: address, config: config, client: client, }, nil } func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { - jrequests := makeJolokiaRequests(requests, c.config.ProxyConfig) - requestBody, err := json.Marshal(jrequests) + jRequests := makeJolokiaRequests(requests, c.config.ProxyConfig) + requestBody, err := json.Marshal(jRequests) if err != nil { return nil, err } - requestUrl, err := formatReadUrl(c.URL, c.config.Username, c.config.Password) + requestURL, err := formatReadURL(c.URL, c.config.Username, c.config.Password) if err != nil { return nil, err } - req, err := http.NewRequest("POST", requestUrl, bytes.NewBuffer(requestBody)) + req, err := http.NewRequest("POST", requestURL, bytes.NewBuffer(requestBody)) if err != nil { - return nil, fmt.Errorf("unable to create new request '%s': %s", requestUrl, err) + //err is not contained in returned error - it may contain sensitive data (password) which should not be logged + return nil, fmt.Errorf("unable to create new request for: '%s'", c.URL) } req.Header.Add("Content-type", "application/json") @@ -144,21 +145,21 @@ func (c *Client) read(requests []ReadRequest) ([]ReadResponse, error) { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", + return nil, fmt.Errorf("response from url \"%s\" has status code %d (%s), expected %d (%s)", c.URL, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } - var jresponses []jolokiaResponse - if err = json.Unmarshal([]byte(responseBody), &jresponses); err != nil { - return nil, fmt.Errorf("Error decoding JSON response: %s: %s", err, responseBody) + var jResponses []jolokiaResponse + if err = json.Unmarshal(responseBody, &jResponses); err != nil { + return nil, fmt.Errorf("decoding JSON response: %s: %s", err, responseBody) } - return makeReadResponses(jresponses), nil + return makeReadResponses(jResponses), nil } func makeJolokiaRequests(rrequests []ReadRequest, proxyConfig *ProxyConfig) []jolokiaRequest { @@ -249,22 +250,22 @@ func makeReadResponses(jresponses []jolokiaResponse) []ReadResponse { return rresponses } -func formatReadUrl(configUrl, username, password string) (string, error) { - parsedUrl, err := url.Parse(configUrl) +func formatReadURL(configURL, username, password string) (string, error) { + parsedURL, err := url.Parse(configURL) if err != nil { return "", err } - readUrl := url.URL{ - Host: parsedUrl.Host, - Scheme: parsedUrl.Scheme, + readURL := url.URL{ + Host: parsedURL.Host, + Scheme: parsedURL.Scheme, } if username != "" || password != "" { - readUrl.User = url.UserPassword(username, password) + readURL.User = url.UserPassword(username, password) } - 
readUrl.Path = path.Join(parsedUrl.Path, "read") - readUrl.Query().Add("ignoreErrors", "true") - return readUrl.String(), nil + readURL.Path = path.Join(parsedURL.Path, "read") + readURL.Query().Add("ignoreErrors", "true") + return readURL.String(), nil } diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/common/gatherer.go similarity index 93% rename from plugins/inputs/jolokia2/gatherer.go rename to plugins/inputs/jolokia2/common/gatherer.go index f24918998248e..5b426bb01f5f2 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/common/gatherer.go @@ -1,4 +1,4 @@ -package jolokia2 +package common import ( "fmt" @@ -46,7 +46,7 @@ func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error { // gatherResponses adds points to an accumulator from the ReadResponse objects // returned by a Jolokia agent. func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) { - series := make(map[string][]point, 0) + series := make(map[string][]point) for _, metric := range g.metrics { points, ok := series[metric.Name] @@ -55,11 +55,7 @@ func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]str } responsePoints, responseErrors := g.generatePoints(metric, responses) - - for _, responsePoint := range responsePoints { - points = append(points, responsePoint) - } - + points = append(points, responsePoints...) for _, err := range responseErrors { acc.AddError(err) } @@ -84,11 +80,11 @@ func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]po for _, response := range responses { switch response.Status { case 200: - break + // Correct response status - do nothing. case 404: continue default: - errors = append(errors, fmt.Errorf("Unexpected status in response from target %s (%q): %d", + errors = append(errors, fmt.Errorf("unexpected status in response from target %s (%q): %d", response.RequestTarget, response.RequestMbean, response.Status)) continue } @@ -97,8 +93,14 @@ func (g *Gatherer) generatePoints(metric Metric, responses []ReadResponse) ([]po continue } - pb := newPointBuilder(metric, response.RequestAttributes, response.RequestPath) - for _, point := range pb.Build(metric.Mbean, response.Value) { + pb := NewPointBuilder(metric, response.RequestAttributes, response.RequestPath) + ps, err := pb.Build(metric.Mbean, response.Value) + if err != nil { + errors = append(errors, err) + continue + } + + for _, point := range ps { if response.RequestTarget != "" { point.Tags["jolokia_agent_url"] = response.RequestTarget } @@ -195,7 +197,6 @@ func tagSetsMatch(a, b map[string]string) bool { func makeReadRequests(metrics []Metric) []ReadRequest { var requests []ReadRequest for _, metric := range metrics { - if len(metric.Paths) == 0 { requests = append(requests, ReadRequest{ Mbean: metric.Mbean, diff --git a/plugins/inputs/jolokia2/gatherer_test.go b/plugins/inputs/jolokia2/common/gatherer_test.go similarity index 90% rename from plugins/inputs/jolokia2/gatherer_test.go rename to plugins/inputs/jolokia2/common/gatherer_test.go index 4ba4b586ad5f4..02e2f9f74c92c 100644 --- a/plugins/inputs/jolokia2/gatherer_test.go +++ b/plugins/inputs/jolokia2/common/gatherer_test.go @@ -1,9 +1,9 @@ -package jolokia2 +package common import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestJolokia2_makeReadRequests(t *testing.T) { @@ -96,9 +96,9 @@ func TestJolokia2_makeReadRequests(t *testing.T) { for _, c := range cases { 
payload := makeReadRequests([]Metric{c.metric}) - assert.Equal(t, len(c.expected), len(payload), "Failing case: "+c.metric.Name) + require.Equal(t, len(c.expected), len(payload), "Failing case: "+c.metric.Name) for _, actual := range payload { - assert.Contains(t, c.expected, actual, "Failing case: "+c.metric.Name) + require.Contains(t, c.expected, actual, "Failing case: "+c.metric.Name) } } } diff --git a/plugins/inputs/jolokia2/metric.go b/plugins/inputs/jolokia2/common/metric.go similarity index 99% rename from plugins/inputs/jolokia2/metric.go rename to plugins/inputs/jolokia2/common/metric.go index 0f438b3c03df0..f5e299460957e 100644 --- a/plugins/inputs/jolokia2/metric.go +++ b/plugins/inputs/jolokia2/common/metric.go @@ -1,4 +1,4 @@ -package jolokia2 +package common import "strings" diff --git a/plugins/inputs/jolokia2/point_builder.go b/plugins/inputs/jolokia2/common/point_builder.go similarity index 87% rename from plugins/inputs/jolokia2/point_builder.go rename to plugins/inputs/jolokia2/common/point_builder.go index f5ae1d31410ec..b094653eb8c24 100644 --- a/plugins/inputs/jolokia2/point_builder.go +++ b/plugins/inputs/jolokia2/common/point_builder.go @@ -1,4 +1,4 @@ -package jolokia2 +package common import ( "fmt" @@ -17,7 +17,7 @@ type pointBuilder struct { substitutions []string } -func newPointBuilder(metric Metric, attributes []string, path string) *pointBuilder { +func NewPointBuilder(metric Metric, attributes []string, path string) *pointBuilder { return &pointBuilder{ metric: metric, objectAttributes: attributes, @@ -27,27 +27,26 @@ func newPointBuilder(metric Metric, attributes []string, path string) *pointBuil } // Build generates a point for a given mbean name/pattern and value object. -func (pb *pointBuilder) Build(mbean string, value interface{}) []point { +func (pb *pointBuilder) Build(mbean string, value interface{}) ([]point, error) { hasPattern := strings.Contains(mbean, "*") - if !hasPattern { + if !hasPattern || value == nil { value = map[string]interface{}{mbean: value} } valueMap, ok := value.(map[string]interface{}) - if !ok { // FIXME: log it and move on. - panic(fmt.Sprintf("There should be a map here for %s!\n", mbean)) + if !ok { + return nil, fmt.Errorf("the response of %s's value should be a map", mbean) } points := make([]point, 0) for mbean, value := range valueMap { - points = append(points, point{ Tags: pb.extractTags(mbean), Fields: pb.extractFields(mbean, value), }) } - return compactPoints(points) + return compactPoints(points), nil } // extractTags generates the map of tags for a given mbean name/pattern. 
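The point builder's tag extraction and `$n` substitutions both rest on splitting an MBean name into its property keys. A simplified sketch of that parsing; it assumes plain `key=value` pairs and ignores the quoting that real JMX ObjectNames may use:

```go
package main

import (
	"fmt"
	"strings"
)

// makePropertyMap (simplified): everything after the domain separator ":"
// is a comma-separated list of key=value properties.
func makePropertyMap(mbean string) map[string]string {
	properties := map[string]string{}
	idx := strings.Index(mbean, ":")
	if idx < 0 {
		return properties
	}
	for _, pair := range strings.Split(mbean[idx+1:], ",") {
		if kv := strings.SplitN(pair, "=", 2); len(kv) == 2 {
			properties[kv[0]] = kv[1]
		}
	}
	return properties
}

func main() {
	fmt.Println(makePropertyMap("kafka.server:name=BytesOutPerSec,topic=my-topic"))
	// map[name:BytesOutPerSec topic:my-topic]
}
```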
@@ -98,20 +97,18 @@ func (pb *pointBuilder) extractFields(mbean string, value interface{}) map[strin if len(pb.objectAttributes) == 0 { // if there were no attributes requested, // then the keys are attributes - pb.fillFields("", valueMap, fieldMap) - + pb.FillFields("", valueMap, fieldMap) } else if len(pb.objectAttributes) == 1 { // if there was a single attribute requested, // then the keys are the attribute's properties fieldName := pb.formatFieldName(pb.objectAttributes[0], pb.objectPath) - pb.fillFields(fieldName, valueMap, fieldMap) - + pb.FillFields(fieldName, valueMap, fieldMap) } else { // if there were multiple attributes requested, // then the keys are the attribute names for _, attribute := range pb.objectAttributes { fieldName := pb.formatFieldName(attribute, pb.objectPath) - pb.fillFields(fieldName, valueMap[attribute], fieldMap) + pb.FillFields(fieldName, valueMap[attribute], fieldMap) } } } else { @@ -123,7 +120,7 @@ func (pb *pointBuilder) extractFields(mbean string, value interface{}) map[strin fieldName = pb.formatFieldName(pb.objectAttributes[0], pb.objectPath) } - pb.fillFields(fieldName, value, fieldMap) + pb.FillFields(fieldName, value, fieldMap) } if len(pb.substitutions) > 1 { @@ -146,15 +143,15 @@ func (pb *pointBuilder) formatFieldName(attribute, path string) string { } if path != "" { - fieldName = fieldName + fieldSeparator + strings.Replace(path, "/", fieldSeparator, -1) + fieldName = fieldName + fieldSeparator + strings.ReplaceAll(path, "/", fieldSeparator) } return fieldName } -// fillFields recurses into the supplied value object, generating a named field +// FillFields recurses into the supplied value object, generating a named field // for every value it discovers. -func (pb *pointBuilder) fillFields(name string, value interface{}, fieldMap map[string]interface{}) { +func (pb *pointBuilder) FillFields(name string, value interface{}, fieldMap map[string]interface{}) { if valueMap, ok := value.(map[string]interface{}); ok { // keep going until we get to something that is not a map for key, innerValue := range valueMap { @@ -169,7 +166,7 @@ func (pb *pointBuilder) fillFields(name string, value interface{}, fieldMap map[ innerName = name + pb.metric.FieldSeparator + key } - pb.fillFields(innerName, innerValue, fieldMap) + pb.FillFields(innerName, innerValue, fieldMap) } return @@ -199,12 +196,11 @@ func (pb *pointBuilder) applySubstitutions(mbean string, fieldMap map[string]int properties := makePropertyMap(mbean) for i, subKey := range pb.substitutions[1:] { - symbol := fmt.Sprintf("$%d", i+1) substitution := properties[subKey] for fieldName, fieldValue := range fieldMap { - newFieldName := strings.Replace(fieldName, symbol, substitution, -1) + newFieldName := strings.ReplaceAll(fieldName, symbol, substitution) if fieldName != newFieldName { fieldMap[newFieldName] = fieldValue delete(fieldMap, fieldName) diff --git a/plugins/inputs/jolokia2/examples/kafka-connect.conf b/plugins/inputs/jolokia2/examples/kafka-connect.conf new file mode 100644 index 0000000000000..d84f5fd58df2c --- /dev/null +++ b/plugins/inputs/jolokia2/examples/kafka-connect.conf @@ -0,0 +1,90 @@ +[[inputs.jolokia2_agent]] + urls = ["http://localhost:8080/jolokia"] + name_prefix = "kafka.connect." 
+ + [[processors.enum]] + [[processors.enum.mapping]] + field = "status" + + [processors.enum.mapping.value_mappings] + paused = 0 + running = 1 + unassigned = 2 + failed = 3 + destroyed = 4 + + [inputs.jolokia2_agent.tags] + input_type = "kafka-connect" + + # https://kafka.apache.org/documentation/#connect_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerMetrics" + mbean = "kafka.connect:type=connect-worker-metrics" + paths = ["connector-count", "connector-startup-attempts-total", "connector-startup-failure-percentage", "connector-startup-failure-total", "connector-startup-success-percentage", "connector-startup-success-total", "task-count", "task-startup-attempts-total", "task-startup-failure-percentage", "task-startup-failure-total", "task-startup-success-percentage", "task-startup-success-total"] + + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerMetrics" + mbean = "kafka.connect:type=connect-worker-metrics,connector=*" + paths = ["connector-destroyed-task-count", "connector-failed-task-count", "connector-paused-task-count", "connector-running-task-count", "connector-total-task-count", "connector-unassigned-task-count"] + tag_keys = ["connector"] + + [[inputs.jolokia2_agent.metric]] + name = "connectWorkerRebalanceMetrics" + mbean = "kafka.connect:type=connect-worker-rebalance-metrics" + paths = ["completed-rebalances-total", "connect-protocol", "epoch", "leader-name", "rebalance-avg-time-ms", "rebalance-max-time-ms", "rebalancing", "time-since-last-rebalance-ms"] + + [[inputs.jolokia2_agent.metric]] + name = "connectorMetrics" + mbean = "kafka.connect:type=connector-metrics,connector=*" + paths = ["connector-class", "connector-version", "connector-type", "status"] + tag_keys = ["connector"] + + [[inputs.jolokia2_agent.metric]] + name = "connectorTaskMetrics" + mbean = "kafka.connect:type=connector-task-metrics,connector=*,task=*" + paths = ["batch-size-avg", "batch-size-max", "offset-commit-avg-time-ms", "offset-commit-failure-percentage", "offset-commit-max-time-ms", "offset-commit-success-percentage", "pause-ratio", "running-ratio", "status"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "sinkTaskMetrics" + mbean = "kafka.connect:type=sink-task-metrics,connector=*,task=*" + paths = ["offset-commit-completion-rate", "offset-commit-completion-total", "offset-commit-seq-no", "offset-commit-skip-rate", "offset-commit-skip-total", "partition-count", "put-batch-avg-time-ms", "put-batch-max-time-ms", "sink-record-active-count", "sink-record-active-count-avg", "sink-record-active-count-max", "sink-record-lag-max", "sink-record-read-rate", "sink-record-read-total", "sink-record-send-rate", "sink-record-send-total"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "sourceTaskMetrics" + mbean = "kafka.connect:type=source-task-metrics,connector=*,task=*" + paths = ["poll-batch-avg-time-ms", "poll-batch-max-time-ms", "source-record-active-count", "source-record-active-count-avg", "source-record-active-count-max", "source-record-poll-rate", "source-record-poll-total", "source-record-write-rate", "source-record-write-total"] + tag_keys = ["connector", "task"] + + [[inputs.jolokia2_agent.metric]] + name = "taskErrorMetrics" + mbean = "kafka.connect:type=task-error-metrics,connector=*,task=*" + paths = ["deadletterqueue-produce-failures", "deadletterqueue-produce-requests", "last-error-timestamp", "total-errors-logged", "total-record-errors", "total-record-failures", "total-records-skipped", 
"total-retries"] + tag_keys = ["connector", "task"] + + # https://kafka.apache.org/documentation/#selector_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectMetrics" + mbean = "kafka.connect:type=connect-metrics,client-id=*" + paths = ["connection-close-rate", "connection-close-total", "connection-creation-rate", "connection-creation-total", "network-io-rate", "network-io-total", "outgoing-byte-rate", "outgoing-byte-total", "request-rate", "request-total", "request-size-avg", "request-size-max", "incoming-byte-rate", "incoming-byte-rate", "incoming-byte-total", "response-rate", "response-total", "select-rate", "select-total", "io-wait-time-ns-avg", "io-wait-ratio", "io-time-ns-avg", "io-ratio", "connection-count", "successful-authentication-rate", "successful-authentication-total", "failed-authentication-rate", "failed-authentication-total", "successful-reauthentication-rate", "successful-reauthentication-total", "reauthentication-latency-max", "reauthentication-latency-avg", "failed-reauthentication-rate", "failed-reauthentication-total", "successful-authentication-no-reauth-total"] + tag_keys = ["client-id"] + + # https://kafka.apache.org/documentation/#common_node_monitoring + [[inputs.jolokia2_agent.metric]] + name = "connectNodeMetrics" + mbean = "kafka.connect:type=connect-node-metrics,client-id=*,node-id=*" + paths = ["outgoing-byte-rate", "outgoing-byte-total", "request-rate", "request-total", "request-size-avg", "request-size-max", "incoming-byte-rate", "incoming-byte-total", "request-latency-avg", "request-latency-max", "response-rate", "response-total"] + tag_keys = ["client-id", "node-id"] + + [[inputs.jolokia2_agent.metric]] + name = "appInfo" + mbean = "kafka.connect:type=app-info,client-id=*" + paths = ["start-time-ms", "commit-id", "version"] + tag_keys = ["client-id"] + + [[inputs.jolokia2_agent.metric]] + name = "connectCoordinatorMetrics" + mbean = "kafka.connect:type=connect-coordinator-metrics,client-id=*" + paths = ["join-time-max", "failed-rebalance-rate-per-hour", "rebalance-latency-total", "sync-time-avg", "join-rate", "sync-rate", "failed-rebalance-total", "rebalance-total", "last-heartbeat-seconds-ago", "heartbeat-rate", "join-time-avg", "sync-total", "rebalance-latency-max", "sync-time-max", "last-rebalance-seconds-ago", "rebalance-rate-per-hour", "assigned-connectors", "heartbeat-total", "assigned-tasks", "heartbeat-response-time-max", "rebalance-latency-avg", "join-total"] + tag_keys = ["client-id"] \ No newline at end of file diff --git a/plugins/inputs/jolokia2/examples/kafka.conf b/plugins/inputs/jolokia2/examples/kafka.conf index ae34831fc55c9..24053b5ad6fa7 100644 --- a/plugins/inputs/jolokia2/examples/kafka.conf +++ b/plugins/inputs/jolokia2/examples/kafka.conf @@ -1,6 +1,30 @@ [[inputs.jolokia2_agent]] name_prefix = "kafka_" + + ## If you intend to use "non_negative_derivative(1s)" with "*.count" fields, you don't need precalculated fields. + # fielddrop = [ + # "*.EventType", + # "*.FifteenMinuteRate", + # "*.FiveMinuteRate", + # "*.MeanRate", + # "*.OneMinuteRate", + # "*.RateUnit", + # "*.LatencyUnit", + # "*.50thPercentile", + # "*.75thPercentile", + # "*.95thPercentile", + # "*.98thPercentile", + # "*.99thPercentile", + # "*.999thPercentile", + # "*.Min", + # "*.Mean", + # "*.Max", + # "*.StdDev" + # ] + + ## jolokia_agent_url tag is not needed if you have only one instance of Kafka on the server. 
+ # tagexclude = ["jolokia_agent_url"] urls = ["http://localhost:8080/jolokia"] @@ -21,9 +45,15 @@ field_name = "$2" [[inputs.jolokia2_agent.metric]] - name = "client" - mbean = "kafka.server:client-id=*,type=*" - tag_keys = ["client-id", "type"] + name = "zookeeper" + mbean = "kafka.server:name=*,type=SessionExpireListener" + field_prefix = "$1." + + [[inputs.jolokia2_agent.metric]] + name = "user" + mbean = "kafka.server:user=*,type=Request" + field_prefix = "" + tag_keys = ["user"] [[inputs.jolokia2_agent.metric]] name = "request" @@ -53,3 +83,27 @@ mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition" field_name = "UnderReplicatedPartitions" tag_keys = ["topic", "partition"] + +## If you have multiple instances of Kafka on the server, use 'jolokia_agent_url' as the identity of each instance # [[processors.rename]] # namepass = ["kafka_*"] # order = 1 # [[processors.rename.replace]] # tag = "jolokia_agent_url" # dest = "instance" # # [[processors.regex]] # namepass = ["kafka_*"] # order = 2 # [[processors.regex.tags]] # key = "instance" # pattern = "^.+:8080/.+$" # replacement = "0" # [[processors.regex.tags]] # key = "instance" # pattern = "^.+:8081/.+$" # replacement = "1" # [[processors.regex.tags]] # key = "instance" # pattern = "^.+:8082/.+$" # replacement = "2" diff --git a/plugins/inputs/jolokia2/jolokia.go b/plugins/inputs/jolokia2/jolokia.go deleted file mode 100644 index 430f587410d45..0000000000000 --- a/plugins/inputs/jolokia2/jolokia.go +++ /dev/null @@ -1,21 +0,0 @@ -package jolokia2 - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" -) - -func init() { - inputs.Add("jolokia2_agent", func() telegraf.Input { - return &JolokiaAgent{ - Metrics: []MetricConfig{}, - DefaultFieldSeparator: ".", - } - }) - inputs.Add("jolokia2_proxy", func() telegraf.Input { - return &JolokiaProxy{ - Metrics: []MetricConfig{}, - DefaultFieldSeparator: ".", - } - }) -} diff --git a/plugins/inputs/jolokia2/jolokia2.go b/plugins/inputs/jolokia2/jolokia2.go new file mode 100644 index 0000000000000..bf124e2e90f79 --- /dev/null +++ b/plugins/inputs/jolokia2/jolokia2.go @@ -0,0 +1,24 @@ +package jolokia2 + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/jolokia2/common" + "github.com/influxdata/telegraf/plugins/inputs/jolokia2/jolokia2_agent" + "github.com/influxdata/telegraf/plugins/inputs/jolokia2/jolokia2_proxy" +) + +func init() { + inputs.Add("jolokia2_agent", func() telegraf.Input { + return &jolokia2_agent.JolokiaAgent{ + Metrics: []common.MetricConfig{}, + DefaultFieldSeparator: ".", + } + }) + inputs.Add("jolokia2_proxy", func() telegraf.Input { + return &jolokia2_proxy.JolokiaProxy{ + Metrics: []common.MetricConfig{}, + DefaultFieldSeparator: ".", + } + }) +} diff --git a/plugins/inputs/jolokia2/jolokia2_agent/README.md b/plugins/inputs/jolokia2/jolokia2_agent/README.md new file mode 100644 index 0000000000000..54f1428c00d62 --- /dev/null +++ b/plugins/inputs/jolokia2/jolokia2_agent/README.md @@ -0,0 +1,31 @@ +# Jolokia2 Agent input plugin + +The `jolokia2_agent` input plugin reads JMX metrics from one or more [Jolokia agent](https://jolokia.org/agent/jvm.html) REST endpoints.
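+
+Each `metric` block maps an MBean to a Telegraf metric: `name` becomes the measurement name, the attributes listed in `paths` become fields, and any `tag_keys` lift MBean object-name properties into tags. A minimal sketch (this `java_memory` block is illustrative only and is not part of this patch):
+
+```toml
+[[inputs.jolokia2_agent.metric]]
+  name  = "java_memory"            # measurement name
+  mbean = "java.lang:type=Memory"  # MBean to query
+  paths = ["HeapMemoryUsage"]      # attributes emitted as fields
+```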
+ +## Configuration + +```toml +# Read JMX metrics from a Jolokia REST agent endpoint +[[inputs.jolokia2_agent]] + # default_tag_prefix = "" + # default_field_prefix = "" + # default_field_separator = "." + + # Add agents URLs to query + urls = ["http://localhost:8080/jolokia"] + # username = "" + # password = "" + # response_timeout = "5s" + + ## Optional TLS config + # tls_ca = "/var/private/ca.pem" + # tls_cert = "/var/private/client.pem" + # tls_key = "/var/private/client-key.pem" + # insecure_skip_verify = false + + ## Add metrics to read + [[inputs.jolokia2_agent.metric]] + name = "java_runtime" + mbean = "java.lang:type=Runtime" + paths = ["Uptime"] +``` diff --git a/plugins/inputs/jolokia2/jolokia2_agent/jolokia2_agent.go b/plugins/inputs/jolokia2/jolokia2_agent/jolokia2_agent.go new file mode 100644 index 0000000000000..5613c0bd42f25 --- /dev/null +++ b/plugins/inputs/jolokia2/jolokia2_agent/jolokia2_agent.go @@ -0,0 +1,97 @@ +//go:generate ../../../../tools/readme_config_includer/generator +package jolokia2_agent + +import ( + _ "embed" + + "fmt" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs/jolokia2/common" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +type JolokiaAgent struct { + DefaultFieldPrefix string + DefaultFieldSeparator string + DefaultTagPrefix string + + URLs []string `toml:"urls"` + Username string + Password string + ResponseTimeout config.Duration `toml:"response_timeout"` + + tls.ClientConfig + + Metrics []common.MetricConfig `toml:"metric"` + gatherer *common.Gatherer + clients []*common.Client +} + +func (*JolokiaAgent) SampleConfig() string { + return sampleConfig +} + +func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { + if ja.gatherer == nil { + ja.gatherer = common.NewGatherer(ja.createMetrics()) + } + + // Initialize clients once + if ja.clients == nil { + ja.clients = make([]*common.Client, 0, len(ja.URLs)) + for _, url := range ja.URLs { + client, err := ja.createClient(url) + if err != nil { + acc.AddError(fmt.Errorf("unable to create client for %s: %v", url, err)) + continue + } + ja.clients = append(ja.clients, client) + } + } + + var wg sync.WaitGroup + + for _, client := range ja.clients { + wg.Add(1) + go func(client *common.Client) { + defer wg.Done() + + err := ja.gatherer.Gather(client, acc) + if err != nil { + acc.AddError(fmt.Errorf("unable to gather metrics for %s: %v", client.URL, err)) + } + }(client) + } + + wg.Wait() + + return nil +} + +func (ja *JolokiaAgent) createMetrics() []common.Metric { + var metrics []common.Metric + + for _, metricConfig := range ja.Metrics { + metrics = append(metrics, common.NewMetric(metricConfig, + ja.DefaultFieldPrefix, ja.DefaultFieldSeparator, ja.DefaultTagPrefix)) + } + + return metrics +} + +func (ja *JolokiaAgent) createClient(url string) (*common.Client, error) { + return common.NewClient(url, &common.ClientConfig{ + Username: ja.Username, + Password: ja.Password, + ResponseTimeout: time.Duration(ja.ResponseTimeout), + ClientConfig: ja.ClientConfig, + }) +} diff --git a/plugins/inputs/jolokia2/jolokia2_agent/sample.conf b/plugins/inputs/jolokia2/jolokia2_agent/sample.conf new file mode 100644 index 0000000000000..24f0e4636501d --- /dev/null +++ b/plugins/inputs/jolokia2/jolokia2_agent/sample.conf @@ -0,0 +1,23 @@ +# Read JMX metrics from a 
Jolokia REST agent endpoint +[[inputs.jolokia2_agent]] + # default_tag_prefix = "" + # default_field_prefix = "" + # default_field_separator = "." + + # Add agents URLs to query + urls = ["http://localhost:8080/jolokia"] + # username = "" + # password = "" + # response_timeout = "5s" + + ## Optional TLS config + # tls_ca = "/var/private/ca.pem" + # tls_cert = "/var/private/client.pem" + # tls_key = "/var/private/client-key.pem" + # insecure_skip_verify = false + + ## Add metrics to read + [[inputs.jolokia2_agent.metric]] + name = "java_runtime" + mbean = "java.lang:type=Runtime" + paths = ["Uptime"] diff --git a/plugins/inputs/jolokia2/jolokia2_proxy/README.md b/plugins/inputs/jolokia2/jolokia2_proxy/README.md new file mode 100644 index 0000000000000..861ef19cd5b0f --- /dev/null +++ b/plugins/inputs/jolokia2/jolokia2_proxy/README.md @@ -0,0 +1,39 @@ +# Jolokia2 Proxy input plugin + +The `jolokia2_proxy` input plugin reads JMX metrics from one or more _targets_ by interacting with a [Jolokia proxy](https://jolokia.org/features/proxy.html) REST endpoint. + +## Configuration + +```toml +# Read JMX metrics from a Jolokia REST proxy endpoint +[[inputs.jolokia2_proxy]] + # default_tag_prefix = "" + # default_field_prefix = "" + # default_field_separator = "." + + ## Proxy agent + url = "http://localhost:8080/jolokia" + # username = "" + # password = "" + # response_timeout = "5s" + + ## Optional TLS config + # tls_ca = "/var/private/ca.pem" + # tls_cert = "/var/private/client.pem" + # tls_key = "/var/private/client-key.pem" + # insecure_skip_verify = false + + ## Add proxy targets to query + # default_target_username = "" + # default_target_password = "" + [[inputs.jolokia2_proxy.target]] + url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" + # username = "" + # password = "" + + ## Add metrics to read + [[inputs.jolokia2_proxy.metric]] + name = "java_runtime" + mbean = "java.lang:type=Runtime" + paths = ["Uptime"] +``` diff --git a/plugins/inputs/jolokia2/jolokia2_proxy/jolokia_proxy.go b/plugins/inputs/jolokia2/jolokia2_proxy/jolokia_proxy.go new file mode 100644 index 0000000000000..33074c7b3e9b9 --- /dev/null +++ b/plugins/inputs/jolokia2/jolokia2_proxy/jolokia_proxy.go @@ -0,0 +1,98 @@ +//go:generate ../../../../tools/readme_config_includer/generator +package jolokia2_proxy + +import ( + _ "embed" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs/jolokia2/common" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + +type JolokiaProxy struct { + DefaultFieldPrefix string + DefaultFieldSeparator string + DefaultTagPrefix string + + URL string `toml:"url"` + DefaultTargetPassword string + DefaultTargetUsername string + Targets []JolokiaProxyTargetConfig `toml:"target"` + + Username string + Password string + ResponseTimeout config.Duration `toml:"response_timeout"` + tls.ClientConfig + + Metrics []common.MetricConfig `toml:"metric"` + client *common.Client + gatherer *common.Gatherer +} + +type JolokiaProxyTargetConfig struct { + URL string `toml:"url"` + Username string + Password string +} + +func (*JolokiaProxy) SampleConfig() string { + return sampleConfig +} + +func (jp *JolokiaProxy) Gather(acc telegraf.Accumulator) error { + if jp.gatherer == nil { + jp.gatherer = common.NewGatherer(jp.createMetrics()) + } + + if jp.client == nil { + client, err := jp.createClient() + + if err != nil { + return err + } + + jp.client = client + } + + return jp.gatherer.Gather(jp.client, acc) +} + +func (jp *JolokiaProxy) createMetrics() []common.Metric { + var metrics []common.Metric + + for _, metricConfig := range jp.Metrics { + metrics = append(metrics, common.NewMetric(metricConfig, + jp.DefaultFieldPrefix, jp.DefaultFieldSeparator, jp.DefaultTagPrefix)) + } + + return metrics +} + +func (jp *JolokiaProxy) createClient() (*common.Client, error) { + proxyConfig := &common.ProxyConfig{ + DefaultTargetUsername: jp.DefaultTargetUsername, + DefaultTargetPassword: jp.DefaultTargetPassword, + } + + for _, target := range jp.Targets { + proxyConfig.Targets = append(proxyConfig.Targets, common.ProxyTargetConfig{ + URL: target.URL, + Username: target.Username, + Password: target.Password, + }) + } + + return common.NewClient(jp.URL, &common.ClientConfig{ + Username: jp.Username, + Password: jp.Password, + ResponseTimeout: time.Duration(jp.ResponseTimeout), + ClientConfig: jp.ClientConfig, + ProxyConfig: proxyConfig, + }) +} diff --git a/plugins/inputs/jolokia2/jolokia2_proxy/sample.conf b/plugins/inputs/jolokia2/jolokia2_proxy/sample.conf new file mode 100644 index 0000000000000..d5b4e41b00c17 --- /dev/null +++ b/plugins/inputs/jolokia2/jolokia2_proxy/sample.conf @@ -0,0 +1,31 @@ +# Read JMX metrics from a Jolokia REST proxy endpoint +[[inputs.jolokia2_proxy]] + # default_tag_prefix = "" + # default_field_prefix = "" + # default_field_separator = "." 
+ + ## Proxy agent + url = "http://localhost:8080/jolokia" + # username = "" + # password = "" + # response_timeout = "5s" + + ## Optional TLS config + # tls_ca = "/var/private/ca.pem" + # tls_cert = "/var/private/client.pem" + # tls_key = "/var/private/client-key.pem" + # insecure_skip_verify = false + + ## Add proxy targets to query + # default_target_username = "" + # default_target_password = "" + [[inputs.jolokia2_proxy.target]] + url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" + # username = "" + # password = "" + + ## Add metrics to read + [[inputs.jolokia2_proxy.metric]] + name = "java_runtime" + mbean = "java.lang:type=Runtime" + paths = ["Uptime"] diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia2_test.go similarity index 86% rename from plugins/inputs/jolokia2/jolokia_test.go rename to plugins/inputs/jolokia2/jolokia2_test.go index 61c410c0b2067..0320c2d5198b3 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia2_test.go @@ -1,4 +1,4 @@ -package jolokia2 +package jolokia2_test import ( "fmt" @@ -6,11 +6,15 @@ import ( "net/http/httptest" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs/jolokia2/common" + "github.com/influxdata/telegraf/plugins/inputs/jolokia2/jolokia2_agent" + "github.com/influxdata/telegraf/plugins/inputs/jolokia2/jolokia2_proxy" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" ) func TestJolokia2_ScalarValues(t *testing.T) { @@ -74,12 +78,12 @@ func TestJolokia2_ScalarValues(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) + plugin := SetupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "scalar_without_attribute", map[string]interface{}{ "value": 123.0, @@ -234,12 +238,12 @@ func TestJolokia2_ObjectValues(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) + plugin := SetupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "object_without_attribute", map[string]interface{}{ "biz": 123.0, @@ -322,12 +326,12 @@ func TestJolokia2_StatusCodes(t *testing.T) { "status": 500 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) + plugin := SetupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "ok", map[string]interface{}{ "value": 1.0, @@ -372,12 +376,12 @@ func TestJolokia2_TagRenaming(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) + plugin := SetupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, 
plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "default_tag_prefix", map[string]interface{}{ "value": 123.0, @@ -465,12 +469,12 @@ func TestJolokia2_FieldRenaming(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) + plugin := SetupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "default_field_modifiers", map[string]interface{}{ "DEFAULT_PREFIX_hello_DEFAULT_SEPARATOR_world": 123.0, @@ -573,12 +577,12 @@ func TestJolokia2_MetricMbeanMatching(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) + plugin := SetupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "mbean_name_and_object_keys", map[string]interface{}{ "value": 123.0, @@ -666,12 +670,12 @@ func TestJolokia2_MetricCompaction(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) + plugin := SetupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "compact_metric", map[string]interface{}{ "value": 123.0, @@ -727,12 +731,12 @@ func TestJolokia2_ProxyTargets(t *testing.T) { "status": 200 }]` - server := setupServer(http.StatusOK, response) + server := setupServer(response) defer server.Close() - plugin := setupPlugin(t, fmt.Sprintf(config, server.URL)) + plugin := SetupPlugin(t, fmt.Sprintf(config, server.URL)) var acc testutil.Accumulator - assert.NoError(t, plugin.Gather(&acc)) + require.NoError(t, plugin.Gather(&acc)) acc.AssertContainsTaggedFields(t, "hello", map[string]interface{}{ "value": 123.0, @@ -749,32 +753,28 @@ func TestJolokia2_ProxyTargets(t *testing.T) { } func TestFillFields(t *testing.T) { - complex := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} - var scalar interface{} - scalar = []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + complexPoint := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + scalarPoint := []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} results := map[string]interface{}{} - newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complex, results) - assert.Equal(t, map[string]interface{}{}, results) + common.NewPointBuilder(common.Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").FillFields("", complexPoint, results) + require.Equal(t, map[string]interface{}{}, results) results = map[string]interface{}{} - newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalar, results) - assert.Equal(t, map[string]interface{}{}, results) + common.NewPointBuilder(common.Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").FillFields("", scalarPoint, results) + require.Equal(t, map[string]interface{}{}, results) } -func setupServer(status int, resp string) *httptest.Server { +func setupServer(resp string) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - //body, err := ioutil.ReadAll(r.Body) - //if err == nil { - // fmt.Println(string(body)) - //} - + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, resp) })) } -func setupPlugin(t *testing.T, conf string) telegraf.Input { +func SetupPlugin(t *testing.T, conf string) telegraf.Input { table, err := toml.Parse([]byte(conf)) if err != nil { t.Fatalf("Unable to parse config! 
%v", err) @@ -784,8 +784,8 @@ func setupPlugin(t *testing.T, conf string) telegraf.Input { object := table.Fields[name] switch name { case "jolokia2_agent": - plugin := JolokiaAgent{ - Metrics: []MetricConfig{}, + plugin := jolokia2_agent.JolokiaAgent{ + Metrics: []common.MetricConfig{}, DefaultFieldSeparator: ".", } @@ -796,8 +796,8 @@ func setupPlugin(t *testing.T, conf string) telegraf.Input { return &plugin case "jolokia2_proxy": - plugin := JolokiaProxy{ - Metrics: []MetricConfig{}, + plugin := jolokia2_proxy.JolokiaProxy{ + Metrics: []common.MetricConfig{}, DefaultFieldSeparator: ".", } diff --git a/plugins/inputs/jolokia2/jolokia_agent.go b/plugins/inputs/jolokia2/jolokia_agent.go deleted file mode 100644 index 58b67ce5a1c9a..0000000000000 --- a/plugins/inputs/jolokia2/jolokia_agent.go +++ /dev/null @@ -1,115 +0,0 @@ -package jolokia2 - -import ( - "fmt" - "sync" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/common/tls" -) - -type JolokiaAgent struct { - DefaultFieldPrefix string - DefaultFieldSeparator string - DefaultTagPrefix string - - URLs []string `toml:"urls"` - Username string - Password string - ResponseTimeout internal.Duration `toml:"response_timeout"` - - tls.ClientConfig - - Metrics []MetricConfig `toml:"metric"` - gatherer *Gatherer - clients []*Client -} - -func (ja *JolokiaAgent) SampleConfig() string { - return ` - # default_tag_prefix = "" - # default_field_prefix = "" - # default_field_separator = "." - - # Add agents URLs to query - urls = ["http://localhost:8080/jolokia"] - # username = "" - # password = "" - # response_timeout = "5s" - - ## Optional TLS config - # tls_ca = "/var/private/ca.pem" - # tls_cert = "/var/private/client.pem" - # tls_key = "/var/private/client-key.pem" - # insecure_skip_verify = false - - ## Add metrics to read - [[inputs.jolokia2_agent.metric]] - name = "java_runtime" - mbean = "java.lang:type=Runtime" - paths = ["Uptime"] -` -} - -func (ja *JolokiaAgent) Description() string { - return "Read JMX metrics from a Jolokia REST agent endpoint" -} - -func (ja *JolokiaAgent) Gather(acc telegraf.Accumulator) error { - if ja.gatherer == nil { - ja.gatherer = NewGatherer(ja.createMetrics()) - } - - // Initialize clients once - if ja.clients == nil { - ja.clients = make([]*Client, 0, len(ja.URLs)) - for _, url := range ja.URLs { - client, err := ja.createClient(url) - if err != nil { - acc.AddError(fmt.Errorf("Unable to create client for %s: %v", url, err)) - continue - } - ja.clients = append(ja.clients, client) - } - } - - var wg sync.WaitGroup - - for _, client := range ja.clients { - wg.Add(1) - go func(client *Client) { - defer wg.Done() - - err := ja.gatherer.Gather(client, acc) - if err != nil { - acc.AddError(fmt.Errorf("Unable to gather metrics for %s: %v", client.URL, err)) - } - - }(client) - } - - wg.Wait() - - return nil -} - -func (ja *JolokiaAgent) createMetrics() []Metric { - var metrics []Metric - - for _, config := range ja.Metrics { - metrics = append(metrics, NewMetric(config, - ja.DefaultFieldPrefix, ja.DefaultFieldSeparator, ja.DefaultTagPrefix)) - } - - return metrics -} - -func (ja *JolokiaAgent) createClient(url string) (*Client, error) { - return NewClient(url, &ClientConfig{ - Username: ja.Username, - Password: ja.Password, - ResponseTimeout: ja.ResponseTimeout.Duration, - ClientConfig: ja.ClientConfig, - }) -} diff --git a/plugins/inputs/jolokia2/jolokia_proxy.go b/plugins/inputs/jolokia2/jolokia_proxy.go deleted file mode 100644 index 
6428a88515aee..0000000000000 --- a/plugins/inputs/jolokia2/jolokia_proxy.go +++ /dev/null @@ -1,123 +0,0 @@ -package jolokia2 - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/common/tls" -) - -type JolokiaProxy struct { - DefaultFieldPrefix string - DefaultFieldSeparator string - DefaultTagPrefix string - - URL string `toml:"url"` - DefaultTargetPassword string - DefaultTargetUsername string - Targets []JolokiaProxyTargetConfig `toml:"target"` - - Username string - Password string - ResponseTimeout internal.Duration `toml:"response_timeout"` - tls.ClientConfig - - Metrics []MetricConfig `toml:"metric"` - client *Client - gatherer *Gatherer -} - -type JolokiaProxyTargetConfig struct { - URL string `toml:"url"` - Username string - Password string -} - -func (jp *JolokiaProxy) SampleConfig() string { - return ` - # default_tag_prefix = "" - # default_field_prefix = "" - # default_field_separator = "." - - ## Proxy agent - url = "http://localhost:8080/jolokia" - # username = "" - # password = "" - # response_timeout = "5s" - - ## Optional TLS config - # tls_ca = "/var/private/ca.pem" - # tls_cert = "/var/private/client.pem" - # tls_key = "/var/private/client-key.pem" - # insecure_skip_verify = false - - ## Add proxy targets to query - # default_target_username = "" - # default_target_password = "" - [[inputs.jolokia2_proxy.target]] - url = "service:jmx:rmi:///jndi/rmi://targethost:9999/jmxrmi" - # username = "" - # password = "" - - ## Add metrics to read - [[inputs.jolokia2_proxy.metric]] - name = "java_runtime" - mbean = "java.lang:type=Runtime" - paths = ["Uptime"] -` -} - -func (jp *JolokiaProxy) Description() string { - return "Read JMX metrics from a Jolokia REST proxy endpoint" -} - -func (jp *JolokiaProxy) Gather(acc telegraf.Accumulator) error { - if jp.gatherer == nil { - jp.gatherer = NewGatherer(jp.createMetrics()) - } - - if jp.client == nil { - client, err := jp.createClient() - - if err != nil { - return err - } - - jp.client = client - } - - return jp.gatherer.Gather(jp.client, acc) -} - -func (jp *JolokiaProxy) createMetrics() []Metric { - var metrics []Metric - - for _, config := range jp.Metrics { - metrics = append(metrics, NewMetric(config, - jp.DefaultFieldPrefix, jp.DefaultFieldSeparator, jp.DefaultTagPrefix)) - } - - return metrics -} - -func (jp *JolokiaProxy) createClient() (*Client, error) { - proxyConfig := &ProxyConfig{ - DefaultTargetUsername: jp.DefaultTargetUsername, - DefaultTargetPassword: jp.DefaultTargetPassword, - } - - for _, target := range jp.Targets { - proxyConfig.Targets = append(proxyConfig.Targets, ProxyTargetConfig{ - URL: target.URL, - Username: target.Username, - Password: target.Password, - }) - } - - return NewClient(jp.URL, &ClientConfig{ - Username: jp.Username, - Password: jp.Password, - ResponseTimeout: jp.ResponseTimeout.Duration, - ClientConfig: jp.ClientConfig, - ProxyConfig: proxyConfig, - }) -} diff --git a/plugins/inputs/jti_openconfig_telemetry/README.md b/plugins/inputs/jti_openconfig_telemetry/README.md index 1a28b55aeb8d9..c325b2305e535 100644 --- a/plugins/inputs/jti_openconfig_telemetry/README.md +++ b/plugins/inputs/jti_openconfig_telemetry/README.md @@ -1,11 +1,15 @@ # JTI OpenConfig Telemetry Input Plugin -This plugin reads Juniper Networks implementation of OpenConfig telemetry data from listed sensors using Junos Telemetry Interface. 
Refer to -[openconfig.net](http://openconfig.net/) for more details about OpenConfig and [Junos Telemetry Interface (JTI)](https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html). +This plugin reads Juniper Networks implementation of OpenConfig telemetry data +from listed sensors using Junos Telemetry Interface. Refer to +[openconfig.net](http://openconfig.net/) for more details about OpenConfig and +[Junos Telemetry Interface (JTI)][1]. -### Configuration: +[1]: https://www.juniper.net/documentation/en_US/junos/topics/concept/junos-telemetry-interface-oveview.html -```toml +## Configuration + +```toml @sample.conf # Subscribe and receive OpenConfig Telemetry data using JTI [[inputs.jti_openconfig_telemetry]] ## List of device addresses to collect telemetry from @@ -57,7 +61,7 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data f str_as_tags = false ``` -### Tags: +## Tags - All measurements are tagged appropriately using the identifier information in incoming data diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go index 7ddeefacab635..1342758887932 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.pb.go @@ -1,182 +1,238 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: authentication_service.proto +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: auth/authentication_service.proto -/* -Package authentication is a generated protocol buffer package. - -It is generated from these files: - authentication_service.proto - -It has these top-level messages: - LoginRequest - LoginReply -*/ package authentication -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // The request message containing the user's name, password and client id type LoginRequest struct { - UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName" json:"user_name,omitempty"` - Password string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` - ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId" json:"client_id,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserName string `protobuf:"bytes,1,opt,name=user_name,json=userName,proto3" json:"user_name,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` +} + +func (x *LoginRequest) Reset() { + *x = LoginRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LoginRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LoginRequest) ProtoMessage() {} + +func (x *LoginRequest) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *LoginRequest) Reset() { *m = LoginRequest{} } -func (m *LoginRequest) String() string { return proto.CompactTextString(m) } -func (*LoginRequest) ProtoMessage() {} -func (*LoginRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +// Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead. +func (*LoginRequest) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{0} +} -func (m *LoginRequest) GetUserName() string { - if m != nil { - return m.UserName +func (x *LoginRequest) GetUserName() string { + if x != nil { + return x.UserName } return "" } -func (m *LoginRequest) GetPassword() string { - if m != nil { - return m.Password +func (x *LoginRequest) GetPassword() string { + if x != nil { + return x.Password } return "" } -func (m *LoginRequest) GetClientId() string { - if m != nil { - return m.ClientId +func (x *LoginRequest) GetClientId() string { + if x != nil { + return x.ClientId } return "" } +// // The response message containing the result of login attempt. 
// result value of true indicates success and false indicates // failure type LoginReply struct { - Result bool `protobuf:"varint,1,opt,name=result" json:"result,omitempty"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LoginReply) Reset() { *m = LoginReply{} } -func (m *LoginReply) String() string { return proto.CompactTextString(m) } -func (*LoginReply) ProtoMessage() {} -func (*LoginReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + Result bool `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` +} -func (m *LoginReply) GetResult() bool { - if m != nil { - return m.Result +func (x *LoginReply) Reset() { + *x = LoginReply{} + if protoimpl.UnsafeEnabled { + mi := &file_auth_authentication_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func init() { - proto.RegisterType((*LoginRequest)(nil), "authentication.LoginRequest") - proto.RegisterType((*LoginReply)(nil), "authentication.LoginReply") +func (x *LoginReply) String() string { + return protoimpl.X.MessageStringOf(x) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +func (*LoginReply) ProtoMessage() {} -// Client API for Login service - -type LoginClient interface { - LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) -} - -type loginClient struct { - cc *grpc.ClientConn +func (x *LoginReply) ProtoReflect() protoreflect.Message { + mi := &file_auth_authentication_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func NewLoginClient(cc *grpc.ClientConn) LoginClient { - return &loginClient{cc} +// Deprecated: Use LoginReply.ProtoReflect.Descriptor instead. +func (*LoginReply) Descriptor() ([]byte, []int) { + return file_auth_authentication_service_proto_rawDescGZIP(), []int{1} } -func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { - out := new(LoginReply) - err := grpc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *LoginReply) GetResult() bool { + if x != nil { + return x.Result } - return out, nil + return false } -// Server API for Login service +var File_auth_authentication_service_proto protoreflect.FileDescriptor + +var file_auth_authentication_service_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x22, 0x24, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, + 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x32, + 0x51, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x48, 0x0a, 0x0a, 0x4c, 0x6f, 0x67, 0x69, + 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x22, 0x00, 0x42, 0x12, 0x5a, 0x10, 0x2e, 0x3b, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_auth_authentication_service_proto_rawDescOnce sync.Once + file_auth_authentication_service_proto_rawDescData = file_auth_authentication_service_proto_rawDesc +) -type LoginServer interface { - LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) +func file_auth_authentication_service_proto_rawDescGZIP() []byte { + file_auth_authentication_service_proto_rawDescOnce.Do(func() { + file_auth_authentication_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_auth_authentication_service_proto_rawDescData) + }) + return file_auth_authentication_service_proto_rawDescData } -func RegisterLoginServer(s *grpc.Server, srv LoginServer) { - s.RegisterService(&_Login_serviceDesc, srv) +var file_auth_authentication_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_auth_authentication_service_proto_goTypes = []interface{}{ + (*LoginRequest)(nil), // 0: authentication.LoginRequest + (*LoginReply)(nil), // 1: authentication.LoginReply +} +var file_auth_authentication_service_proto_depIdxs = []int32{ + 0, // 0: authentication.Login.LoginCheck:input_type -> authentication.LoginRequest + 1, // 1: authentication.Login.LoginCheck:output_type -> authentication.LoginReply + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // 
[0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LoginRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LoginServer).LoginCheck(ctx, in) +func init() { file_auth_authentication_service_proto_init() } +func file_auth_authentication_service_proto_init() { + if File_auth_authentication_service_proto != nil { + return } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/authentication.Login/LoginCheck", + if !protoimpl.UnsafeEnabled { + file_auth_authentication_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_auth_authentication_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LoginReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Login_serviceDesc = grpc.ServiceDesc{ - ServiceName: "authentication.Login", - HandlerType: (*LoginServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "LoginCheck", - Handler: _Login_LoginCheck_Handler, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_auth_authentication_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "authentication_service.proto", -} - -func init() { proto.RegisterFile("authentication_service.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 200 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0x2c, 0x2d, 0xc9, - 0x48, 0xcd, 0x2b, 0xc9, 0x4c, 0x4e, 0x2c, 0xc9, 0xcc, 0xcf, 0x8b, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, - 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0x95, 0x55, 0x4a, 0xe1, 0xe2, - 0xf1, 0xc9, 0x4f, 0xcf, 0xcc, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x92, 0xe6, 0xe2, - 0x2c, 0x2d, 0x4e, 0x2d, 0x8a, 0xcf, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, - 0xe2, 0x00, 0x09, 0xf8, 0x25, 0xe6, 0xa6, 0x0a, 0x49, 0x71, 0x71, 0x14, 0x24, 0x16, 0x17, 0x97, - 0xe7, 0x17, 0xa5, 0x48, 0x30, 0x41, 0xe4, 0x60, 0x7c, 0x90, 0xc6, 0xe4, 0x9c, 0xcc, 0xd4, 0xbc, - 0x92, 0xf8, 0xcc, 0x14, 0x09, 0x66, 0x88, 0x24, 0x44, 0xc0, 0x33, 0x45, 0x49, 0x85, 0x8b, 0x0b, - 0x6a, 0x4b, 0x41, 0x4e, 0xa5, 0x90, 0x18, 0x17, 0x5b, 0x51, 0x6a, 0x71, 0x69, 0x4e, 0x09, 0xd8, - 0x02, 0x8e, 0x20, 0x28, 0xcf, 0x28, 0x90, 0x8b, 0x15, 0xac, 0x4a, 0xc8, 0x03, 0xaa, 0xdc, 0x39, - 0x23, 0x35, 0x39, 0x5b, 0x48, 0x46, 0x0f, 0xd5, 0xcd, 0x7a, 0xc8, 0x0e, 0x96, 0x92, 0xc2, 0x21, - 0x5b, 0x90, 0x53, 0xa9, 0xc4, 0x90, 0xc4, 0x06, 0xf6, 0xb5, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, - 0x11, 0x57, 0x52, 0xd2, 0x15, 0x01, 0x00, 0x00, + GoTypes: file_auth_authentication_service_proto_goTypes, + DependencyIndexes: 
file_auth_authentication_service_proto_depIdxs, + MessageInfos: file_auth_authentication_service_proto_msgTypes, + }.Build() + File_auth_authentication_service_proto = out.File + file_auth_authentication_service_proto_rawDesc = nil + file_auth_authentication_service_proto_goTypes = nil + file_auth_authentication_service_proto_depIdxs = nil } diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto index a41e13a09f7d9..f67b67a6c5730 100644 --- a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service.proto @@ -25,6 +25,7 @@ syntax = "proto3"; package authentication; +option go_package = ".;authentication"; // The Login service definition. service Login { diff --git a/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go new file mode 100644 index 0000000000000..bbbf200ec68be --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/auth/authentication_service_grpc.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package authentication + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LoginClient is the client API for Login service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LoginClient interface { + LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) +} + +type loginClient struct { + cc grpc.ClientConnInterface +} + +func NewLoginClient(cc grpc.ClientConnInterface) LoginClient { + return &loginClient{cc} +} + +func (c *loginClient) LoginCheck(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginReply, error) { + out := new(LoginReply) + err := c.cc.Invoke(ctx, "/authentication.Login/LoginCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LoginServer is the server API for Login service. +// All implementations must embed UnimplementedLoginServer +// for forward compatibility +type LoginServer interface { + LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) + mustEmbedUnimplementedLoginServer() +} + +// UnimplementedLoginServer must be embedded to have forward compatible implementations. +type UnimplementedLoginServer struct { +} + +func (UnimplementedLoginServer) LoginCheck(context.Context, *LoginRequest) (*LoginReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoginCheck not implemented") +} +func (UnimplementedLoginServer) mustEmbedUnimplementedLoginServer() {} + +// UnsafeLoginServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to LoginServer will +// result in compilation errors. 
+type UnsafeLoginServer interface { + mustEmbedUnimplementedLoginServer() +} + +func RegisterLoginServer(s grpc.ServiceRegistrar, srv LoginServer) { + s.RegisterService(&Login_ServiceDesc, srv) +} + +func _Login_LoginCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LoginRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LoginServer).LoginCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/authentication.Login/LoginCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LoginServer).LoginCheck(ctx, req.(*LoginRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Login_ServiceDesc is the grpc.ServiceDesc for Login service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Login_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "authentication.Login", + HandlerType: (*LoginServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "LoginCheck", + Handler: _Login_LoginCheck_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "auth/authentication_service.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/collection.go b/plugins/inputs/jti_openconfig_telemetry/collection.go index ffd9019f5f317..d1bad8b30c739 100644 --- a/plugins/inputs/jti_openconfig_telemetry/collection.go +++ b/plugins/inputs/jti_openconfig_telemetry/collection.go @@ -17,7 +17,7 @@ func (a CollectionByKeys) Less(i, j int) bool { return a[i].numKeys < a[j].numKe // Checks to see if there is already a group with these tags and returns its index. Returns -1 if unavailable. 
func (a CollectionByKeys) IsAvailable(tags map[string]string) *DataGroup { - sort.Sort(CollectionByKeys(a)) + sort.Sort(a) // Iterate through all the groups and see if we have group with these tags for _, group := range a { diff --git a/plugins/inputs/jti_openconfig_telemetry/gen.go b/plugins/inputs/jti_openconfig_telemetry/gen.go new file mode 100644 index 0000000000000..0b97e3bea9e55 --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/gen.go @@ -0,0 +1,11 @@ +package jti_openconfig_telemetry + +// To run these commands, make sure that protoc-gen-go and protoc-gen-go-grpc are installed +// > go install google.golang.org/protobuf/cmd/protoc-gen-go +// > go install google.golang.org/grpc/cmd/protoc-gen-go-grpc +// +// Generated files were last generated with: +// - protoc-gen-go: v1.27.1 +// - protoc-gen-go-grpc: v1.1.0 +//go:generate protoc --go_out=auth/ --go-grpc_out=auth/ auth/authentication_service.proto +//go:generate protoc --go_out=oc/ --go-grpc_out=oc/ oc/oc.proto diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go similarity index 73% rename from plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go rename to plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go index 0c6fc9e052d43..64bd9880e009d 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package jti_openconfig_telemetry import ( + _ "embed" "fmt" "net" "regexp" @@ -8,29 +10,34 @@ import ( "sync" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - internaltls "github.com/influxdata/telegraf/plugins/common/tls" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" - "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/status" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + internaltls "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" + authentication "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" + telemetry "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type OpenConfigTelemetry struct { - Servers []string `toml:"servers"` - Sensors []string `toml:"sensors"` - Username string `toml:"username"` - Password string `toml:"password"` - ClientID string `toml:"client_id"` - SampleFrequency internal.Duration `toml:"sample_frequency"` - StrAsTags bool `toml:"str_as_tags"` - RetryDelay internal.Duration `toml:"retry_delay"` - EnableTLS bool `toml:"enable_tls"` + Servers []string `toml:"servers"` + Sensors []string `toml:"sensors"` + Username string `toml:"username"` + Password string `toml:"password"` + ClientID string `toml:"client_id"` + SampleFrequency config.Duration `toml:"sample_frequency"` + StrAsTags bool `toml:"str_as_tags"` + RetryDelay config.Duration `toml:"retry_delay"` + EnableTLS bool `toml:"enable_tls"` internaltls.ClientConfig Log telegraf.Logger @@ -42,67 +49,14 @@ type OpenConfigTelemetry struct { var ( // Regex to match and extract data points from path value in received key - keyPathRegex = regexp.MustCompile("\\/([^\\/]*)\\[([A-Za-z0-9\\-\\/]*\\=[^\\[]*)\\]") - sampleConfig = ` - ## List of device addresses to collect telemetry from - servers = ["localhost:1883"] - - ## Authentication details. Username and password are must if device expects - ## authentication. Client ID must be unique when connecting from multiple instances - ## of telegraf to the same device - username = "user" - password = "pass" - client_id = "telegraf" - - ## Frequency to get data - sample_frequency = "1000ms" - - ## Sensors to subscribe for - ## A identifier for each sensor can be provided in path by separating with space - ## Else sensor path will be used as identifier - ## When identifier is used, we can provide a list of space separated sensors. - ## A single subscription will be created with all these sensors and data will - ## be saved to measurement with this identifier name - sensors = [ - "/interfaces/", - "collection /components/ /lldp", - ] - - ## We allow specifying sensor group level reporting rate. To do this, specify the - ## reporting rate in Duration at the beginning of sensor paths / collection - ## name. For entries without reporting rate, we use configured sample frequency - sensors = [ - "1000ms customReporting /interfaces /lldp", - "2000ms collection /components", - "/interfaces", - ] - - ## Optional TLS Config - # enable_tls = true - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. 
- ## Failed streams/calls will not be retried if 0 is provided - retry_delay = "1000ms" - - ## To treat all string values as tags, set this to true - str_as_tags = false -` + keyPathRegex = regexp.MustCompile(`/([^/]*)\[([A-Za-z0-9\-/]*=[^\[]*)]`) ) -func (m *OpenConfigTelemetry) SampleConfig() string { +func (*OpenConfigTelemetry) SampleConfig() string { return sampleConfig } -func (m *OpenConfigTelemetry) Description() string { - return "Read JTI OpenConfig Telemetry from listed sensors" -} - -func (m *OpenConfigTelemetry) Gather(acc telegraf.Accumulator) error { +func (m *OpenConfigTelemetry) Gather(_ telegraf.Accumulator) error { return nil } @@ -130,7 +84,7 @@ func spitTagsNPath(xmlpath string) (string, map[string]string) { // we must emit multiple tags for _, kv := range strings.Split(sub[2], " and ") { key := tagKey + strings.TrimSpace(strings.Split(kv, "=")[0]) - tagValue := strings.Replace(strings.Split(kv, "=")[1], "'", "", -1) + tagValue := strings.ReplaceAll(strings.Split(kv, "=")[1], "'", "") tags[key] = tagValue } @@ -169,25 +123,18 @@ func (m *OpenConfigTelemetry) extractData(r *telemetry.OpenConfigData, grpcServe } else { kv[xmlpath] = v.GetStrValue() } - break case *telemetry.KeyValue_DoubleValue: kv[xmlpath] = v.GetDoubleValue() - break case *telemetry.KeyValue_IntValue: kv[xmlpath] = v.GetIntValue() - break case *telemetry.KeyValue_UintValue: kv[xmlpath] = v.GetUintValue() - break case *telemetry.KeyValue_SintValue: kv[xmlpath] = v.GetSintValue() - break case *telemetry.KeyValue_BoolValue: kv[xmlpath] = v.GetBoolValue() - break case *telemetry.KeyValue_BytesValue: kv[xmlpath] = v.GetBytesValue() - break } // Insert other tags from message @@ -226,7 +173,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { m.sensorsConfig = make([]sensorConfig, 0) for _, sensor := range m.Sensors { spathSplit := strings.Fields(sensor) - reportingRate = uint32(m.SampleFrequency.Duration / time.Millisecond) + reportingRate = uint32(time.Duration(m.SampleFrequency) / time.Millisecond) // Extract measurement name and custom reporting rate if specified. Custom // reporting rate will be specified at the beginning of sensor list, @@ -272,16 +219,18 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { m.sensorsConfig = append(m.sensorsConfig, sensorConfig{ measurementName: measurementName, pathList: pathlist, }) - } return len(m.sensorsConfig) } // Subscribes and collects OpenConfig telemetry data from given server -func (m *OpenConfigTelemetry) collectData(ctx context.Context, - grpcServer string, grpcClientConn *grpc.ClientConn, - acc telegraf.Accumulator) error { +func (m *OpenConfigTelemetry) collectData( + ctx context.Context, + grpcServer string, + grpcClientConn *grpc.ClientConn, + acc telegraf.Accumulator, +) { c := telemetry.NewOpenConfigTelemetryClient(grpcClientConn) for _, sensor := range m.sensorsConfig { m.wg.Add(1) @@ -298,17 +247,15 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer, err)) return - } else { - // Retry with delay. If delay is not provided, use default - if m.RetryDelay.Duration > 0 { - m.Log.Debugf("Retrying %s with timeout %v", grpcServer, - m.RetryDelay.Duration) - time.Sleep(m.RetryDelay.Duration) - continue - } else { - return - } } + + // Retry with delay. 
If delay is not provided, use default + if time.Duration(m.RetryDelay) > 0 { + m.Log.Debugf("Retrying %s with timeout %v", grpcServer, time.Duration(m.RetryDelay)) + time.Sleep(time.Duration(m.RetryDelay)) + continue + } + return } for { r, err := stream.Recv() @@ -345,8 +292,6 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, } }(ctx, sensor) } - - return nil } func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { @@ -417,7 +362,7 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { func init() { inputs.Add("jti_openconfig_telemetry", func() telegraf.Input { return &OpenConfigTelemetry{ - RetryDelay: internal.Duration{Duration: time.Second}, + RetryDelay: config.Duration(time.Second), StrAsTags: false, } }) diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go similarity index 75% rename from plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go rename to plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go index a3df62e1bb0c0..efc9d7e9955e1 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/jti_openconfig_telemetry_test.go @@ -10,8 +10,8 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" + "github.com/influxdata/telegraf/config" + telemetry "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -19,7 +19,7 @@ import ( var cfg = &OpenConfigTelemetry{ Log: testutil.Logger{}, Servers: []string{"127.0.0.1:50051"}, - SampleFrequency: internal.Duration{Duration: time.Second * 2}, + SampleFrequency: config.Duration(time.Millisecond * 10), } var data = &telemetry.OpenConfigData{ @@ -27,55 +27,57 @@ var data = &telemetry.OpenConfigData{ Kv: []*telemetry.KeyValue{{Key: "/sensor[tag='tagValue']/intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_prefix = &telemetry.OpenConfigData{ +var dataWithPrefix = &telemetry.OpenConfigData{ Path: "/sensor_with_prefix", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_multiple_tags = &telemetry.OpenConfigData{ +var dataWithMultipleTags = &telemetry.OpenConfigData{ Path: "/sensor_with_multiple_tags", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "tagKey[tag='tagValue']/boolKey", Value: &telemetry.KeyValue_BoolValue{BoolValue: false}}, {Key: "intKey", Value: &telemetry.KeyValue_IntValue{IntValue: 10}}}, } -var data_with_string_values = &telemetry.OpenConfigData{ +var dataWithStringValues = &telemetry.OpenConfigData{ Path: "/sensor_with_string_values", Kv: []*telemetry.KeyValue{{Key: "__prefix__", Value: &telemetry.KeyValue_StrValue{StrValue: "/sensor/prefix/"}}, {Key: "strKey[tag='tagValue']/strValue", Value: &telemetry.KeyValue_StrValue{StrValue: "10"}}}, } type openConfigTelemetryServer struct { + telemetry.UnimplementedOpenConfigTelemetryServer } func (s *openConfigTelemetryServer) TelemetrySubscribe(req *telemetry.SubscriptionRequest, stream telemetry.OpenConfigTelemetry_TelemetrySubscribeServer) error { path := 
req.PathList[0].Path - if path == "/sensor" { - stream.Send(data) - } else if path == "/sensor_with_prefix" { - stream.Send(data_with_prefix) - } else if path == "/sensor_with_multiple_tags" { - stream.Send(data_with_multiple_tags) - } else if path == "/sensor_with_string_values" { - stream.Send(data_with_string_values) + switch path { + case "/sensor": + return stream.Send(data) + case "/sensor_with_prefix": + return stream.Send(dataWithPrefix) + case "/sensor_with_multiple_tags": + return stream.Send(dataWithMultipleTags) + case "/sensor_with_string_values": + return stream.Send(dataWithStringValues) } return nil } -func (s *openConfigTelemetryServer) CancelTelemetrySubscription(ctx context.Context, req *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) { +func (s *openConfigTelemetryServer) CancelTelemetrySubscription(_ context.Context, _ *telemetry.CancelSubscriptionRequest) (*telemetry.CancelSubscriptionReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(ctx context.Context, req *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) { +func (s *openConfigTelemetryServer) GetTelemetrySubscriptions(_ context.Context, _ *telemetry.GetSubscriptionsRequest) (*telemetry.GetSubscriptionsReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetTelemetryOperationalState(ctx context.Context, req *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) { +func (s *openConfigTelemetryServer) GetTelemetryOperationalState(_ context.Context, _ *telemetry.GetOperationalStateRequest) (*telemetry.GetOperationalStateReply, error) { return nil, nil } -func (s *openConfigTelemetryServer) GetDataEncodings(ctx context.Context, req *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { +func (s *openConfigTelemetryServer) GetDataEncodings(_ context.Context, _ *telemetry.DataEncodingRequest) (*telemetry.DataEncodingReply, error) { return nil, nil } @@ -106,9 +108,7 @@ func TestOpenConfigTelemetryData(t *testing.T) { "_subcomponent_id": uint32(0), } - // Give sometime for gRPC channel to be established - time.Sleep(2 * time.Second) - + require.Eventually(t, func() bool { return acc.HasMeasurement("/sensor") }, 5*time.Second, 10*time.Millisecond) acc.AssertContainsTaggedFields(t, "/sensor", fields, tags) } @@ -132,9 +132,7 @@ func TestOpenConfigTelemetryDataWithPrefix(t *testing.T) { "_subcomponent_id": uint32(0), } - // Give sometime for gRPC channel to be established - time.Sleep(2 * time.Second) - + require.Eventually(t, func() bool { return acc.HasMeasurement("/sensor_with_prefix") }, 5*time.Second, 10*time.Millisecond) acc.AssertContainsTaggedFields(t, "/sensor_with_prefix", fields, tags) } @@ -173,9 +171,7 @@ func TestOpenConfigTelemetryDataWithMultipleTags(t *testing.T) { "_subcomponent_id": uint32(0), } - // Give sometime for gRPC channel to be established - time.Sleep(2 * time.Second) - + require.Eventually(t, func() bool { return acc.HasMeasurement("/sensor_with_multiple_tags") }, 5*time.Second, 10*time.Millisecond) acc.AssertContainsTaggedFields(t, "/sensor_with_multiple_tags", fields1, tags1) acc.AssertContainsTaggedFields(t, "/sensor_with_multiple_tags", fields2, tags2) } @@ -201,9 +197,7 @@ func TestOpenConfigTelemetryDataWithStringValues(t *testing.T) { "_subcomponent_id": uint32(0), } - // Give sometime for gRPC channel to be established - time.Sleep(2 * time.Second) - + require.Eventually(t, func() bool { return 
acc.HasMeasurement("/sensor_with_string_values") }, 5*time.Second, 10*time.Millisecond) acc.AssertContainsTaggedFields(t, "/sensor_with_string_values", fields, tags) } @@ -219,6 +213,8 @@ func TestMain(m *testing.M) { grpcServer := grpc.NewServer(opts...) telemetry.RegisterOpenConfigTelemetryServer(grpcServer, newServer()) go func() { + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive grpcServer.Serve(lis) }() defer grpcServer.Stop() diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go index bc7c780458f99..19d16dccc501a 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go @@ -1,54 +1,24 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: oc.proto - -/* -Package telemetry is a generated protocol buffer package. - -It is generated from these files: - oc.proto - -It has these top-level messages: - SubscriptionRequest - SubscriptionInput - Collector - Path - SubscriptionAdditionalConfig - SubscriptionReply - SubscriptionResponse - OpenConfigData - KeyValue - Delete - Eom - CancelSubscriptionRequest - CancelSubscriptionReply - GetSubscriptionsRequest - GetSubscriptionsReply - GetOperationalStateRequest - GetOperationalStateReply - DataEncodingRequest - DataEncodingReply -*/ -package telemetry +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: oc/oc.proto -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package telemetry import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Result of the operation type ReturnCode int32 @@ -59,21 +29,46 @@ const ( ReturnCode_UNKNOWN_ERROR ReturnCode = 2 ) -var ReturnCode_name = map[int32]string{ - 0: "SUCCESS", - 1: "NO_SUBSCRIPTION_ENTRY", - 2: "UNKNOWN_ERROR", -} -var ReturnCode_value = map[string]int32{ - "SUCCESS": 0, - "NO_SUBSCRIPTION_ENTRY": 1, - "UNKNOWN_ERROR": 2, +// Enum value maps for ReturnCode. 
+var ( + ReturnCode_name = map[int32]string{ + 0: "SUCCESS", + 1: "NO_SUBSCRIPTION_ENTRY", + 2: "UNKNOWN_ERROR", + } + ReturnCode_value = map[string]int32{ + "SUCCESS": 0, + "NO_SUBSCRIPTION_ENTRY": 1, + "UNKNOWN_ERROR": 2, + } +) + +func (x ReturnCode) Enum() *ReturnCode { + p := new(ReturnCode) + *p = x + return p } func (x ReturnCode) String() string { - return proto.EnumName(ReturnCode_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReturnCode) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[0].Descriptor() +} + +func (ReturnCode) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[0] +} + +func (x ReturnCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReturnCode.Descriptor instead. +func (ReturnCode) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} } -func (ReturnCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // Verbosity Level type VerbosityLevel int32 @@ -84,21 +79,46 @@ const ( VerbosityLevel_BRIEF VerbosityLevel = 2 ) -var VerbosityLevel_name = map[int32]string{ - 0: "DETAIL", - 1: "TERSE", - 2: "BRIEF", -} -var VerbosityLevel_value = map[string]int32{ - "DETAIL": 0, - "TERSE": 1, - "BRIEF": 2, +// Enum value maps for VerbosityLevel. +var ( + VerbosityLevel_name = map[int32]string{ + 0: "DETAIL", + 1: "TERSE", + 2: "BRIEF", + } + VerbosityLevel_value = map[string]int32{ + "DETAIL": 0, + "TERSE": 1, + "BRIEF": 2, + } +) + +func (x VerbosityLevel) Enum() *VerbosityLevel { + p := new(VerbosityLevel) + *p = x + return p } func (x VerbosityLevel) String() string { - return proto.EnumName(VerbosityLevel_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VerbosityLevel) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[1].Descriptor() +} + +func (VerbosityLevel) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[1] +} + +func (x VerbosityLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VerbosityLevel.Descriptor instead. +func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} } -func (VerbosityLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // Encoding Type Supported type EncodingType int32 @@ -110,126 +130,248 @@ const ( EncodingType_PROTO3 EncodingType = 3 ) -var EncodingType_name = map[int32]string{ - 0: "UNDEFINED", - 1: "XML", - 2: "JSON_IETF", - 3: "PROTO3", -} -var EncodingType_value = map[string]int32{ - "UNDEFINED": 0, - "XML": 1, - "JSON_IETF": 2, - "PROTO3": 3, +// Enum value maps for EncodingType. 
+var ( + EncodingType_name = map[int32]string{ + 0: "UNDEFINED", + 1: "XML", + 2: "JSON_IETF", + 3: "PROTO3", + } + EncodingType_value = map[string]int32{ + "UNDEFINED": 0, + "XML": 1, + "JSON_IETF": 2, + "PROTO3": 3, + } +) + +func (x EncodingType) Enum() *EncodingType { + p := new(EncodingType) + *p = x + return p } func (x EncodingType) String() string { - return proto.EnumName(EncodingType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EncodingType) Descriptor() protoreflect.EnumDescriptor { + return file_oc_oc_proto_enumTypes[2].Descriptor() +} + +func (EncodingType) Type() protoreflect.EnumType { + return &file_oc_oc_proto_enumTypes[2] +} + +func (x EncodingType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EncodingType.Descriptor instead. +func (EncodingType) EnumDescriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} } -func (EncodingType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } // Message sent for a telemetry subscription request type SubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data associated with a telemetry subscription - Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input" json:"input,omitempty"` + Input *SubscriptionInput `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. - PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` // The below configuration is not defined in Openconfig RPC. // It is a proposed extension to configure additional // subscription request features. - AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig" json:"additional_config,omitempty"` + AdditionalConfig *SubscriptionAdditionalConfig `protobuf:"bytes,3,opt,name=additional_config,json=additionalConfig,proto3" json:"additional_config,omitempty"` } -func (m *SubscriptionRequest) Reset() { *m = SubscriptionRequest{} } -func (m *SubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*SubscriptionRequest) ProtoMessage() {} -func (*SubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (x *SubscriptionRequest) Reset() { + *x = SubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionRequest) GetInput() *SubscriptionInput { - if m != nil { - return m.Input +func (x *SubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionRequest) ProtoMessage() {} + +func (x *SubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionRequest.ProtoReflect.Descriptor instead. 
+func (*SubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{0} +} + +func (x *SubscriptionRequest) GetInput() *SubscriptionInput { + if x != nil { + return x.Input } return nil } -func (m *SubscriptionRequest) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionRequest) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } -func (m *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { - if m != nil { - return m.AdditionalConfig +func (x *SubscriptionRequest) GetAdditionalConfig() *SubscriptionAdditionalConfig { + if x != nil { + return x.AdditionalConfig } return nil } // Data associated with a telemetry subscription type SubscriptionInput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of optional collector endpoints to send data for // this subscription. // If no collector destinations are specified, the collector // destination is assumed to be the requester on the rpc channel. - CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList" json:"collector_list,omitempty"` + CollectorList []*Collector `protobuf:"bytes,1,rep,name=collector_list,json=collectorList,proto3" json:"collector_list,omitempty"` +} + +func (x *SubscriptionInput) Reset() { + *x = SubscriptionInput{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SubscriptionInput) Reset() { *m = SubscriptionInput{} } -func (m *SubscriptionInput) String() string { return proto.CompactTextString(m) } -func (*SubscriptionInput) ProtoMessage() {} -func (*SubscriptionInput) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (x *SubscriptionInput) String() string { + return protoimpl.X.MessageStringOf(x) +} -func (m *SubscriptionInput) GetCollectorList() []*Collector { - if m != nil { - return m.CollectorList +func (*SubscriptionInput) ProtoMessage() {} + +func (x *SubscriptionInput) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionInput.ProtoReflect.Descriptor instead. +func (*SubscriptionInput) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{1} +} + +func (x *SubscriptionInput) GetCollectorList() []*Collector { + if x != nil { + return x.CollectorList } return nil } // Collector endpoints to send data specified as an ip+port combination. type Collector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // IP address of collector endpoint - Address string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` // Transport protocol port number for the collector destination. 
- Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } -func (m *Collector) Reset() { *m = Collector{} } -func (m *Collector) String() string { return proto.CompactTextString(m) } -func (*Collector) ProtoMessage() {} -func (*Collector) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (x *Collector) Reset() { + *x = Collector{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Collector) GetAddress() string { - if m != nil { - return m.Address +func (x *Collector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Collector) ProtoMessage() {} + +func (x *Collector) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Collector.ProtoReflect.Descriptor instead. +func (*Collector) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{2} +} + +func (x *Collector) GetAddress() string { + if x != nil { + return x.Address } return "" } -func (m *Collector) GetPort() uint32 { - if m != nil { - return m.Port +func (x *Collector) GetPort() uint32 { + if x != nil { + return x.Port } return 0 } // Data model path type Path struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Data model path of interest // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // Regular expression to be used in filtering state leaves - Filter string `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // If this is set to true, the target device will only send // updates to the collector upon a change in data value - SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged" json:"suppress_unchanged,omitempty"` + SuppressUnchanged bool `protobuf:"varint,3,opt,name=suppress_unchanged,json=suppressUnchanged,proto3" json:"suppress_unchanged,omitempty"` // Maximum time in ms the target device may go without sending // a message to the collector. If this time expires with // suppress-unchanged set, the target device must send an update // message regardless if the data values have changed. - MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval" json:"max_silent_interval,omitempty"` + MaxSilentInterval uint32 `protobuf:"varint,4,opt,name=max_silent_interval,json=maxSilentInterval,proto3" json:"max_silent_interval,omitempty"` // Time in ms between collection and transmission of the // specified data to the collector platform. The target device // will sample the corresponding data (e.g,. a counter) and @@ -237,143 +379,263 @@ type Path struct { // // If sample-frequency is set to 0, then the network device // must emit an update upon every datum change. 
- SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency" json:"sample_frequency,omitempty"` + SampleFrequency uint32 `protobuf:"varint,5,opt,name=sample_frequency,json=sampleFrequency,proto3" json:"sample_frequency,omitempty"` // EOM needed for each walk cycle of this path? // For periodic sensor, applicable for each complete reap // For event sensor, applicable when initial dump is over // (same as EOS) // This feature is not implemented currently. - NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom" json:"need_eom,omitempty"` + NeedEom bool `protobuf:"varint,6,opt,name=need_eom,json=needEom,proto3" json:"need_eom,omitempty"` } -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (x *Path) Reset() { + *x = Path{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Path) GetPath() string { - if m != nil { - return m.Path +func (x *Path) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Path) ProtoMessage() {} + +func (x *Path) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Path.ProtoReflect.Descriptor instead. +func (*Path) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{3} +} + +func (x *Path) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *Path) GetFilter() string { - if m != nil { - return m.Filter +func (x *Path) GetFilter() string { + if x != nil { + return x.Filter } return "" } -func (m *Path) GetSuppressUnchanged() bool { - if m != nil { - return m.SuppressUnchanged +func (x *Path) GetSuppressUnchanged() bool { + if x != nil { + return x.SuppressUnchanged } return false } -func (m *Path) GetMaxSilentInterval() uint32 { - if m != nil { - return m.MaxSilentInterval +func (x *Path) GetMaxSilentInterval() uint32 { + if x != nil { + return x.MaxSilentInterval } return 0 } -func (m *Path) GetSampleFrequency() uint32 { - if m != nil { - return m.SampleFrequency +func (x *Path) GetSampleFrequency() uint32 { + if x != nil { + return x.SampleFrequency } return 0 } -func (m *Path) GetNeedEom() bool { - if m != nil { - return m.NeedEom +func (x *Path) GetNeedEom() bool { + if x != nil { + return x.NeedEom } return false } // Configure subscription request additional features. 
type SubscriptionAdditionalConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // limit the number of records sent in the stream - LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords" json:"limit_records,omitempty"` + LimitRecords int32 `protobuf:"varint,1,opt,name=limit_records,json=limitRecords,proto3" json:"limit_records,omitempty"` // limit the time the stream remains open - LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds" json:"limit_time_seconds,omitempty"` + LimitTimeSeconds int32 `protobuf:"varint,2,opt,name=limit_time_seconds,json=limitTimeSeconds,proto3" json:"limit_time_seconds,omitempty"` // EOS needed for this subscription? - NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos" json:"need_eos,omitempty"` + NeedEos bool `protobuf:"varint,3,opt,name=need_eos,json=needEos,proto3" json:"need_eos,omitempty"` +} + +func (x *SubscriptionAdditionalConfig) Reset() { + *x = SubscriptionAdditionalConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubscriptionAdditionalConfig) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SubscriptionAdditionalConfig) Reset() { *m = SubscriptionAdditionalConfig{} } -func (m *SubscriptionAdditionalConfig) String() string { return proto.CompactTextString(m) } -func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (*SubscriptionAdditionalConfig) ProtoMessage() {} -func (m *SubscriptionAdditionalConfig) GetLimitRecords() int32 { - if m != nil { - return m.LimitRecords +func (x *SubscriptionAdditionalConfig) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionAdditionalConfig.ProtoReflect.Descriptor instead. +func (*SubscriptionAdditionalConfig) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{4} +} + +func (x *SubscriptionAdditionalConfig) GetLimitRecords() int32 { + if x != nil { + return x.LimitRecords } return 0 } -func (m *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { - if m != nil { - return m.LimitTimeSeconds +func (x *SubscriptionAdditionalConfig) GetLimitTimeSeconds() int32 { + if x != nil { + return x.LimitTimeSeconds } return 0 } -func (m *SubscriptionAdditionalConfig) GetNeedEos() bool { - if m != nil { - return m.NeedEos +func (x *SubscriptionAdditionalConfig) GetNeedEos() bool { + if x != nil { + return x.NeedEos } return false } // 1. Reply data message sent out using out-of-band channel. type SubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Response message to a telemetry subscription creation or // get request. - Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Response *SubscriptionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` // List of data models paths and filters // which are used in a telemetry operation. 
- PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList" json:"path_list,omitempty"` + PathList []*Path `protobuf:"bytes,2,rep,name=path_list,json=pathList,proto3" json:"path_list,omitempty"` } -func (m *SubscriptionReply) Reset() { *m = SubscriptionReply{} } -func (m *SubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*SubscriptionReply) ProtoMessage() {} -func (*SubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (x *SubscriptionReply) Reset() { + *x = SubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionReply) GetResponse() *SubscriptionResponse { - if m != nil { - return m.Response +func (x *SubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionReply) ProtoMessage() {} + +func (x *SubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionReply.ProtoReflect.Descriptor instead. +func (*SubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{5} +} + +func (x *SubscriptionReply) GetResponse() *SubscriptionResponse { + if x != nil { + return x.Response } return nil } -func (m *SubscriptionReply) GetPathList() []*Path { - if m != nil { - return m.PathList +func (x *SubscriptionReply) GetPathList() []*Path { + if x != nil { + return x.PathList } return nil } // Response message to a telemetry subscription creation or get request. type SubscriptionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Unique id for the subscription on the device. 
This is // generated by the device and returned in a subscription // request or when listing existing subscriptions - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *SubscriptionResponse) Reset() { *m = SubscriptionResponse{} } -func (m *SubscriptionResponse) String() string { return proto.CompactTextString(m) } -func (*SubscriptionResponse) ProtoMessage() {} -func (*SubscriptionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (x *SubscriptionResponse) Reset() { + *x = SubscriptionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *SubscriptionResponse) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *SubscriptionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubscriptionResponse) ProtoMessage() {} + +func (x *SubscriptionResponse) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubscriptionResponse.ProtoReflect.Descriptor instead. +func (*SubscriptionResponse) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{6} +} + +func (x *SubscriptionResponse) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } @@ -381,112 +643,147 @@ func (m *SubscriptionResponse) GetSubscriptionId() uint32 { // 2. Telemetry data send back on the same connection as the // subscription request. type OpenConfigData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // router name:export IP address - SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId" json:"system_id,omitempty"` + SystemId string `protobuf:"bytes,1,opt,name=system_id,json=systemId,proto3" json:"system_id,omitempty"` // line card / RE (slot number) - ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId" json:"component_id,omitempty"` + ComponentId uint32 `protobuf:"varint,2,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty"` // PFE (if applicable) - SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId" json:"sub_component_id,omitempty"` + SubComponentId uint32 `protobuf:"varint,3,opt,name=sub_component_id,json=subComponentId,proto3" json:"sub_component_id,omitempty"` // Path specification for elements of OpenConfig data models - Path string `protobuf:"bytes,4,opt,name=path" json:"path,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` // Sequence number, monotonically increasing for each // system_id, component_id, sub_component_id + path. 
- SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber" json:"sequence_number,omitempty"` + SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` // timestamp (milliseconds since epoch) - Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp" json:"timestamp,omitempty"` + Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // List of key-value pairs - Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,7,rep,name=kv,proto3" json:"kv,omitempty"` // For delete. If filled, it indicates delete - Delete []*Delete `protobuf:"bytes,8,rep,name=delete" json:"delete,omitempty"` + Delete []*Delete `protobuf:"bytes,8,rep,name=delete,proto3" json:"delete,omitempty"` // If filled, it indicates end of marker for the // respective path in the list. - Eom []*Eom `protobuf:"bytes,9,rep,name=eom" json:"eom,omitempty"` + Eom []*Eom `protobuf:"bytes,9,rep,name=eom,proto3" json:"eom,omitempty"` // If filled, it indicates end of sync for complete subscription - SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse" json:"sync_response,omitempty"` + SyncResponse bool `protobuf:"varint,10,opt,name=sync_response,json=syncResponse,proto3" json:"sync_response,omitempty"` } -func (m *OpenConfigData) Reset() { *m = OpenConfigData{} } -func (m *OpenConfigData) String() string { return proto.CompactTextString(m) } -func (*OpenConfigData) ProtoMessage() {} -func (*OpenConfigData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (x *OpenConfigData) Reset() { + *x = OpenConfigData{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *OpenConfigData) GetSystemId() string { - if m != nil { - return m.SystemId +func (x *OpenConfigData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OpenConfigData) ProtoMessage() {} + +func (x *OpenConfigData) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OpenConfigData.ProtoReflect.Descriptor instead. 
+func (*OpenConfigData) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{7} +} + +func (x *OpenConfigData) GetSystemId() string { + if x != nil { + return x.SystemId } return "" } -func (m *OpenConfigData) GetComponentId() uint32 { - if m != nil { - return m.ComponentId +func (x *OpenConfigData) GetComponentId() uint32 { + if x != nil { + return x.ComponentId } return 0 } -func (m *OpenConfigData) GetSubComponentId() uint32 { - if m != nil { - return m.SubComponentId +func (x *OpenConfigData) GetSubComponentId() uint32 { + if x != nil { + return x.SubComponentId } return 0 } -func (m *OpenConfigData) GetPath() string { - if m != nil { - return m.Path +func (x *OpenConfigData) GetPath() string { + if x != nil { + return x.Path } return "" } -func (m *OpenConfigData) GetSequenceNumber() uint64 { - if m != nil { - return m.SequenceNumber +func (x *OpenConfigData) GetSequenceNumber() uint64 { + if x != nil { + return x.SequenceNumber } return 0 } -func (m *OpenConfigData) GetTimestamp() uint64 { - if m != nil { - return m.Timestamp +func (x *OpenConfigData) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp } return 0 } -func (m *OpenConfigData) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *OpenConfigData) GetKv() []*KeyValue { + if x != nil { + return x.Kv } return nil } -func (m *OpenConfigData) GetDelete() []*Delete { - if m != nil { - return m.Delete +func (x *OpenConfigData) GetDelete() []*Delete { + if x != nil { + return x.Delete } return nil } -func (m *OpenConfigData) GetEom() []*Eom { - if m != nil { - return m.Eom +func (x *OpenConfigData) GetEom() []*Eom { + if x != nil { + return x.Eom } return nil } -func (m *OpenConfigData) GetSyncResponse() bool { - if m != nil { - return m.SyncResponse +func (x *OpenConfigData) GetSyncResponse() bool { + if x != nil { + return x.SyncResponse } return false } // Simple Key-value, where value could be one of scalar types type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Key - Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // One of possible values // - // Types that are valid to be assigned to Value: + // Types that are assignable to Value: // *KeyValue_DoubleValue // *KeyValue_IntValue // *KeyValue_UintValue @@ -497,44 +794,44 @@ type KeyValue struct { Value isKeyValue_Value `protobuf_oneof:"value"` } -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (m *KeyValue) String() string { return proto.CompactTextString(m) } -func (*KeyValue) ProtoMessage() {} -func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -type isKeyValue_Value interface { - isKeyValue_Value() +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -type KeyValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,oneof"` +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) } -type KeyValue_IntValue struct { - IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,oneof"` -} -type KeyValue_UintValue struct { - UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,oneof"` -} -type KeyValue_SintValue struct { - SintValue int64 
`protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,oneof"` -} -type KeyValue_BoolValue struct { - BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,oneof"` -} -type KeyValue_StrValue struct { - StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,oneof"` + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type KeyValue_BytesValue struct { - BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{8} } -func (*KeyValue_DoubleValue) isKeyValue_Value() {} -func (*KeyValue_IntValue) isKeyValue_Value() {} -func (*KeyValue_UintValue) isKeyValue_Value() {} -func (*KeyValue_SintValue) isKeyValue_Value() {} -func (*KeyValue_BoolValue) isKeyValue_Value() {} -func (*KeyValue_StrValue) isKeyValue_Value() {} -func (*KeyValue_BytesValue) isKeyValue_Value() {} +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} func (m *KeyValue) GetValue() isKeyValue_Value { if m != nil { @@ -543,323 +840,412 @@ func (m *KeyValue) GetValue() isKeyValue_Value { return nil } -func (m *KeyValue) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *KeyValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*KeyValue_DoubleValue); ok { +func (x *KeyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*KeyValue_DoubleValue); ok { return x.DoubleValue } return 0 } -func (m *KeyValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*KeyValue_IntValue); ok { +func (x *KeyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*KeyValue_IntValue); ok { return x.IntValue } return 0 } -func (m *KeyValue) GetUintValue() uint64 { - if x, ok := m.GetValue().(*KeyValue_UintValue); ok { +func (x *KeyValue) GetUintValue() uint64 { + if x, ok := x.GetValue().(*KeyValue_UintValue); ok { return x.UintValue } return 0 } -func (m *KeyValue) GetSintValue() int64 { - if x, ok := m.GetValue().(*KeyValue_SintValue); ok { +func (x *KeyValue) GetSintValue() int64 { + if x, ok := x.GetValue().(*KeyValue_SintValue); ok { return x.SintValue } return 0 } -func (m *KeyValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*KeyValue_BoolValue); ok { +func (x *KeyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*KeyValue_BoolValue); ok { return x.BoolValue } return false } -func (m *KeyValue) GetStrValue() string { - if x, ok := m.GetValue().(*KeyValue_StrValue); ok { +func (x *KeyValue) GetStrValue() string { + if x, ok := x.GetValue().(*KeyValue_StrValue); ok { return x.StrValue } return "" } -func (m *KeyValue) GetBytesValue() []byte { - if x, ok := m.GetValue().(*KeyValue_BytesValue); ok { +func (x *KeyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*KeyValue_BytesValue); ok { return x.BytesValue } return nil } -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*KeyValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _KeyValue_OneofMarshaler, _KeyValue_OneofUnmarshaler, _KeyValue_OneofSizer, []interface{}{ - (*KeyValue_DoubleValue)(nil), - (*KeyValue_IntValue)(nil), - (*KeyValue_UintValue)(nil), - (*KeyValue_SintValue)(nil), - (*KeyValue_BoolValue)(nil), - (*KeyValue_StrValue)(nil), - (*KeyValue_BytesValue)(nil), - } +type isKeyValue_Value interface { + isKeyValue_Value() } -func _KeyValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - b.EncodeVarint(5<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.DoubleValue)) - case *KeyValue_IntValue: - b.EncodeVarint(6<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - b.EncodeVarint(7<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - b.EncodeVarint(8<<3 | proto.WireVarint) - b.EncodeZigzag64(uint64(x.SintValue)) - case *KeyValue_BoolValue: - t := uint64(0) - if x.BoolValue { - t = 1 - } - b.EncodeVarint(9<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *KeyValue_StrValue: - b.EncodeVarint(10<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StrValue) - case *KeyValue_BytesValue: - b.EncodeVarint(11<<3 | proto.WireBytes) - b.EncodeRawBytes(x.BytesValue) - case nil: - default: - return fmt.Errorf("KeyValue.Value has unexpected type %T", x) - } - return nil +type KeyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` } -func _KeyValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*KeyValue) - switch tag { - case 5: // value.double_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Value = &KeyValue_DoubleValue{math.Float64frombits(x)} - return true, err - case 6: // value.int_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_IntValue{int64(x)} - return true, err - case 7: // value.uint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_UintValue{x} - return true, err - case 8: // value.sint_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag64() - m.Value = &KeyValue_SintValue{int64(x)} - return true, err - case 9: // value.bool_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &KeyValue_BoolValue{x != 0} - return true, err - case 10: // value.str_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Value = &KeyValue_StrValue{x} - return true, err - case 11: // value.bytes_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Value = &KeyValue_BytesValue{x} - return true, err - default: - return false, nil - } -} - -func _KeyValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*KeyValue) - // value - switch x := m.Value.(type) { - case *KeyValue_DoubleValue: - n += proto.SizeVarint(5<<3 | proto.WireFixed64) - n += 8 - case 
*KeyValue_IntValue: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.IntValue)) - case *KeyValue_UintValue: - n += proto.SizeVarint(7<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.UintValue)) - case *KeyValue_SintValue: - n += proto.SizeVarint(8<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(uint64(x.SintValue<<1) ^ uint64((int64(x.SintValue) >> 63)))) - case *KeyValue_BoolValue: - n += proto.SizeVarint(9<<3 | proto.WireVarint) - n += 1 - case *KeyValue_StrValue: - n += proto.SizeVarint(10<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.StrValue))) - n += len(x.StrValue) - case *KeyValue_BytesValue: - n += proto.SizeVarint(11<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.BytesValue))) - n += len(x.BytesValue) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type KeyValue_IntValue struct { + IntValue int64 `protobuf:"varint,6,opt,name=int_value,json=intValue,proto3,oneof"` } +type KeyValue_UintValue struct { + UintValue uint64 `protobuf:"varint,7,opt,name=uint_value,json=uintValue,proto3,oneof"` +} + +type KeyValue_SintValue struct { + SintValue int64 `protobuf:"zigzag64,8,opt,name=sint_value,json=sintValue,proto3,oneof"` +} + +type KeyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,9,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type KeyValue_StrValue struct { + StrValue string `protobuf:"bytes,10,opt,name=str_value,json=strValue,proto3,oneof"` +} + +type KeyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,11,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*KeyValue_DoubleValue) isKeyValue_Value() {} + +func (*KeyValue_IntValue) isKeyValue_Value() {} + +func (*KeyValue_UintValue) isKeyValue_Value() {} + +func (*KeyValue_SintValue) isKeyValue_Value() {} + +func (*KeyValue_BoolValue) isKeyValue_Value() {} + +func (*KeyValue_StrValue) isKeyValue_Value() {} + +func (*KeyValue_BytesValue) isKeyValue_Value() {} + // Message indicating delete for a particular path type Delete struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *Delete) Reset() { + *x = Delete{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Delete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Delete) Reset() { *m = Delete{} } -func (m *Delete) String() string { return proto.CompactTextString(m) } -func (*Delete) ProtoMessage() {} -func (*Delete) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*Delete) ProtoMessage() {} -func (m *Delete) GetPath() string { - if m != nil { - return m.Path +func (x *Delete) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Delete.ProtoReflect.Descriptor instead. 
+func (*Delete) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{9} +} + +func (x *Delete) GetPath() string { + if x != nil { + return x.Path } return "" } // Message indicating EOM for a particular path type Eom struct { - Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } -func (m *Eom) Reset() { *m = Eom{} } -func (m *Eom) String() string { return proto.CompactTextString(m) } -func (*Eom) ProtoMessage() {} -func (*Eom) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (x *Eom) Reset() { + *x = Eom{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *Eom) GetPath() string { - if m != nil { - return m.Path +func (x *Eom) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Eom) ProtoMessage() {} + +func (x *Eom) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Eom.ProtoReflect.Descriptor instead. +func (*Eom) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{10} +} + +func (x *Eom) GetPath() string { + if x != nil { + return x.Path } return "" } // Message sent for a telemetry subscription cancellation request type CancelSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *CancelSubscriptionRequest) Reset() { *m = CancelSubscriptionRequest{} } -func (m *CancelSubscriptionRequest) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionRequest) ProtoMessage() {} -func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (x *CancelSubscriptionRequest) Reset() { + *x = CancelSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *CancelSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionRequest) ProtoMessage() {} + +func (x *CancelSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionRequest.ProtoReflect.Descriptor instead. 
+func (*CancelSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{11} +} + +func (x *CancelSubscriptionRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription cancellation request type CancelSubscriptionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Return code - Code ReturnCode `protobuf:"varint,1,opt,name=code,enum=telemetry.ReturnCode" json:"code,omitempty"` + Code ReturnCode `protobuf:"varint,1,opt,name=code,proto3,enum=telemetry.ReturnCode" json:"code,omitempty"` // Return code string - CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr" json:"code_str,omitempty"` + CodeStr string `protobuf:"bytes,2,opt,name=code_str,json=codeStr,proto3" json:"code_str,omitempty"` } -func (m *CancelSubscriptionReply) Reset() { *m = CancelSubscriptionReply{} } -func (m *CancelSubscriptionReply) String() string { return proto.CompactTextString(m) } -func (*CancelSubscriptionReply) ProtoMessage() {} -func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (x *CancelSubscriptionReply) Reset() { + *x = CancelSubscriptionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *CancelSubscriptionReply) GetCode() ReturnCode { - if m != nil { - return m.Code +func (x *CancelSubscriptionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancelSubscriptionReply) ProtoMessage() {} + +func (x *CancelSubscriptionReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancelSubscriptionReply.ProtoReflect.Descriptor instead. 
+func (*CancelSubscriptionReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{12} +} + +func (x *CancelSubscriptionReply) GetCode() ReturnCode { + if x != nil { + return x.Code } return ReturnCode_SUCCESS } -func (m *CancelSubscriptionReply) GetCodeStr() string { - if m != nil { - return m.CodeStr +func (x *CancelSubscriptionReply) GetCodeStr() string { + if x != nil { + return x.CodeStr } return "" } // Message sent for a telemetry get request type GetSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Subscription identifier as returned by the device when // subscription was requested // --- or --- // 0xFFFFFFFF for all subscription identifiers - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` } -func (m *GetSubscriptionsRequest) Reset() { *m = GetSubscriptionsRequest{} } -func (m *GetSubscriptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsRequest) ProtoMessage() {} -func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (x *GetSubscriptionsRequest) Reset() { + *x = GetSubscriptionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsRequest) ProtoMessage() {} + +func (x *GetSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsRequest.ProtoReflect.Descriptor instead. 
+func (*GetSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{13} +} + +func (x *GetSubscriptionsRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } // Reply to telemetry subscription get request type GetSubscriptionsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of current telemetry subscriptions - SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList" json:"subscription_list,omitempty"` + SubscriptionList []*SubscriptionReply `protobuf:"bytes,1,rep,name=subscription_list,json=subscriptionList,proto3" json:"subscription_list,omitempty"` } -func (m *GetSubscriptionsReply) Reset() { *m = GetSubscriptionsReply{} } -func (m *GetSubscriptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSubscriptionsReply) ProtoMessage() {} -func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (x *GetSubscriptionsReply) Reset() { + *x = GetSubscriptionsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { - if m != nil { - return m.SubscriptionList +func (x *GetSubscriptionsReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionsReply) ProtoMessage() {} + +func (x *GetSubscriptionsReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionsReply.ProtoReflect.Descriptor instead. +func (*GetSubscriptionsReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{14} +} + +func (x *GetSubscriptionsReply) GetSubscriptionList() []*SubscriptionReply { + if x != nil { + return x.SubscriptionList } return nil } // Message sent for telemetry agent operational states request type GetOperationalStateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Per-subscription_id level operational state can be requested. 
// // Subscription identifier as returned by the device when @@ -870,434 +1256,718 @@ type GetOperationalStateRequest struct { // --- or --- // If subscription_id is not present then sent only agent-level // operational stats - SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId" json:"subscription_id,omitempty"` + SubscriptionId uint32 `protobuf:"varint,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` // Control verbosity of the output - Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` + Verbosity VerbosityLevel `protobuf:"varint,2,opt,name=verbosity,proto3,enum=telemetry.VerbosityLevel" json:"verbosity,omitempty"` } -func (m *GetOperationalStateRequest) Reset() { *m = GetOperationalStateRequest{} } -func (m *GetOperationalStateRequest) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateRequest) ProtoMessage() {} -func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (x *GetOperationalStateRequest) Reset() { + *x = GetOperationalStateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -func (m *GetOperationalStateRequest) GetSubscriptionId() uint32 { - if m != nil { - return m.SubscriptionId +func (x *GetOperationalStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOperationalStateRequest) ProtoMessage() {} + +func (x *GetOperationalStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOperationalStateRequest.ProtoReflect.Descriptor instead. 
+func (*GetOperationalStateRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{15} +} + +func (x *GetOperationalStateRequest) GetSubscriptionId() uint32 { + if x != nil { + return x.SubscriptionId } return 0 } -func (m *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { - if m != nil { - return m.Verbosity +func (x *GetOperationalStateRequest) GetVerbosity() VerbosityLevel { + if x != nil { + return x.Verbosity } return VerbosityLevel_DETAIL } // Reply to telemetry agent operational states request type GetOperationalStateReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // List of key-value pairs where // key = operational state definition // value = operational state value - Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv" json:"kv,omitempty"` + Kv []*KeyValue `protobuf:"bytes,1,rep,name=kv,proto3" json:"kv,omitempty"` } -func (m *GetOperationalStateReply) Reset() { *m = GetOperationalStateReply{} } -func (m *GetOperationalStateReply) String() string { return proto.CompactTextString(m) } -func (*GetOperationalStateReply) ProtoMessage() {} -func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *GetOperationalStateReply) GetKv() []*KeyValue { - if m != nil { - return m.Kv +func (x *GetOperationalStateReply) Reset() { + *x = GetOperationalStateReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -// Message sent for a data encoding request -type DataEncodingRequest struct { -} - -func (m *DataEncodingRequest) Reset() { *m = DataEncodingRequest{} } -func (m *DataEncodingRequest) String() string { return proto.CompactTextString(m) } -func (*DataEncodingRequest) ProtoMessage() {} -func (*DataEncodingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -// Reply to data encodings supported request -type DataEncodingReply struct { - EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` +func (x *GetOperationalStateReply) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DataEncodingReply) Reset() { *m = DataEncodingReply{} } -func (m *DataEncodingReply) String() string { return proto.CompactTextString(m) } -func (*DataEncodingReply) ProtoMessage() {} -func (*DataEncodingReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (*GetOperationalStateReply) ProtoMessage() {} -func (m *DataEncodingReply) GetEncodingList() []EncodingType { - if m != nil { - return m.EncodingList +func (x *GetOperationalStateReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil -} - -func init() { - proto.RegisterType((*SubscriptionRequest)(nil), "telemetry.SubscriptionRequest") - proto.RegisterType((*SubscriptionInput)(nil), "telemetry.SubscriptionInput") - proto.RegisterType((*Collector)(nil), "telemetry.Collector") - proto.RegisterType((*Path)(nil), "telemetry.Path") - proto.RegisterType((*SubscriptionAdditionalConfig)(nil), "telemetry.SubscriptionAdditionalConfig") - proto.RegisterType((*SubscriptionReply)(nil), "telemetry.SubscriptionReply") 
- proto.RegisterType((*SubscriptionResponse)(nil), "telemetry.SubscriptionResponse") - proto.RegisterType((*OpenConfigData)(nil), "telemetry.OpenConfigData") - proto.RegisterType((*KeyValue)(nil), "telemetry.KeyValue") - proto.RegisterType((*Delete)(nil), "telemetry.Delete") - proto.RegisterType((*Eom)(nil), "telemetry.Eom") - proto.RegisterType((*CancelSubscriptionRequest)(nil), "telemetry.CancelSubscriptionRequest") - proto.RegisterType((*CancelSubscriptionReply)(nil), "telemetry.CancelSubscriptionReply") - proto.RegisterType((*GetSubscriptionsRequest)(nil), "telemetry.GetSubscriptionsRequest") - proto.RegisterType((*GetSubscriptionsReply)(nil), "telemetry.GetSubscriptionsReply") - proto.RegisterType((*GetOperationalStateRequest)(nil), "telemetry.GetOperationalStateRequest") - proto.RegisterType((*GetOperationalStateReply)(nil), "telemetry.GetOperationalStateReply") - proto.RegisterType((*DataEncodingRequest)(nil), "telemetry.DataEncodingRequest") - proto.RegisterType((*DataEncodingReply)(nil), "telemetry.DataEncodingReply") - proto.RegisterEnum("telemetry.ReturnCode", ReturnCode_name, ReturnCode_value) - proto.RegisterEnum("telemetry.VerbosityLevel", VerbosityLevel_name, VerbosityLevel_value) - proto.RegisterEnum("telemetry.EncodingType", EncodingType_name, EncodingType_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for OpenConfigTelemetry service - -type OpenConfigTelemetryClient interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) + return mi.MessageOf(x) } -type openConfigTelemetryClient struct { - cc *grpc.ClientConn +// Deprecated: Use GetOperationalStateReply.ProtoReflect.Descriptor instead. 
+func (*GetOperationalStateReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{16} } -func NewOpenConfigTelemetryClient(cc *grpc.ClientConn) OpenConfigTelemetryClient { - return &openConfigTelemetryClient{cc} -} - -func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { - stream, err := grpc.NewClientStream(ctx, &_OpenConfigTelemetry_serviceDesc.Streams[0], c.cc, "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) - if err != nil { - return nil, err - } - x := &openConfigTelemetryTelemetrySubscribeClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err +func (x *GetOperationalStateReply) GetKv() []*KeyValue { + if x != nil { + return x.Kv } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type OpenConfigTelemetry_TelemetrySubscribeClient interface { - Recv() (*OpenConfigData, error) - grpc.ClientStream + return nil } -type openConfigTelemetryTelemetrySubscribeClient struct { - grpc.ClientStream +// Message sent for a data encoding request +type DataEncodingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { - m := new(OpenConfigData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *DataEncodingRequest) Reset() { + *x = DataEncodingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return m, nil } -func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { - out := new(CancelSubscriptionReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *DataEncodingRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { - out := new(GetSubscriptionsReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*DataEncodingRequest) ProtoMessage() {} -func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { - out := new(GetOperationalStateReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, c.cc, opts...) 
- if err != nil { - return nil, err +func (x *DataEncodingRequest) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { - out := new(DataEncodingReply) - err := grpc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use DataEncodingRequest.ProtoReflect.Descriptor instead. +func (*DataEncodingRequest) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{17} } -// Server API for OpenConfigTelemetry service - -type OpenConfigTelemetryServer interface { - // Request an inline subscription for data at the specified path. - // The device should send telemetry data back on the same - // connection as the subscription request. - TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error - // Terminates and removes an existing telemetry subscription - CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) - // Get the list of current telemetry subscriptions from the - // target. This command returns a list of existing subscriptions - // not including those that are established via configuration. - GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) - // Get Telemetry Agent Operational States - GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) - // Return the set of data encodings supported by the device for - // telemetry data - GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) -} +// Reply to data encodings supported request +type DataEncodingReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func RegisterOpenConfigTelemetryServer(s *grpc.Server, srv OpenConfigTelemetryServer) { - s.RegisterService(&_OpenConfigTelemetry_serviceDesc, srv) + EncodingList []EncodingType `protobuf:"varint,1,rep,packed,name=encoding_list,json=encodingList,proto3,enum=telemetry.EncodingType" json:"encoding_list,omitempty"` } -func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscriptionRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *DataEncodingReply) Reset() { + *x = DataEncodingReply{} + if protoimpl.UnsafeEnabled { + mi := &file_oc_oc_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) } -type OpenConfigTelemetry_TelemetrySubscribeServer interface { - Send(*OpenConfigData) error - grpc.ServerStream +func (x *DataEncodingReply) String() string { + return protoimpl.X.MessageStringOf(x) } -type openConfigTelemetryTelemetrySubscribeServer struct { - grpc.ServerStream -} +func (*DataEncodingReply) ProtoMessage() {} -func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { - return x.ServerStream.SendMsg(m) -} - 
-func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelSubscriptionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/CancelTelemetrySubscription", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) +func (x *DataEncodingReply) ProtoReflect() protoreflect.Message { + mi := &file_oc_oc_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSubscriptionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetrySubscriptions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use DataEncodingReply.ProtoReflect.Descriptor instead. 
+func (*DataEncodingReply) Descriptor() ([]byte, []int) { + return file_oc_oc_proto_rawDescGZIP(), []int{18} } -func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetOperationalStateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) +func (x *DataEncodingReply) GetEncodingList() []EncodingType { + if x != nil { + return x.EncodingList } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetTelemetryOperationalState", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DataEncodingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/telemetry.OpenConfigTelemetry/GetDataEncodings", +var File_oc_oc_proto protoreflect.FileDescriptor + +var file_oc_oc_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x6f, 0x63, 0x2f, 0x6f, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xcd, 0x01, 0x0a, 0x13, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x32, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x54, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x50, 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3b, 0x0a, + 0x0e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x39, 0x0a, 0x09, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 
0x74, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x04, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, + 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x75, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x55, 0x6e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x69, 0x6c, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x69, 0x6c, 0x65, 0x6e, + 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x72, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x46, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x6d, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x6d, 0x22, + 0x8c, 0x01, 0x0a, 0x1c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x10, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x65, 0x6f, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6e, 0x65, 0x65, 0x64, 0x45, 0x6f, 0x73, 0x22, 0x7e, + 0x0a, 0x11, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x08, 0x70, 0x61, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x3f, + 0x0a, 0x14, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, + 0xec, 0x02, 0x0a, 0x0e, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x49, 0x64, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x75, 0x62, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, + 0x62, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x12, 0x29, 0x0a, 0x06, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x6f, 0x6d, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x45, 0x6f, 0x6d, 0x52, 0x03, 0x65, 0x6f, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x79, 0x6e, + 0x63, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8e, + 0x02, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x75, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x09, 0x75, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x73, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x12, 0x48, 0x00, 0x52, 0x09, 0x73, 0x69, 0x6e, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 
0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x1c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x19, 0x0a, + 0x03, 0x45, 0x6f, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x44, 0x0a, 0x19, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x5f, + 0x0a, 0x17, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x22, + 0x42, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x22, 0x62, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x11, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x52, 0x10, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x22, 0x7e, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x37, + 0x0a, 0x09, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x65, + 
0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x09, 0x76, 0x65, + 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x22, 0x3f, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x23, 0x0a, 0x02, 0x6b, 0x76, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x02, 0x6b, 0x76, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x51, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0d, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x69, + 0x73, 0x74, 0x2a, 0x47, 0x0a, 0x0a, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x19, 0x0a, + 0x15, 0x4e, 0x4f, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x2a, 0x32, 0x0a, 0x0e, 0x56, + 0x65, 0x72, 0x62, 0x6f, 0x73, 0x69, 0x74, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0a, 0x0a, + 0x06, 0x44, 0x45, 0x54, 0x41, 0x49, 0x4c, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x45, 0x52, + 0x53, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x52, 0x49, 0x45, 0x46, 0x10, 0x02, 0x2a, + 0x41, 0x0a, 0x0c, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, + 0x0a, 0x03, 0x58, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, + 0x49, 0x45, 0x54, 0x46, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, + 0x10, 0x03, 0x32, 0xfc, 0x03, 0x0a, 0x13, 0x4f, 0x70, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12, 0x53, 0x0a, 0x12, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x4f, 0x70, 0x65, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x69, 0x0a, 0x1b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, + 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x43, 0x61, 0x6e, 
0x63, 0x65, 0x6c, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x19, 0x67, 0x65, + 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, + 0x6c, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x25, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x10, 0x67, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x1e, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x3b, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_oc_oc_proto_rawDescOnce sync.Once + file_oc_oc_proto_rawDescData = file_oc_oc_proto_rawDesc +) + +func file_oc_oc_proto_rawDescGZIP() []byte { + file_oc_oc_proto_rawDescOnce.Do(func() { + file_oc_oc_proto_rawDescData = protoimpl.X.CompressGZIP(file_oc_oc_proto_rawDescData) + }) + return file_oc_oc_proto_rawDescData +} + +var file_oc_oc_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_oc_oc_proto_msgTypes = make([]protoimpl.MessageInfo, 19) +var file_oc_oc_proto_goTypes = []interface{}{ + (ReturnCode)(0), // 0: telemetry.ReturnCode + (VerbosityLevel)(0), // 1: telemetry.VerbosityLevel + (EncodingType)(0), // 2: telemetry.EncodingType + (*SubscriptionRequest)(nil), // 3: telemetry.SubscriptionRequest + (*SubscriptionInput)(nil), // 4: telemetry.SubscriptionInput + (*Collector)(nil), // 5: telemetry.Collector + (*Path)(nil), // 6: telemetry.Path + (*SubscriptionAdditionalConfig)(nil), // 7: telemetry.SubscriptionAdditionalConfig + (*SubscriptionReply)(nil), // 8: telemetry.SubscriptionReply + (*SubscriptionResponse)(nil), // 9: telemetry.SubscriptionResponse + (*OpenConfigData)(nil), // 10: telemetry.OpenConfigData + (*KeyValue)(nil), // 11: telemetry.KeyValue + (*Delete)(nil), // 12: telemetry.Delete + (*Eom)(nil), // 13: telemetry.Eom + (*CancelSubscriptionRequest)(nil), // 14: telemetry.CancelSubscriptionRequest + (*CancelSubscriptionReply)(nil), // 15: telemetry.CancelSubscriptionReply + (*GetSubscriptionsRequest)(nil), 
// 16: telemetry.GetSubscriptionsRequest + (*GetSubscriptionsReply)(nil), // 17: telemetry.GetSubscriptionsReply + (*GetOperationalStateRequest)(nil), // 18: telemetry.GetOperationalStateRequest + (*GetOperationalStateReply)(nil), // 19: telemetry.GetOperationalStateReply + (*DataEncodingRequest)(nil), // 20: telemetry.DataEncodingRequest + (*DataEncodingReply)(nil), // 21: telemetry.DataEncodingReply +} +var file_oc_oc_proto_depIdxs = []int32{ + 4, // 0: telemetry.SubscriptionRequest.input:type_name -> telemetry.SubscriptionInput + 6, // 1: telemetry.SubscriptionRequest.path_list:type_name -> telemetry.Path + 7, // 2: telemetry.SubscriptionRequest.additional_config:type_name -> telemetry.SubscriptionAdditionalConfig + 5, // 3: telemetry.SubscriptionInput.collector_list:type_name -> telemetry.Collector + 9, // 4: telemetry.SubscriptionReply.response:type_name -> telemetry.SubscriptionResponse + 6, // 5: telemetry.SubscriptionReply.path_list:type_name -> telemetry.Path + 11, // 6: telemetry.OpenConfigData.kv:type_name -> telemetry.KeyValue + 12, // 7: telemetry.OpenConfigData.delete:type_name -> telemetry.Delete + 13, // 8: telemetry.OpenConfigData.eom:type_name -> telemetry.Eom + 0, // 9: telemetry.CancelSubscriptionReply.code:type_name -> telemetry.ReturnCode + 8, // 10: telemetry.GetSubscriptionsReply.subscription_list:type_name -> telemetry.SubscriptionReply + 1, // 11: telemetry.GetOperationalStateRequest.verbosity:type_name -> telemetry.VerbosityLevel + 11, // 12: telemetry.GetOperationalStateReply.kv:type_name -> telemetry.KeyValue + 2, // 13: telemetry.DataEncodingReply.encoding_list:type_name -> telemetry.EncodingType + 3, // 14: telemetry.OpenConfigTelemetry.telemetrySubscribe:input_type -> telemetry.SubscriptionRequest + 14, // 15: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:input_type -> telemetry.CancelSubscriptionRequest + 16, // 16: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:input_type -> telemetry.GetSubscriptionsRequest + 18, // 17: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:input_type -> telemetry.GetOperationalStateRequest + 20, // 18: telemetry.OpenConfigTelemetry.getDataEncodings:input_type -> telemetry.DataEncodingRequest + 10, // 19: telemetry.OpenConfigTelemetry.telemetrySubscribe:output_type -> telemetry.OpenConfigData + 15, // 20: telemetry.OpenConfigTelemetry.cancelTelemetrySubscription:output_type -> telemetry.CancelSubscriptionReply + 17, // 21: telemetry.OpenConfigTelemetry.getTelemetrySubscriptions:output_type -> telemetry.GetSubscriptionsReply + 19, // 22: telemetry.OpenConfigTelemetry.getTelemetryOperationalState:output_type -> telemetry.GetOperationalStateReply + 21, // 23: telemetry.OpenConfigTelemetry.getDataEncodings:output_type -> telemetry.DataEncodingReply + 19, // [19:24] is the sub-list for method output_type + 14, // [14:19] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_oc_oc_proto_init() } +func file_oc_oc_proto_init() { + if File_oc_oc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_oc_oc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*SubscriptionInput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Collector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Path); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionAdditionalConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscriptionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OpenConfigData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Delete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Eom); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSubscriptionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetOperationalStateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOperationalStateReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_oc_oc_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DataEncodingReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + file_oc_oc_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*KeyValue_DoubleValue)(nil), + (*KeyValue_IntValue)(nil), + (*KeyValue_UintValue)(nil), + (*KeyValue_SintValue)(nil), + (*KeyValue_BoolValue)(nil), + (*KeyValue_StrValue)(nil), + (*KeyValue_BytesValue)(nil), } - return interceptor(ctx, in, info, handler) -} - -var _OpenConfigTelemetry_serviceDesc = grpc.ServiceDesc{ - ServiceName: "telemetry.OpenConfigTelemetry", - HandlerType: (*OpenConfigTelemetryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "cancelTelemetrySubscription", - Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, - }, - { - MethodName: "getTelemetrySubscriptions", - Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, - }, - { - MethodName: "getTelemetryOperationalState", - Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, - }, - { - MethodName: "getDataEncodings", - Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "telemetrySubscribe", - Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, - ServerStreams: true, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_oc_oc_proto_rawDesc, + NumEnums: 3, + NumMessages: 19, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "oc.proto", -} - -func init() { proto.RegisterFile("oc.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x15, 0x25, 0xd9, 0x12, 0xaf, 0x7e, 0x42, 0x8d, 0xe3, 0x2f, 0xb2, 0xa3, 0xaf, 0x71, 0xe8, - 0x16, 0x71, 0x82, 0xd4, 0x28, 0x94, 0x45, 0x51, 0xa4, 0x40, 0x10, 0xcb, 0x74, 0xac, 0xc6, 0x95, - 0xdc, 0xa1, 0x9c, 0xb6, 0x2b, 0x82, 0x22, 0x27, 0x36, 0x11, 0xfe, 0x95, 0x33, 0x12, 0xc2, 0x4d, - 0x9e, 0xa0, 0xe8, 0x9b, 0x75, 0xdd, 0x97, 0xe8, 0x23, 0x74, 0x51, 0xcc, 0x90, 0x94, 0x46, 0x89, - 0x94, 0x34, 0x2b, 0x91, 0xe7, 0x9e, 0xb9, 0xf7, 0xcc, 0xbd, 0x67, 0x86, 0x82, 0x7a, 0xe4, 0x1c, - 0xc7, 0x49, 0xc4, 0x22, 0xa4, 0x32, 0xe2, 0x93, 0x80, 0xb0, 0x24, 0xd5, 0xff, 0x54, 0x60, 0xc7, - 0x9c, 0x4d, 0xa9, 0x93, 0x78, 0x31, 0xf3, 0xa2, 0x10, 0x93, 0xdf, 0x66, 0x84, 0x32, 0xd4, 0x87, - 0x2d, 0x2f, 0x8c, 0x67, 0xac, 0xab, 0x1c, 0x28, 0x47, 0x8d, 0x7e, 0xef, 0x78, 0xb1, 0xe4, 0x58, 
- 0xa6, 0x0f, 0x39, 0x07, 0x67, 0x54, 0xf4, 0x18, 0xd4, 0xd8, 0x66, 0x37, 0x96, 0xef, 0x51, 0xd6, - 0x2d, 0x1f, 0x54, 0x8e, 0x1a, 0xfd, 0x5b, 0xd2, 0xba, 0x4b, 0x9b, 0xdd, 0xe0, 0x3a, 0x67, 0x5c, - 0x78, 0x94, 0xa1, 0x09, 0x74, 0x6c, 0xd7, 0xf5, 0x78, 0x16, 0xdb, 0xb7, 0x9c, 0x28, 0x7c, 0xed, - 0x5d, 0x77, 0x2b, 0xa2, 0xda, 0x83, 0x0d, 0xd5, 0x9e, 0x2f, 0xf8, 0x03, 0x41, 0xc7, 0x9a, 0xfd, - 0x1e, 0xa2, 0x5f, 0x42, 0xe7, 0x03, 0x7d, 0xe8, 0x29, 0xb4, 0x9d, 0xc8, 0xf7, 0x89, 0xc3, 0xa2, - 0x24, 0x53, 0xa7, 0x08, 0x75, 0xb7, 0xa5, 0x3a, 0x83, 0x82, 0x80, 0x5b, 0x0b, 0x2e, 0xd7, 0xa9, - 0x7f, 0x07, 0xea, 0x22, 0x86, 0xba, 0x50, 0xb3, 0x5d, 0x37, 0x21, 0x94, 0x8a, 0xc6, 0xa8, 0xb8, - 0x78, 0x45, 0x08, 0xaa, 0x71, 0x94, 0xf0, 0x7d, 0x2b, 0x47, 0x2d, 0x2c, 0x9e, 0xf5, 0xbf, 0x14, - 0xa8, 0xf2, 0x5d, 0x8b, 0xa0, 0xcd, 0x6e, 0xf2, 0x35, 0xe2, 0x19, 0xfd, 0x0f, 0xb6, 0x5f, 0x7b, - 0x3e, 0x23, 0x89, 0x58, 0xa2, 0xe2, 0xfc, 0x0d, 0x7d, 0x0d, 0x88, 0xce, 0xe2, 0x98, 0x27, 0xb5, - 0x66, 0xa1, 0x73, 0x63, 0x87, 0xd7, 0xc4, 0x15, 0x8d, 0xa9, 0xe3, 0x4e, 0x11, 0xb9, 0x2a, 0x02, - 0xe8, 0x18, 0x76, 0x02, 0xfb, 0xad, 0x45, 0x3d, 0x9f, 0x84, 0xcc, 0xf2, 0x42, 0x46, 0x92, 0xb9, - 0xed, 0x77, 0xab, 0x42, 0x46, 0x27, 0xb0, 0xdf, 0x9a, 0x22, 0x32, 0xcc, 0x03, 0xe8, 0x21, 0x68, - 0xd4, 0x0e, 0x62, 0x9f, 0x58, 0xaf, 0x13, 0x3e, 0xeb, 0xd0, 0x49, 0xbb, 0x5b, 0x82, 0x7c, 0x2b, - 0xc3, 0xcf, 0x0a, 0x18, 0xed, 0x41, 0x3d, 0x24, 0xc4, 0xb5, 0x48, 0x14, 0x74, 0xb7, 0x45, 0xfd, - 0x1a, 0x7f, 0x37, 0xa2, 0x40, 0xff, 0x5d, 0x81, 0xde, 0xc7, 0x26, 0x83, 0x0e, 0xa1, 0xe5, 0x7b, - 0x81, 0xc7, 0xac, 0x84, 0x38, 0x51, 0xe2, 0x66, 0xed, 0xda, 0xc2, 0x4d, 0x01, 0xe2, 0x0c, 0x43, - 0x8f, 0x01, 0x65, 0x24, 0xe6, 0x05, 0xc4, 0xa2, 0xc4, 0x89, 0x42, 0x97, 0x8a, 0x76, 0x6c, 0x61, - 0x4d, 0x44, 0x26, 0x5e, 0x40, 0xcc, 0x0c, 0x97, 0xe4, 0xd0, 0xbc, 0x1d, 0xb9, 0x1c, 0xaa, 0xbf, - 0x5b, 0x9d, 0x3a, 0x26, 0xb1, 0x9f, 0xa2, 0xa7, 0x50, 0x4f, 0x08, 0x8d, 0xa3, 0x90, 0x92, 0xdc, - 0xc5, 0xf7, 0x36, 0xf8, 0x0a, 0xe7, 0x34, 0xbc, 0x58, 0xf0, 0x79, 0x5e, 0xd6, 0x9f, 0xc1, 0xed, - 0x75, 0xf9, 0xd0, 0x03, 0xb8, 0x45, 0x25, 0xdc, 0xf2, 0x5c, 0xa1, 0xa4, 0x85, 0xdb, 0x32, 0x3c, - 0x74, 0xf5, 0xbf, 0xcb, 0xd0, 0x1e, 0xc7, 0x24, 0xcc, 0xba, 0x77, 0x6a, 0x33, 0x1b, 0xdd, 0x05, - 0x95, 0xa6, 0x94, 0x91, 0xa0, 0x58, 0xa5, 0xe2, 0x7a, 0x06, 0x0c, 0x5d, 0x74, 0x1f, 0x9a, 0x4e, - 0x14, 0xc4, 0x51, 0x28, 0x86, 0xee, 0xe6, 0xae, 0x6b, 0x2c, 0xb0, 0xa1, 0x8b, 0x8e, 0x40, 0xa3, - 0xb3, 0xa9, 0xb5, 0x42, 0xab, 0x2c, 0x8a, 0x0f, 0x24, 0x66, 0xe1, 0xce, 0xaa, 0xe4, 0x4e, 0xae, - 0x3c, 0xf3, 0x01, 0xb1, 0xc2, 0x59, 0x30, 0x25, 0x89, 0x70, 0x49, 0x15, 0xb7, 0x0b, 0x78, 0x24, - 0x50, 0xd4, 0x03, 0x95, 0x4f, 0x8f, 0x32, 0x3b, 0x88, 0x85, 0x4b, 0xaa, 0x78, 0x09, 0xa0, 0x43, - 0x28, 0xbf, 0x99, 0x77, 0x6b, 0xa2, 0x7f, 0x3b, 0x52, 0xff, 0x5e, 0x92, 0xf4, 0x95, 0xed, 0xcf, - 0x08, 0x2e, 0xbf, 0x99, 0xa3, 0x87, 0xb0, 0xed, 0x12, 0x9f, 0x30, 0xd2, 0xad, 0x0b, 0x62, 0x47, - 0x22, 0x9e, 0x8a, 0x00, 0xce, 0x09, 0xe8, 0x00, 0x2a, 0xdc, 0x8d, 0xaa, 0xe0, 0xb5, 0x25, 0x9e, - 0x11, 0x05, 0x98, 0x87, 0xb8, 0xf1, 0x68, 0x1a, 0x3a, 0xd6, 0x62, 0xf4, 0x20, 0xac, 0xd2, 0xe4, - 0x60, 0x31, 0x17, 0xfd, 0x8f, 0x32, 0xd4, 0x0b, 0x09, 0x48, 0x83, 0xca, 0x1b, 0x92, 0xe6, 0x2d, - 0xe6, 0x8f, 0xe8, 0x10, 0x9a, 0x6e, 0x34, 0x9b, 0xfa, 0xc4, 0x9a, 0x73, 0x86, 0xd8, 0xb9, 0x72, - 0x5e, 0xc2, 0x8d, 0x0c, 0xcd, 0x96, 0xfd, 0x1f, 0x54, 0x2f, 0x64, 0x39, 0x83, 0x6f, 0xbc, 0x72, - 0x5e, 0xc2, 0x75, 0x2f, 0x64, 0x59, 0xf8, 0x1e, 0xc0, 0x6c, 0x19, 0xaf, 0xf1, 0xc6, 0x9c, 0x97, - 0xb0, 0x3a, 0x93, 0x09, 
0x74, 0x49, 0xa8, 0x1f, 0x28, 0x47, 0x88, 0x13, 0xa8, 0x4c, 0x98, 0x46, - 0x91, 0x9f, 0x13, 0x54, 0xbe, 0x0d, 0x4e, 0xe0, 0xd8, 0x42, 0x01, 0x65, 0x49, 0x1e, 0xe7, 0xdb, - 0x54, 0xb9, 0x02, 0xca, 0x92, 0x2c, 0x7c, 0x1f, 0x1a, 0xd3, 0x94, 0x11, 0x9a, 0x13, 0x1a, 0x07, - 0xca, 0x51, 0xf3, 0xbc, 0x84, 0x41, 0x80, 0x82, 0x72, 0x52, 0x83, 0x2d, 0x11, 0xd4, 0x7b, 0xb0, - 0x9d, 0x75, 0x7a, 0xdd, 0x55, 0xa5, 0xef, 0x41, 0xc5, 0x88, 0x82, 0xb5, 0xa1, 0x53, 0xd8, 0x1b, - 0xd8, 0xa1, 0x43, 0xfc, 0x75, 0x1f, 0x91, 0xff, 0x6c, 0x7f, 0x0b, 0xee, 0xac, 0xcb, 0xc2, 0x4f, - 0xf1, 0x43, 0xa8, 0x3a, 0x91, 0x9b, 0x9d, 0xe0, 0x76, 0x7f, 0x57, 0x1a, 0x39, 0x26, 0x6c, 0x96, - 0x84, 0x83, 0xc8, 0x25, 0x58, 0x50, 0xf8, 0x05, 0xc1, 0x7f, 0x2d, 0xca, 0x8a, 0x3b, 0xb5, 0xc6, - 0xdf, 0x4d, 0x96, 0xe8, 0x27, 0x70, 0xe7, 0x05, 0x61, 0x72, 0x76, 0xfa, 0xd9, 0x22, 0xa7, 0xb0, - 0xfb, 0x61, 0x0e, 0x2e, 0x71, 0x08, 0x9d, 0x95, 0x0c, 0xd2, 0x17, 0xa6, 0xb7, 0xf1, 0xc6, 0x89, - 0xfd, 0x14, 0x6b, 0xf2, 0x32, 0x71, 0x91, 0xbc, 0x83, 0xfd, 0x17, 0x84, 0x8d, 0x63, 0x92, 0xd8, - 0xd9, 0x75, 0x6a, 0x32, 0x9b, 0x91, 0xcf, 0x95, 0x8a, 0xbe, 0x05, 0x75, 0x4e, 0x92, 0x69, 0x44, - 0x3d, 0x96, 0x8a, 0x56, 0xb4, 0xfb, 0x7b, 0x92, 0x92, 0x57, 0x45, 0xec, 0x82, 0xcc, 0x89, 0x8f, - 0x97, 0x5c, 0xfd, 0x19, 0x74, 0xd7, 0xd6, 0xe7, 0xdb, 0xcc, 0xce, 0xb2, 0xf2, 0xd1, 0xb3, 0xac, - 0xef, 0xc2, 0x0e, 0xbf, 0xbd, 0x8c, 0xd0, 0x89, 0x5c, 0x2f, 0xbc, 0xce, 0x95, 0xeb, 0x3f, 0x41, - 0x67, 0x15, 0xe6, 0x09, 0xbf, 0x87, 0x16, 0xc9, 0x81, 0x65, 0xcf, 0xda, 0xfd, 0x3b, 0xf2, 0xb1, - 0xce, 0xe3, 0x93, 0x34, 0x26, 0xb8, 0x59, 0xb0, 0x79, 0xab, 0x1e, 0xbd, 0x00, 0x58, 0x3a, 0x00, - 0x35, 0xa0, 0x66, 0x5e, 0x0d, 0x06, 0x86, 0x69, 0x6a, 0x25, 0xb4, 0x07, 0xbb, 0xa3, 0xb1, 0x65, - 0x5e, 0x9d, 0x98, 0x03, 0x3c, 0xbc, 0x9c, 0x0c, 0xc7, 0x23, 0xcb, 0x18, 0x4d, 0xf0, 0xaf, 0x9a, - 0x82, 0x3a, 0xd0, 0xba, 0x1a, 0xbd, 0x1c, 0x8d, 0x7f, 0x1e, 0x59, 0x06, 0xc6, 0x63, 0xac, 0x95, - 0x1f, 0xf5, 0xa1, 0xbd, 0xda, 0x10, 0x04, 0xb0, 0x7d, 0x6a, 0x4c, 0x9e, 0x0f, 0x2f, 0xb4, 0x12, - 0x52, 0x61, 0x6b, 0x62, 0x60, 0xd3, 0xd0, 0x14, 0xfe, 0x78, 0x82, 0x87, 0xc6, 0x99, 0x56, 0x7e, - 0xf4, 0x1c, 0x9a, 0xb2, 0x34, 0xd4, 0x02, 0xf5, 0x6a, 0x74, 0x6a, 0x9c, 0x0d, 0x47, 0xc6, 0xa9, - 0x56, 0x42, 0x35, 0xa8, 0xfc, 0xf2, 0xe3, 0x85, 0xa6, 0x70, 0xfc, 0x07, 0x73, 0x3c, 0xb2, 0x86, - 0xc6, 0xe4, 0x4c, 0x2b, 0xf3, 0xc4, 0x97, 0x78, 0x3c, 0x19, 0x3f, 0xd1, 0x2a, 0xfd, 0x7f, 0x2a, - 0xb0, 0xb3, 0xbc, 0xf2, 0x27, 0xc5, 0x96, 0x91, 0x09, 0x68, 0xb1, 0xff, 0xdc, 0x32, 0x53, 0x82, - 0xbe, 0xd8, 0x68, 0x24, 0xd1, 0xe0, 0x7d, 0x79, 0xbc, 0xab, 0x1f, 0x12, 0xbd, 0xf4, 0x8d, 0x82, - 0x3c, 0xb8, 0xeb, 0x88, 0x03, 0x36, 0x79, 0x2f, 0xb5, 0x48, 0x82, 0xbe, 0x94, 0xff, 0x08, 0x6d, - 0x3a, 0xce, 0xfb, 0xfa, 0x27, 0x58, 0xb1, 0x9f, 0xea, 0x25, 0xe4, 0xc0, 0xde, 0x35, 0x61, 0x6b, - 0xeb, 0x50, 0x24, 0xa7, 0xd8, 0x70, 0x20, 0xf7, 0x0f, 0x3e, 0xca, 0xc9, 0x8a, 0xf8, 0xd0, 0x93, - 0x8b, 0xbc, 0x6f, 0x58, 0xf4, 0xd5, 0x6a, 0x8e, 0x0d, 0x07, 0x6a, 0xff, 0xf0, 0x53, 0xb4, 0xac, - 0x1a, 0x06, 0xed, 0x9a, 0x30, 0xd9, 0xc0, 0x74, 0x65, 0x20, 0x6b, 0x1c, 0xbf, 0xdf, 0xdb, 0x18, - 0x17, 0x39, 0xa7, 0xdb, 0xe2, 0xaf, 0xf8, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xe3, - 0x4f, 0x0d, 0x96, 0x0b, 0x00, 0x00, + GoTypes: file_oc_oc_proto_goTypes, + DependencyIndexes: file_oc_oc_proto_depIdxs, + EnumInfos: file_oc_oc_proto_enumTypes, + MessageInfos: file_oc_oc_proto_msgTypes, + }.Build() + File_oc_oc_proto = out.File + file_oc_oc_proto_rawDesc = nil + file_oc_oc_proto_goTypes = nil + file_oc_oc_proto_depIdxs = nil 
} diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto index cf4aa145e6911..8c3ad32b9913f 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto @@ -36,6 +36,7 @@ syntax = "proto3"; package telemetry; +option go_package = ".;telemetry"; // Interface exported by Agent service OpenConfigTelemetry { diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go new file mode 100644 index 0000000000000..593e5a1e1002a --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc_grpc.pb.go @@ -0,0 +1,293 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package telemetry + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// OpenConfigTelemetryClient is the client API for OpenConfigTelemetry service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type OpenConfigTelemetryClient interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. + GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) +} + +type openConfigTelemetryClient struct { + cc grpc.ClientConnInterface +} + +func NewOpenConfigTelemetryClient(cc grpc.ClientConnInterface) OpenConfigTelemetryClient { + return &openConfigTelemetryClient{cc} +} + +func (c *openConfigTelemetryClient) TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &OpenConfigTelemetry_ServiceDesc.Streams[0], "/telemetry.OpenConfigTelemetry/telemetrySubscribe", opts...) 
+ if err != nil { + return nil, err + } + x := &openConfigTelemetryTelemetrySubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OpenConfigTelemetry_TelemetrySubscribeClient interface { + Recv() (*OpenConfigData, error) + grpc.ClientStream +} + +type openConfigTelemetryTelemetrySubscribeClient struct { + grpc.ClientStream +} + +func (x *openConfigTelemetryTelemetrySubscribeClient) Recv() (*OpenConfigData, error) { + m := new(OpenConfigData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *openConfigTelemetryClient) CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) { + out := new(CancelSubscriptionReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetrySubscriptions(ctx context.Context, in *GetSubscriptionsRequest, opts ...grpc.CallOption) (*GetSubscriptionsReply, error) { + out := new(GetSubscriptionsReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetTelemetryOperationalState(ctx context.Context, in *GetOperationalStateRequest, opts ...grpc.CallOption) (*GetOperationalStateReply, error) { + out := new(GetOperationalStateReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *openConfigTelemetryClient) GetDataEncodings(ctx context.Context, in *DataEncodingRequest, opts ...grpc.CallOption) (*DataEncodingReply, error) { + out := new(DataEncodingReply) + err := c.cc.Invoke(ctx, "/telemetry.OpenConfigTelemetry/getDataEncodings", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// OpenConfigTelemetryServer is the server API for OpenConfigTelemetry service. +// All implementations must embed UnimplementedOpenConfigTelemetryServer +// for forward compatibility +type OpenConfigTelemetryServer interface { + // Request an inline subscription for data at the specified path. + // The device should send telemetry data back on the same + // connection as the subscription request. + TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error + // Terminates and removes an existing telemetry subscription + CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) + // Get the list of current telemetry subscriptions from the + // target. This command returns a list of existing subscriptions + // not including those that are established via configuration. 
+ GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) + // Get Telemetry Agent Operational States + GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) + // Return the set of data encodings supported by the device for + // telemetry data + GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +// UnimplementedOpenConfigTelemetryServer must be embedded to have forward compatible implementations. +type UnimplementedOpenConfigTelemetryServer struct { +} + +func (UnimplementedOpenConfigTelemetryServer) TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method TelemetrySubscribe not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelTelemetrySubscription not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetrySubscriptions(context.Context, *GetSubscriptionsRequest) (*GetSubscriptionsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetrySubscriptions not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetTelemetryOperationalState(context.Context, *GetOperationalStateRequest) (*GetOperationalStateReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTelemetryOperationalState not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) GetDataEncodings(context.Context, *DataEncodingRequest) (*DataEncodingReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDataEncodings not implemented") +} +func (UnimplementedOpenConfigTelemetryServer) mustEmbedUnimplementedOpenConfigTelemetryServer() {} + +// UnsafeOpenConfigTelemetryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to OpenConfigTelemetryServer will +// result in compilation errors. 
+type UnsafeOpenConfigTelemetryServer interface { + mustEmbedUnimplementedOpenConfigTelemetryServer() +} + +func RegisterOpenConfigTelemetryServer(s grpc.ServiceRegistrar, srv OpenConfigTelemetryServer) { + s.RegisterService(&OpenConfigTelemetry_ServiceDesc, srv) +} + +func _OpenConfigTelemetry_TelemetrySubscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscriptionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OpenConfigTelemetryServer).TelemetrySubscribe(m, &openConfigTelemetryTelemetrySubscribeServer{stream}) +} + +type OpenConfigTelemetry_TelemetrySubscribeServer interface { + Send(*OpenConfigData) error + grpc.ServerStream +} + +type openConfigTelemetryTelemetrySubscribeServer struct { + grpc.ServerStream +} + +func (x *openConfigTelemetryTelemetrySubscribeServer) Send(m *OpenConfigData) error { + return x.ServerStream.SendMsg(m) +} + +func _OpenConfigTelemetry_CancelTelemetrySubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/cancelTelemetrySubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).CancelTelemetrySubscription(ctx, req.(*CancelSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetrySubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetrySubscriptions(ctx, req.(*GetSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetTelemetryOperationalState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOperationalStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getTelemetryOperationalState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetTelemetryOperationalState(ctx, req.(*GetOperationalStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _OpenConfigTelemetry_GetDataEncodings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DataEncodingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/telemetry.OpenConfigTelemetry/getDataEncodings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OpenConfigTelemetryServer).GetDataEncodings(ctx, req.(*DataEncodingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// OpenConfigTelemetry_ServiceDesc is the grpc.ServiceDesc for OpenConfigTelemetry service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var OpenConfigTelemetry_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "telemetry.OpenConfigTelemetry", + HandlerType: (*OpenConfigTelemetryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "cancelTelemetrySubscription", + Handler: _OpenConfigTelemetry_CancelTelemetrySubscription_Handler, + }, + { + MethodName: "getTelemetrySubscriptions", + Handler: _OpenConfigTelemetry_GetTelemetrySubscriptions_Handler, + }, + { + MethodName: "getTelemetryOperationalState", + Handler: _OpenConfigTelemetry_GetTelemetryOperationalState_Handler, + }, + { + MethodName: "getDataEncodings", + Handler: _OpenConfigTelemetry_GetDataEncodings_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "telemetrySubscribe", + Handler: _OpenConfigTelemetry_TelemetrySubscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "oc/oc.proto", +} diff --git a/plugins/inputs/jti_openconfig_telemetry/sample.conf b/plugins/inputs/jti_openconfig_telemetry/sample.conf new file mode 100644 index 0000000000000..1f9872a139d8b --- /dev/null +++ b/plugins/inputs/jti_openconfig_telemetry/sample.conf @@ -0,0 +1,49 @@ +# Subscribe and receive OpenConfig Telemetry data using JTI +[[inputs.jti_openconfig_telemetry]] + ## List of device addresses to collect telemetry from + servers = ["localhost:1883"] + + ## Authentication details. Username and password are required if the device + ## expects authentication. Client ID must be unique when connecting from + ## multiple instances of telegraf to the same device + username = "user" + password = "pass" + client_id = "telegraf" + + ## Frequency to get data + sample_frequency = "1000ms" + + ## Sensors to subscribe for + ## An identifier for each sensor can be provided in the path, separated by a space; + ## otherwise the sensor path will be used as the identifier + ## When an identifier is used, a list of space-separated sensors can be provided. + ## A single subscription will be created with all these sensors and data will + ## be saved to a measurement with the identifier as its name + sensors = [ + "/interfaces/", + "collection /components/ /lldp", + ] + + ## We allow specifying a sensor-group-level reporting rate. To do this, specify the + ## reporting rate as a duration at the beginning of the sensor paths / collection + ## name. Entries without a reporting rate use the configured sample frequency + sensors = [ + "1000ms customReporting /interfaces /lldp", + "2000ms collection /components", + "/interfaces", + ] + + ## Optional TLS Config + # enable_tls = true + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
+ ## Failed streams/calls will not be retried if 0 is provided + retry_delay = "1000ms" + + ## To treat all string values as tags, set this to true + str_as_tags = false diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 3535f8fce5b5a..c69e5f11b5dba 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -3,12 +3,13 @@ The [Kafka][kafka] consumer plugin reads from Kafka and creates metrics using one of the supported [input data formats][]. -For old kafka version (< 0.8), please use the [kafka_consumer_legacy][] input plugin -and use the old zookeeper connection method. +For old kafka version (< 0.8), please use the [kafka_consumer_legacy][] input +plugin and use the old zookeeper connection method. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read metrics from Kafka topics [[inputs.kafka_consumer]] ## Kafka brokers. brokers = ["localhost:9092"] @@ -35,7 +36,7 @@ and use the old zookeeper connection method. # insecure_skip_verify = false ## SASL authentication credentials. These settings should typically be used - ## with TLS encryption enabled using the "enable_tls" option. + ## with TLS encryption enabled # sasl_username = "kafka" # sasl_password = "secret" @@ -59,9 +60,20 @@ and use the old zookeeper connection method. ## SASL protocol version. When connecting to Azure EventHub set to 0. # sasl_version = 1 + # Disable Kafka metadata full fetch + # metadata_full = false + ## Name of the consumer group. # consumer_group = "telegraf_metrics_consumers" + ## Compression codec represents the various compression codecs recognized by + ## Kafka in messages. + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 ## Initial offset position; one of "oldest" or "newest". # offset = "oldest" @@ -82,6 +94,15 @@ and use the old zookeeper connection method. ## waiting until the next flush_interval. # max_undelivered_messages = 1000 + ## Maximum amount of time the consumer should take to process messages. If + ## the debug log prints messages from sarama about 'abandoning subscription + ## to [topic] because consuming was taking too long', increase this value to + ## longer than the time taken by the output plugin(s). + ## + ## Note that the effective timeout could be between 'max_processing_time' and + ## '2 * max_processing_time'. + # max_processing_time = "100ms" + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index a0b4b41cf6167..1edc1c0608fef 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -1,106 +1,31 @@ +//go:generate ../../../tools/readme_config_includer/generator package kafka_consumer import ( "context" + _ "embed" "fmt" - "log" "strings" "sync" "time" "github.com/Shopify/sarama" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/kafka" - "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) -const sampleConfig = ` - ## Kafka brokers. - brokers = ["localhost:9092"] - - ## Topics to consume. 
- topics = ["telegraf"] - - ## When set this tag will be added to all metrics with the topic as the value. - # topic_tag = "" - - ## Optional Client id - # client_id = "Telegraf" - - ## Set the minimal supported Kafka version. Setting this enables the use of new - ## Kafka features and APIs. Must be 0.10.2.0 or greater. - ## ex: version = "1.1.0" - # version = "" - - ## Optional TLS Config - # enable_tls = true - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## SASL authentication credentials. These settings should typically be used - ## with TLS encryption enabled using the "enable_tls" option. - # sasl_username = "kafka" - # sasl_password = "secret" - - ## Optional SASL: - ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI - ## (defaults to PLAIN) - # sasl_mechanism = "" - - ## used if sasl_mechanism is GSSAPI (experimental) - # sasl_gssapi_service_name = "" - # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH - # sasl_gssapi_auth_type = "KRB5_USER_AUTH" - # sasl_gssapi_kerberos_config_path = "/" - # sasl_gssapi_realm = "realm" - # sasl_gssapi_key_tab_path = "" - # sasl_gssapi_disable_pafxfast = false - - ## used if sasl_mechanism is OAUTHBEARER (experimental) - # sasl_access_token = "" - - ## SASL protocol version. When connecting to Azure EventHub set to 0. - # sasl_version = 1 - - ## Name of the consumer group. - # consumer_group = "telegraf_metrics_consumers" - - ## Initial offset position; one of "oldest" or "newest". - # offset = "oldest" - - ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". - # balance_strategy = "range" - - ## Maximum length of a message to consume, in bytes (default 0/unlimited); - ## larger messages are dropped - max_message_len = 1000000 - - ## Maximum messages to read from the broker that have not been written by an - ## output. For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. - ## - ## For example, if each message from the queue contains 10 metrics and the - ## output metric_batch_size is 1000, setting this to 100 will ensure that a - ## full batch is collected and the write is triggered immediately without - ## waiting until the next flush_interval. - # max_undelivered_messages = 1000 - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string const ( defaultMaxUndeliveredMessages = 1000 - defaultMaxMessageLen = 1000000 + defaultMaxProcessingTime = config.Duration(100 * time.Millisecond) defaultConsumerGroup = "telegraf_metrics_consumers" reconnectDelay = 5 * time.Second ) @@ -109,21 +34,17 @@ type empty struct{} type semaphore chan empty type KafkaConsumer struct { - Brokers []string `toml:"brokers"` - ClientID string `toml:"client_id"` - ConsumerGroup string `toml:"consumer_group"` - MaxMessageLen int `toml:"max_message_len"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - Offset string `toml:"offset"` - BalanceStrategy string `toml:"balance_strategy"` - Topics []string `toml:"topics"` - TopicTag string `toml:"topic_tag"` - Version string `toml:"version"` - - kafka.SASLAuth - - EnableTLS *bool `toml:"enable_tls"` - tls.ClientConfig + Brokers []string `toml:"brokers"` + ConsumerGroup string `toml:"consumer_group"` + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + MaxProcessingTime config.Duration `toml:"max_processing_time"` + Offset string `toml:"offset"` + BalanceStrategy string `toml:"balance_strategy"` + Topics []string `toml:"topics"` + TopicTag string `toml:"topic_tag"` + + kafka.ReadConfig Log telegraf.Logger `toml:"-"` @@ -143,23 +64,19 @@ type ConsumerGroup interface { } type ConsumerGroupCreator interface { - Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) + Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) } type SaramaCreator struct{} -func (*SaramaCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { - return sarama.NewConsumerGroup(brokers, group, config) +func (*SaramaCreator) Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) { + return sarama.NewConsumerGroup(brokers, group, cfg) } -func (k *KafkaConsumer) SampleConfig() string { +func (*KafkaConsumer) SampleConfig() string { return sampleConfig } -func (k *KafkaConsumer) Description() string { - return "Read metrics from Kafka topics" -} - func (k *KafkaConsumer) SetParser(parser parsers.Parser) { k.parser = parser } @@ -168,71 +85,38 @@ func (k *KafkaConsumer) Init() error { if k.MaxUndeliveredMessages == 0 { k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages } + if time.Duration(k.MaxProcessingTime) == 0 { + k.MaxProcessingTime = defaultMaxProcessingTime + } if k.ConsumerGroup == "" { k.ConsumerGroup = defaultConsumerGroup } - config := sarama.NewConfig() - config.Consumer.Return.Errors = true + cfg := sarama.NewConfig() // Kafka version 0.10.2.0 is required for consumer groups. - config.Version = sarama.V0_10_2_0 - - if k.Version != "" { - version, err := sarama.ParseKafkaVersion(k.Version) - if err != nil { - return err - } - - config.Version = version - } + cfg.Version = sarama.V0_10_2_0 - if k.EnableTLS != nil && *k.EnableTLS { - config.Net.TLS.Enable = true - } - - tlsConfig, err := k.ClientConfig.TLSConfig() - if err != nil { + if err := k.SetConfig(cfg); err != nil { return err } - if tlsConfig != nil { - config.Net.TLS.Config = tlsConfig - - // To maintain backwards compatibility, if the enable_tls option is not - // set TLS is enabled if a non-default TLS config is used. 
- if k.EnableTLS == nil { - k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") - config.Net.TLS.Enable = true - } - } - - if err := k.SetSASLConfig(config); err != nil { - return err - } - - if k.ClientID != "" { - config.ClientID = k.ClientID - } else { - config.ClientID = "Telegraf" - } - switch strings.ToLower(k.Offset) { case "oldest", "": - config.Consumer.Offsets.Initial = sarama.OffsetOldest + cfg.Consumer.Offsets.Initial = sarama.OffsetOldest case "newest": - config.Consumer.Offsets.Initial = sarama.OffsetNewest + cfg.Consumer.Offsets.Initial = sarama.OffsetNewest default: return fmt.Errorf("invalid offset %q", k.Offset) } switch strings.ToLower(k.BalanceStrategy) { case "range", "": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange case "roundrobin": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin case "sticky": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky + cfg.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky default: return fmt.Errorf("invalid balance strategy %q", k.BalanceStrategy) } @@ -241,7 +125,9 @@ func (k *KafkaConsumer) Init() error { k.ConsumerCreator = &SaramaCreator{} } - k.config = config + cfg.Consumer.MaxProcessingTime = time.Duration(k.MaxProcessingTime) + + k.config = cfg return nil } @@ -264,12 +150,14 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { go func() { defer k.wg.Done() for ctx.Err() == nil { - handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser) + handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser, k.Log) handler.MaxMessageLen = k.MaxMessageLen handler.TopicTag = k.TopicTag err := k.consumer.Consume(ctx, k.Topics, handler) if err != nil { acc.AddError(err) + // Ignore returned error as we cannot do anything about it anyway + //nolint:errcheck,revive internal.SleepContext(ctx, reconnectDelay) } } @@ -290,7 +178,7 @@ func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { return nil } -func (k *KafkaConsumer) Gather(acc telegraf.Accumulator) error { +func (k *KafkaConsumer) Gather(_ telegraf.Accumulator) error { return nil } @@ -306,12 +194,13 @@ type Message struct { session sarama.ConsumerGroupSession } -func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler { +func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser, log telegraf.Logger) *ConsumerGroupHandler { handler := &ConsumerGroupHandler{ acc: acc.WithTracking(maxUndelivered), sem: make(chan empty, maxUndelivered), undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered), parser: parser, + log: log, } return handler } @@ -329,6 +218,8 @@ type ConsumerGroupHandler struct { mu sync.Mutex undelivered map[telegraf.TrackingID]Message + + log telegraf.Logger } // Setup is called once when a new session is opened. It setups up the handler @@ -348,11 +239,11 @@ func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error { } // Run processes any delivered metrics during the lifetime of the session. 
-func (h *ConsumerGroupHandler) run(ctx context.Context) error { +func (h *ConsumerGroupHandler) run(ctx context.Context) { for { select { case <-ctx.Done(): - return nil + return case track := <-h.acc.Delivered(): h.onDelivery(track) } @@ -365,7 +256,7 @@ func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) { msg, ok := h.undelivered[track.ID()] if !ok { - log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID()) + h.log.Errorf("Could not mark message delivered: %d", track.ID()) return } @@ -428,7 +319,7 @@ func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, for { err := h.Reserve(ctx) if err != nil { - return nil + return err } select { diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 5973fa82a6629..cfc9e8b85b00a 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -6,11 +6,14 @@ import ( "time" "github.com/Shopify/sarama" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/kafka" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/parsers/value" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) type FakeConsumerGroup struct { @@ -22,10 +25,9 @@ type FakeConsumerGroup struct { errors chan error } -func (g *FakeConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error { +func (g *FakeConsumerGroup) Consume(_ context.Context, _ []string, handler sarama.ConsumerGroupHandler) error { g.handler = handler - g.handler.Setup(nil) - return nil + return g.handler.Setup(nil) } func (g *FakeConsumerGroup) Errors() <-chan error { @@ -41,10 +43,10 @@ type FakeCreator struct { ConsumerGroup *FakeConsumerGroup } -func (c *FakeCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { +func (c *FakeCreator) Create(brokers []string, group string, cfg *sarama.Config) (ConsumerGroup, error) { c.ConsumerGroup.brokers = brokers c.ConsumerGroup.group = group - c.ConsumerGroup.config = config + c.ConsumerGroup.config = cfg return c.ConsumerGroup, nil } @@ -63,13 +65,18 @@ func TestInit(t *testing.T) { require.Equal(t, plugin.MaxUndeliveredMessages, defaultMaxUndeliveredMessages) require.Equal(t, plugin.config.ClientID, "Telegraf") require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetOldest) + require.Equal(t, plugin.config.Consumer.MaxProcessingTime, 100*time.Millisecond) }, }, { name: "parses valid version string", plugin: &KafkaConsumer{ - Version: "1.0.0", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + Version: "1.0.0", + }, + }, + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.Version, sarama.V1_0_0_0) @@ -78,16 +85,24 @@ func TestInit(t *testing.T) { { name: "invalid version string", plugin: &KafkaConsumer{ - Version: "100", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + Version: "100", + }, + }, + Log: testutil.Logger{}, }, initError: true, }, { name: "custom client_id", plugin: &KafkaConsumer{ - ClientID: "custom", - Log: testutil.Logger{}, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientID: "custom", + }, + }, + Log: testutil.Logger{}, }, 
check: func(t *testing.T, plugin *KafkaConsumer) { require.Equal(t, plugin.config.ClientID, "custom") @@ -123,8 +138,12 @@ func TestInit(t *testing.T) { { name: "default tls with a tls config", plugin: &KafkaConsumer{ - ClientConfig: tls.ClientConfig{ - InsecureSkipVerify: true, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + }, }, Log: testutil.Logger{}, }, @@ -133,26 +152,29 @@ func TestInit(t *testing.T) { }, }, { - name: "disable tls", + name: "Insecure tls", plugin: &KafkaConsumer{ - EnableTLS: func() *bool { v := false; return &v }(), - ClientConfig: tls.ClientConfig{ - InsecureSkipVerify: true, + ReadConfig: kafka.ReadConfig{ + Config: kafka.Config{ + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + }, }, Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { - require.False(t, plugin.config.Net.TLS.Enable) + require.True(t, plugin.config.Net.TLS.Enable) }, }, { - name: "enable tls", + name: "custom max_processing_time", plugin: &KafkaConsumer{ - EnableTLS: func() *bool { v := true; return &v }(), - Log: testutil.Logger{}, + MaxProcessingTime: config.Duration(1000 * time.Millisecond), + Log: testutil.Logger{}, }, check: func(t *testing.T, plugin *KafkaConsumer) { - require.True(t, plugin.config.Net.TLS.Enable) + require.Equal(t, plugin.config.Consumer.MaxProcessingTime, 1000*time.Millisecond) }, }, } @@ -165,6 +187,8 @@ func TestInit(t *testing.T) { require.Error(t, err) return } + // No error path + require.NoError(t, err) tt.check(t, tt.plugin) }) @@ -203,15 +227,15 @@ func (s *FakeConsumerGroupSession) GenerationID() int32 { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { +func (s *FakeConsumerGroupSession) MarkOffset(_ string, _ int32, _ int64, _ string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { +func (s *FakeConsumerGroupSession) ResetOffset(_ string, _ int32, _ int64, _ string) { panic("not implemented") } -func (s *FakeConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { +func (s *FakeConsumerGroupSession) MarkMessage(_ *sarama.ConsumerMessage, _ string) { } func (s *FakeConsumerGroupSession) Context() context.Context { @@ -247,8 +271,12 @@ func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { func TestConsumerGroupHandler_Lifecycle(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} - cg := NewConsumerGroupHandler(acc, 1, parser) + parser := value.Parser{ + MetricName: "cpu", + DataType: "int", + } + require.NoError(t, parser.Init()) + cg := NewConsumerGroupHandler(acc, 1, &parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -263,8 +291,13 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { require.NoError(t, err) cancel() - err = cg.ConsumeClaim(session, &claim) - require.NoError(t, err) + // This produces a flappy testcase probably due to a race between context cancellation and consumption. + // Furthermore, it is not clear what the outcome of this test should be... + // err = cg.ConsumeClaim(session, &claim) + //require.NoError(t, err) + // So stick with the line below for now. 
+ //nolint:errcheck + cg.ConsumeClaim(session, &claim) err = cg.Cleanup(session) require.NoError(t, err) @@ -272,8 +305,12 @@ func TestConsumerGroupHandler_Lifecycle(t *testing.T) { func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} - cg := NewConsumerGroupHandler(acc, 1, parser) + parser := value.Parser{ + MetricName: "cpu", + DataType: "int", + } + require.NoError(t, parser.Init()) + cg := NewConsumerGroupHandler(acc, 1, &parser, testutil.Logger{}) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -293,7 +330,8 @@ func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { go func() { err := cg.ConsumeClaim(session, claim) - require.NoError(t, err) + require.Error(t, err) + require.EqualValues(t, "context canceled", err.Error()) }() acc.Wait(1) @@ -318,11 +356,12 @@ func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { func TestConsumerGroupHandler_Handle(t *testing.T) { tests := []struct { - name string - maxMessageLen int - topicTag string - msg *sarama.ConsumerMessage - expected []telegraf.Metric + name string + maxMessageLen int + topicTag string + msg *sarama.ConsumerMessage + expected []telegraf.Metric + expectedHandleError string }{ { name: "happy path", @@ -348,7 +387,8 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { Topic: "telegraf", Value: []byte("12345"), }, - expected: []telegraf.Metric{}, + expected: []telegraf.Metric{}, + expectedHandleError: "message exceeds max_message_len (actual 5, max 4)", }, { name: "parse error", @@ -356,7 +396,8 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { Topic: "telegraf", Value: []byte("not an integer"), }, - expected: []telegraf.Metric{}, + expected: []telegraf.Metric{}, + expectedHandleError: "strconv.Atoi: parsing \"integer\": invalid syntax", }, { name: "add topic tag", @@ -382,16 +423,26 @@ func TestConsumerGroupHandler_Handle(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { acc := &testutil.Accumulator{} - parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} - cg := NewConsumerGroupHandler(acc, 1, parser) + parser := value.Parser{ + MetricName: "cpu", + DataType: "int", + } + require.NoError(t, parser.Init()) + cg := NewConsumerGroupHandler(acc, 1, &parser, testutil.Logger{}) cg.MaxMessageLen = tt.maxMessageLen cg.TopicTag = tt.topicTag ctx := context.Background() session := &FakeConsumerGroupSession{ctx: ctx} - cg.Reserve(ctx) - cg.Handle(session, tt.msg) + require.NoError(t, cg.Reserve(ctx)) + err := cg.Handle(session, tt.msg) + if tt.expectedHandleError != "" { + require.Error(t, err) + require.EqualValues(t, tt.expectedHandleError, err.Error()) + } else { + require.NoError(t, err) + } testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) diff --git a/plugins/inputs/kafka_consumer/sample.conf b/plugins/inputs/kafka_consumer/sample.conf new file mode 100644 index 0000000000000..2357ec56ee041 --- /dev/null +++ b/plugins/inputs/kafka_consumer/sample.conf @@ -0,0 +1,99 @@ +# Read metrics from Kafka topics +[[inputs.kafka_consumer]] + ## Kafka brokers. + brokers = ["localhost:9092"] + + ## Topics to consume. + topics = ["telegraf"] + + ## When set this tag will be added to all metrics with the topic as the value. + # topic_tag = "" + + ## Optional Client id + # client_id = "Telegraf" + + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. 
Must be 0.10.2.0 or greater. + ## ex: version = "1.1.0" + # version = "" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## SASL authentication credentials. These settings should typically be used + ## with TLS encryption enabled + # sasl_username = "kafka" + # sasl_password = "secret" + + ## Optional SASL: + ## one of: OAUTHBEARER, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, GSSAPI + ## (defaults to PLAIN) + # sasl_mechanism = "" + + ## used if sasl_mechanism is GSSAPI (experimental) + # sasl_gssapi_service_name = "" + # ## One of: KRB5_USER_AUTH and KRB5_KEYTAB_AUTH + # sasl_gssapi_auth_type = "KRB5_USER_AUTH" + # sasl_gssapi_kerberos_config_path = "/" + # sasl_gssapi_realm = "realm" + # sasl_gssapi_key_tab_path = "" + # sasl_gssapi_disable_pafxfast = false + + ## used if sasl_mechanism is OAUTHBEARER (experimental) + # sasl_access_token = "" + + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + + # Disable Kafka metadata full fetch + # metadata_full = false + + ## Name of the consumer group. + # consumer_group = "telegraf_metrics_consumers" + + ## Compression codec represents the various compression codecs recognized by + ## Kafka in messages. + ## 0 : None + ## 1 : Gzip + ## 2 : Snappy + ## 3 : LZ4 + ## 4 : ZSTD + # compression_codec = 0 + ## Initial offset position; one of "oldest" or "newest". + # offset = "oldest" + + ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". + # balance_strategy = "range" + + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 1000000 + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Maximum amount of time the consumer should take to process messages. If + ## the debug log prints messages from sarama about 'abandoning subscription + ## to [topic] because consuming was taking too long', increase this value to + ## longer than the time taken by the output plugin(s). + ## + ## Note that the effective timeout could be between 'max_processing_time' and + ## '2 * max_processing_time'. + # max_processing_time = "100ms" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 2f0c219ea8647..881d226015742 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -1,16 +1,19 @@ # Kafka Consumer Legacy Input Plugin +**Deprecated in version 1.4. Please use [Kafka Consumer input plugin][]** + The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka -topic and adds messages to InfluxDB. 
The plugin assumes messages follow the -line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) -is used to talk to the Kafka cluster so multiple instances of telegraf can read -from the same topic in parallel. +topic and adds messages to InfluxDB. The plugin assumes messages follow the line +protocol. [Consumer Group][1] is used to talk to the Kafka cluster so multiple +instances of telegraf can read from the same topic in parallel. + +[1]: http://godoc.org/github.com/wvanbergen/kafka/consumergroup ## Configuration -```toml +```toml @sample.conf # Read metrics from Kafka topic(s) -[[inputs.kafka_consumer]] +[[inputs.kafka_consumer_legacy]] ## topic(s) to consume topics = ["telegraf"] @@ -41,3 +44,5 @@ from the same topic in parallel. Running integration tests requires running Zookeeper & Kafka. See Makefile for kafka container command. + +[Kafka Consumer input plugin]: ../kafka_consumer/README.md diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index 939fc8850ef5f..6a4abc8eb79bc 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -1,18 +1,24 @@ +//go:generate ../../../tools/readme_config_includer/generator package kafka_consumer_legacy import ( + _ "embed" "fmt" "strings" "sync" + "github.com/Shopify/sarama" + "github.com/wvanbergen/kafka/consumergroup" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - - "github.com/Shopify/sarama" - "github.com/wvanbergen/kafka/consumergroup" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Kafka struct { ConsumerGroup string Topics []string @@ -47,41 +53,10 @@ type Kafka struct { doNotCommitMsgs bool } -var sampleConfig = ` - ## topic(s) to consume - topics = ["telegraf"] - - ## an array of Zookeeper connection strings - zookeeper_peers = ["localhost:2181"] - - ## Zookeeper Chroot - zookeeper_chroot = "" - - ## the name of the consumer group - consumer_group = "telegraf_metrics_consumers" - - ## Offset (must be either "oldest" or "newest") - offset = "oldest" - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" - - ## Maximum length of a message to consume, in bytes (default 0/unlimited); - ## larger messages are dropped - max_message_len = 65536 -` - -func (k *Kafka) SampleConfig() string { +func (*Kafka) SampleConfig() string { return sampleConfig } -func (k *Kafka) Description() string { - return "Read metrics from Kafka topic(s)" -} - func (k *Kafka) SetParser(parser parsers.Parser) { k.parser = parser } @@ -140,11 +115,11 @@ func (k *Kafka) receiver() { return case err := <-k.errs: if err != nil { - k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err)) + k.acc.AddError(fmt.Errorf("consumer Error: %s", err)) } case msg := <-k.in: if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { - k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)", + k.acc.AddError(fmt.Errorf("message longer than max_message_len (%d > %d)", len(msg.Value), k.MaxMessageLen)) } else { metrics, err := k.parser.Parse(msg.Value) @@ -161,8 +136,11 @@ func (k *Kafka) receiver() { // TODO(cam) this locking can be removed if this PR gets merged: // https://github.com/wvanbergen/kafka/pull/84 k.Lock() - k.Consumer.CommitUpto(msg) + err := k.Consumer.CommitUpto(msg) k.Unlock() + if err != nil { + k.acc.AddError(fmt.Errorf("committing to consumer failed: %v", err)) + } } } } @@ -173,11 +151,11 @@ func (k *Kafka) Stop() { defer k.Unlock() close(k.done) if err := k.Consumer.Close(); err != nil { - k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error())) + k.acc.AddError(fmt.Errorf("error closing consumer: %s", err.Error())) } } -func (k *Kafka) Gather(acc telegraf.Accumulator) error { +func (k *Kafka) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 31bea2210b741..19befb1f65308 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -6,14 +6,13 @@ import ( "time" "github.com/Shopify/sarama" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" ) -func TestReadsMetricsFromKafka(t *testing.T) { +func TestReadsMetricsFromKafkaIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -51,7 +50,7 @@ func TestReadsMetricsFromKafka(t *testing.T) { var acc testutil.Accumulator // Sanity check - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") + require.Equal(t, 0, len(acc.Metrics), "There should not be any points") if err := k.Start(&acc); err != nil { t.Fatal(err.Error()) } else { @@ -65,25 +64,26 @@ func TestReadsMetricsFromKafka(t *testing.T) { require.NoError(t, err) if len(acc.Metrics) == 1 { point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ + require.Equal(t, "cpu_load_short", point.Measurement) + require.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) + require.Equal(t, map[string]string{ "host": 
"server01", "direction": "in", "region": "us-west", }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) + require.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) } else { t.Errorf("No points found in accumulator, expected 1") } } -// Waits for the metric that was sent to the kafka broker to arrive at the kafka -// consumer +//nolint:unused // Used in skipped tests +// Waits for the metric that was sent to the kafka broker to arrive at the kafka consumer func waitForPoint(acc *testutil.Accumulator, t *testing.T) { // Give the kafka container up to 2 seconds to get the point to the consumer ticker := time.NewTicker(5 * time.Millisecond) counter := 0 + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go index 8037f49a053b5..50987fb32cafd 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go @@ -4,11 +4,14 @@ import ( "strings" "testing" + "github.com/Shopify/sarama" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/graphite" + "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/Shopify/sarama" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const ( @@ -41,12 +44,14 @@ func TestRunParser(t *testing.T) { k.acc = &acc defer close(k.done) - k.parser, _ = parsers.NewInfluxParser() + var err error + k.parser, err = parsers.NewInfluxParser() + require.NoError(t, err) go k.receiver() in <- saramaMsg(testMsg) acc.Wait(1) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) } // Test that the parser ignores invalid messages @@ -56,12 +61,14 @@ func TestRunParserInvalidMsg(t *testing.T) { k.acc = &acc defer close(k.done) - k.parser, _ = parsers.NewInfluxParser() + var err error + k.parser, err = parsers.NewInfluxParser() + require.NoError(t, err) go k.receiver() in <- saramaMsg(invalidMsg) acc.WaitError(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } // Test that overlong messages are dropped @@ -78,7 +85,7 @@ func TestDropOverlongMsg(t *testing.T) { in <- saramaMsg(overlongMsg) acc.WaitError(1) - assert.Equal(t, acc.NFields(), 0) + require.Equal(t, acc.NFields(), 0) } // Test that the parser parses kafka messages into points @@ -88,14 +95,16 @@ func TestRunParserAndGather(t *testing.T) { k.acc = &acc defer close(k.done) - k.parser, _ = parsers.NewInfluxParser() + var err error + k.parser, err = parsers.NewInfluxParser() + require.NoError(t, err) go k.receiver() in <- saramaMsg(testMsg) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 1) + require.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short", map[string]interface{}{"value": float64(23422)}) } @@ -107,14 +116,16 @@ func TestRunParserAndGatherGraphite(t *testing.T) { k.acc = &acc defer close(k.done) - k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) + p := graphite.Parser{Separator: "_", Templates: []string{}} + require.NoError(t, p.Init()) + k.parser = &p go k.receiver() in <- saramaMsg(testMsgGraphite) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 
1) + require.Equal(t, acc.NFields(), 1) acc.AssertContainsFields(t, "cpu_load_short_graphite", map[string]interface{}{"value": float64(23422)}) } @@ -126,17 +137,18 @@ func TestRunParserAndGatherJSON(t *testing.T) { k.acc = &acc defer close(k.done) - k.parser, _ = parsers.NewParser(&parsers.Config{ - DataFormat: "json", + parser := &json.Parser{ MetricName: "kafka_json_test", - }) + } + require.NoError(t, parser.Init()) + k.parser = parser go k.receiver() in <- saramaMsg(testMsgJSON) acc.Wait(1) - acc.GatherError(k.Gather) + require.NoError(t, acc.GatherError(k.Gather)) - assert.Equal(t, acc.NFields(), 2) + require.Equal(t, acc.NFields(), 2) acc.AssertContainsFields(t, "kafka_json_test", map[string]interface{}{ "a": float64(5), diff --git a/plugins/inputs/kafka_consumer_legacy/sample.conf b/plugins/inputs/kafka_consumer_legacy/sample.conf new file mode 100644 index 0000000000000..3646b66aa8937 --- /dev/null +++ b/plugins/inputs/kafka_consumer_legacy/sample.conf @@ -0,0 +1,26 @@ +# Read metrics from Kafka topic(s) +[[inputs.kafka_consumer_legacy]] + ## topic(s) to consume + topics = ["telegraf"] + + ## an array of Zookeeper connection strings + zookeeper_peers = ["localhost:2181"] + + ## Zookeeper Chroot + zookeeper_chroot = "" + + ## the name of the consumer group + consumer_group = "telegraf_metrics_consumers" + + ## Offset (must be either "oldest" or "newest") + offset = "oldest" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 65536 diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md index 6a70387ee587b..55cf9e356b118 100644 --- a/plugins/inputs/kapacitor/README.md +++ b/plugins/inputs/kapacitor/README.md @@ -2,9 +2,10 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints [[inputs.kapacitor]] ## Multiple URLs from which to read Kapacitor-formatted JSON ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". @@ -23,287 +24,362 @@ The Kapacitor plugin collects metrics from the given Kapacitor instances. 
# insecure_skip_verify = false ``` -### Measurements and fields +## Measurements and fields - [kapacitor](#kapacitor) - - [num_enabled_tasks](#num_enabled_tasks) _(integer)_ - - [num_subscriptions](#num_subscriptions) _(integer)_ - - [num_tasks](#num_tasks) _(integer)_ + - [num_enabled_tasks](#num_enabled_tasks) _(integer)_ + - [num_subscriptions](#num_subscriptions) _(integer)_ + - [num_tasks](#num_tasks) _(integer)_ - [kapacitor_alert](#kapacitor_alert) - - [notification_dropped](#notification_dropped) _(integer)_ - - [primary-handle-count](#primary-handle-count) _(integer)_ - - [secondary-handle-count](#secondary-handle-count) _(integer)_ + - [notification_dropped](#notification_dropped) _(integer)_ + - [primary-handle-count](#primary-handle-count) _(integer)_ + - [secondary-handle-count](#secondary-handle-count) _(integer)_ - (Kapacitor Enterprise only) [kapacitor_cluster](#kapacitor_cluster) _(integer)_ - - [dropped_member_events](#dropped_member_events) _(integer)_ - - [dropped_user_events](#dropped_user_events) _(integer)_ - - [query_handler_errors](#query_handler_errors) _(integer)_ + - [dropped_member_events](#dropped_member_events) _(integer)_ + - [dropped_user_events](#dropped_user_events) _(integer)_ + - [query_handler_errors](#query_handler_errors) _(integer)_ - [kapacitor_edges](#kapacitor_edges) - - [collected](#collected) _(integer)_ - - [emitted](#emitted) _(integer)_ + - [collected](#collected) _(integer)_ + - [emitted](#emitted) _(integer)_ - [kapacitor_ingress](#kapacitor_ingress) - - [points_received](#points_received) _(integer)_ + - [points_received](#points_received) _(integer)_ - [kapacitor_load](#kapacitor_load) - - [errors](#errors) _(integer)_ + - [errors](#errors) _(integer)_ - [kapacitor_memstats](#kapacitor_memstats) - - [alloc_bytes](#alloc_bytes) _(integer)_ - - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ - - [frees](#frees) _(integer)_ - - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ - - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ - - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ - - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ - - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ - - [heap_objects](#heap_objects) _(integer)_ - - [heap_released_bytes](#heap_released_bytes) _(integer)_ - - [heap_sys_bytes](#heap_sys_bytes) _(integer)_ - - [last_gc_ns](#last_gc_ns) _(integer)_ - - [lookups](#lookups) _(integer)_ - - [mallocs](#mallocs) _(integer)_ - - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_ - - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_ - - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_ - - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_ - - [next_gc_ns](#next_gc_ns) _(integer)_ - - [num_gc](#num_gc) _(integer)_ - - [other_sys_bytes](#other_sys_bytes) _(integer)_ - - [pause_total_ns](#pause_total_ns) _(integer)_ - - [stack_in_use_bytes](#stack_in_use_bytes) _(integer)_ - - [stack_sys_bytes](#stack_sys_bytes) _(integer)_ - - [sys_bytes](#sys_bytes) _(integer)_ - - [total_alloc_bytes](#total_alloc_bytes) _(integer)_ + - [alloc_bytes](#alloc_bytes) _(integer)_ + - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ + - [frees](#frees) _(integer)_ + - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ + - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ + - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ + - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ + - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ + - [heap_objects](#heap_objects) _(integer)_ + - [heap_released_bytes](#heap_released_bytes) _(integer)_ + - 
[heap_sys_bytes](#heap_sys_bytes) _(integer)_ + - [last_gc_ns](#last_gc_ns) _(integer)_ + - [lookups](#lookups) _(integer)_ + - [mallocs](#mallocs) _(integer)_ + - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_ + - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_ + - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_ + - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_ + - [next_gc_ns](#next_gc_ns) _(integer)_ + - [num_gc](#num_gc) _(integer)_ + - [other_sys_bytes](#other_sys_bytes) _(integer)_ + - [pause_total_ns](#pause_total_ns) _(integer)_ + - [stack_in_use_bytes](#stack_in_use_bytes) _(integer)_ + - [stack_sys_bytes](#stack_sys_bytes) _(integer)_ + - [sys_bytes](#sys_bytes) _(integer)_ + - [total_alloc_bytes](#total_alloc_bytes) _(integer)_ - [kapacitor_nodes](#kapacitor_nodes) - - [alerts_inhibited](#alerts_inhibited) _(integer)_ - - [alerts_triggered](#alerts_triggered) _(integer)_ - - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_ - - [crits_triggered](#crits_triggered) _(integer)_ - - [errors](#errors) _(integer)_ - - [infos_triggered](#infos_triggered) _(integer)_ - - [oks_triggered](#oks_triggered) _(integer)_ - - [points_written](#points_written) _(integer)_ - - [warns_triggered](#warns_triggered) _(integer)_ - - [write_errors](#write_errors) _(integer)_ + - [alerts_inhibited](#alerts_inhibited) _(integer)_ + - [alerts_triggered](#alerts_triggered) _(integer)_ + - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_ + - [crits_triggered](#crits_triggered) _(integer)_ + - [errors](#errors) _(integer)_ + - [infos_triggered](#infos_triggered) _(integer)_ + - [oks_triggered](#oks_triggered) _(integer)_ + - [points_written](#points_written) _(integer)_ + - [warns_triggered](#warns_triggered) _(integer)_ + - [write_errors](#write_errors) _(integer)_ - [kapacitor_topics](#kapacitor_topics) - - [collected](#collected) _(integer)_ - + - [collected](#collected) _(integer)_ --- -### kapacitor -The `kapacitor` measurement stores fields with information related to -[Kapacitor tasks](https://docs.influxdata.com/kapacitor/latest/introduction/getting-started/#kapacitor-tasks) -and [subscriptions](https://docs.influxdata.com/kapacitor/latest/administration/subscription-management/). +## kapacitor + +The `kapacitor` measurement stores fields with information related to [Kapacitor +tasks][tasks] and [subscriptions][subs]. + +[tasks]: https://docs.influxdata.com/kapacitor/latest/introduction/getting-started/#kapacitor-tasks + +[subs]: https://docs.influxdata.com/kapacitor/latest/administration/subscription-management/ + +### num_enabled_tasks -#### num_enabled_tasks The number of enabled Kapacitor tasks. -#### num_subscriptions +### num_subscriptions + The number of Kapacitor/InfluxDB subscriptions. -#### num_tasks +### num_tasks + The total number of Kapacitor tasks. --- -### kapacitor_alert +## kapacitor_alert + The `kapacitor_alert` measurement stores fields with information related to [Kapacitor alerts](https://docs.influxdata.com/kapacitor/v1.5/working/alerts/). -#### notification-dropped -The number of internal notifications dropped because they arrive too late from another Kapacitor node. -If this count is increasing, Kapacitor Enterprise nodes aren't able to communicate fast enough -to keep up with the volume of alerts. +### notification-dropped + +The number of internal notifications dropped because they arrive too late from +another Kapacitor node. 
If this count is increasing, Kapacitor Enterprise nodes +aren't able to communicate fast enough to keep up with the volume of alerts. -#### primary-handle-count -The number of times this node handled an alert as the primary. This count should increase under normal conditions. +### primary-handle-count -#### secondary-handle-count -The number of times this node handled an alert as the secondary. An increase in this counter indicates that the primary is failing to handle alerts in a timely manner. +The number of times this node handled an alert as the primary. This count should +increase under normal conditions. + +### secondary-handle-count + +The number of times this node handled an alert as the secondary. An increase in +this counter indicates that the primary is failing to handle alerts in a timely +manner. --- -### kapacitor_cluster -The `kapacitor_cluster` measurement reflects the ability of [Kapacitor nodes to communicate](https://docs.influxdata.com/enterprise_kapacitor/v1.5/administration/configuration/#cluster-communications) with one another. Specifically, these metrics track the gossip communication between the Kapacitor nodes. +## kapacitor_cluster + +The `kapacitor_cluster` measurement reflects the ability of [Kapacitor nodes to +communicate][cluster] with one another. Specifically, these metrics track the +gossip communication between the Kapacitor nodes. + +[cluster]: https://docs.influxdata.com/enterprise_kapacitor/v1.5/administration/configuration/#cluster-communications + +### dropped_member_events -#### dropped_member_events The number of gossip member events that were dropped. -#### dropped_user_events +### dropped_user_events + The number of gossip user events that were dropped. --- -### kapacitor_edges +## kapacitor_edges + The `kapacitor_edges` measurement stores fields with information related to -[edges](https://docs.influxdata.com/kapacitor/latest/tick/introduction/#pipelines) -in Kapacitor TICKscripts. +[edges][] in Kapacitor TICKscripts. + +[edges]: https://docs.influxdata.com/kapacitor/latest/tick/introduction/#pipelines + +### collected -#### collected The number of messages collected by TICKscript edges. -#### emitted +### emitted + The number of messages emitted by TICKscript edges. --- -### kapacitor_ingress -The `kapacitor_ingress` measurement stores fields with information related to data -coming into Kapacitor. +## kapacitor_ingress + +The `kapacitor_ingress` measurement stores fields with information related to +data coming into Kapacitor. + +### points_received -#### points_received The number of points received by Kapacitor. --- -### kapacitor_load +## kapacitor_load + The `kapacitor_load` measurement stores fields with information related to the -[Kapacitor Load Directory service](https://docs.influxdata.com/kapacitor/latest/guides/load_directory/). +[Kapacitor Load Directory service][load-dir]. + +[load-dir]: https://docs.influxdata.com/kapacitor/latest/guides/load_directory/ + +### errors -#### errors The number of errors reported from the load directory service. --- -### kapacitor_memstats -The `kapacitor_memstats` measurement stores fields related to Kapacitor memory usage. +## kapacitor_memstats + +The `kapacitor_memstats` measurement stores fields related to Kapacitor memory +usage. + +### alloc_bytes -#### alloc_bytes The number of bytes of memory allocated by Kapacitor that are still in use. -#### buck_hash_sys_bytes +### buck_hash_sys_bytes + The number of bytes of memory used by the profiling bucket hash table. 
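The `kapacitor_memstats` fields above and below mirror the counters in Go's `runtime.MemStats`. For orientation only, a minimal Go sketch (not Kapacitor or Telegraf code) that reads the same runtime counters these fields are derived from:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Snapshot the Go runtime's memory statistics; Kapacitor surfaces
	// these counters through its /kapacitor/v1/debug/vars endpoint.
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	fmt.Println("alloc_bytes:", ms.Alloc)             // bytes allocated and still in use
	fmt.Println("heap_idle_bytes:", ms.HeapIdle)      // heap bytes waiting to be used
	fmt.Println("gc_cpu_fraction:", ms.GCCPUFraction) // fraction of CPU time spent in GC
	fmt.Println("num_gc:", ms.NumGC)                  // completed GC cycles
}
```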
-#### frees +### frees + The number of heap objects freed. -#### gc_sys_bytes +### gc_sys_bytes + The number of bytes of memory used for garbage collection system metadata. -#### gc_cpu_fraction +### gc_cpu_fraction + The fraction of Kapacitor's available CPU time used by garbage collection since Kapacitor started. -#### heap_alloc_bytes +### heap_alloc_bytes + The number of reachable and unreachable heap objects garbage collection has not freed. -#### heap_idle_bytes +### heap_idle_bytes + The number of heap bytes waiting to be used. -#### heap_in_use_bytes +### heap_in_use_bytes + The number of heap bytes in use. -#### heap_objects +### heap_objects + The number of allocated objects. -#### heap_released_bytes +### heap_released_bytes + The number of heap bytes released to the operating system. -#### heap_sys_bytes +### heap_sys_bytes + The number of heap bytes obtained from `system`. -#### last_gc_ns +### last_gc_ns + The nanosecond epoch time of the last garbage collection. -#### lookups +### lookups + The total number of pointer lookups. -#### mallocs +### mallocs + The total number of mallocs. -#### mcache_in_use_bytes +### mcache_in_use_bytes + The number of bytes in use by mcache structures. -#### mcache_sys_bytes +### mcache_sys_bytes + The number of bytes used for mcache structures obtained from `system`. -#### mspan_in_use_bytes +### mspan_in_use_bytes + The number of bytes in use by mspan structures. -#### mspan_sys_bytes +### mspan_sys_bytes + The number of bytes used for mspan structures obtained from `system`. -#### next_gc_ns +### next_gc_ns + The nanosecond epoch time of the next garbage collection. -#### num_gc +### num_gc + The number of completed garbage collection cycles. -#### other_sys_bytes +### other_sys_bytes + The number of bytes used for other system allocations. -#### pause_total_ns +### pause_total_ns + The total number of nanoseconds spent in garbage collection "stop-the-world" pauses since Kapacitor started. -#### stack_in_use_bytes +### stack_in_use_bytes + The number of bytes in use by the stack allocator. -#### stack_sys_bytes +### stack_sys_bytes + The number of bytes obtained from `system` for the stack allocator. -#### sys_bytes +### sys_bytes + The number of bytes of memory obtained from `system`. -#### total_alloc_bytes +### total_alloc_bytes + The total number of bytes allocated, even if freed. --- -### kapacitor_nodes +## kapacitor_nodes + The `kapacitor_nodes` measurement stores fields related to events that occur in [TICKscript nodes](https://docs.influxdata.com/kapacitor/latest/nodes/). -#### alerts_inhibited +### alerts_inhibited + The total number of alerts inhibited by TICKscripts. -#### alerts_triggered +### alerts_triggered + The total number of alerts triggered by TICKscripts. -#### avg_exec_time_ns +### avg_exec_time_ns + The average execution time of TICKscripts in nanoseconds. -#### crits_triggered +### crits_triggered + The number of critical (`crit`) alerts triggered by TICKscripts. -#### errors +### errors (from TICKscripts) + The number of errors caused by TICKscripts. -#### infos_triggered +### infos_triggered + The number of info (`info`) alerts triggered by TICKscripts. -#### oks_triggered +### oks_triggered + The number of ok (`ok`) alerts triggered by TICKscripts. -#### points_written +### points_written + The number of points written to InfluxDB or back to Kapacitor. -#### warns_triggered +### warns_triggered + The number of warning (`warn`) alerts triggered by TICKscripts. -#### working_cardinality +### working_cardinality + The total number of unique series processed. 
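All of the fields documented above arrive in a single JSON document that the plugin polls from Kapacitor's debug-vars endpoint (the full plugin code appears in the Go diff further below). A stripped-down, illustrative fetch of that document; the top-level key shown is taken from this README and the exact JSON layout is not verified here:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://localhost:9092/kapacitor/v1/debug/vars")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	// Decode loosely into a map; the real plugin decodes into typed structs
	// and strips the high-cardinality host/cluster_id/server_id tags.
	var vars map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&vars); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("num_enabled_tasks:", vars["num_enabled_tasks"])
}
```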
-#### write_errors -The number of errors that occurred when writing to InfluxDB or other write endpoints. +### write_errors + +The number of errors that occurred when writing to InfluxDB or other write +endpoints. --- -### kapacitor_topics -The `kapacitor_topics` measurement stores fields related to -Kapacitor topics](https://docs.influxdata.com/kapacitor/latest/working/using_alert_topics/). -#### collected +## kapacitor_topics + +The `kapacitor_topics` measurement stores fields related to [Kapacitor +topics][topics]. + +[topics]: https://docs.influxdata.com/kapacitor/latest/working/using_alert_topics/ + +### collected (kapacitor_topics) + The number of events collected by Kapacitor topics. --- -*Note:* The Kapacitor variables `host`, `cluster_id`, and `server_id` +__Note:__ The Kapacitor variables `host`, `cluster_id`, and `server_id` are currently not recorded due to the potential high cardinality of these values. ## Example Output -``` +```shell $ telegraf --config /etc/telegraf.conf --input-filter kapacitor --test * Plugin: inputs.kapacitor, Collection 1 > kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gc_cpu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000 diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go index dd3303a7419d3..f18870ccd7c2b 100644 --- a/plugins/inputs/kapacitor/kapacitor.go +++ b/plugins/inputs/kapacitor/kapacitor.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package kapacitor import ( + _ "embed" "encoding/json" "fmt" "net/http" @@ -8,50 +10,34 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( defaultURL = "http://localhost:9092/kapacitor/v1/debug/vars" ) type Kapacitor struct { URLs []string `toml:"urls"` - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client } -func (*Kapacitor) Description() string { - return "Read Kapacitor-formatted JSON metrics from one or more HTTP endpoints" -} - func (*Kapacitor) SampleConfig() string { - return ` - ## Multiple URLs from which to read Kapacitor-formatted JSON - ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". 
- urls = [ - "http://localhost:9092/kapacitor/v1/debug/vars" - ] - - ## Time limit for http requests - timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` + return sampleConfig } func (k *Kapacitor) Gather(acc telegraf.Accumulator) error { if k.client == nil { - client, err := k.createHttpClient() + client, err := k.createHTTPClient() if err != nil { return err } @@ -73,7 +59,7 @@ func (k *Kapacitor) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Kapacitor) createHttpClient() (*http.Client, error) { +func (k *Kapacitor) createHTTPClient() (*http.Client, error) { tlsCfg, err := k.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -83,7 +69,7 @@ func (k *Kapacitor) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: k.Timeout.Duration, + Timeout: time.Duration(k.Timeout), } return client, nil @@ -216,13 +202,10 @@ func (k *Kapacitor) gatherURL( if s.Kapacitor != nil { for _, obj := range *s.Kapacitor { - // Strip out high-cardinality or duplicative tags excludeTags := []string{"host", "cluster_id", "server_id"} for _, key := range excludeTags { - if _, ok := obj.Tags[key]; ok { - delete(obj.Tags, key) - } + delete(obj.Tags, key) } // Convert time-related string field to int @@ -250,7 +233,7 @@ func init() { inputs.Add("kapacitor", func() telegraf.Input { return &Kapacitor{ URLs: []string{defaultURL}, - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } }) } diff --git a/plugins/inputs/kapacitor/kapacitor_test.go b/plugins/inputs/kapacitor/kapacitor_test.go index cae1f9ce30e77..163af10601f0a 100644 --- a/plugins/inputs/kapacitor/kapacitor_test.go +++ b/plugins/inputs/kapacitor/kapacitor_test.go @@ -74,7 +74,8 @@ func TestKapacitor(t *testing.T) { func TestMissingStats(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte(`{}`)) + _, err := w.Write([]byte(`{}`)) + require.NoError(t, err) })) defer server.Close() @@ -83,7 +84,7 @@ func TestMissingStats(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) require.False(t, acc.HasField("kapacitor_memstats", "alloc_bytes")) require.True(t, acc.HasField("kapacitor", "num_tasks")) @@ -92,7 +93,8 @@ func TestMissingStats(t *testing.T) { func TestErrorHandling(t *testing.T) { badServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte("not json")) + _, err := w.Write([]byte("not json")) + require.NoError(t, err) } else { w.WriteHeader(http.StatusNotFound) } @@ -104,7 +106,7 @@ func TestErrorHandling(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) acc.WaitError(1) require.Equal(t, uint64(0), acc.NMetrics()) } @@ -120,7 +122,7 @@ func TestErrorHandling404(t *testing.T) { } var acc testutil.Accumulator - plugin.Gather(&acc) + require.NoError(t, plugin.Gather(&acc)) acc.WaitError(1) require.Equal(t, uint64(0), acc.NMetrics()) } diff --git a/plugins/inputs/kapacitor/sample.conf b/plugins/inputs/kapacitor/sample.conf new file mode 100644 index 0000000000000..0a7a64c8622d8 --- /dev/null +++ b/plugins/inputs/kapacitor/sample.conf @@ -0,0 +1,17 @@ +# Read 
Kapacitor-formatted JSON metrics from one or more HTTP endpoints +[[inputs.kapacitor]] + ## Multiple URLs from which to read Kapacitor-formatted JSON + ## Default is "http://localhost:9092/kapacitor/v1/debug/vars". + urls = [ + "http://localhost:9092/kapacitor/v1/debug/vars" + ] + + ## Time limit for http requests + timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/kernel/README.md b/plugins/inputs/kernel/README.md index 0f28bf7770370..8b38e2a415a5b 100644 --- a/plugins/inputs/kernel/README.md +++ b/plugins/inputs/kernel/README.md @@ -3,13 +3,14 @@ This plugin is only available on Linux. The kernel plugin gathers info about the kernel that doesn't fit into other -plugins. In general, it is the statistics available in `/proc/stat` that are -not covered by other plugins as well as the value of `/proc/sys/kernel/random/entropy_avail` +plugins. In general, it is the statistics available in `/proc/stat` that are not +covered by other plugins as well as the value of +`/proc/sys/kernel/random/entropy_avail`. The metrics are documented in `man proc` under the `/proc/stat` section. -The metrics are documented in `man 4 random` under the `/proc/stat` section. +The entropy value is documented in `man 4 random`. -``` +```text /proc/sys/kernel/random/entropy_avail @@ -39,32 +40,32 @@ processes 86031 Number of forks since boot. ``` -### Configuration: +## Configuration -```toml +```toml @sample.conf # Get kernel statistics from /proc/stat [[inputs.kernel]] # no configuration ``` -### Measurements & Fields: +## Measurements & Fields - kernel - - boot_time (integer, seconds since epoch, `btime`) - - context_switches (integer, `ctxt`) - - disk_pages_in (integer, `page (0)`) - - disk_pages_out (integer, `page (1)`) - - interrupts (integer, `intr`) - - processes_forked (integer, `processes`) - - entropy_avail (integer, `entropy_available`) + - boot_time (integer, seconds since epoch, `btime`) + - context_switches (integer, `ctxt`) + - disk_pages_in (integer, `page (0)`) + - disk_pages_out (integer, `page (1)`) + - interrupts (integer, `intr`) + - processes_forked (integer, `processes`) + - entropy_avail (integer, `entropy_available`) -### Tags: +## Tags None -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter kernel --test * Plugin: kernel, Collection 1 > kernel entropy_available=2469i,boot_time=1457505775i,context_switches=2626618i,disk_pages_in=5741i,disk_pages_out=1808i,interrupts=1472736i,processes_forked=10673i 1457613402960879816 diff --git a/plugins/inputs/kernel/kernel.go b/plugins/inputs/kernel/kernel.go index 461c9564a38e9..2b60d63385e87 100644 --- a/plugins/inputs/kernel/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -1,11 +1,13 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux // +build linux package kernel import ( "bytes" + _ "embed" "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -14,13 +16,17 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + // /proc/stat file line prefixes to gather stats on: var ( - interrupts = []byte("intr") - context_switches = []byte("ctxt") - processes_forked = []byte("processes") - disk_pages = []byte("page") - boot_time = []byte("btime") + interrupts = []byte("intr") + contextSwitches = []byte("ctxt") + processesForked = []byte("processes") + diskPages = []byte("page") + bootTime = []byte("btime") ) type Kernel struct { @@ -28,20 +34,17 @@ type Kernel struct { entropyStatFile string } -func (k *Kernel) Description() string { - return "Get kernel statistics from /proc/stat" +func (*Kernel) SampleConfig() string { + return sampleConfig } -func (k *Kernel) SampleConfig() string { return "" } - func (k *Kernel) Gather(acc telegraf.Accumulator) error { - data, err := k.getProcStat() if err != nil { return err } - entropyData, err := ioutil.ReadFile(k.entropyStatFile) + entropyData, err := os.ReadFile(k.entropyStatFile) if err != nil { return err } @@ -54,7 +57,7 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) - fields["entropy_avail"] = int64(entropyValue) + fields["entropy_avail"] = entropyValue dataFields := bytes.Fields(data) for i, field := range dataFields { @@ -64,26 +67,26 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - fields["interrupts"] = int64(m) - case bytes.Equal(field, context_switches): + fields["interrupts"] = m + case bytes.Equal(field, contextSwitches): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["context_switches"] = int64(m) - case bytes.Equal(field, processes_forked): + fields["context_switches"] = m + case bytes.Equal(field, processesForked): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["processes_forked"] = int64(m) - case bytes.Equal(field, boot_time): + fields["processes_forked"] = m + case bytes.Equal(field, bootTime): m, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err } - fields["boot_time"] = int64(m) - case bytes.Equal(field, disk_pages): + fields["boot_time"] = m + case bytes.Equal(field, diskPages): in, err := strconv.ParseInt(string(dataFields[i+1]), 10, 64) if err != nil { return err @@ -92,8 +95,8 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - fields["disk_pages_in"] = int64(in) - fields["disk_pages_out"] = int64(out) + fields["disk_pages_in"] = in + fields["disk_pages_out"] = out } } @@ -104,12 +107,12 @@ func (k *Kernel) Gather(acc telegraf.Accumulator) error { func (k *Kernel) getProcStat() ([]byte, error) { if _, err := os.Stat(k.statFile); os.IsNotExist(err) { - return nil, fmt.Errorf("kernel: %s does not exist!", k.statFile) + return nil, fmt.Errorf("kernel: %s does not exist", k.statFile) } else if err != nil { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel/kernel_notlinux.go b/plugins/inputs/kernel/kernel_notlinux.go index 05f6e55c453c5..838a97071a6d4 100644 --- a/plugins/inputs/kernel/kernel_notlinux.go +++ b/plugins/inputs/kernel/kernel_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package kernel diff --git a/plugins/inputs/kernel/kernel_test.go b/plugins/inputs/kernel/kernel_test.go index d356f43802798..f174017fad7b9 100644 --- a/plugins/inputs/kernel/kernel_test.go +++ 
b/plugins/inputs/kernel/kernel_test.go @@ -1,20 +1,20 @@ +//go:build linux // +build linux package kernel import ( - "io/ioutil" "os" "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFullProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Full)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Full)) + tmpfile := makeFakeStatFile(t, []byte(statFileFull)) + tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFileFull)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -24,8 +24,7 @@ func TestFullProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "boot_time": int64(1457505775), @@ -40,8 +39,8 @@ func TestFullProcFile(t *testing.T) { } func TestPartialProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Partial)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Partial)) + tmpfile := makeFakeStatFile(t, []byte(statFilePartial)) + tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFilePartial)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -51,8 +50,7 @@ func TestPartialProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "boot_time": int64(1457505775), @@ -66,8 +64,8 @@ func TestPartialProcFile(t *testing.T) { } func TestInvalidProcFile1(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid)) - tmpfile2 := makeFakeStatFile([]byte(entropyStatFile_Invalid)) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid)) + tmpfile2 := makeFakeStatFile(t, []byte(entropyStatFileInvalid)) defer os.Remove(tmpfile) defer os.Remove(tmpfile2) @@ -78,11 +76,12 @@ func TestInvalidProcFile1(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") } func TestInvalidProcFile2(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid2)) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid2)) defer os.Remove(tmpfile) k := Kernel{ @@ -91,12 +90,13 @@ func TestInvalidProcFile2(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "no such file") } func TestNoProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(statFile_Invalid2)) - os.Remove(tmpfile) + tmpfile := makeFakeStatFile(t, []byte(statFileInvalid2)) + require.NoError(t, os.Remove(tmpfile)) k := Kernel{ statFile: tmpfile, @@ -104,11 +104,11 @@ func TestNoProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "does not exist") + require.Error(t, err) + require.Contains(t, err.Error(), "does not exist") } -const statFile_Full = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileFull = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -122,7 +122,7 @@ swap 1 0 entropy_avail 1024 ` -const statFile_Partial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFilePartial = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -134,7 +134,7 @@ page 5741 1808 ` // missing btime measurement -const statFile_Invalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileInvalid = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -149,7 +149,7 @@ entropy_avail 1024 ` // missing second page measurement -const statFile_Invalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 +const statFileInvalid2 = `cpu 6796 252 5655 10444977 175 0 101 0 0 0 cpu0 6796 252 5655 10444977 175 0 101 0 0 0 intr 1472736 57 10 0 0 0 0 0 0 0 0 0 0 156 0 0 0 0 0 0 111551 42541 12356 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 2626618 @@ -161,24 +161,20 @@ softirq 1031662 0 649485 20946 111071 11620 0 1 0 994 237545 entropy_avail 1024 2048 ` -const entropyStatFile_Full = `1024` +const entropyStatFileFull = `1024` -const entropyStatFile_Partial = `1024` +const entropyStatFilePartial = `1024` -const entropyStatFile_Invalid = `` +const entropyStatFileInvalid = `` -func makeFakeStatFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_test") - if err != nil { - panic(err) - } +func makeFakeStatFile(t *testing.T, content []byte) string { + tmpfile, err := os.CreateTemp("", "kernel_test") + require.NoError(t, err) - if _, err := tmpfile.Write(content); err != nil { - panic(err) - } - if err := tmpfile.Close(); err != nil { - panic(err) - } + _, err = tmpfile.Write(content) + require.NoError(t, err) + + require.NoError(t, tmpfile.Close()) return tmpfile.Name() } diff --git a/plugins/inputs/kernel/sample.conf b/plugins/inputs/kernel/sample.conf new file mode 100644 index 
0000000000000..10de9f8a43b62 --- /dev/null +++ b/plugins/inputs/kernel/sample.conf @@ -0,0 +1,3 @@ +# Get kernel statistics from /proc/stat +[[inputs.kernel]] + # no configuration diff --git a/plugins/inputs/kernel_vmstat/README.md b/plugins/inputs/kernel_vmstat/README.md index 3ca6a097c1456..0da56783391e6 100644 --- a/plugins/inputs/kernel_vmstat/README.md +++ b/plugins/inputs/kernel_vmstat/README.md @@ -1,13 +1,15 @@ # Kernel VMStat Input Plugin -The kernel_vmstat plugin gathers virtual memory statistics -by reading /proc/vmstat. For a full list of available fields see the -/proc/vmstat section of the [proc man page](http://man7.org/linux/man-pages/man5/proc.5.html). -For a better idea of what each field represents, see the -[vmstat man page](http://linux.die.net/man/8/vmstat). +The kernel_vmstat plugin gathers virtual memory statistics by reading +/proc/vmstat. For a full list of available fields see the /proc/vmstat section +of the [proc man page][man-proc]. For a better idea of what each field +represents, see the [vmstat man page][man-vmstat]. +[man-proc]: http://man7.org/linux/man-pages/man5/proc.5.html -``` +[man-vmstat]: http://linux.die.net/man/8/vmstat + +```text /proc/vmstat kernel/system statistics. Common entries include (from http://www.linuxinsight.com/proc_vmstat.html): @@ -109,116 +111,116 @@ pgrotated 3781 nr_bounce 0 ``` -### Configuration: +## Configuration -```toml +```toml @sample.conf # Get kernel statistics from /proc/vmstat [[inputs.kernel_vmstat]] # no configuration ``` -### Measurements & Fields: +## Measurements & Fields - kernel_vmstat - - nr_free_pages (integer, `nr_free_pages`) - - nr_inactive_anon (integer, `nr_inactive_anon`) - - nr_active_anon (integer, `nr_active_anon`) - - nr_inactive_file (integer, `nr_inactive_file`) - - nr_active_file (integer, `nr_active_file`) - - nr_unevictable (integer, `nr_unevictable`) - - nr_mlock (integer, `nr_mlock`) - - nr_anon_pages (integer, `nr_anon_pages`) - - nr_mapped (integer, `nr_mapped`) - - nr_file_pages (integer, `nr_file_pages`) - - nr_dirty (integer, `nr_dirty`) - - nr_writeback (integer, `nr_writeback`) - - nr_slab_reclaimable (integer, `nr_slab_reclaimable`) - - nr_slab_unreclaimable (integer, `nr_slab_unreclaimable`) - - nr_page_table_pages (integer, `nr_page_table_pages`) - - nr_kernel_stack (integer, `nr_kernel_stack`) - - nr_unstable (integer, `nr_unstable`) - - nr_bounce (integer, `nr_bounce`) - - nr_vmscan_write (integer, `nr_vmscan_write`) - - nr_writeback_temp (integer, `nr_writeback_temp`) - - nr_isolated_anon (integer, `nr_isolated_anon`) - - nr_isolated_file (integer, `nr_isolated_file`) - - nr_shmem (integer, `nr_shmem`) - - numa_hit (integer, `numa_hit`) - - numa_miss (integer, `numa_miss`) - - numa_foreign (integer, `numa_foreign`) - - numa_interleave (integer, `numa_interleave`) - - numa_local (integer, `numa_local`) - - numa_other (integer, `numa_other`) - - nr_anon_transparent_hugepages (integer, `nr_anon_transparent_hugepages`) - - pgpgin (integer, `pgpgin`) - - pgpgout (integer, `pgpgout`) - - pswpin (integer, `pswpin`) - - pswpout (integer, `pswpout`) - - pgalloc_dma (integer, `pgalloc_dma`) - - pgalloc_dma32 (integer, `pgalloc_dma32`) - - pgalloc_normal (integer, `pgalloc_normal`) - - pgalloc_movable (integer, `pgalloc_movable`) - - pgfree (integer, `pgfree`) - - pgactivate (integer, `pgactivate`) - - pgdeactivate (integer, `pgdeactivate`) - - pgfault (integer, `pgfault`) - - pgmajfault (integer, `pgmajfault`) - - pgrefill_dma (integer, `pgrefill_dma`) - - pgrefill_dma32 (integer, 
`pgrefill_dma32`) - - pgrefill_normal (integer, `pgrefill_normal`) - - pgrefill_movable (integer, `pgrefill_movable`) - - pgsteal_dma (integer, `pgsteal_dma`) - - pgsteal_dma32 (integer, `pgsteal_dma32`) - - pgsteal_normal (integer, `pgsteal_normal`) - - pgsteal_movable (integer, `pgsteal_movable`) - - pgscan_kswapd_dma (integer, `pgscan_kswapd_dma`) - - pgscan_kswapd_dma32 (integer, `pgscan_kswapd_dma32`) - - pgscan_kswapd_normal (integer, `pgscan_kswapd_normal`) - - pgscan_kswapd_movable (integer, `pgscan_kswapd_movable`) - - pgscan_direct_dma (integer, `pgscan_direct_dma`) - - pgscan_direct_dma32 (integer, `pgscan_direct_dma32`) - - pgscan_direct_normal (integer, `pgscan_direct_normal`) - - pgscan_direct_movable (integer, `pgscan_direct_movable`) - - zone_reclaim_failed (integer, `zone_reclaim_failed`) - - pginodesteal (integer, `pginodesteal`) - - slabs_scanned (integer, `slabs_scanned`) - - kswapd_steal (integer, `kswapd_steal`) - - kswapd_inodesteal (integer, `kswapd_inodesteal`) - - kswapd_low_wmark_hit_quickly (integer, `kswapd_low_wmark_hit_quickly`) - - kswapd_high_wmark_hit_quickly (integer, `kswapd_high_wmark_hit_quickly`) - - kswapd_skip_congestion_wait (integer, `kswapd_skip_congestion_wait`) - - pageoutrun (integer, `pageoutrun`) - - allocstall (integer, `allocstall`) - - pgrotated (integer, `pgrotated`) - - compact_blocks_moved (integer, `compact_blocks_moved`) - - compact_pages_moved (integer, `compact_pages_moved`) - - compact_pagemigrate_failed (integer, `compact_pagemigrate_failed`) - - compact_stall (integer, `compact_stall`) - - compact_fail (integer, `compact_fail`) - - compact_success (integer, `compact_success`) - - htlb_buddy_alloc_success (integer, `htlb_buddy_alloc_success`) - - htlb_buddy_alloc_fail (integer, `htlb_buddy_alloc_fail`) - - unevictable_pgs_culled (integer, `unevictable_pgs_culled`) - - unevictable_pgs_scanned (integer, `unevictable_pgs_scanned`) - - unevictable_pgs_rescued (integer, `unevictable_pgs_rescued`) - - unevictable_pgs_mlocked (integer, `unevictable_pgs_mlocked`) - - unevictable_pgs_munlocked (integer, `unevictable_pgs_munlocked`) - - unevictable_pgs_cleared (integer, `unevictable_pgs_cleared`) - - unevictable_pgs_stranded (integer, `unevictable_pgs_stranded`) - - unevictable_pgs_mlockfreed (integer, `unevictable_pgs_mlockfreed`) - - thp_fault_alloc (integer, `thp_fault_alloc`) - - thp_fault_fallback (integer, `thp_fault_fallback`) - - thp_collapse_alloc (integer, `thp_collapse_alloc`) - - thp_collapse_alloc_failed (integer, `thp_collapse_alloc_failed`) - - thp_split (integer, `thp_split`) - -### Tags: + - nr_free_pages (integer, `nr_free_pages`) + - nr_inactive_anon (integer, `nr_inactive_anon`) + - nr_active_anon (integer, `nr_active_anon`) + - nr_inactive_file (integer, `nr_inactive_file`) + - nr_active_file (integer, `nr_active_file`) + - nr_unevictable (integer, `nr_unevictable`) + - nr_mlock (integer, `nr_mlock`) + - nr_anon_pages (integer, `nr_anon_pages`) + - nr_mapped (integer, `nr_mapped`) + - nr_file_pages (integer, `nr_file_pages`) + - nr_dirty (integer, `nr_dirty`) + - nr_writeback (integer, `nr_writeback`) + - nr_slab_reclaimable (integer, `nr_slab_reclaimable`) + - nr_slab_unreclaimable (integer, `nr_slab_unreclaimable`) + - nr_page_table_pages (integer, `nr_page_table_pages`) + - nr_kernel_stack (integer, `nr_kernel_stack`) + - nr_unstable (integer, `nr_unstable`) + - nr_bounce (integer, `nr_bounce`) + - nr_vmscan_write (integer, `nr_vmscan_write`) + - nr_writeback_temp (integer, `nr_writeback_temp`) + - nr_isolated_anon 
(integer, `nr_isolated_anon`) + - nr_isolated_file (integer, `nr_isolated_file`) + - nr_shmem (integer, `nr_shmem`) + - numa_hit (integer, `numa_hit`) + - numa_miss (integer, `numa_miss`) + - numa_foreign (integer, `numa_foreign`) + - numa_interleave (integer, `numa_interleave`) + - numa_local (integer, `numa_local`) + - numa_other (integer, `numa_other`) + - nr_anon_transparent_hugepages (integer, `nr_anon_transparent_hugepages`) + - pgpgin (integer, `pgpgin`) + - pgpgout (integer, `pgpgout`) + - pswpin (integer, `pswpin`) + - pswpout (integer, `pswpout`) + - pgalloc_dma (integer, `pgalloc_dma`) + - pgalloc_dma32 (integer, `pgalloc_dma32`) + - pgalloc_normal (integer, `pgalloc_normal`) + - pgalloc_movable (integer, `pgalloc_movable`) + - pgfree (integer, `pgfree`) + - pgactivate (integer, `pgactivate`) + - pgdeactivate (integer, `pgdeactivate`) + - pgfault (integer, `pgfault`) + - pgmajfault (integer, `pgmajfault`) + - pgrefill_dma (integer, `pgrefill_dma`) + - pgrefill_dma32 (integer, `pgrefill_dma32`) + - pgrefill_normal (integer, `pgrefill_normal`) + - pgrefill_movable (integer, `pgrefill_movable`) + - pgsteal_dma (integer, `pgsteal_dma`) + - pgsteal_dma32 (integer, `pgsteal_dma32`) + - pgsteal_normal (integer, `pgsteal_normal`) + - pgsteal_movable (integer, `pgsteal_movable`) + - pgscan_kswapd_dma (integer, `pgscan_kswapd_dma`) + - pgscan_kswapd_dma32 (integer, `pgscan_kswapd_dma32`) + - pgscan_kswapd_normal (integer, `pgscan_kswapd_normal`) + - pgscan_kswapd_movable (integer, `pgscan_kswapd_movable`) + - pgscan_direct_dma (integer, `pgscan_direct_dma`) + - pgscan_direct_dma32 (integer, `pgscan_direct_dma32`) + - pgscan_direct_normal (integer, `pgscan_direct_normal`) + - pgscan_direct_movable (integer, `pgscan_direct_movable`) + - zone_reclaim_failed (integer, `zone_reclaim_failed`) + - pginodesteal (integer, `pginodesteal`) + - slabs_scanned (integer, `slabs_scanned`) + - kswapd_steal (integer, `kswapd_steal`) + - kswapd_inodesteal (integer, `kswapd_inodesteal`) + - kswapd_low_wmark_hit_quickly (integer, `kswapd_low_wmark_hit_quickly`) + - kswapd_high_wmark_hit_quickly (integer, `kswapd_high_wmark_hit_quickly`) + - kswapd_skip_congestion_wait (integer, `kswapd_skip_congestion_wait`) + - pageoutrun (integer, `pageoutrun`) + - allocstall (integer, `allocstall`) + - pgrotated (integer, `pgrotated`) + - compact_blocks_moved (integer, `compact_blocks_moved`) + - compact_pages_moved (integer, `compact_pages_moved`) + - compact_pagemigrate_failed (integer, `compact_pagemigrate_failed`) + - compact_stall (integer, `compact_stall`) + - compact_fail (integer, `compact_fail`) + - compact_success (integer, `compact_success`) + - htlb_buddy_alloc_success (integer, `htlb_buddy_alloc_success`) + - htlb_buddy_alloc_fail (integer, `htlb_buddy_alloc_fail`) + - unevictable_pgs_culled (integer, `unevictable_pgs_culled`) + - unevictable_pgs_scanned (integer, `unevictable_pgs_scanned`) + - unevictable_pgs_rescued (integer, `unevictable_pgs_rescued`) + - unevictable_pgs_mlocked (integer, `unevictable_pgs_mlocked`) + - unevictable_pgs_munlocked (integer, `unevictable_pgs_munlocked`) + - unevictable_pgs_cleared (integer, `unevictable_pgs_cleared`) + - unevictable_pgs_stranded (integer, `unevictable_pgs_stranded`) + - unevictable_pgs_mlockfreed (integer, `unevictable_pgs_mlockfreed`) + - thp_fault_alloc (integer, `thp_fault_alloc`) + - thp_fault_fallback (integer, `thp_fault_fallback`) + - thp_collapse_alloc (integer, `thp_collapse_alloc`) + - thp_collapse_alloc_failed (integer, `thp_collapse_alloc_failed`) + 
- thp_split (integer, `thp_split`) + +## Tags None -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter kernel_vmstat --test * Plugin: kernel_vmstat, Collection 1 > kernel_vmstat allocstall=81496i,compact_blocks_moved=238196i,compact_fail=135220i,compact_pagemigrate_failed=0i,compact_pages_moved=6370588i,compact_stall=142092i,compact_success=6872i,htlb_buddy_alloc_fail=0i,htlb_buddy_alloc_success=0i,kswapd_high_wmark_hit_quickly=25439i,kswapd_inodesteal=29770874i,kswapd_low_wmark_hit_quickly=8756i,kswapd_skip_congestion_wait=0i,kswapd_steal=291534428i,nr_active_anon=2515657i,nr_active_file=2244914i,nr_anon_pages=1358675i,nr_anon_transparent_hugepages=2034i,nr_bounce=0i,nr_dirty=5690i,nr_file_pages=5153546i,nr_free_pages=78730i,nr_inactive_anon=426259i,nr_inactive_file=2366791i,nr_isolated_anon=0i,nr_isolated_file=0i,nr_kernel_stack=579i,nr_mapped=558821i,nr_mlock=0i,nr_page_table_pages=11115i,nr_shmem=541689i,nr_slab_reclaimable=459806i,nr_slab_unreclaimable=47859i,nr_unevictable=0i,nr_unstable=0i,nr_vmscan_write=6206i,nr_writeback=0i,nr_writeback_temp=0i,numa_foreign=0i,numa_hit=5113399878i,numa_interleave=35793i,numa_local=5113399878i,numa_miss=0i,numa_other=0i,pageoutrun=505006i,pgactivate=375664931i,pgalloc_dma=0i,pgalloc_dma32=122480220i,pgalloc_movable=0i,pgalloc_normal=5233176719i,pgdeactivate=122735906i,pgfault=8699921410i,pgfree=5359765021i,pginodesteal=9188431i,pgmajfault=122210i,pgpgin=219717626i,pgpgout=3495885510i,pgrefill_dma=0i,pgrefill_dma32=1180010i,pgrefill_movable=0i,pgrefill_normal=119866676i,pgrotated=60620i,pgscan_direct_dma=0i,pgscan_direct_dma32=12256i,pgscan_direct_movable=0i,pgscan_direct_normal=31501600i,pgscan_kswapd_dma=0i,pgscan_kswapd_dma32=4480608i,pgscan_kswapd_movable=0i,pgscan_kswapd_normal=287857984i,pgsteal_dma=0i,pgsteal_dma32=4466436i,pgsteal_movable=0i,pgsteal_normal=318463755i,pswpin=2092i,pswpout=6206i,slabs_scanned=93775616i,thp_collapse_alloc=24857i,thp_collapse_alloc_failed=102214i,thp_fault_alloc=346219i,thp_fault_fallback=895453i,thp_split=9817i,unevictable_pgs_cleared=0i,unevictable_pgs_culled=1531i,unevictable_pgs_mlocked=6988i,unevictable_pgs_mlockfreed=0i,unevictable_pgs_munlocked=6988i,unevictable_pgs_rescued=5426i,unevictable_pgs_scanned=0i,unevictable_pgs_stranded=0i,zone_reclaim_failed=0i 1459455200071462843 diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go index ffc56d97d154e..0c3f44695f8c5 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -1,11 +1,13 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux // +build linux package kernel_vmstat import ( "bytes" + _ "embed" "fmt" - "io/ioutil" "os" "strconv" @@ -13,16 +15,16 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type KernelVmstat struct { statFile string } -func (k *KernelVmstat) Description() string { - return "Get kernel statistics from /proc/vmstat" -} - -func (k *KernelVmstat) SampleConfig() string { - return "" +func (*KernelVmstat) SampleConfig() string { + return sampleConfig } func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { @@ -35,7 +37,6 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { dataFields := bytes.Fields(data) for i, field := range dataFields { - // dataFields is an array of {"stat1_name", "stat1_value", "stat2_name", // "stat2_value", ...} // We only want the even number index as that contain the stat name. @@ -46,7 +47,7 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { return err } - fields[string(field)] = int64(m) + fields[string(field)] = m } } @@ -56,12 +57,12 @@ func (k *KernelVmstat) Gather(acc telegraf.Accumulator) error { func (k *KernelVmstat) getProcVmstat() ([]byte, error) { if _, err := os.Stat(k.statFile); os.IsNotExist(err) { - return nil, fmt.Errorf("kernel_vmstat: %s does not exist!", k.statFile) + return nil, fmt.Errorf("kernel_vmstat: %s does not exist", k.statFile) } else if err != nil { return nil, err } - data, err := ioutil.ReadFile(k.statFile) + data, err := os.ReadFile(k.statFile) if err != nil { return nil, err } diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go index 11a5d2e553dff..d687b13a9e72d 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package kernel_vmstat diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index bba615a743e54..6590e3febd19c 100644 --- a/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -1,19 +1,19 @@ +//go:build linux // +build linux package kernel_vmstat import ( - "io/ioutil" "os" "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFullVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Full)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileFull)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -21,8 +21,7 @@ func TestFullVmStatProcFile(t *testing.T) { } acc := testutil.Accumulator{} - err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, k.Gather(&acc)) fields := map[string]interface{}{ "nr_free_pages": int64(78730), @@ -121,7 +120,7 @@ func TestFullVmStatProcFile(t *testing.T) { } func TestPartialVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Partial)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFilePartial)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -130,7 +129,7 @@ func TestPartialVmStatProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) fields := map[string]interface{}{ "unevictable_pgs_culled": int64(1531), @@ -151,7 +150,7 @@ func TestPartialVmStatProcFile(t *testing.T) { } func TestInvalidVmStatProcFile1(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileInvalid)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -160,12 +159,13 @@ func TestInvalidVmStatProcFile1(t 
*testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid syntax") } func TestNoVmStatProcFile(t *testing.T) { - tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) - os.Remove(tmpfile) + tmpfile := makeFakeVMStatFile(t, []byte(vmStatFileInvalid)) + require.NoError(t, os.Remove(tmpfile)) k := KernelVmstat{ statFile: tmpfile, @@ -173,11 +173,11 @@ func TestNoVmStatProcFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "does not exist") + require.Error(t, err) + require.Contains(t, err.Error(), "does not exist") } -const vmStatFile_Full = `nr_free_pages 78730 +const vmStatFileFull = `nr_free_pages 78730 nr_inactive_anon 426259 nr_active_anon 2515657 nr_inactive_file 2366791 @@ -269,7 +269,7 @@ thp_collapse_alloc 24857 thp_collapse_alloc_failed 102214 thp_split 9817` -const vmStatFile_Partial = `unevictable_pgs_culled 1531 +const vmStatFilePartial = `unevictable_pgs_culled 1531 unevictable_pgs_scanned 0 unevictable_pgs_rescued 5426 unevictable_pgs_mlocked 6988 @@ -284,7 +284,7 @@ thp_collapse_alloc_failed 102214 thp_split 9817` // invalid thp_split measurement -const vmStatFile_Invalid = `unevictable_pgs_culled 1531 +const vmStatFileInvalid = `unevictable_pgs_culled 1531 unevictable_pgs_scanned 0 unevictable_pgs_rescued 5426 unevictable_pgs_mlocked 6988 @@ -298,18 +298,14 @@ thp_collapse_alloc 24857 thp_collapse_alloc_failed 102214 thp_split abcd` -func makeFakeVmStatFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "kernel_vmstat_test") - if err != nil { - panic(err) - } +func makeFakeVMStatFile(t *testing.T, content []byte) string { + tmpfile, err := os.CreateTemp("", "kernel_vmstat_test") + require.NoError(t, err) - if _, err := tmpfile.Write(content); err != nil { - panic(err) - } - if err := tmpfile.Close(); err != nil { - panic(err) - } + _, err = tmpfile.Write(content) + require.NoError(t, err) + + require.NoError(t, tmpfile.Close()) return tmpfile.Name() } diff --git a/plugins/inputs/kernel_vmstat/sample.conf b/plugins/inputs/kernel_vmstat/sample.conf new file mode 100644 index 0000000000000..fc5fd633db84f --- /dev/null +++ b/plugins/inputs/kernel_vmstat/sample.conf @@ -0,0 +1,3 @@ +# Get kernel statistics from /proc/vmstat +[[inputs.kernel_vmstat]] + # no configuration diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md index 73bf4a2981d63..9c09850005483 100644 --- a/plugins/inputs/kibana/README.md +++ b/plugins/inputs/kibana/README.md @@ -7,9 +7,10 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. [Kibana]: https://www.elastic.co/ -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read status information from one or more Kibana servers [[inputs.kibana]] ## Specify a list of one or more Kibana servers servers = ["http://localhost:5601"] @@ -29,7 +30,7 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. # insecure_skip_verify = false ``` -### Metrics +## Metrics - kibana - tags: @@ -42,14 +43,37 @@ The `kibana` plugin queries the [Kibana][] API to obtain the service status. 
- heap_total_bytes (integer) - heap_max_bytes (integer; deprecated in 1.13.3: use `heap_total_bytes` field) - heap_used_bytes (integer) + - heap_size_limit (integer) - uptime_ms (integer) - response_time_avg_ms (float) - response_time_max_ms (integer) - concurrent_connections (integer) - requests_per_sec (float) -### Example Output +## Example Output -``` +```shell kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000 ``` + +## Run example environment + +Requires the following tools: + +- [Docker](https://docs.docker.com/get-docker/) +- [Docker Compose](https://docs.docker.com/compose/install/) + +From the root of this project execute the following script: +`./plugins/inputs/kibana/test_environment/run_test_env.sh` + +This will build the latest Telegraf and then start up Kibana and Elasticsearch; +Telegraf will then begin monitoring Kibana's status and write its results to +the file `/tmp/metrics.out` in the Telegraf container. + +Then you can attach to the Telegraf container to inspect the file +`/tmp/metrics.out` and verify that the status is being reported. + +The Visual Studio Code [Remote - Containers][remote] extension provides an easy +user interface to attach to the running container. + +[remote]: https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go index 98b81a91f52b9..c859d0df41405 100644 --- a/plugins/inputs/kibana/kibana.go +++ b/plugins/inputs/kibana/kibana.go @@ -1,10 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package kibana import ( + _ "embed" "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "strconv" "strings" @@ -12,11 +13,15 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + const statusPath = "/api/status" type kibanaStatus struct { @@ -78,33 +83,15 @@ type memory struct { type heap struct { TotalInBytes int64 `json:"total_in_bytes"` UsedInBytes int64 `json:"used_in_bytes"` + SizeLimit int64 `json:"size_limit"` } -const sampleConfig = ` - ## Specify a list of one or more Kibana servers - servers = ["http://localhost:5601"] - - ## Timeout for HTTP requests - timeout = "5s" - - ## HTTP Basic Auth credentials - # username = "username" - # password = "pa$$word" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - type Kibana struct { Local bool Servers []string Username string Password string - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client @@ -112,7 +99,7 @@ type Kibana struct { func NewKibana() *Kibana { return &Kibana{ - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -129,19 +116,13 @@ func mapHealthStatusToCode(s string) int { return 0 } -// SampleConfig returns sample configuration for this plugin. -func (k *Kibana) SampleConfig() string { +func (*Kibana) SampleConfig() string { return sampleConfig } -// Description returns the plugin description. -func (k *Kibana) Description() string { - return "Read status information from one or more Kibana servers" -} - func (k *Kibana) Gather(acc telegraf.Accumulator) error { if k.client == nil { - client, err := k.createHttpClient() + client, err := k.createHTTPClient() if err != nil { return err @@ -166,7 +147,7 @@ func (k *Kibana) Gather(acc telegraf.Accumulator) error { return nil } -func (k *Kibana) createHttpClient() (*http.Client, error) { +func (k *Kibana) createHTTPClient() (*http.Client, error) { tlsCfg, err := k.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -176,18 +157,17 @@ func (k *Kibana) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: k.Timeout.Duration, + Timeout: time.Duration(k.Timeout), } return client, nil } -func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) error { - +func (k *Kibana) gatherKibanaStatus(baseURL string, acc telegraf.Accumulator) error { kibanaStatus := &kibanaStatus{} - url := baseUrl + statusPath + url := baseURL + statusPath - host, err := k.gatherJsonData(url, kibanaStatus) + host, err := k.gatherJSONData(url, kibanaStatus) if err != nil { return err } @@ -224,20 +204,19 @@ func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) er fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.UsedInBytes + fields["heap_size_limit"] = kibanaStatus.Metrics.Process.Memory.Heap.SizeLimit } else { fields["uptime_ms"] = int64(kibanaStatus.Metrics.UptimeInMillis) fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes - } - acc.AddFields("kibana", fields, tags) return nil } -func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err error) { +func (k *Kibana) gatherJSONData(url string, 
v interface{}) (host string, err error) { request, err := http.NewRequest("GET", url, nil) if err != nil { return "", fmt.Errorf("unable to create new request '%s': %v", url, err) @@ -256,7 +235,7 @@ func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err err if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) } diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go index 3dfed9edfa9a2..565d9b1c79416 100644 --- a/plugins/inputs/kibana/kibana_test.go +++ b/plugins/inputs/kibana/kibana_test.go @@ -1,7 +1,7 @@ package kibana import ( - "io/ioutil" + "io" "net/http" "strings" "testing" @@ -46,7 +46,7 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { StatusCode: t.statusCode, } res.Header.Set("Content-Type", "application/json") - res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + res.Body = io.NopCloser(strings.NewReader(t.body)) return res, nil } diff --git a/plugins/inputs/kibana/sample.conf b/plugins/inputs/kibana/sample.conf new file mode 100644 index 0000000000000..577cde8814d59 --- /dev/null +++ b/plugins/inputs/kibana/sample.conf @@ -0,0 +1,18 @@ +# Read status information from one or more Kibana servers +[[inputs.kibana]] + ## Specify a list of one or more Kibana servers + servers = ["http://localhost:5601"] + + ## Timeout for HTTP requests + timeout = "5s" + + ## HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf b/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf new file mode 100644 index 0000000000000..c67f346b5c170 --- /dev/null +++ b/plugins/inputs/kibana/test_environment/basic_kibana_telegraf.conf @@ -0,0 +1,75 @@ +# Telegraf Configuration for basic Kibana example + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "10s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. 
This is primarily to avoid
+  ## large write spikes for users running a large number of telegraf instances.
+  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
+  flush_jitter = "0s"
+
+  ## By default or when set to "0s", precision will be set to the same
+  ## timestamp order as the collection interval, with the maximum being 1s.
+  ## ie, when interval = "10s", precision will be "1s"
+  ## when interval = "250ms", precision will be "1ms"
+  ## Precision will NOT be used for service inputs. It is up to each individual
+  ## service input to set the timestamp at the appropriate precision.
+  ## Valid time units are "ns", "us" (or "µs"), "ms", "s".
+  precision = ""
+
+  ## Override default hostname, if empty use os.Hostname()
+  hostname = ""
+  ## If set to true, do not set the "host" tag in the telegraf agent.
+  omit_hostname = false
+
+
+###############################################################################
+#                            OUTPUT PLUGINS                                   #
+###############################################################################
+
+# Send telegraf metrics to file(s)
+[[outputs.file]]
+  ## Files to write to, "stdout" is a specially handled file.
+  files = ["stdout", "/tmp/metrics.out"]
+
+  ## Data format to output.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
+  data_format = "influx"
+
+###############################################################################
+#                            INPUT PLUGINS                                    #
+###############################################################################
+
+# Read status information from one or more Kibana servers
+[[inputs.kibana]]
+  ## Specify a list of one or more Kibana servers
+  servers = ["http://kib01:5601"]
+
+  ## Timeout for HTTP requests
+  timeout = "5s"
diff --git a/plugins/inputs/kibana/test_environment/docker-compose.yml b/plugins/inputs/kibana/test_environment/docker-compose.yml
new file mode 100644
index 0000000000000..8aa6db00df009
--- /dev/null
+++ b/plugins/inputs/kibana/test_environment/docker-compose.yml
@@ -0,0 +1,48 @@
+## Reference: https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html#docker-cli-run-dev-mode
+version: '2.2'
+services:
+  es01:
+    image: docker.elastic.co/elasticsearch/elasticsearch:7.10.1
+    container_name: es01
+    environment:
+      - node.name=es01
+      - cluster.name=es-docker-cluster
+      - cluster.initial_master_nodes=es01
+      - bootstrap.memory_lock=true
+      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+    volumes:
+      - data01:/usr/share/elasticsearch/data
+    ports:
+      - 9200:9200
+    networks:
+      - elastic
+
+  kib01:
+    image: docker.elastic.co/kibana/kibana:7.10.1
+    container_name: kib01
+    ports:
+      - 5601:5601
+    environment:
+      ELASTICSEARCH_URL: http://es01:9200
+      ELASTICSEARCH_HOSTS: http://es01:9200
+    networks:
+      - elastic
+
+  telegraf:
+    image: local_telegraf
+    volumes:
+      - ./basic_kibana_telegraf.conf:/etc/telegraf/telegraf.conf:ro
+    networks:
+      - elastic
+
+volumes:
+  data01:
+    driver: local
+
+networks:
+  elastic:
+    driver: bridge
diff --git a/plugins/inputs/kibana/test_environment/run_test_env.sh b/plugins/inputs/kibana/test_environment/run_test_env.sh
new file mode 100755
index 0000000000000..8ea741ac3f98e
--- /dev/null
+++ b/plugins/inputs/kibana/test_environment/run_test_env.sh
@@ -0,0 +1,3 @@
+docker build -t local_telegraf -f scripts/alpine.docker .
+
+docker-compose -f plugins/inputs/kibana/test_environment/docker-compose.yml up
diff --git a/plugins/inputs/kibana/testdata_test6_5.go b/plugins/inputs/kibana/testdata_test6_5.go
index a000229c14f73..51460301a3779 100644
--- a/plugins/inputs/kibana/testdata_test6_5.go
+++ b/plugins/inputs/kibana/testdata_test6_5.go
@@ -219,6 +219,7 @@ var kibanaStatusExpected6_5 = map[string]interface{}{
	"heap_total_bytes": int64(149954560),
	"heap_max_bytes": int64(149954560),
	"heap_used_bytes": int64(126274392),
+	"heap_size_limit": int64(1501560832),
	"uptime_ms": int64(2173595337),
	"response_time_avg_ms": float64(12.5),
	"response_time_max_ms": int64(123),
diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md
index 7896557ac6cf5..d85f3653d9cc4 100644
--- a/plugins/inputs/kinesis_consumer/README.md
+++ b/plugins/inputs/kinesis_consumer/README.md
@@ -3,26 +3,29 @@
The [Kinesis][kinesis] consumer plugin reads from a Kinesis data stream
and creates metrics using one of the supported [input data formats][].

+## Configuration
-### Configuration
-
-```toml
+```toml @sample.conf
+# Configuration for the AWS Kinesis input.
[[inputs.kinesis_consumer]]
  ## Amazon REGION of kinesis endpoint.
  region = "ap-southeast-2"

  ## Amazon Credentials
  ## Credentials are loaded in the following order
-  ## 1) Assumed credentials via STS if role_arn is specified
-  ## 2) explicit credentials from 'access_key' and 'secret_key'
-  ## 3) shared profile from 'profile'
-  ## 4) environment variables
-  ## 5) shared credentials file
-  ## 6) EC2 Instance Profile
+  ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
+  ## 2) Assumed credentials via STS if role_arn is specified
+  ## 3) explicit credentials from 'access_key' and 'secret_key'
+  ## 4) shared profile from 'profile'
+  ## 5) environment variables
+  ## 6) shared credentials file
+  ## 7) EC2 Instance Profile
  # access_key = ""
  # secret_key = ""
  # token = ""
  # role_arn = ""
+  # web_identity_token_file = ""
+  # role_session_name = ""
  # profile = ""
  # shared_credential_file = ""
@@ -54,6 +57,15 @@ and creates metrics using one of the supported [input data formats][].
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"

+  ##
+  ## The content encoding of the data from kinesis
+  ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip"
+  ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws
+  ## also base64 encodes the zip byte data before pushing to the stream. The base64 decoding
+  ## is done automatically by the golang sdk, as data is read from kinesis)
+  ##
+  # content_encoding = "identity"
+
  ## Optional
  ## Configuration for a dynamodb checkpoint
  [inputs.kinesis_consumer.checkpoint_dynamodb]
@@ -62,29 +74,28 @@ and creates metrics using one of the supported [input data formats][].
    table_name = "default"
```

-
-#### Required AWS IAM permissions
+### Required AWS IAM permissions

Kinesis:
- - DescribeStream
- - GetRecords
- - GetShardIterator
+
+- DescribeStream
+- GetRecords
+- GetShardIterator

DynamoDB:
- - GetItem
- - PutItem
+- GetItem
+- PutItem

-#### DynamoDB Checkpoint
+### DynamoDB Checkpoint

-The DynamoDB checkpoint stores the last processed record in a DynamoDB. To leverage
-this functionality, create a table with the following string type keys:
+The DynamoDB checkpoint stores the last processed record in a DynamoDB table.
To +leverage this functionality, create a table with the following string type keys: -``` +```shell Partition key: namespace Sort key: shard_id ``` - [kinesis]: https://aws.amazon.com/kinesis/ [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go index 6a3b1c8301a48..e0eba6546bb6e 100644 --- a/plugins/inputs/kinesis_consumer/kinesis_consumer.go +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -1,17 +1,23 @@ +//go:generate ../../../tools/readme_config_includer/generator package kinesis_consumer import ( + "bytes" + "compress/gzip" + "compress/zlib" "context" + _ "embed" "fmt" + "io" "math/big" "strings" "sync" "time" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/kinesis" consumer "github.com/harlow/kinesis-consumer" - "github.com/harlow/kinesis-consumer/checkpoint/ddb" + "github.com/harlow/kinesis-consumer/store/ddb" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/config/aws" @@ -19,6 +25,10 @@ import ( "github.com/influxdata/telegraf/plugins/parsers" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type ( DynamoDB struct { AppName string `toml:"app_name"` @@ -26,36 +36,32 @@ type ( } KinesisConsumer struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` - EndpointURL string `toml:"endpoint_url"` StreamName string `toml:"streamname"` ShardIteratorType string `toml:"shard_iterator_type"` DynamoDB *DynamoDB `toml:"checkpoint_dynamodb"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + ContentEncoding string `toml:"content_encoding"` Log telegraf.Logger cons *consumer.Consumer parser parsers.Parser cancel context.CancelFunc - ctx context.Context acc telegraf.TrackingAccumulator sem chan struct{} - checkpoint consumer.Checkpoint + checkpoint consumer.Store checkpoints map[string]checkpoint records map[telegraf.TrackingID]string checkpointTex sync.Mutex recordsTex sync.Mutex wg sync.WaitGroup + processContentEncodingFunc processContent + lastSeqNum *big.Int + + internalaws.CredentialConfig } checkpoint struct { @@ -68,106 +74,33 @@ const ( defaultMaxUndeliveredMessages = 1000 ) +type processContent func([]byte) ([]byte, error) + // this is the largest sequence number allowed - https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SequenceNumberRange.html var maxSeq = strToBint(strings.Repeat("9", 129)) -var sampleConfig = ` - ## Amazon REGION of kinesis endpoint. 
- region = "ap-southeast-2" - - ## Amazon Credentials - ## Credentials are loaded in the following order - ## 1) Assumed credentials via STS if role_arn is specified - ## 2) explicit credentials from 'access_key' and 'secret_key' - ## 3) shared profile from 'profile' - ## 4) environment variables - ## 5) shared credentials file - ## 6) EC2 Instance Profile - # access_key = "" - # secret_key = "" - # token = "" - # role_arn = "" - # profile = "" - # shared_credential_file = "" - - ## Endpoint to make request against, the correct endpoint is automatically - ## determined and this option should only be set if you wish to override the - ## default. - ## ex: endpoint_url = "http://localhost:8000" - # endpoint_url = "" - - ## Kinesis StreamName must exist prior to starting telegraf. - streamname = "StreamName" - - ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) - # shard_iterator_type = "TRIM_HORIZON" - - ## Maximum messages to read from the broker that have not been written by an - ## output. For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. - ## - ## For example, if each message from the queue contains 10 metrics and the - ## output metric_batch_size is 1000, setting this to 100 will ensure that a - ## full batch is collected and the write is triggered immediately without - ## waiting until the next flush_interval. - # max_undelivered_messages = 1000 - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" - - ## Optional - ## Configuration for a dynamodb checkpoint - [inputs.kinesis_consumer.checkpoint_dynamodb] - ## unique name for this consumer - app_name = "default" - table_name = "default" -` - -func (k *KinesisConsumer) SampleConfig() string { +func (*KinesisConsumer) SampleConfig() string { return sampleConfig } -func (k *KinesisConsumer) Description() string { - return "Configuration for the AWS Kinesis input." 
-}
-
func (k *KinesisConsumer) SetParser(parser parsers.Parser) {
	k.parser = parser
}

func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error {
-	credentialConfig := &internalaws.CredentialConfig{
-		Region: k.Region,
-		AccessKey: k.AccessKey,
-		SecretKey: k.SecretKey,
-		RoleARN: k.RoleARN,
-		Profile: k.Profile,
-		Filename: k.Filename,
-		Token: k.Token,
-		EndpointURL: k.EndpointURL,
+	cfg, err := k.CredentialConfig.Credentials()
+	if err != nil {
+		return err
	}
-	configProvider := credentialConfig.Credentials()
-	client := kinesis.New(configProvider)
+	client := kinesis.NewFromConfig(cfg)

-	k.checkpoint = &noopCheckpoint{}
+	k.checkpoint = &noopStore{}
	if k.DynamoDB != nil {
		var err error
		k.checkpoint, err = ddb.New(
			k.DynamoDB.AppName,
			k.DynamoDB.TableName,
-			ddb.WithDynamoClient(dynamodb.New((&internalaws.CredentialConfig{
-				Region: k.Region,
-				AccessKey: k.AccessKey,
-				SecretKey: k.SecretKey,
-				RoleARN: k.RoleARN,
-				Profile: k.Profile,
-				Filename: k.Filename,
-				Token: k.Token,
-				EndpointURL: k.EndpointURL,
-			}).Credentials())),
+			ddb.WithDynamoClient(dynamodb.NewFromConfig(cfg)),
			ddb.WithMaxInterval(time.Second*10),
		)
		if err != nil {
@@ -179,7 +112,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error {
		k.StreamName,
		consumer.WithClient(client),
		consumer.WithShardIteratorType(k.ShardIteratorType),
-		consumer.WithCheckpoint(k),
+		consumer.WithStore(k),
	)
	if err != nil {
		return err
@@ -204,20 +137,20 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error {
	k.wg.Add(1)
	go func() {
		defer k.wg.Done()
-		err := k.cons.Scan(ctx, func(r *consumer.Record) consumer.ScanStatus {
+		err := k.cons.Scan(ctx, func(r *consumer.Record) error {
			select {
			case <-ctx.Done():
-				return consumer.ScanStatus{Error: ctx.Err()}
+				return ctx.Err()
			case k.sem <- struct{}{}:
				break
			}
			err := k.onMessage(k.acc, r)
			if err != nil {
-				k.sem <- struct{}{}
-				return consumer.ScanStatus{Error: err}
+				<-k.sem
+				k.Log.Errorf("Scan parser error: %s", err.Error())
			}
-			return consumer.ScanStatus{}
+			return nil
		})
		if err != nil {
			k.cancel()
@@ -239,7 +172,11 @@ func (k *KinesisConsumer) Start(ac telegraf.Accumulator) error {
}

func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, r *consumer.Record) error {
-	metrics, err := k.parser.Parse(r.Data)
+	data, err := k.processContentEncodingFunc(r.Data)
+	if err != nil {
+		return err
+	}
+	metrics, err := k.parser.Parse(data)
	if err != nil {
		return err
	}
@@ -284,7 +221,9 @@ func (k *KinesisConsumer) onDelivery(ctx context.Context) {
			}
			k.lastSeqNum = strToBint(sequenceNum)
-			k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum)
+			if err := k.checkpoint.SetCheckpoint(chk.streamName, chk.shardID, sequenceNum); err != nil {
+				k.Log.Debugf("Setting checkpoint failed: %v", err)
+			}
		} else {
			k.Log.Debug("Metric group failed to process")
		}
@@ -316,13 +255,13 @@ func (k *KinesisConsumer) Gather(acc telegraf.Accumulator) error {
	return nil
}

-// Get wraps the checkpoint's Get function (called by consumer library)
-func (k *KinesisConsumer) Get(streamName, shardID string) (string, error) {
-	return k.checkpoint.Get(streamName, shardID)
+// GetCheckpoint wraps the checkpoint's GetCheckpoint function (called by consumer library)
+func (k *KinesisConsumer) GetCheckpoint(streamName, shardID string) (string, error) {
+	return k.checkpoint.GetCheckpoint(streamName, shardID)
}

-// Set wraps the checkpoint's Set function (called by consumer library)
-func (k *KinesisConsumer) Set(streamName, shardID, sequenceNumber string) error {
+// SetCheckpoint wraps the checkpoint's SetCheckpoint function (called by consumer library)
+func (k *KinesisConsumer) SetCheckpoint(streamName, shardID, sequenceNumber string) error {
	if sequenceNumber == "" {
		return fmt.Errorf("sequence number should not be empty")
	}
@@ -334,10 +273,50 @@ func (k *KinesisConsumer) Set(streamName, shardID, sequenceNumber string) error
	return nil
}

-type noopCheckpoint struct{}
+func processGzip(data []byte) ([]byte, error) {
+	zipData, err := gzip.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	defer zipData.Close()
+	return io.ReadAll(zipData)
+}
+
+func processZlib(data []byte) ([]byte, error) {
+	zlibData, err := zlib.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	defer zlibData.Close()
+	return io.ReadAll(zlibData)
+}
+
+func processNoOp(data []byte) ([]byte, error) {
+	return data, nil
+}
+
+func (k *KinesisConsumer) configureProcessContentEncodingFunc() error {
+	switch k.ContentEncoding {
+	case "gzip":
+		k.processContentEncodingFunc = processGzip
+	case "zlib":
+		k.processContentEncodingFunc = processZlib
+	case "none", "identity", "":
+		k.processContentEncodingFunc = processNoOp
+	default:
+		return fmt.Errorf("unknown content encoding %q", k.ContentEncoding)
+	}
+	return nil
+}
+
+func (k *KinesisConsumer) Init() error {
+	return k.configureProcessContentEncodingFunc()
+}
+
+type noopStore struct{}

-func (n noopCheckpoint) Set(string, string, string) error { return nil }
-func (n noopCheckpoint) Get(string, string) (string, error) { return "", nil }
+func (n noopStore) SetCheckpoint(string, string, string) error { return nil }
+func (n noopStore) GetCheckpoint(string, string) (string, error) { return "", nil }

func init() {
	negOne, _ = new(big.Int).SetString("-1", 10)
@@ -347,6 +326,7 @@ func init() {
			ShardIteratorType: "TRIM_HORIZON",
			MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
			lastSeqNum: maxSeq,
+			ContentEncoding: "identity",
		}
	})
}
[]byte(`{"messageType":"CONTROL_MESSAGE","owner":"CloudwatchLogs","logGroup":"","logStream":"", +"subscriptionFilters":[],"logEvents":[ + {"id":"","timestamp":1510254469274,"message":"{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"},"}, + {"id":"","timestamp":1510254469274,"message":"{\"bob\":\"CWL CONTROL MESSAGE: Checking health of destination Firehose.\", \"timestamp\":\"2021-02-22T22:15:26.794854Z\"}"} +]}`) + parser := &json.Parser{ + MetricName: "json_test", + Query: "logEvents", + StringFields: []string{"message"}, + } + require.NoError(t, parser.Init()) + + type fields struct { + ContentEncoding string + parser parsers.Parser + records map[telegraf.TrackingID]string + } + type args struct { + r *consumer.Record + } + type expected struct { + numberOfMetrics int + messageContains string + } + tests := []struct { + name string + fields fields + args args + wantErr bool + expected expected + }{ + { + name: "test no compression", + fields: fields{ + ContentEncoding: "none", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test no compression via empty string for ContentEncoding", + fields: fields{ + ContentEncoding: "", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test no compression via identity ContentEncoding", + fields: fields{ + ContentEncoding: "identity", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test no compression via no ContentEncoding", + fields: fields{ + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{ + Record: types.Record{ + Data: notZippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 2, + }, + }, + { + name: "test gzip compression", + fields: fields{ + ContentEncoding: "gzip", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{ + Record: types.Record{ + Data: gzippedBytes, + SequenceNumber: aws.String("anything"), + }, + }, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 1, + }, + }, + { + name: "test zlib compression", + fields: fields{ + ContentEncoding: "zlib", + parser: parser, + records: make(map[telegraf.TrackingID]string), + }, + args: args{ + r: &consumer.Record{ + Record: types.Record{ + Data: zlibBytpes, + SequenceNumber: aws.String("anything"), + }, + }, + }, + wantErr: false, + expected: expected{ + messageContains: "bob", + numberOfMetrics: 1, + }, + }, + } + + k := &KinesisConsumer{ + ContentEncoding: "notsupported", + } + err := k.Init() + require.NotNil(t, err) + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + k := &KinesisConsumer{ + ContentEncoding: tt.fields.ContentEncoding, + parser: tt.fields.parser, + records: tt.fields.records, + } + err := k.Init() + require.Nil(t, err) + + acc := testutil.Accumulator{} + if err := k.onMessage(acc.WithTracking(tt.expected.numberOfMetrics), tt.args.r); (err != nil) != tt.wantErr { + t.Errorf("onMessage() error = %v, wantErr %v", err, tt.wantErr) + } + + require.Equal(t, tt.expected.numberOfMetrics, len(acc.Metrics)) + + for _, metric := range acc.Metrics { + if logEventMessage, ok := metric.Fields["message"]; ok { + require.Contains(t, logEventMessage.(string), tt.expected.messageContains) + } else { + t.Errorf("Expect logEvents to be present") + } + } + }) + } +} diff --git a/plugins/inputs/kinesis_consumer/sample.conf b/plugins/inputs/kinesis_consumer/sample.conf new file mode 100644 index 0000000000000..1e7547fbdc00d --- /dev/null +++ b/plugins/inputs/kinesis_consumer/sample.conf @@ -0,0 +1,66 @@ +# Configuration for the AWS Kinesis input. +[[inputs.kinesis_consumer]] + ## Amazon REGION of kinesis endpoint. + region = "ap-southeast-2" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified + ## 2) Assumed credentials via STS if role_arn is specified + ## 3) explicit credentials from 'access_key' and 'secret_key' + ## 4) shared profile from 'profile' + ## 5) environment variables + ## 6) shared credentials file + ## 7) EC2 Instance Profile + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # web_identity_token_file = "" + # role_session_name = "" + # profile = "" + # shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Kinesis StreamName must exist prior to starting telegraf. + streamname = "StreamName" + + ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) + # shard_iterator_type = "TRIM_HORIZON" + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## + ## The content encoding of the data from kinesis + ## If you are processing a cloudwatch logs kinesis stream then set this to "gzip" + ## as AWS compresses cloudwatch log data before it is sent to kinesis (aws + ## also base64 encodes the zip byte data before pushing to the stream. 
The base64 decoding
+  ## is done automatically by the golang sdk, as data is read from kinesis)
+  ##
+  # content_encoding = "identity"
+
+  ## Optional
+  ## Configuration for a dynamodb checkpoint
+  [inputs.kinesis_consumer.checkpoint_dynamodb]
+    ## unique name for this consumer
+    app_name = "default"
+    table_name = "default"
diff --git a/plugins/inputs/knx_listener/README.md b/plugins/inputs/knx_listener/README.md
new file mode 100644
index 0000000000000..f77511bcb8522
--- /dev/null
+++ b/plugins/inputs/knx_listener/README.md
@@ -0,0 +1,65 @@
+# KNX Input Plugin
+
+The KNX input plugin listens for messages on the KNX home-automation bus.
+This plugin connects to the KNX bus via a KNX-IP interface.
+Information about supported KNX message datapoint types can be found at the
+underlying "knx-go" project site (<https://github.com/vapourismo/knx-go>).
+
+## Configuration
+
+```toml @sample.conf
+# Listener capable of handling KNX bus messages provided through a KNX-IP Interface.
+[[inputs.knx_listener]]
+  ## Type of KNX-IP interface.
+  ## Can be either "tunnel" or "router".
+  # service_type = "tunnel"
+
+  ## Address of the KNX-IP interface.
+  service_address = "localhost:3671"
+
+  ## Measurement definition(s)
+  # [[inputs.knx_listener.measurement]]
+  #   ## Name of the measurement
+  #   name = "temperature"
+  #   ## Datapoint-Type (DPT) of the KNX messages
+  #   dpt = "9.001"
+  #   ## List of Group-Addresses (GAs) assigned to the measurement
+  #   addresses = ["5/5/1"]
+
+  # [[inputs.knx_listener.measurement]]
+  #   name = "illumination"
+  #   dpt = "9.004"
+  #   addresses = ["5/5/3"]
+```
+
+### Measurement configurations
+
+Each measurement contains only one datapoint-type (DPT) and assigns a list of
+addresses to this measurement. You can, for example, group all temperature sensor
+messages within a "temperature" measurement. However, you are free to split
+messages of one datapoint-type across multiple measurements.
+
+**NOTE: You should not assign a group-address (GA) to multiple measurements!**
+
+## Metrics
+
+Received KNX data is stored in the named measurement as configured above using
+the "value" field. In addition to the value, the following tags are added
+to the datapoint:
+
+- "groupaddress": KNX group-address corresponding to the value
+- "unit": unit of the value
+- "source": KNX physical address sending the value
+
+To find out about the datatype of the datapoint, please check your KNX project,
+the KNX specification, or the "knx-go" project for the corresponding DPT.
+
+## Example Output
+
+This section shows example output in Line Protocol format.
+
+```shell
+illumination,groupaddress=5/5/4,host=Hugin,source=1.1.12,unit=lux value=17.889999389648438 1582132674999013274
+temperature,groupaddress=5/5/1,host=Hugin,source=1.1.8,unit=°C value=17.799999237060547 1582132663427587361
+windowopen,groupaddress=1/0/1,host=Hugin,source=1.1.3 value=true 1582132630425581320
+```
diff --git a/plugins/inputs/knx_listener/knx_dummy_interface.go b/plugins/inputs/knx_listener/knx_dummy_interface.go
new file mode 100644
index 0000000000000..1f897c4d99baa
--- /dev/null
+++ b/plugins/inputs/knx_listener/knx_dummy_interface.go
@@ -0,0 +1,28 @@
+package knx_listener
+
+import (
+	"github.com/vapourismo/knx-go/knx"
+)
+
+type KNXDummyInterface struct {
+	inbound chan knx.GroupEvent
+}
+
+func NewDummyInterface() (di KNXDummyInterface, err error) {
+	di.inbound = make(chan knx.GroupEvent)
+
+	return di, err
+}
+
+func (di *KNXDummyInterface) Send(event knx.GroupEvent) {
+	di.inbound <- event
+}
+
+func (di *KNXDummyInterface) Inbound() <-chan knx.GroupEvent {
+	return di.inbound
+}
+
+func (di *KNXDummyInterface) Close() {
+	close(di.inbound)
+}
diff --git a/plugins/inputs/knx_listener/knx_listener.go b/plugins/inputs/knx_listener/knx_listener.go
new file mode 100644
index 0000000000000..e03a072b3abae
--- /dev/null
+++ b/plugins/inputs/knx_listener/knx_listener.go
@@ -0,0 +1,182 @@
+//go:generate ../../../tools/readme_config_includer/generator
+package knx_listener
+
+import (
+	_ "embed"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/vapourismo/knx-go/knx"
+	"github.com/vapourismo/knx-go/knx/dpt"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
+type KNXInterface interface {
+	Inbound() <-chan knx.GroupEvent
+	Close()
+}
+
+type addressTarget struct {
+	measurement string
+	datapoint dpt.DatapointValue
+}
+
+type Measurement struct {
+	Name string
+	Dpt string
+	Addresses []string
+}
+
+type KNXListener struct {
+	ServiceType string `toml:"service_type"`
+	ServiceAddress string `toml:"service_address"`
+	Measurements []Measurement `toml:"measurement"`
+	Log telegraf.Logger `toml:"-"`
+
+	client KNXInterface
+	gaTargetMap map[string]addressTarget
+	gaLogbook map[string]bool
+
+	acc telegraf.Accumulator
+	wg sync.WaitGroup
+}
+
+func (*KNXListener) SampleConfig() string {
+	return sampleConfig
+}
+
+func (kl *KNXListener) Gather(_ telegraf.Accumulator) error {
+	return nil
+}
+
+func (kl *KNXListener) Start(acc telegraf.Accumulator) error {
+	// Store the accumulator for later use
+	kl.acc = acc
+
+	// Setup a logbook to track unknown GAs to avoid log-spamming
+	kl.gaLogbook = make(map[string]bool)
+
+	// Construct the mapping of Group-addresses (GAs) to DPTs and the name
+	// of the measurement
+	kl.gaTargetMap = make(map[string]addressTarget)
+	for _, m := range kl.Measurements {
+		kl.Log.Debugf("Group-address mapping for measurement %q:", m.Name)
+		for _, ga := range m.Addresses {
+			kl.Log.Debugf("  %s --> %s", ga, m.Dpt)
+			if _, ok := kl.gaTargetMap[ga]; ok {
+				return fmt.Errorf("duplicate specification of address %q", ga)
+			}
+			d, ok := dpt.Produce(m.Dpt)
+			if !ok {
+				return fmt.Errorf("cannot create datapoint-type %q for address %q", m.Dpt, ga)
+			}
+			kl.gaTargetMap[ga] = addressTarget{m.Name, d}
+		}
+	}
+
+	// Connect to the KNX-IP interface
+	kl.Log.Infof("Trying to connect to %q at %q", kl.ServiceType, kl.ServiceAddress)
+	switch kl.ServiceType {
+	case "tunnel":
+		c, err := knx.NewGroupTunnel(kl.ServiceAddress, knx.DefaultTunnelConfig)
+		if err != nil {
+			return err
+		}
+		kl.client = &c
+	case "router":
+		c, err := knx.NewGroupRouter(kl.ServiceAddress, knx.DefaultRouterConfig)
+		if err != nil {
+			return err
+		}
+		kl.client = &c
+	case "dummy":
+		c, err := NewDummyInterface()
+		if err != nil {
+			return err
+		}
+		kl.client = &c
+	default:
+		return fmt.Errorf("invalid interface type: %s", kl.ServiceType)
+	}
+	kl.Log.Infof("Connected!")
+
+	// Listen to the KNX bus
+	kl.wg.Add(1)
+	go func() {
+		defer kl.wg.Done()
+		kl.listen()
+	}()
+
+	return nil
+}
+
+func (kl *KNXListener) Stop() {
+	if kl.client != nil {
+		kl.client.Close()
+		kl.wg.Wait()
+	}
+}
+
+func (kl *KNXListener) listen() {
+	for msg := range kl.client.Inbound() {
+		// Match GA to DataPointType and measurement name
+		ga := msg.Destination.String()
+		target, ok := kl.gaTargetMap[ga]
+		if !ok {
+			if !kl.gaLogbook[ga] {
+				kl.Log.Infof("Ignoring message %+v for unknown GA %q", msg, ga)
+				kl.gaLogbook[ga] = true
+			}
+			continue
+		}
+
+		// Extract the value from the data-frame
+		err := target.datapoint.Unpack(msg.Data)
+		if err != nil {
+			kl.Log.Errorf("Unpacking data failed: %v", err)
+			continue
+		}
+		kl.Log.Debugf("Matched GA %q to measurement %q with value %v", ga, target.measurement, target.datapoint)
+
+		// Convert the DatapointValue interface back to its basic type again,
+		// as otherwise telegraf will not push out the metrics and will
+		// silently drop them.
+		var value interface{}
+		vi := reflect.Indirect(reflect.ValueOf(target.datapoint))
+		switch vi.Kind() {
+		case reflect.Bool:
+			value = vi.Bool()
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			value = vi.Int()
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			value = vi.Uint()
+		case reflect.Float32, reflect.Float64:
+			value = vi.Float()
+		default:
+			kl.Log.Errorf("Type conversion %v failed for address %q", vi.Kind(), ga)
+			continue
+		}
+
+		// Compose the actual data to be pushed out
+		fields := map[string]interface{}{"value": value}
+		tags := map[string]string{
+			"groupaddress": ga,
+			"unit": target.datapoint.(dpt.DatapointMeta).Unit(),
+			"source": msg.Source.String(),
+		}
+		kl.acc.AddFields(target.measurement, fields, tags)
+	}
+}
+
+func init() {
+	inputs.Add("knx_listener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} })
+	// Register for backward compatibility
+	inputs.Add("KNXListener", func() telegraf.Input { return &KNXListener{ServiceType: "tunnel"} })
+}
diff --git a/plugins/inputs/knx_listener/knx_listener_test.go b/plugins/inputs/knx_listener/knx_listener_test.go
new file mode 100644
index 0000000000000..adb07eb6d0113
--- /dev/null
+++ b/plugins/inputs/knx_listener/knx_listener_test.go
@@ -0,0 +1,188 @@
+package knx_listener
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	"github.com/vapourismo/knx-go/knx"
+	"github.com/vapourismo/knx-go/knx/cemi"
+	"github.com/vapourismo/knx-go/knx/dpt"
+
+	"github.com/influxdata/telegraf/testutil"
+)
+
+const epsilon = 1e-3
+
+func setValue(data dpt.DatapointValue, value interface{}) error {
+	d := reflect.Indirect(reflect.ValueOf(data))
+	if !d.CanSet() {
+		return fmt.Errorf("cannot set datapoint %v", data)
+	}
+	switch v := value.(type) {
+	case bool:
+		d.SetBool(v)
+	case float64:
+		d.SetFloat(v)
+	case int64:
+		d.SetInt(v)
+	case uint64:
+		d.SetUint(v)
+	default:
+		return fmt.Errorf("unknown type '%T' when setting value
for DPT", value) + } + return nil +} + +type TestMessage struct { + address string + dpt string + value interface{} +} + +func ProduceKnxEvent(t *testing.T, address string, datapoint string, value interface{}) *knx.GroupEvent { + addr, err := cemi.NewGroupAddrString(address) + require.NoError(t, err) + + data, ok := dpt.Produce(datapoint) + require.True(t, ok) + err = setValue(data, value) + require.NoError(t, err) + + return &knx.GroupEvent{ + Command: knx.GroupWrite, + Destination: addr, + Data: data.Pack(), + } +} + +func TestRegularReceives_DPT(t *testing.T) { + // Define the test-cases + var testcases = []TestMessage{ + {"1/0/1", "1.001", true}, + {"1/0/2", "1.002", false}, + {"1/0/3", "1.003", true}, + {"1/0/9", "1.009", false}, + {"1/1/0", "1.010", true}, + {"5/0/1", "5.001", 12.157}, + {"5/0/3", "5.003", 121.412}, + {"5/0/4", "5.004", uint64(25)}, + {"9/0/1", "9.001", 18.56}, + {"9/0/4", "9.004", 243.84}, + {"9/0/5", "9.005", 12.01}, + {"9/0/7", "9.007", 59.32}, + {"13/0/1", "13.001", int64(-15)}, + {"13/0/2", "13.002", int64(183)}, + {"13/1/0", "13.010", int64(-141)}, + {"13/1/1", "13.011", int64(277)}, + {"13/1/2", "13.012", int64(-4096)}, + {"13/1/3", "13.013", int64(8192)}, + {"13/1/4", "13.014", int64(-65536)}, + {"13/1/5", "13.015", int64(2147483647)}, + {"14/0/0", "14.000", -1.31}, + {"14/0/1", "14.001", 0.44}, + {"14/0/2", "14.002", 32.08}, + // {"14/0/3", "14.003", 92.69}, + // {"14/0/4", "14.004", 1.00794}, + {"14/1/0", "14.010", 5963.78}, + {"14/1/1", "14.011", 150.95}, + } + acc := &testutil.Accumulator{} + + // Setup the unit-under-test + measurements := make([]Measurement, 0, len(testcases)) + for _, testcase := range testcases { + measurements = append(measurements, Measurement{"test", testcase.dpt, []string{testcase.address}}) + } + listener := KNXListener{ + ServiceType: "dummy", + Measurements: measurements, + Log: testutil.Logger{Name: "knx_listener"}, + } + + // Setup the listener to test + err := listener.Start(acc) + require.NoError(t, err) + client := listener.client.(*KNXDummyInterface) + + tstart := time.Now() + + // Send the defined test data + for _, testcase := range testcases { + event := ProduceKnxEvent(t, testcase.address, testcase.dpt, testcase.value) + client.Send(*event) + } + + // Give the accumulator some time to collect the data + acc.Wait(len(testcases)) + + // Stop the listener + listener.Stop() + tstop := time.Now() + + // Check if we got what we expected + require.Len(t, acc.Metrics, len(testcases)) + for i, m := range acc.Metrics { + require.Equal(t, "test", m.Measurement) + require.Equal(t, testcases[i].address, m.Tags["groupaddress"]) + require.Len(t, m.Fields, 1) + switch v := testcases[i].value.(type) { + case bool, int64, uint64: + require.Equal(t, v, m.Fields["value"]) + case float64: + require.InDelta(t, v, m.Fields["value"], epsilon) + } + require.True(t, !tstop.Before(m.Time)) + require.True(t, !tstart.After(m.Time)) + } +} + +func TestRegularReceives_MultipleMessages(t *testing.T) { + listener := KNXListener{ + ServiceType: "dummy", + Measurements: []Measurement{ + {"temperature", "1.001", []string{"1/1/1"}}, + }, + Log: testutil.Logger{Name: "knx_listener"}, + } + + acc := &testutil.Accumulator{} + + // Setup the listener to test + err := listener.Start(acc) + require.NoError(t, err) + client := listener.client.(*KNXDummyInterface) + + testMessages := []TestMessage{ + {"1/1/1", "1.001", true}, + {"1/1/1", "1.001", false}, + {"1/1/2", "1.001", false}, + {"1/1/2", "1.001", true}, + } + + for _, testcase := range testMessages { + 
event := ProduceKnxEvent(t, testcase.address, testcase.dpt, testcase.value) + client.Send(*event) + } + + // Give the accumulator some time to collect the data + acc.Wait(2) + + // Stop the listener + listener.Stop() + + // Check if we got what we expected + require.Len(t, acc.Metrics, 2) + + require.Equal(t, "temperature", acc.Metrics[0].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[0].Tags["groupaddress"]) + require.Len(t, acc.Metrics[0].Fields, 1) + require.Equal(t, true, acc.Metrics[0].Fields["value"]) + + require.Equal(t, "temperature", acc.Metrics[1].Measurement) + require.Equal(t, "1/1/1", acc.Metrics[1].Tags["groupaddress"]) + require.Len(t, acc.Metrics[1].Fields, 1) + require.Equal(t, false, acc.Metrics[1].Fields["value"]) +} diff --git a/plugins/inputs/knx_listener/sample.conf b/plugins/inputs/knx_listener/sample.conf new file mode 100644 index 0000000000000..d4508ed933088 --- /dev/null +++ b/plugins/inputs/knx_listener/sample.conf @@ -0,0 +1,22 @@ +# Listener capable of handling KNX bus messages provided through a KNX-IP Interface. +[[inputs.knx_listener]] + ## Type of KNX-IP interface. + ## Can be either "tunnel" or "router". + # service_type = "tunnel" + + ## Address of the KNX-IP interface. + service_address = "localhost:3671" + + ## Measurement definition(s) + # [[inputs.knx_listener.measurement]] + # ## Name of the measurement + # name = "temperature" + # ## Datapoint-Type (DPT) of the KNX messages + # dpt = "9.001" + # ## List of Group-Addresses (GAs) assigned to the measurement + # addresses = ["5/5/1"] + + # [[inputs.knx_listener.measurement]] + # name = "illumination" + # dpt = "9.004" + # addresses = ["5/5/3"] diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md index dbed6d6f01edb..711ba4d603622 100644 --- a/plugins/inputs/kube_inventory/README.md +++ b/plugins/inputs/kube_inventory/README.md @@ -1,6 +1,7 @@ # Kubernetes Inventory Input Plugin -This plugin generates metrics derived from the state of the following Kubernetes resources: +This plugin generates metrics derived from the state of the following Kubernetes +resources: - daemonsets - deployments @@ -19,7 +20,7 @@ the major cloud providers; this is roughly 4 release / 2 years. **This plugin supports Kubernetes 1.11 and later.** -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -27,15 +28,14 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Read metrics from the Kubernetes api [[inputs.kube_inventory]] ## URL for the Kubernetes API url = "https://127.0.0.1" @@ -66,13 +66,18 @@ avoid cardinality issues: ## selectors to include and exclude as tags. Globs accepted. ## Note that an empty array for both will include all selectors as tags ## selector_exclude overrides selector_include if both set. 
-  selector_include = []
-  selector_exclude = ["*"]
+  # selector_include = []
+  # selector_exclude = ["*"]

  ## Optional TLS Config
+  ## Trusted root certificates for server
  # tls_ca = "/path/to/cafile"
+  ## Used for TLS client certificate authentication
  # tls_cert = "/path/to/certfile"
+  ## Used for TLS client certificate authentication
  # tls_key = "/path/to/keyfile"
+  ## Send the specified TLS server name via SNI
+  # tls_server_name = "kubernetes.example.com"
  ## Use TLS but skip chain & host verification
  # insecure_skip_verify = false
@@ -80,9 +85,15 @@ avoid cardinality issues:
  # fielddrop = ["terminated_reason"]
```

-#### Kubernetes Permissions
+## Kubernetes Permissions
+
+If using [RBAC authorization][rbac], you will need to create a cluster role to
+list "persistentvolumes" and "nodes". You will then need to make an [aggregated
+ClusterRole][agg] that will eventually be bound to a user or group.
+
+[rbac]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/

-If using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), you will need to create a cluster role to list "persistentvolumes" and "nodes". You will then need to make an [aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) that will eventually be bound to a user or group.
+[agg]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles

```yaml
---
@@ -111,7 +122,8 @@ aggregationRule:
rules: [] # Rules are automatically filled in by the controller manager.
```

-Bind the newly created aggregated ClusterRole with the following config file, updating the subjects as needed.
+Bind the newly created aggregated ClusterRole with the following config file,
+updating the subjects as needed.

```yaml
---
@@ -129,7 +141,28 @@ subjects:
    namespace: default
```

-### Metrics:
+## Quickstart in k3s
+
+When monitoring [k3s](https://k3s.io) server instances, one can re-use the already
+generated administration token. This is less secure than using the more
+restrictive dedicated telegraf user, but more convenient to set up.
+
+```console
+# an empty token will make telegraf use the client cert/key files instead
+$ touch /run/telegraf-kubernetes-token
+# replace `telegraf` with the user the telegraf process is running as
+$ install -o telegraf -m400 /var/lib/rancher/k3s/server/tls/client-admin.crt /run/telegraf-kubernetes-cert
+$ install -o telegraf -m400 /var/lib/rancher/k3s/server/tls/client-admin.key /run/telegraf-kubernetes-key
+```
+
+```toml
+[[inputs.kube_inventory]]
+bearer_token = "/run/telegraf-kubernetes-token"
+tls_cert = "/run/telegraf-kubernetes-cert"
+tls_key = "/run/telegraf-kubernetes-key"
+```
+
+## Metrics

- kubernetes_daemonset
  - tags:
    - daemonset_name
    - namespace
    - selector (\*varies)
  - fields:
    - generation
    - current_number_scheduled
    - desired_number_scheduled
    - number_available
    - number_misscheduled
    - number_ready
    - number_unavailable
    - updated_number_scheduled

-* kubernetes_deployment
+- kubernetes_deployment
  - tags:
    - deployment_name
    - namespace
@@ -171,7 +204,7 @@ subjects:
    - ready
    - port

-* kubernetes_ingress
+- kubernetes_ingress
  - tags:
    - ingress_name
    - namespace
@@ -191,13 +224,15 @@ subjects:
    - node_name
  - fields:
    - capacity_cpu_cores
+    - capacity_millicpu_cores
    - capacity_memory_bytes
    - capacity_pods
    - allocatable_cpu_cores
+    - allocatable_millicpu_cores
    - allocatable_memory_bytes
    - allocatable_pods

-* kubernetes_persistentvolume
+- kubernetes_persistentvolume
  - tags:
    - pv_name
    - phase
@@ -215,23 +250,25 @@ subjects:
  - fields:
    - phase_type (int, [see below](#pvc-phase_type))

-* kubernetes_pod_container
+- kubernetes_pod_container
  - tags:
    - container_name
    - namespace
    - node_name
    - pod_name
    - node_selector (\*varies)
+    - phase
    - state
    - readiness
  - fields:
    - restarts_total
    - state_code
    - state_reason
+    - phase_reason
    - terminated_reason (string, deprecated in 1.15: use `state_reason` instead)
-    - resource_requests_cpu_units
+    - resource_requests_millicpu_units
    - resource_requests_memory_bytes
-    - resource_limits_cpu_units
+    - resource_limits_millicpu_units
    - resource_limits_memory_bytes

- kubernetes_service
@@ -249,7 +286,7 @@ subjects:
    - port
    - target_port

-* kubernetes_statefulset
+- kubernetes_statefulset
  - tags:
    - statefulset_name
    - namespace
@@ -264,9 +301,10 @@ subjects:
    - spec_replicas
    - observed_generation

-#### pv `phase_type`
+### pv `phase_type`

-The persistentvolume "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value.
+The persistentvolume "phase" is saved in the `phase` tag with a correlated
+numeric field called `phase_type` corresponding with that tag value.

| Tag value | Corresponding field value |
| --------- | ------------------------- |
@@ -277,9 +315,10 @@ The persistentvolume "phase" is saved in the `phase` tag with a correlated numer
| available | 4 |
| unknown   | 5 |

-#### pvc `phase_type`
+### pvc `phase_type`

-The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value.
+The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated
+numeric field called `phase_type` corresponding with that tag value.

| Tag value | Corresponding field value |
| --------- | ------------------------- |
| bound     | 0 |
| lost      | 1 |
| pending   | 2 |
| unknown   | 3 |
-### Example Output:
+## Example Output

-```
+```shell
kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000
kubernetes_daemonset,daemonset_name=telegraf,selector_select1=s1,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000
kubernetes_deployment,deployment_name=deployd,selector_select1=s1,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000
@@ -299,14 +338,12 @@ kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-2
kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,selector_select1=s1,storageclass=ebs-1-retain phase_type=0i 1547597615000000000
kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000
kubernetes_service,cluster_ip=172.29.61.80,namespace=redis-cache-0001,port_name=redis,port_protocol=TCP,selector_app=myapp,selector_io.kompose.service=redis,selector_role=slave,service_name=redis-slave created=1588690034000000000i,generation=0i,port=6379i,target_port=0i 1547597616000000000
-kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
+kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,phase=Running,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",phase_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000
```

[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
-[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
-[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000
[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/
[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
[influx-docs]: https://docs.influxdata.com/influxdb/latest/
diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go
index d9b24ba5c0a95..da03c643283fe
100644 --- a/plugins/inputs/kube_inventory/client.go +++ b/plugins/inputs/kube_inventory/client.go @@ -4,10 +4,12 @@ import ( "context" "time" - "github.com/ericchiang/k8s" - v1APPS "github.com/ericchiang/k8s/apis/apps/v1" - v1 "github.com/ericchiang/k8s/apis/core/v1" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "github.com/influxdata/telegraf/plugins/common/tls" ) @@ -15,104 +17,89 @@ import ( type client struct { namespace string timeout time.Duration - *k8s.Client + *kubernetes.Clientset } func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tlsConfig tls.ClientConfig) (*client, error) { - c, err := k8s.NewClient(&k8s.Config{ - Clusters: []k8s.NamedCluster{{Name: "cluster", Cluster: k8s.Cluster{ - Server: baseURL, - InsecureSkipTLSVerify: tlsConfig.InsecureSkipVerify, - CertificateAuthority: tlsConfig.TLSCA, - }}}, - Contexts: []k8s.NamedContext{{Name: "context", Context: k8s.Context{ - Cluster: "cluster", - AuthInfo: "auth", - Namespace: namespace, - }}}, - AuthInfos: []k8s.NamedAuthInfo{{Name: "auth", AuthInfo: k8s.AuthInfo{ - Token: bearerToken, - ClientCertificate: tlsConfig.TLSCert, - ClientKey: tlsConfig.TLSKey, - }}}, + c, err := kubernetes.NewForConfig(&rest.Config{ + TLSClientConfig: rest.TLSClientConfig{ + ServerName: tlsConfig.ServerName, + Insecure: tlsConfig.InsecureSkipVerify, + CAFile: tlsConfig.TLSCA, + CertFile: tlsConfig.TLSCert, + KeyFile: tlsConfig.TLSKey, + }, + Host: baseURL, + BearerToken: bearerToken, + ContentConfig: rest.ContentConfig{}, }) if err != nil { return nil, err } return &client{ - Client: c, + Clientset: c, timeout: timeout, namespace: namespace, }, nil } -func (c *client) getDaemonSets(ctx context.Context) (*v1APPS.DaemonSetList, error) { - list := new(v1APPS.DaemonSetList) +func (c *client) getDaemonSets(ctx context.Context) (*appsv1.DaemonSetList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().DaemonSets(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getDeployments(ctx context.Context) (*v1APPS.DeploymentList, error) { - list := &v1APPS.DeploymentList{} +func (c *client) getDeployments(ctx context.Context) (*appsv1.DeploymentList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().Deployments(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getEndpoints(ctx context.Context) (*v1.EndpointsList, error) { - list := new(v1.EndpointsList) +func (c *client) getEndpoints(ctx context.Context) (*corev1.EndpointsList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Endpoints(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getIngress(ctx context.Context) (*v1beta1EXT.IngressList, error) { - list := new(v1beta1EXT.IngressList) +func (c *client) getIngress(ctx context.Context) (*netv1.IngressList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.NetworkingV1().Ingresses(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getNodes(ctx context.Context) (*v1.NodeList, error) { - list := new(v1.NodeList) 
+func (c *client) getNodes(ctx context.Context) (*corev1.NodeList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, "", list) + return c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) } -func (c *client) getPersistentVolumes(ctx context.Context) (*v1.PersistentVolumeList, error) { - list := new(v1.PersistentVolumeList) +func (c *client) getPersistentVolumes(ctx context.Context) (*corev1.PersistentVolumeList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, "", list) + return c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) } -func (c *client) getPersistentVolumeClaims(ctx context.Context) (*v1.PersistentVolumeClaimList, error) { - list := new(v1.PersistentVolumeClaimList) +func (c *client) getPersistentVolumeClaims(ctx context.Context) (*corev1.PersistentVolumeClaimList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().PersistentVolumeClaims(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getPods(ctx context.Context) (*v1.PodList, error) { - list := new(v1.PodList) +func (c *client) getPods(ctx context.Context) (*corev1.PodList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Pods(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getServices(ctx context.Context) (*v1.ServiceList, error) { - list := new(v1.ServiceList) +func (c *client) getServices(ctx context.Context) (*corev1.ServiceList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.CoreV1().Services(c.namespace).List(ctx, metav1.ListOptions{}) } -func (c *client) getStatefulSets(ctx context.Context) (*v1APPS.StatefulSetList, error) { - list := new(v1APPS.StatefulSetList) +func (c *client) getStatefulSets(ctx context.Context) (*appsv1.StatefulSetList, error) { ctx, cancel := context.WithTimeout(ctx, c.timeout) defer cancel() - return list, c.List(ctx, c.namespace, list) + return c.AppsV1().StatefulSets(c.namespace).List(ctx, metav1.ListOptions{}) } diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go index 88411ea367ccf..0462c0222d527 100644 --- a/plugins/inputs/kube_inventory/client_test.go +++ b/plugins/inputs/kube_inventory/client_test.go @@ -4,8 +4,8 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/util/intstr" "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/stretchr/testify/require" ) type mockHandler struct { @@ -20,24 +20,11 @@ func toInt32Ptr(i int32) *int32 { return &i } -func toInt64Ptr(i int64) *int64 { - return &i -} - func toBoolPtr(b bool) *bool { return &b } -func toIntStrPtrS(s string) *intstr.IntOrString { - return &intstr.IntOrString{StrVal: &s} -} - -func toIntStrPtrI(i int32) *intstr.IntOrString { - return &intstr.IntOrString{IntVal: &i} -} func TestNewClient(t *testing.T) { _, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{}) - if err != nil { - t.Errorf("Failed to create new client - %s", err.Error()) - } + require.NoErrorf(t, err, "Failed to create new client - %v", err) } diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go index db612a5e33b2a..e169c8f274662 100644 --- a/plugins/inputs/kube_inventory/daemonset.go +++ 
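One detail worth calling out in these getters: the old code signalled cluster-scoped resources by passing an empty namespace string to a generic `List`, while client-go encodes the distinction in the API itself — `Nodes()` and `PersistentVolumes()` simply take no namespace. A small sketch, assuming an already-constructed clientset:

```go
package kube_inventory

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listClusterAndNamespaced illustrates the split the patched getters follow:
// nodes are cluster-scoped (no namespace argument), pods are listed per
// namespace. The function name is invented for illustration.
func listClusterAndNamespaced(ctx context.Context, cs *kubernetes.Clientset, ns string) (*corev1.NodeList, *corev1.PodList, error) {
	nodes, err := cs.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, nil, err
	}
	pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, nil, err
	}
	return nodes, pods, nil
}
```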
b/plugins/inputs/kube_inventory/daemonset.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/apps/v1" + v1 "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) @@ -16,39 +15,35 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern return } for _, d := range list.Items { - if err = ki.gatherDaemonSet(*d, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherDaemonSet(d, acc) } } -func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) { fields := map[string]interface{}{ - "generation": d.Metadata.GetGeneration(), - "current_number_scheduled": d.Status.GetCurrentNumberScheduled(), - "desired_number_scheduled": d.Status.GetDesiredNumberScheduled(), - "number_available": d.Status.GetNumberAvailable(), - "number_misscheduled": d.Status.GetNumberMisscheduled(), - "number_ready": d.Status.GetNumberReady(), - "number_unavailable": d.Status.GetNumberUnavailable(), - "updated_number_scheduled": d.Status.GetUpdatedNumberScheduled(), + "generation": d.Generation, + "current_number_scheduled": d.Status.CurrentNumberScheduled, + "desired_number_scheduled": d.Status.DesiredNumberScheduled, + "number_available": d.Status.NumberAvailable, + "number_misscheduled": d.Status.NumberMisscheduled, + "number_ready": d.Status.NumberReady, + "number_unavailable": d.Status.NumberUnavailable, + "updated_number_scheduled": d.Status.UpdatedNumberScheduled, } tags := map[string]string{ - "daemonset_name": d.Metadata.GetName(), - "namespace": d.Metadata.GetNamespace(), + "daemonset_name": d.Name, + "namespace": d.Namespace, } - for key, val := range d.GetSpec().GetSelector().GetMatchLabels() { + for key, val := range d.Spec.Selector.MatchLabels { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } - if d.Metadata.CreationTimestamp.GetSeconds() != 0 { - fields["created"] = time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano() + creationTs := d.GetCreationTimestamp() + if !creationTs.IsZero() { + fields["created"] = d.GetCreationTimestamp().UnixNano() } acc.AddFields(daemonSetMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go index 0a13f1e42cb3d..5c67f39432dae 100644 --- a/plugins/inputs/kube_inventory/daemonset_test.go +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -1,15 +1,16 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestDaemonSet(t *testing.T) { @@ -21,7 +22,7 @@ func TestDaemonSet(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -38,28 +39,28 @@ func TestDaemonSet(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/daemonsets/": &v1.DaemonSetList{ - Items: []*v1.DaemonSet{ + Items: []v1.DaemonSet{ { - Status: &v1.DaemonSetStatus{ - CurrentNumberScheduled: toInt32Ptr(3), - DesiredNumberScheduled: toInt32Ptr(5), - NumberAvailable: toInt32Ptr(2), - NumberMisscheduled: 
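The creation-timestamp handling above changes shape: instead of comparing protobuf `Seconds`/`Nanos` against zero, the typed `metav1.Time` (which embeds `time.Time`) is asked directly whether it is the zero value. A compilable sketch of just that guard:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// createdField mirrors the patched pattern: skip the "created" field when
// the object carries no creation timestamp, otherwise emit UnixNano.
func createdField(ts metav1.Time) (int64, bool) {
	if ts.IsZero() {
		return 0, false // no timestamp set; omit the field
	}
	return ts.UnixNano(), true
}

func main() {
	fmt.Println(createdField(metav1.Time{}))                 // 0 false
	fmt.Println(createdField(metav1.Time{Time: time.Now()})) // <nanos> true
}
```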
toInt32Ptr(2), - NumberReady: toInt32Ptr(1), - NumberUnavailable: toInt32Ptr(1), - UpdatedNumberScheduled: toInt32Ptr(2), + Status: v1.DaemonSetStatus{ + CurrentNumberScheduled: 3, + DesiredNumberScheduled: 5, + NumberAvailable: 2, + NumberMisscheduled: 2, + NumberReady: 1, + NumberUnavailable: 1, + UpdatedNumberScheduled: 2, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("daemon1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "daemon1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, - Spec: &v1.DaemonSetSpec{ + Spec: v1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "select1": "s1", @@ -72,28 +73,28 @@ func TestDaemonSet(t *testing.T) { }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "generation": int64(11221), - "current_number_scheduled": int32(3), - "desired_number_scheduled": int32(5), - "number_available": int32(2), - "number_misscheduled": int32(2), - "number_ready": int32(1), - "number_unavailable": int32(1), - "updated_number_scheduled": int32(2), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "daemonset_name": "daemon1", - "namespace": "ns1", - "selector_select1": "s1", - "selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_daemonset", + map[string]string{ + "daemonset_name": "daemon1", + "namespace": "ns1", + "selector_select1": "s1", + "selector_select2": "s2", }, - }, + map[string]interface{}{ + "generation": int64(11221), + "current_number_scheduled": int32(3), + "desired_number_scheduled": int32(5), + "number_available": int32(2), + "number_misscheduled": int32(2), + "number_ready": int32(1), + "number_unavailable": int32(1), + "updated_number_scheduled": int32(2), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -105,37 +106,23 @@ func TestDaemonSet(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { - err := ks.gatherDaemonSet(*dset, acc) - if err != nil { - t.Errorf("Failed to gather daemonset - %s", err.Error()) - } + ks.gatherDaemonSet(dset, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) 
+ + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -146,28 +133,28 @@ func TestDaemonSetSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/daemonsets/": &v1.DaemonSetList{ - Items: []*v1.DaemonSet{ + Items: []v1.DaemonSet{ { - Status: &v1.DaemonSetStatus{ - CurrentNumberScheduled: toInt32Ptr(3), - DesiredNumberScheduled: toInt32Ptr(5), - NumberAvailable: toInt32Ptr(2), - NumberMisscheduled: toInt32Ptr(2), - NumberReady: toInt32Ptr(1), - NumberUnavailable: toInt32Ptr(1), - UpdatedNumberScheduled: toInt32Ptr(2), + Status: v1.DaemonSetStatus{ + CurrentNumberScheduled: 3, + DesiredNumberScheduled: 5, + NumberAvailable: 2, + NumberMisscheduled: 2, + NumberReady: 1, + NumberUnavailable: 1, + UpdatedNumberScheduled: 2, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("daemon1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "daemon1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: time.Now()}, }, - Spec: &v1.DaemonSetSpec{ + Spec: v1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "select1": "s1", @@ -281,13 +268,10 @@ func TestDaemonSetSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { - err := ks.gatherDaemonSet(*dset, acc) - if err != nil { - t.Errorf("Failed to gather daemonset - %s", err.Error()) - } + ks.gatherDaemonSet(dset, acc) } // Grab selector tags @@ -300,8 +284,7 @@ func TestDaemonSetSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go index b91216765e9a6..510cc68cecaa7 100644 --- a/plugins/inputs/kube_inventory/deployment.go +++ b/plugins/inputs/kube_inventory/deployment.go @@ -2,10 +2,9 @@ package kube_inventory import ( "context" - "time" - v1 "github.com/ericchiang/k8s/apis/apps/v1" "github.com/influxdata/telegraf" + v1 "k8s.io/api/apps/v1" ) func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { @@ -15,30 +14,25 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber return } for _, d := range list.Items { - if err = ki.gatherDeployment(*d, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherDeployment(d, acc) } } -func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) { fields := map[string]interface{}{ - "replicas_available": d.Status.GetAvailableReplicas(), - "replicas_unavailable": d.Status.GetUnavailableReplicas(), - "created": time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "replicas_available": d.Status.AvailableReplicas, + 
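The rewritten tests replace hand-written tag/field comparison loops with declaratively built expected metrics compared wholesale. A minimal sketch of that assertion style, using an illustrative single-field metric rather than one copied from the patch:

```go
package kube_inventory

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
)

// TestGatherShape sketches the new assertion style: expected output is a
// []telegraf.Metric built with testutil.MustMetric, compared in one call.
func TestGatherShape(t *testing.T) {
	acc := new(testutil.Accumulator)
	acc.AddFields("kubernetes_daemonset",
		map[string]interface{}{"number_ready": int32(1)},
		map[string]string{"daemonset_name": "daemon1"})

	expected := []telegraf.Metric{
		testutil.MustMetric(
			"kubernetes_daemonset",
			map[string]string{"daemonset_name": "daemon1"},
			map[string]interface{}{"number_ready": int32(1)},
			time.Unix(0, 0),
		),
	}
	// IgnoreTime() drops timestamps from the comparison, since the
	// accumulator stamps metrics with wall-clock time.
	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime())
}
```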
"replicas_unavailable": d.Status.UnavailableReplicas, + "created": d.GetCreationTimestamp().UnixNano(), } tags := map[string]string{ - "deployment_name": d.Metadata.GetName(), - "namespace": d.Metadata.GetNamespace(), + "deployment_name": d.Name, + "namespace": d.Namespace, } - for key, val := range d.GetSpec().GetSelector().GetMatchLabels() { + for key, val := range d.Spec.Selector.MatchLabels { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } acc.AddFields(deploymentMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go index 9407c84d91322..277377619fe84 100644 --- a/plugins/inputs/kube_inventory/deployment_test.go +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -1,15 +1,17 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/ericchiang/k8s/util/intstr" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestDeployment(t *testing.T) { @@ -18,24 +20,11 @@ func TestDeployment(t *testing.T) { selectExclude := []string{} now := time.Now() now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) - outputMetric := &testutil.Metric{ - Fields: map[string]interface{}{ - "replicas_available": int32(1), - "replicas_unavailable": int32(4), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "namespace": "ns1", - "deployment_name": "deploy1", - "selector_select1": "s1", - "selector_select2": "s2", - }, - } tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -52,23 +41,23 @@ func TestDeployment(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/deployments/": &v1.DeploymentList{ - Items: []*v1.Deployment{ + Items: []v1.Deployment{ { - Status: &v1.DeploymentStatus{ - Replicas: toInt32Ptr(3), - AvailableReplicas: toInt32Ptr(1), - UnavailableReplicas: toInt32Ptr(4), - UpdatedReplicas: toInt32Ptr(2), - ObservedGeneration: toInt64Ptr(9121), + Status: v1.DeploymentStatus{ + Replicas: 3, + AvailableReplicas: 1, + UnavailableReplicas: 4, + UpdatedReplicas: 2, + ObservedGeneration: 9121, }, - Spec: &v1.DeploymentSpec{ - Strategy: &v1.DeploymentStrategy{ + Spec: v1.DeploymentSpec{ + Strategy: v1.DeploymentStrategy{ RollingUpdate: &v1.RollingUpdateDeployment{ MaxUnavailable: &intstr.IntOrString{ - IntVal: toInt32Ptr(30), + IntVal: 30, }, MaxSurge: &intstr.IntOrString{ - IntVal: toInt32Ptr(20), + IntVal: 20, }, }, }, @@ -80,25 +69,37 @@ func TestDeployment(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("deploy1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "deploy1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - outputMetric, - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_deployment", + map[string]string{ + "namespace": "ns1", + "deployment_name": "deploy1", + 
"selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "replicas_available": int32(1), + "replicas_unavailable": int32(4), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -110,37 +111,23 @@ func TestDeployment(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { - err := ks.gatherDeployment(*deployment, acc) - if err != nil { - t.Errorf("Failed to gather deployment - %s", err.Error()) - } + ks.gatherDeployment(deployment, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -151,23 +138,23 @@ func TestDeploymentSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/deployments/": &v1.DeploymentList{ - Items: []*v1.Deployment{ + Items: []v1.Deployment{ { - Status: &v1.DeploymentStatus{ - Replicas: toInt32Ptr(3), - AvailableReplicas: toInt32Ptr(1), - UnavailableReplicas: toInt32Ptr(4), - UpdatedReplicas: toInt32Ptr(2), - ObservedGeneration: toInt64Ptr(9121), + Status: v1.DeploymentStatus{ + Replicas: 3, + AvailableReplicas: 1, + UnavailableReplicas: 4, + UpdatedReplicas: 2, + ObservedGeneration: 9121, }, - Spec: &v1.DeploymentSpec{ - Strategy: &v1.DeploymentStrategy{ + Spec: v1.DeploymentSpec{ + Strategy: v1.DeploymentStrategy{ RollingUpdate: &v1.RollingUpdateDeployment{ MaxUnavailable: &intstr.IntOrString{ - IntVal: toInt32Ptr(30), + IntVal: 30, }, MaxSurge: &intstr.IntOrString{ - IntVal: toInt32Ptr(20), + IntVal: 20, }, }, }, @@ -179,15 +166,15 @@ func TestDeploymentSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(11221), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("deploy1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11221, + Namespace: "ns1", + Name: "deploy1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -295,13 +282,10 @@ func TestDeploymentSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, deployment := range 
((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { - err := ks.gatherDeployment(*deployment, acc) - if err != nil { - t.Errorf("Failed to gather deployment - %s", err.Error()) - } + ks.gatherDeployment(deployment, acc) } // Grab selector tags @@ -314,8 +298,7 @@ func TestDeploymentSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go index 7298789da8e08..1eb86eea13b76 100644 --- a/plugins/inputs/kube_inventory/endpoint.go +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -3,11 +3,9 @@ package kube_inventory import ( "context" "strings" - "time" - - "github.com/ericchiang/k8s/apis/core/v1" "github.com/influxdata/telegraf" + corev1 "k8s.io/api/core/v1" ) func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { @@ -17,66 +15,66 @@ func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *Kuberne return } for _, i := range list.Items { - if err = ki.gatherEndpoint(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherEndpoint(i, acc) } } -func (ki *KubernetesInventory) gatherEndpoint(e v1.Endpoints, acc telegraf.Accumulator) error { - if e.Metadata.CreationTimestamp.GetSeconds() == 0 && e.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherEndpoint(e corev1.Endpoints, acc telegraf.Accumulator) { + creationTs := e.GetCreationTimestamp() + if creationTs.IsZero() { + return } fields := map[string]interface{}{ - "created": time.Unix(e.Metadata.CreationTimestamp.GetSeconds(), int64(e.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": e.Metadata.GetGeneration(), + "created": e.GetCreationTimestamp().UnixNano(), + "generation": e.Generation, } tags := map[string]string{ - "endpoint_name": e.Metadata.GetName(), - "namespace": e.Metadata.GetNamespace(), + "endpoint_name": e.Name, + "namespace": e.Namespace, } - for _, endpoint := range e.GetSubsets() { - for _, readyAddr := range endpoint.GetAddresses() { + for _, endpoint := range e.Subsets { + for _, readyAddr := range endpoint.Addresses { fields["ready"] = true - tags["hostname"] = readyAddr.GetHostname() - tags["node_name"] = readyAddr.GetNodeName() + tags["hostname"] = readyAddr.Hostname + if readyAddr.NodeName != nil { + tags["node_name"] = *readyAddr.NodeName + } if readyAddr.TargetRef != nil { - tags[strings.ToLower(readyAddr.GetTargetRef().GetKind())] = readyAddr.GetTargetRef().GetName() + tags[strings.ToLower(readyAddr.TargetRef.Kind)] = readyAddr.TargetRef.Name } - for _, port := range endpoint.GetPorts() { - fields["port"] = port.GetPort() + for _, port := range endpoint.Ports { + fields["port"] = port.Port - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) acc.AddFields(endpointMeasurement, fields, tags) } } - for _, notReadyAddr := range endpoint.GetNotReadyAddresses() { + for _, notReadyAddr := range endpoint.NotReadyAddresses { fields["ready"] = false - tags["hostname"] = notReadyAddr.GetHostname() - tags["node_name"] = notReadyAddr.GetNodeName() + tags["hostname"] = notReadyAddr.Hostname + if notReadyAddr.NodeName != 
nil { + tags["node_name"] = *notReadyAddr.NodeName + } if notReadyAddr.TargetRef != nil { - tags[strings.ToLower(notReadyAddr.GetTargetRef().GetKind())] = notReadyAddr.GetTargetRef().GetName() + tags[strings.ToLower(notReadyAddr.TargetRef.Kind)] = notReadyAddr.TargetRef.Name } - for _, port := range endpoint.GetPorts() { - fields["port"] = port.GetPort() + for _, port := range endpoint.Ports { + fields["port"] = port.Port - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) acc.AddFields(endpointMeasurement, fields, tags) } } } - - return nil } diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go index b88c388162bd2..936a64b72544b 100644 --- a/plugins/inputs/kube_inventory/endpoint_test.go +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -4,9 +4,12 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestEndpoint(t *testing.T) { @@ -18,7 +21,7 @@ func TestEndpoint(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -35,60 +38,60 @@ func TestEndpoint(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/endpoints/": &v1.EndpointsList{ - Items: []*v1.Endpoints{ + Items: []v1.Endpoints{ { - Subsets: []*v1.EndpointSubset{ + Subsets: []v1.EndpointSubset{ { - Addresses: []*v1.EndpointAddress{ + Addresses: []v1.EndpointAddress{ { - Hostname: toStrPtr("storage-6"), + Hostname: "storage-6", NodeName: toStrPtr("b.storage.internal"), TargetRef: &v1.ObjectReference{ - Kind: toStrPtr("pod"), - Name: toStrPtr("storage-6"), + Kind: "pod", + Name: "storage-6", }, }, }, - Ports: []*v1.EndpointPort{ + Ports: []v1.EndpointPort{ { - Name: toStrPtr("server"), - Protocol: toStrPtr("TCP"), - Port: toInt32Ptr(8080), + Name: "server", + Protocol: "TCP", + Port: 8080, }, }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("storage"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "ready": true, - "port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "endpoint_name": "storage", - "namespace": "ns1", - "hostname": "storage-6", - "node_name": "b.storage.internal", - "port_name": "server", - "port_protocol": "TCP", - "pod": "storage-6", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", }, - }, + map[string]interface{}{ + "ready": true, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -97,61 +100,157 @@ func TestEndpoint(t *testing.T) { handler: 
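Both address loops now guard `NodeName`, which is a `*string` in k8s.io/api and may legitimately be absent, so the tag is only emitted when the pointer is set. A sketch of the guard in isolation (the helper name is invented for illustration):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// endpointTags shows the nil-guards the patch adds around optional fields
// of EndpointAddress before dereferencing them.
func endpointTags(addr corev1.EndpointAddress) map[string]string {
	tags := map[string]string{"hostname": addr.Hostname}
	if addr.NodeName != nil {
		tags["node_name"] = *addr.NodeName
	}
	if addr.TargetRef != nil {
		tags["pod"] = addr.TargetRef.Name // simplified; the plugin keys by lowercased Kind
	}
	return tags
}

func main() {
	name := "b.storage.internal"
	fmt.Println(endpointTags(corev1.EndpointAddress{Hostname: "storage-6", NodeName: &name}))
	fmt.Println(endpointTags(corev1.EndpointAddress{Hostname: "storage-12"})) // no node_name tag
}
```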
&mockHandler{ responseMap: map[string]interface{}{ "/endpoints/": &v1.EndpointsList{ - Items: []*v1.Endpoints{ + Items: []v1.Endpoints{ { - Subsets: []*v1.EndpointSubset{ + Subsets: []v1.EndpointSubset{ { - NotReadyAddresses: []*v1.EndpointAddress{ + NotReadyAddresses: []v1.EndpointAddress{ { - Hostname: toStrPtr("storage-6"), + Hostname: "storage-6", NodeName: toStrPtr("b.storage.internal"), TargetRef: &v1.ObjectReference{ - Kind: toStrPtr("pod"), - Name: toStrPtr("storage-6"), + Kind: "pod", + Name: "storage-6", }, }, }, - Ports: []*v1.EndpointPort{ + Ports: []v1.EndpointPort{ { - Name: toStrPtr("server"), - Protocol: toStrPtr("TCP"), - Port: toInt32Ptr(8080), + Name: "server", + Protocol: "TCP", + Port: 8080, }, }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("storage"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "ready": false, - "port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "endpoint_name": "storage", - "namespace": "ns1", - "hostname": "storage-6", - "node_name": "b.storage.internal", - "port_name": "server", - "port_protocol": "TCP", - "pod": "storage-6", + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + map[string]interface{}{ + "ready": false, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "endpoints missing node_name", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{ + Items: []v1.Endpoints{ + { + Subsets: []v1.EndpointSubset{ + { + NotReadyAddresses: []v1.EndpointAddress{ + { + Hostname: "storage-6", + TargetRef: &v1.ObjectReference{ + Kind: "pod", + Name: "storage-6", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "server", + Protocol: "TCP", + Port: 8080, + }, + }, + }, + { + Addresses: []v1.EndpointAddress{ + { + Hostname: "storage-12", + TargetRef: &v1.ObjectReference{ + Kind: "pod", + Name: "storage-12", + }, + }, + }, + Ports: []v1.EndpointPort{ + { + Name: "server", + Protocol: "TCP", + Port: 8080, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "storage", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, }, }, }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + map[string]interface{}{ + "ready": false, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "kubernetes_endpoint", + map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-12", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-12", + }, + map[string]interface{}{ + "ready": true, + "port": int32(8080), + 
"generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, hasError: false, }, } @@ -162,33 +261,19 @@ func TestEndpoint(t *testing.T) { } acc := new(testutil.Accumulator) for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items { - err := ks.gatherEndpoint(*endpoint, acc) - if err != nil { - t.Errorf("Failed to gather endpoint - %s", err.Error()) - } + ks.gatherEndpoint(endpoint, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go index 6d5c8019927cf..f8a966bc15a46 100644 --- a/plugins/inputs/kube_inventory/ingress.go +++ b/plugins/inputs/kube_inventory/ingress.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + netv1 "k8s.io/api/networking/v1" "github.com/influxdata/telegraf" ) @@ -16,45 +15,47 @@ func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *Kubernete return } for _, i := range list.Items { - if err = ki.gatherIngress(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherIngress(i, acc) } } -func (ki *KubernetesInventory) gatherIngress(i v1beta1EXT.Ingress, acc telegraf.Accumulator) error { - if i.Metadata.CreationTimestamp.GetSeconds() == 0 && i.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherIngress(i netv1.Ingress, acc telegraf.Accumulator) { + creationTs := i.GetCreationTimestamp() + if creationTs.IsZero() { + return } fields := map[string]interface{}{ - "created": time.Unix(i.Metadata.CreationTimestamp.GetSeconds(), int64(i.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": i.Metadata.GetGeneration(), + "created": i.GetCreationTimestamp().UnixNano(), + "generation": i.Generation, } tags := map[string]string{ - "ingress_name": i.Metadata.GetName(), - "namespace": i.Metadata.GetNamespace(), + "ingress_name": i.Name, + "namespace": i.Namespace, } - for _, ingress := range i.GetStatus().GetLoadBalancer().GetIngress() { - tags["hostname"] = ingress.GetHostname() - tags["ip"] = ingress.GetIp() + for _, ingress := range i.Status.LoadBalancer.Ingress { + tags["hostname"] = ingress.Hostname + tags["ip"] = ingress.IP - for _, rule := range i.GetSpec().GetRules() { - for _, path := range rule.GetIngressRuleValue().GetHttp().GetPaths() { - fields["backend_service_port"] = 
path.GetBackend().GetServicePort().GetIntVal() - fields["tls"] = i.GetSpec().GetTls() != nil + for _, rule := range i.Spec.Rules { + if rule.IngressRuleValue.HTTP == nil { + continue + } + for _, path := range rule.IngressRuleValue.HTTP.Paths { + if path.Backend.Service != nil { + tags["backend_service_name"] = path.Backend.Service.Name + fields["backend_service_port"] = path.Backend.Service.Port.Number + } - tags["backend_service_name"] = path.GetBackend().GetServiceName() - tags["path"] = path.GetPath() - tags["host"] = rule.GetHost() + fields["tls"] = i.Spec.TLS != nil + + tags["path"] = path.Path + tags["host"] = rule.Host acc.AddFields(ingressMeasurement, fields, tags) } } } - - return nil } diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go index 2d111801a96f3..77ceceaac22ba 100644 --- a/plugins/inputs/kube_inventory/ingress_test.go +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -4,10 +4,13 @@ import ( "testing" "time" - v1 "github.com/ericchiang/k8s/apis/core/v1" - v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestIngress(t *testing.T) { @@ -19,14 +22,14 @@ func TestIngress(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no ingress", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/ingress/": &v1beta1EXT.IngressList{}, + "/ingress/": netv1.IngressList{}, }, }, hasError: false, @@ -35,31 +38,35 @@ func TestIngress(t *testing.T) { name: "collect ingress", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/ingress/": &v1beta1EXT.IngressList{ - Items: []*v1beta1EXT.Ingress{ + "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ { - Status: &v1beta1EXT.IngressStatus{ - LoadBalancer: &v1.LoadBalancerStatus{ - Ingress: []*v1.LoadBalancerIngress{ + Status: netv1.IngressStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ { - Hostname: toStrPtr("chron-1"), - Ip: toStrPtr("1.0.0.127"), + Hostname: "chron-1", + IP: "1.0.0.127", }, }, }, }, - Spec: &v1beta1EXT.IngressSpec{ - Rules: []*v1beta1EXT.IngressRule{ + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ { - Host: toStrPtr("ui.internal"), - IngressRuleValue: &v1beta1EXT.IngressRuleValue{ - Http: &v1beta1EXT.HTTPIngressRuleValue{ - Paths: []*v1beta1EXT.HTTPIngressPath{ + Host: "ui.internal", + IngressRuleValue: netv1.IngressRuleValue{ + HTTP: &netv1.HTTPIngressRuleValue{ + Paths: []netv1.HTTPIngressPath{ { - Path: toStrPtr("/"), - Backend: &v1beta1EXT.IngressBackend{ - ServiceName: toStrPtr("chronografd"), - ServicePort: toIntStrPtrI(8080), + Path: "/", + Backend: netv1.IngressBackend{ + Service: &netv1.IngressServiceBackend{ + Name: "chronografd", + Port: netv1.ServiceBackendPort{ + Number: 8080, + }, + }, }, }, }, @@ -68,38 +75,146 @@ func TestIngress(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("ui-lb"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - 
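gatherIngress gains two nil guards in the hunk above: `rule.IngressRuleValue.HTTP` and `path.Backend.Service` can both be nil in networking/v1 (a rule may carry only a resource backend). A compilable sketch of the same traversal; the helper and its output format are illustrative:

```go
package main

import (
	"fmt"

	netv1 "k8s.io/api/networking/v1"
)

// ingressPaths walks an Ingress the way the patched gatherIngress does,
// guarding each optional level before dereferencing it.
func ingressPaths(i netv1.Ingress) []string {
	var out []string
	for _, rule := range i.Spec.Rules {
		if rule.IngressRuleValue.HTTP == nil {
			continue // e.g. a rule with only a resource backend
		}
		for _, path := range rule.IngressRuleValue.HTTP.Paths {
			line := rule.Host + path.Path
			if path.Backend.Service != nil {
				line += fmt.Sprintf(" -> %s:%d", path.Backend.Service.Name, path.Backend.Service.Port.Number)
			}
			out = append(out, line)
		}
	}
	return out
}

func main() {
	ing := netv1.Ingress{Spec: netv1.IngressSpec{Rules: []netv1.IngressRule{{
		Host: "ui.internal",
		IngressRuleValue: netv1.IngressRuleValue{HTTP: &netv1.HTTPIngressRuleValue{
			Paths: []netv1.HTTPIngressPath{{
				Path: "/",
				Backend: netv1.IngressBackend{Service: &netv1.IngressServiceBackend{
					Name: "chronografd",
					Port: netv1.ServiceBackendPort{Number: 8080},
				}},
			}},
		}},
	}}}}
	fmt.Println(ingressPaths(ing)) // [ui.internal/ -> chronografd:8080]
}
```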
output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "tls": false, - "backend_service_port": int32(8080), - "generation": int64(12), - "created": now.UnixNano(), + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_ingress", + map[string]string{ + "ingress_name": "ui-lb", + "namespace": "ns1", + "ip": "1.0.0.127", + "hostname": "chron-1", + "backend_service_name": "chronografd", + "host": "ui.internal", + "path": "/", + }, + map[string]interface{}{ + "tls": false, + "backend_service_port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no HTTPIngressRuleValue", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ + { + Status: netv1.IngressStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + Hostname: "chron-1", + IP: "1.0.0.127", + }, + }, + }, + }, + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ + { + Host: "ui.internal", + IngressRuleValue: netv1.IngressRuleValue{ + HTTP: nil, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, - Tags: map[string]string{ - "ingress_name": "ui-lb", - "namespace": "ns1", - "ip": "1.0.0.127", - "hostname": "chron-1", - "backend_service_name": "chronografd", - "host": "ui.internal", - "path": "/", + }, + }, + }, + hasError: false, + }, + { + name: "no IngressServiceBackend", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": netv1.IngressList{ + Items: []netv1.Ingress{ + { + Status: netv1.IngressStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + Hostname: "chron-1", + IP: "1.0.0.127", + }, + }, + }, + }, + Spec: netv1.IngressSpec{ + Rules: []netv1.IngressRule{ + { + Host: "ui.internal", + IngressRuleValue: netv1.IngressRuleValue{ + HTTP: &netv1.HTTPIngressRuleValue{ + Paths: []netv1.HTTPIngressPath{ + { + Path: "/", + Backend: netv1.IngressBackend{ + Service: nil, + }, + }, + }, + }, + }, + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "ui-lb", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, }, }, }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_ingress", + map[string]string{ + "ingress_name": "ui-lb", + "namespace": "ns1", + "ip": "1.0.0.127", + "hostname": "chron-1", + "host": "ui.internal", + "path": "/", + }, + map[string]interface{}{ + "tls": false, + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), + }, hasError: false, }, } @@ -109,34 +224,20 @@ func TestIngress(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, ingress := range ((v.handler.responseMap["/ingress/"]).(*v1beta1EXT.IngressList)).Items { - err := ks.gatherIngress(*ingress, acc) - if err != nil { - t.Errorf("Failed to gather ingress - %s", err.Error()) - } + for _, ingress := range ((v.handler.responseMap["/ingress/"]).(netv1.IngressList)).Items { + ks.gatherIngress(ingress, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && 
len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_inventory.go similarity index 55% rename from plugins/inputs/kube_inventory/kube_state.go rename to plugins/inputs/kube_inventory/kube_inventory.go index 0a2a882974e67..de83216073582 100644 --- a/plugins/inputs/kube_inventory/kube_state.go +++ b/plugins/inputs/kube_inventory/kube_inventory.go @@ -1,99 +1,59 @@ +//go:generate ../../../tools/readme_config_includer/generator package kube_inventory import ( "context" + _ "embed" "fmt" - "io/ioutil" - "log" + "os" "strconv" "strings" "sync" "time" - "github.com/kubernetes/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/api/resource" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token" ) // KubernetesInventory represents the config object for the plugin. type KubernetesInventory struct { - URL string `toml:"url"` - BearerToken string `toml:"bearer_token"` - BearerTokenString string `toml:"bearer_token_string"` - Namespace string `toml:"namespace"` - ResponseTimeout internal.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h - ResourceExclude []string `toml:"resource_exclude"` - ResourceInclude []string `toml:"resource_include"` - MaxConfigMapAge internal.Duration `toml:"max_config_map_age"` + URL string `toml:"url"` + BearerToken string `toml:"bearer_token"` + BearerTokenString string `toml:"bearer_token_string"` + Namespace string `toml:"namespace"` + ResponseTimeout config.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h + ResourceExclude []string `toml:"resource_exclude"` + ResourceInclude []string `toml:"resource_include"` + MaxConfigMapAge config.Duration `toml:"max_config_map_age"` SelectorInclude []string `toml:"selector_include"` SelectorExclude []string `toml:"selector_exclude"` + Log telegraf.Logger `toml:"-"` + tls.ClientConfig client *client selectorFilter filter.Filter } -var sampleConfig = ` - ## URL for the Kubernetes API - url = "https://127.0.0.1" - - ## Namespace to use. Set to "" to use all namespaces. - # namespace = "default" - - ## Use bearer token for authorization. 
('bearer_token' takes priority) - ## If both of these are empty, we'll use the default serviceaccount: - ## at: /run/secrets/kubernetes.io/serviceaccount/token - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional Resources to exclude from gathering - ## Leave them with blank with try to gather everything available. - ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes", - ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" - # resource_exclude = [ "deployments", "nodes", "statefulsets" ] - - ## Optional Resources to include when gathering - ## Overrides resource_exclude if both set. - # resource_include = [ "deployments", "nodes", "statefulsets" ] - - ## selectors to include and exclude as tags. Globs accepted. - ## Note that an empty array for both will include all selectors as tags - ## selector_exclude overrides selector_include if both set. - # selector_include = [] - # selector_exclude = ["*"] - - ## Optional TLS Config - # tls_ca = "/path/to/cafile" - # tls_cert = "/path/to/certfile" - # tls_key = "/path/to/keyfile" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -// SampleConfig returns a sample config -func (ki *KubernetesInventory) SampleConfig() string { +func (*KubernetesInventory) SampleConfig() string { return sampleConfig } -// Description returns the description of this plugin -func (ki *KubernetesInventory) Description() string { - return "Read metrics from the Kubernetes api" -} - func (ki *KubernetesInventory) Init() error { // If neither are provided, use the default service account. if ki.BearerToken == "" && ki.BearerTokenString == "" { @@ -101,7 +61,7 @@ func (ki *KubernetesInventory) Init() error { } if ki.BearerToken != "" { - token, err := ioutil.ReadFile(ki.BearerToken) + token, err := os.ReadFile(ki.BearerToken) if err != nil { return err } @@ -109,7 +69,7 @@ func (ki *KubernetesInventory) Init() error { } var err error - ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig) + ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, time.Duration(ki.ResponseTimeout), ki.ClientConfig) if err != nil { return err @@ -166,18 +126,18 @@ func atoi(s string) int64 { if err != nil { return 0 } - return int64(i) + return i } -func convertQuantity(s string, m float64) int64 { +func (ki *KubernetesInventory) convertQuantity(s string, m float64) int64 { q, err := resource.ParseQuantity(s) if err != nil { - log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error()) + ki.Log.Debugf("failed to parse quantity: %s", err.Error()) return 0 } f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64) if err != nil { - log.Printf("D! 
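The renamed file also adopts two newer Telegraf conventions visible in this hunk: the sample configuration is embedded from `sample.conf` via `go:embed` instead of living in an inline string, and `internal.Duration` gives way to `config.Duration`. A minimal sketch of the pattern with an invented `Plugin` type, assuming a `sample.conf` file sits next to the source:

```go
package kube_inventory

import (
	_ "embed"
	"time"

	"github.com/influxdata/telegraf/config"
)

//go:embed sample.conf
var sampleConfig string

// Plugin is an illustrative stand-in for the real KubernetesInventory type.
type Plugin struct {
	ResponseTimeout config.Duration `toml:"response_timeout"`
}

func (*Plugin) SampleConfig() string { return sampleConfig }

// timeout converts the TOML-parsed duration where a time.Duration is
// needed, as the patched Init does when handing it to newClient.
func (p *Plugin) timeout() time.Duration {
	return time.Duration(p.ResponseTimeout)
}
```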
[inputs.kube_inventory] failed to parse float: %s", err.Error()) + ki.Log.Debugf("failed to parse float: %s", err.Error()) return 0 } if m < 1 { @@ -187,11 +147,11 @@ func convertQuantity(s string, m float64) int64 { } func (ki *KubernetesInventory) createSelectorFilters() error { - filter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) + selectorFilter, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude) if err != nil { return err } - ki.selectorFilter = filter + ki.selectorFilter = selectorFilter return nil } @@ -211,7 +171,7 @@ var ( func init() { inputs.Add("kube_inventory", func() telegraf.Input { return &KubernetesInventory{ - ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + ResponseTimeout: config.Duration(time.Second * 5), Namespace: "default", SelectorInclude: []string{}, SelectorExclude: []string{"*"}, diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go index cccf6897f8aa3..b46b4e6209ffc 100644 --- a/plugins/inputs/kube_inventory/node.go +++ b/plugins/inputs/kube_inventory/node.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -15,42 +15,39 @@ func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesI return } for _, n := range list.Items { - if err = ki.gatherNode(*n, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherNode(n, acc) } } -func (ki *KubernetesInventory) gatherNode(n v1.Node, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherNode(n corev1.Node, acc telegraf.Accumulator) { fields := map[string]interface{}{} tags := map[string]string{ - "node_name": *n.Metadata.Name, + "node_name": n.Name, } for resourceName, val := range n.Status.Capacity { switch resourceName { case "cpu": - fields["capacity_cpu_cores"] = atoi(val.GetString_()) + fields["capacity_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["capacity_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["capacity_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["capacity_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": - fields["capacity_pods"] = atoi(val.GetString_()) + fields["capacity_pods"] = atoi(val.String()) } } for resourceName, val := range n.Status.Allocatable { switch resourceName { case "cpu": - fields["allocatable_cpu_cores"] = atoi(val.GetString_()) + fields["allocatable_cpu_cores"] = ki.convertQuantity(val.String(), 1) + fields["allocatable_millicpu_cores"] = ki.convertQuantity(val.String(), 1000) case "memory": - fields["allocatable_memory_bytes"] = convertQuantity(val.GetString_(), 1) + fields["allocatable_memory_bytes"] = ki.convertQuantity(val.String(), 1) case "pods": - fields["allocatable_pods"] = atoi(val.GetString_()) + fields["allocatable_pods"] = atoi(val.String()) } } acc.AddFields(nodeMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go index 7573dd2c06f6d..02f330a7d1a2f 100644 --- a/plugins/inputs/kube_inventory/node_test.go +++ b/plugins/inputs/kube_inventory/node_test.go @@ -4,11 +4,13 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/ericchiang/k8s/apis/resource" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 
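`convertQuantity` parses a Kubernetes quantity string and scales it by a multiplier, which is how the patched gatherNode derives both core and millicore fields from the same value. For intuition, `resource.Quantity`'s own accessors express the equivalent conversions; the literal values below are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A fractional CPU quantity: millicores and (rounded-up) whole cores.
	cpu := resource.MustParse("250m")
	fmt.Println(cpu.MilliValue()) // 250
	fmt.Println(cpu.Value())      // 1 (rounded up to a whole core)

	// A binary-suffixed memory quantity resolves to plain bytes, matching
	// the capacity_memory_bytes value the node test expects.
	mem := resource.MustParse("125817904Ki")
	fmt.Println(mem.Value()) // 128837533696
}
```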
"k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestNode(t *testing.T) { @@ -19,14 +21,14 @@ func TestNode(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no nodes", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/nodes/": &v1.NodeList{}, + "/nodes/": corev1.NodeList{}, }, }, hasError: false, @@ -35,86 +37,87 @@ func TestNode(t *testing.T) { name: "collect nodes", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/nodes/": &v1.NodeList{ - Items: []*v1.Node{ + "/nodes/": corev1.NodeList{ + Items: []corev1.Node{ { - Status: &v1.NodeStatus{ - NodeInfo: &v1.NodeSystemInfo{ - KernelVersion: toStrPtr("4.14.48-coreos-r2"), - OsImage: toStrPtr("Container Linux by CoreOS 1745.7.0 (Rhyolite)"), - ContainerRuntimeVersion: toStrPtr("docker://18.3.1"), - KubeletVersion: toStrPtr("v1.10.3"), - KubeProxyVersion: toStrPtr("v1.10.3"), + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + KernelVersion: "4.14.48-coreos-r2", + OSImage: "Container Linux by CoreOS 1745.7.0 (Rhyolite)", + ContainerRuntimeVersion: "docker://18.3.1", + KubeletVersion: "v1.10.3", + KubeProxyVersion: "v1.10.3", }, - Phase: toStrPtr("Running"), - Capacity: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("16")}, - "ephemeral_storage_bytes": {String_: toStrPtr("49536401408")}, - "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, - "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, - "memory": {String_: toStrPtr("125817904Ki")}, - "pods": {String_: toStrPtr("110")}, + Phase: "Running", + Capacity: corev1.ResourceList{ + "cpu": resource.MustParse("16"), + "ephemeral_storage_bytes": resource.MustParse("49536401408"), + "hugepages_1Gi_bytes": resource.MustParse("0"), + "hugepages_2Mi_bytes": resource.MustParse("0"), + "memory": resource.MustParse("125817904Ki"), + "pods": resource.MustParse("110"), }, - Allocatable: map[string]*resource.Quantity{ - "cpu": {String_: toStrPtr("16")}, - "ephemeral_storage_bytes": {String_: toStrPtr("44582761194")}, - "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, - "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, - "memory": {String_: toStrPtr("125715504Ki")}, - "pods": {String_: toStrPtr("110")}, + Allocatable: corev1.ResourceList{ + "cpu": resource.MustParse("1000m"), + "ephemeral_storage_bytes": resource.MustParse("44582761194"), + "hugepages_1Gi_bytes": resource.MustParse("0"), + "hugepages_2Mi_bytes": resource.MustParse("0"), + "memory": resource.MustParse("125715504Ki"), + "pods": resource.MustParse("110"), }, - Conditions: []*v1.NodeCondition{ - {Type: toStrPtr("Ready"), Status: toStrPtr("true"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}}, - {Type: toStrPtr("OutOfDisk"), Status: toStrPtr("false"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}}, + Conditions: []corev1.NodeCondition{ + {Type: "Ready", Status: "true", LastTransitionTime: metav1.Time{Time: now}}, + {Type: "OutOfDisk", Status: "false", LastTransitionTime: metav1.Time{Time: created}}, }, }, - Spec: &v1.NodeSpec{ - ProviderID: toStrPtr("aws:///us-east-1c/i-0c00"), - Taints: []*v1.Taint{ + Spec: corev1.NodeSpec{ + ProviderID: "aws:///us-east-1c/i-0c00", + Taints: []corev1.Taint{ { - Key: toStrPtr("k1"), - Value: toStrPtr("v1"), - Effect: toStrPtr("NoExecute"), + Key: "k1", + Value: "v1", + Effect: "NoExecute", }, { - Key: 
toStrPtr("k2"), - Value: toStrPtr("v2"), - Effect: toStrPtr("NoSchedule"), + Key: "k2", + Value: "v2", + Effect: "NoSchedule", }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(int64(11232)), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("node1"), + ObjectMeta: metav1.ObjectMeta{ + Generation: 11232, + Namespace: "ns1", + Name: "node1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Measurement: nodeMeasurement, - Fields: map[string]interface{}{ - "capacity_cpu_cores": int64(16), - "capacity_memory_bytes": int64(1.28837533696e+11), - "capacity_pods": int64(110), - "allocatable_cpu_cores": int64(16), - "allocatable_memory_bytes": int64(1.28732676096e+11), - "allocatable_pods": int64(110), - }, - Tags: map[string]string{ - "node_name": "node1", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + nodeMeasurement, + map[string]string{ + "node_name": "node1", }, - }, + map[string]interface{}{ + "capacity_cpu_cores": int64(16), + "capacity_millicpu_cores": int64(16000), + "capacity_memory_bytes": int64(1.28837533696e+11), + "capacity_pods": int64(110), + "allocatable_cpu_cores": int64(1), + "allocatable_millicpu_cores": int64(1000), + "allocatable_memory_bytes": int64(1.28732676096e+11), + "allocatable_pods": int64(110), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -125,48 +128,20 @@ func TestNode(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, node := range ((v.handler.responseMap["/nodes/"]).(*v1.NodeList)).Items { - err := ks.gatherNode(*node, acc) - if err != nil { - t.Errorf("Failed to gather node - %s", err.Error()) - } + for _, node := range ((v.handler.responseMap["/nodes/"]).(corev1.NodeList)).Items { + ks.gatherNode(node, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - measurement := v.output.Metrics[i].Measurement - var keyTag string - switch measurement { - case nodeMeasurement: - keyTag = "node" - } - var j int - for j = range acc.Metrics { - if acc.Metrics[j].Measurement == measurement && - acc.Metrics[j].Tags[keyTag] == v.output.Metrics[i].Tags[keyTag] { - break - } - } - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[j].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, measurement %s, j %d\n", v.name, k, m, acc.Metrics[j].Tags[k], measurement, j) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[j].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), measurement %s, j %d\n", v.name, k, m, m, acc.Metrics[j].Fields[k], acc.Metrics[i].Fields[k], measurement, j) - } - } - } - } + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/persistentvolume.go 
b/plugins/inputs/kube_inventory/persistentvolume.go index 05600522b7ea8..4199dfed9e4c3 100644 --- a/plugins/inputs/kube_inventory/persistentvolume.go +++ b/plugins/inputs/kube_inventory/persistentvolume.go @@ -4,7 +4,7 @@ import ( "context" "strings" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,16 +16,13 @@ func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki return } for _, pv := range list.Items { - if err = ki.gatherPersistentVolume(*pv, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPersistentVolume(pv, acc) } } -func (ki *KubernetesInventory) gatherPersistentVolume(pv v1.PersistentVolume, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherPersistentVolume(pv corev1.PersistentVolume, acc telegraf.Accumulator) { phaseType := 5 - switch strings.ToLower(pv.Status.GetPhase()) { + switch strings.ToLower(string(pv.Status.Phase)) { case "bound": phaseType = 0 case "failed": @@ -41,12 +38,10 @@ func (ki *KubernetesInventory) gatherPersistentVolume(pv v1.PersistentVolume, ac "phase_type": phaseType, } tags := map[string]string{ - "pv_name": pv.Metadata.GetName(), - "phase": pv.Status.GetPhase(), - "storageclass": pv.Spec.GetStorageClassName(), + "pv_name": pv.Name, + "phase": string(pv.Status.Phase), + "storageclass": pv.Spec.StorageClassName, } acc.AddFields(persistentVolumeMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/persistentvolume_test.go b/plugins/inputs/kube_inventory/persistentvolume_test.go index a5d20d047331a..2f62081afb7f6 100644 --- a/plugins/inputs/kube_inventory/persistentvolume_test.go +++ b/plugins/inputs/kube_inventory/persistentvolume_test.go @@ -4,10 +4,12 @@ import ( "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestPersistentVolume(t *testing.T) { @@ -18,14 +20,14 @@ func TestPersistentVolume(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no pv", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumes/": &v1.PersistentVolumeList{}, + "/persistentvolumes/": &corev1.PersistentVolumeList{}, }, }, hasError: false, @@ -34,41 +36,41 @@ func TestPersistentVolume(t *testing.T) { name: "collect pvs", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumes/": &v1.PersistentVolumeList{ - Items: []*v1.PersistentVolume{ + "/persistentvolumes/": &corev1.PersistentVolumeList{ + Items: []corev1.PersistentVolume{ { - Status: &v1.PersistentVolumeStatus{ - Phase: toStrPtr("pending"), + Status: corev1.PersistentVolumeStatus{ + Phase: "pending", }, - Spec: &v1.PersistentVolumeSpec{ - StorageClassName: toStrPtr("ebs-1"), + Spec: corev1.PersistentVolumeSpec{ + StorageClassName: "ebs-1", }, - Metadata: &metav1.ObjectMeta{ - Name: toStrPtr("pv1"), + ObjectMeta: metav1.ObjectMeta{ + Name: "pv1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "phase_type": 2, - }, - 
Tags: map[string]string{ - "pv_name": "pv1", - "storageclass": "ebs-1", - "phase": "pending", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolume", + map[string]string{ + "pv_name": "pv1", + "storageclass": "ebs-1", + "phase": "pending", }, - }, + map[string]interface{}{ + "phase_type": 2, + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -79,34 +81,20 @@ func TestPersistentVolume(t *testing.T) { client: cli, } acc := new(testutil.Accumulator) - for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*v1.PersistentVolumeList)).Items { - err := ks.gatherPersistentVolume(*pv, acc) - if err != nil { - t.Errorf("Failed to gather pv - %s", err.Error()) - } + for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*corev1.PersistentVolumeList)).Items { + ks.gatherPersistentVolume(pv, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go index ac8c9f85a931c..2b06cce6b9fbb 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim.go @@ -4,7 +4,7 @@ import ( "context" "strings" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,16 +16,13 @@ func collectPersistentVolumeClaims(ctx context.Context, acc telegraf.Accumulator return } for _, pvc := range list.Items { - if err = ki.gatherPersistentVolumeClaim(*pvc, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPersistentVolumeClaim(pvc, acc) } } -func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolumeClaim, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc corev1.PersistentVolumeClaim, acc telegraf.Accumulator) { phaseType := 3 - switch strings.ToLower(pvc.Status.GetPhase()) { + switch strings.ToLower(string(pvc.Status.Phase)) { case "bound": phaseType = 0 case "lost": @@ -37,18 +34,20 @@ func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolu "phase_type": phaseType, } tags := map[string]string{ - "pvc_name": pvc.Metadata.GetName(), - "namespace": pvc.Metadata.GetNamespace(), - "phase": pvc.Status.GetPhase(), - "storageclass": pvc.Spec.GetStorageClassName(), + "pvc_name": pvc.Name, + "namespace": pvc.Namespace, + "phase": string(pvc.Status.Phase), + } 
+ if pvc.Spec.StorageClassName != nil { + tags["storageclass"] = *pvc.Spec.StorageClassName } - for key, val := range pvc.GetSpec().GetSelector().GetMatchLabels() { - if ki.selectorFilter.Match(key) { - tags["selector_"+key] = val + if pvc.Spec.Selector != nil { + for key, val := range pvc.Spec.Selector.MatchLabels { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } } } acc.AddFields(persistentVolumeClaimMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go index 5155a5d3ba698..00da84f9f757a 100644 --- a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -1,15 +1,16 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestPersistentVolumeClaim(t *testing.T) { @@ -22,14 +23,14 @@ func TestPersistentVolumeClaim(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { name: "no pv claims", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{}, + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{}, }, }, hasError: false, @@ -38,14 +39,14 @@ func TestPersistentVolumeClaim(t *testing.T) { name: "collect pv claims", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ - Items: []*v1.PersistentVolumeClaim{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ { - Status: &v1.PersistentVolumeClaimStatus{ - Phase: toStrPtr("bound"), + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", }, - Spec: &v1.PersistentVolumeClaimSpec{ - VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", StorageClassName: toStrPtr("ebs-1"), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -54,37 +55,135 @@ func TestPersistentVolumeClaim(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pc1"), + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "phase_type": 0, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no label selectors", + hasError: false, + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: 
[]corev1.PersistentVolumeClaim{ + { + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", + StorageClassName: toStrPtr("ebs-1"), + Selector: nil, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, - Tags: map[string]string{ - "pvc_name": "pc1", - "namespace": "ns1", - "storageclass": "ebs-1", - "phase": "bound", - "selector_select1": "s1", - "selector_select2": "s2", + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + }, + map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "no storage class name", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ + { + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", + StorageClassName: nil, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, }, }, }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_persistentvolumeclaim", + map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "phase": "bound", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "phase_type": 0, + }, + time.Unix(0, 0), + ), + }, hasError: false, }, } @@ -95,37 +194,23 @@ func TestPersistentVolumeClaim(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { - err := ks.gatherPersistentVolumeClaim(*pvc, acc) - if err != nil { - t.Errorf("Failed to gather pvc - %s", err.Error()) - } + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items { + ks.gatherPersistentVolumeClaim(pvc, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + 
continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -135,14 +220,14 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) responseMap := map[string]interface{}{ - "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ - Items: []*v1.PersistentVolumeClaim{ + "/persistentvolumeclaims/": &corev1.PersistentVolumeClaimList{ + Items: []corev1.PersistentVolumeClaim{ { - Status: &v1.PersistentVolumeClaimStatus{ - Phase: toStrPtr("bound"), + Status: corev1.PersistentVolumeClaimStatus{ + Phase: "bound", }, - Spec: &v1.PersistentVolumeClaimSpec{ - VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8", StorageClassName: toStrPtr("ebs-1"), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -151,14 +236,14 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Namespace: toStrPtr("ns1"), - Name: toStrPtr("pc1"), + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "pc1", Labels: map[string]string{ "lab1": "v1", "lab2": "v2", }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -266,13 +351,10 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { - err := ks.gatherPersistentVolumeClaim(*pvc, acc) - if err != nil { - t.Errorf("Failed to gather pvc - %s", err.Error()) - } + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*corev1.PersistentVolumeClaimList)).Items { + ks.gatherPersistentVolumeClaim(pvc, acc) } // Grab selector tags @@ -285,8 +367,7 @@ func TestPersistentVolumeClaimSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go index 2f17f690d08c5..ed95dd63d970d 100644 --- a/plugins/inputs/kube_inventory/pod.go +++ b/plugins/inputs/kube_inventory/pod.go @@ -3,7 +3,7 @@ package kube_inventory import ( "context" - v1 "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -15,30 +15,35 @@ func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesIn return } for _, p := range list.Items { - if err = ki.gatherPod(*p, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherPod(p, acc) } } -func (ki *KubernetesInventory) gatherPod(p v1.Pod, acc telegraf.Accumulator) error { - if p.Metadata.CreationTimestamp.GetSeconds() == 0 && p.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherPod(p corev1.Pod, acc telegraf.Accumulator) { + creationTs := p.GetCreationTimestamp() + if 
+		return
 	}
 
-	for i, cs := range p.Status.ContainerStatuses {
-		c := p.Spec.Containers[i]
-		gatherPodContainer(*p.Spec.NodeName, ki, p, *cs, *c, acc)
+	containerList := map[string]*corev1.ContainerStatus{}
+	for i := range p.Status.ContainerStatuses {
+		containerList[p.Status.ContainerStatuses[i].Name] = &p.Status.ContainerStatuses[i]
 	}
-	return nil
+
+	for _, c := range p.Spec.Containers {
+		cs, ok := containerList[c.Name]
+		if !ok {
+			cs = &corev1.ContainerStatus{}
+		}
+		ki.gatherPodContainer(p, *cs, c, acc)
+	}
 }
 
-func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) {
+func (ki *KubernetesInventory) gatherPodContainer(p corev1.Pod, cs corev1.ContainerStatus, c corev1.Container, acc telegraf.Accumulator) {
 	stateCode := 3
 	stateReason := ""
 	state := "unknown"
+	readiness := "unready"
 
 	switch {
 	case cs.State.Running != nil:
@@ -47,37 +52,46 @@ func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v
 	case cs.State.Terminated != nil:
 		stateCode = 1
 		state = "terminated"
-		stateReason = cs.State.Terminated.GetReason()
+		stateReason = cs.State.Terminated.Reason
 	case cs.State.Waiting != nil:
 		stateCode = 2
 		state = "waiting"
-		stateReason = cs.State.Waiting.GetReason()
+		stateReason = cs.State.Waiting.Reason
 	}
 
-	readiness := "unready"
-	if cs.GetReady() {
+	if cs.Ready {
 		readiness = "ready"
 	}
 
 	fields := map[string]interface{}{
-		"restarts_total": cs.GetRestartCount(),
-		"state_code": stateCode,
-		"terminated_reason": cs.State.Terminated.GetReason(),
+		"restarts_total": cs.RestartCount,
+		"state_code": stateCode,
+	}
+
+	// deprecated in 1.15: use `state_reason` instead
+	if state == "terminated" {
+		fields["terminated_reason"] = stateReason
 	}
 
 	if stateReason != "" {
 		fields["state_reason"] = stateReason
 	}
 
+	phaseReason := p.Status.Reason
+	if phaseReason != "" {
+		fields["phase_reason"] = phaseReason
+	}
+
 	tags := map[string]string{
-		"container_name": *c.Name,
-		"namespace": *p.Metadata.Namespace,
-		"node_name": *p.Spec.NodeName,
-		"pod_name": *p.Metadata.Name,
+		"container_name": c.Name,
+		"namespace": p.Namespace,
+		"node_name": p.Spec.NodeName,
+		"pod_name": p.Name,
+		"phase": string(p.Status.Phase),
 		"state": state,
 		"readiness": readiness,
 	}
 
-	for key, val := range p.GetSpec().GetNodeSelector() {
+	for key, val := range p.Spec.NodeSelector {
 		if ki.selectorFilter.Match(key) {
 			tags["node_selector_"+key] = val
 		}
@@ -89,17 +103,17 @@ func gatherPodContainer(nodeName string, ki *KubernetesInventory, p v1.Pod, cs v
 	for resourceName, val := range req {
 		switch resourceName {
 		case "cpu":
-			fields["resource_requests_millicpu_units"] = convertQuantity(val.GetString_(), 1000)
+			fields["resource_requests_millicpu_units"] = ki.convertQuantity(val.String(), 1000)
 		case "memory":
-			fields["resource_requests_memory_bytes"] = convertQuantity(val.GetString_(), 1)
+			fields["resource_requests_memory_bytes"] = ki.convertQuantity(val.String(), 1)
 		}
 	}
 	for resourceName, val := range lim {
 		switch resourceName {
 		case "cpu":
-			fields["resource_limits_millicpu_units"] = convertQuantity(val.GetString_(), 1000)
+			fields["resource_limits_millicpu_units"] = ki.convertQuantity(val.String(), 1000)
 		case "memory":
-			fields["resource_limits_memory_bytes"] = convertQuantity(val.GetString_(), 1)
+			fields["resource_limits_memory_bytes"] = ki.convertQuantity(val.String(), 1)
 		}
 	}
diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go
index d9b3221655027..962805a67e3a3 100644
--- a/plugins/inputs/kube_inventory/pod_test.go
+++ b/plugins/inputs/kube_inventory/pod_test.go
@@ -1,15 +1,17 @@
 package kube_inventory
 
 import (
-	"reflect"
 	"strings"
 	"testing"
 	"time"
 
-	v1 "github.com/ericchiang/k8s/apis/core/v1"
-	metav1 "github.com/ericchiang/k8s/apis/meta/v1"
-	"github.com/ericchiang/k8s/apis/resource"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
 )
 
 func TestPod(t *testing.T) {
@@ -18,21 +20,21 @@ func TestPod(t *testing.T) {
 	selectExclude := []string{}
 	now := time.Now()
 	started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location())
-	created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location())
+	created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 0, 0, now.Location())
 	cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location())
 	cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location())
 
 	tests := []struct {
 		name     string
 		handler  *mockHandler
-		output   *testutil.Accumulator
+		output   []telegraf.Metric
 		hasError bool
 	}{
 		{
 			name: "no pods",
 			handler: &mockHandler{
 				responseMap: map[string]interface{}{
-					"/pods/": &v1.PodList{},
+					"/pods/": &corev1.PodList{},
 				},
 			},
 			hasError: false,
@@ -41,79 +43,79 @@ func TestPod(t *testing.T) {
 			name: "collect pods",
 			handler: &mockHandler{
 				responseMap: map[string]interface{}{
-					"/pods/": &v1.PodList{
-						Items: []*v1.Pod{
+					"/pods/": &corev1.PodList{
+						Items: []corev1.Pod{
 							{
-								Spec: &v1.PodSpec{
-									NodeName: toStrPtr("node1"),
-									Containers: []*v1.Container{
+								Spec: corev1.PodSpec{
+									NodeName: "node1",
+									Containers: []corev1.Container{
 										{
-											Name: toStrPtr("running"),
-											Image: toStrPtr("image1"),
-											Ports: []*v1.ContainerPort{
+											Name: "running",
+											Image: "image1",
+											Ports: []corev1.ContainerPort{
 												{
-													ContainerPort: toInt32Ptr(8080),
-													Protocol: toStrPtr("TCP"),
+													ContainerPort: 8080,
+													Protocol: "TCP",
 												},
 											},
-											Resources: &v1.ResourceRequirements{
-												Limits: map[string]*resource.Quantity{
-													"cpu": {String_: toStrPtr("100m")},
+											Resources: corev1.ResourceRequirements{
+												Limits: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
 												},
-												Requests: map[string]*resource.Quantity{
-													"cpu": {String_: toStrPtr("100m")},
+												Requests: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
 												},
 											},
 										},
 										{
-											Name: toStrPtr("completed"),
-											Image: toStrPtr("image1"),
-											Ports: []*v1.ContainerPort{
+											Name: "completed",
+											Image: "image1",
+											Ports: []corev1.ContainerPort{
 												{
-													ContainerPort: toInt32Ptr(8080),
-													Protocol: toStrPtr("TCP"),
+													ContainerPort: 8080,
+													Protocol: "TCP",
 												},
 											},
-											Resources: &v1.ResourceRequirements{
-												Limits: map[string]*resource.Quantity{
-													"cpu": {String_: toStrPtr("100m")},
+											Resources: corev1.ResourceRequirements{
+												Limits: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
 												},
-												Requests: map[string]*resource.Quantity{
-													"cpu": {String_: toStrPtr("100m")},
+												Requests: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
 												},
 											},
 										},
 										{
-											Name: toStrPtr("waiting"),
-											Image: toStrPtr("image1"),
-											Ports: []*v1.ContainerPort{
+											Name: "waiting",
+											Image: "image1",
+											Ports: []corev1.ContainerPort{
 												{
-													ContainerPort: toInt32Ptr(8080),
-													Protocol: toStrPtr("TCP"),
+													ContainerPort: 8080,
+													Protocol: "TCP",
 												},
 											},
-											Resources: &v1.ResourceRequirements{
-												Limits: map[string]*resource.Quantity{
-													"cpu": {String_: toStrPtr("100m")},
+											Resources: corev1.ResourceRequirements{
+												Limits: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
 												},
-												Requests: map[string]*resource.Quantity{
-													"cpu": {String_: toStrPtr("100m")},
+												Requests: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
 												},
 											},
 										},
 									},
-									Volumes: []*v1.Volume{
+									Volumes: []corev1.Volume{
 										{
-											Name: toStrPtr("vol1"),
-											VolumeSource: &v1.VolumeSource{
-												PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-													ClaimName: toStrPtr("pc1"),
-													ReadOnly: toBoolPtr(true),
+											Name: "vol1",
+											VolumeSource: corev1.VolumeSource{
+												PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+													ClaimName: "pc1",
+													ReadOnly: true,
 												},
 											},
 										},
 										{
-											Name: toStrPtr("vol2"),
+											Name: "vol2",
 										},
 									},
 									NodeSelector: map[string]string{
@@ -121,153 +123,162 @@ func TestPod(t *testing.T) {
 										"select2": "s2",
 									},
 								},
-								Status: &v1.PodStatus{
-									Phase: toStrPtr("Running"),
-									HostIP: toStrPtr("180.12.10.18"),
-									PodIP: toStrPtr("10.244.2.15"),
-									StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())},
-									Conditions: []*v1.PodCondition{
+								Status: corev1.PodStatus{
+									Phase: "Running",
+									HostIP: "180.12.10.18",
+									PodIP: "10.244.2.15",
+									StartTime: &metav1.Time{Time: started},
+									Conditions: []corev1.PodCondition{
 										{
-											Type: toStrPtr("Initialized"),
-											Status: toStrPtr("True"),
-											LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())},
+											Type: "Initialized",
+											Status: "True",
+											LastTransitionTime: metav1.Time{Time: cond1},
 										},
 										{
-											Type: toStrPtr("Ready"),
-											Status: toStrPtr("True"),
-											LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
+											Type: "Ready",
+											Status: "True",
+											LastTransitionTime: metav1.Time{Time: cond2},
 										},
 										{
-											Type: toStrPtr("Scheduled"),
-											Status: toStrPtr("True"),
-											LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())},
+											Type: "Scheduled",
+											Status: "True",
+											LastTransitionTime: metav1.Time{Time: cond1},
 										},
 									},
-									ContainerStatuses: []*v1.ContainerStatus{
+									ContainerStatuses: []corev1.ContainerStatus{
 										{
-											Name: toStrPtr("running"),
-											State: &v1.ContainerState{
-												Running: &v1.ContainerStateRunning{
-													StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
+											Name: "running",
+											State: corev1.ContainerState{
+												Running: &corev1.ContainerStateRunning{
+													StartedAt: metav1.Time{Time: started},
 												},
 											},
-											Ready: toBoolPtr(true),
-											RestartCount: toInt32Ptr(3),
-											Image: toStrPtr("image1"),
-											ImageID: toStrPtr("image_id1"),
-											ContainerID: toStrPtr("docker://54abe32d0094479d3d"),
+											Ready: true,
+											RestartCount: 3,
+											Image: "image1",
+											ImageID: "image_id1",
+											ContainerID: "docker://54abe32d0094479d3d",
 										},
 										{
-											Name: toStrPtr("completed"),
-											State: &v1.ContainerState{
-												Terminated: &v1.ContainerStateTerminated{
-													StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
-													ExitCode: toInt32Ptr(0),
-													Reason: toStrPtr("Completed"),
+											Name: "completed",
+											State: corev1.ContainerState{
+												Terminated: &corev1.ContainerStateTerminated{
+													StartedAt: metav1.Time{Time: now},
+													ExitCode: 0,
+													Reason: "Completed",
 												},
 											},
-											Ready: toBoolPtr(false),
-											RestartCount: toInt32Ptr(3),
-											Image: toStrPtr("image1"),
-											ImageID: toStrPtr("image_id1"),
-											ContainerID: toStrPtr("docker://54abe32d0094479d3d"),
+											Ready: false,
+											RestartCount: 3,
+											Image: "image1",
+											ImageID: "image_id1",
+											ContainerID: "docker://54abe32d0094479d3d",
 										},
 										{
-											Name: toStrPtr("waiting"),
-											State: &v1.ContainerState{
-												Waiting: &v1.ContainerStateWaiting{
-													Reason: toStrPtr("PodUninitialized"),
+											Name: "waiting",
+											State: corev1.ContainerState{
+												Waiting: &corev1.ContainerStateWaiting{
+													Reason: "PodUninitialized",
 												},
 											},
-											Ready: toBoolPtr(false),
-											RestartCount: toInt32Ptr(3),
-											Image: toStrPtr("image1"),
-											ImageID: toStrPtr("image_id1"),
-											ContainerID: toStrPtr("docker://54abe32d0094479d3d"),
+											Ready: false,
+											RestartCount: 3,
+											Image: "image1",
+											ImageID: "image_id1",
+											ContainerID: "docker://54abe32d0094479d3d",
 										},
 									},
 								},
-								Metadata: &metav1.ObjectMeta{
-									OwnerReferences: []*metav1.OwnerReference{
+								ObjectMeta: metav1.ObjectMeta{
+									OwnerReferences: []metav1.OwnerReference{
 										{
-											ApiVersion: toStrPtr("apps/v1"),
-											Kind: toStrPtr("DaemonSet"),
-											Name: toStrPtr("forwarder"),
+											APIVersion: "apps/v1",
+											Kind: "DaemonSet",
+											Name: "forwarder",
 											Controller: toBoolPtr(true),
 										},
 									},
-									Generation: toInt64Ptr(11232),
-									Namespace: toStrPtr("ns1"),
-									Name: toStrPtr("pod1"),
+									Generation: 11232,
+									Namespace: "ns1",
+									Name: "pod1",
 									Labels: map[string]string{
 										"lab1": "v1",
 										"lab2": "v2",
 									},
-									CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())},
+									CreationTimestamp: metav1.Time{Time: created},
 								},
 							},
 						},
 					},
 				},
 			},
-			output: &testutil.Accumulator{
-				Metrics: []*testutil.Metric{
-					{
-						Measurement: podContainerMeasurement,
-						Fields: map[string]interface{}{
-							"restarts_total": int32(3),
-							"state_code": 0,
-							"resource_requests_millicpu_units": int64(100),
-							"resource_limits_millicpu_units": int64(100),
-						},
-						Tags: map[string]string{
-							"namespace": "ns1",
-							"container_name": "running",
-							"node_name": "node1",
-							"pod_name": "pod1",
-							"state": "running",
-							"readiness": "ready",
-							"node_selector_select1": "s1",
-							"node_selector_select2": "s2",
-						},
-					},
-					{
-						Measurement: podContainerMeasurement,
-						Fields: map[string]interface{}{
-							"restarts_total": int32(3),
-							"state_code": 1,
-							"state_reason": "Completed",
-							"resource_requests_millicpu_units": int64(100),
-							"resource_limits_millicpu_units": int64(100),
-						},
-						Tags: map[string]string{
-							"namespace": "ns1",
-							"container_name": "completed",
-							"node_name": "node1",
-							"pod_name": "pod1",
-							"state": "terminated",
-							"readiness": "unready",
-						},
-					},
-					{
-						Measurement: podContainerMeasurement,
-						Fields: map[string]interface{}{
-							"restarts_total": int32(3),
-							"state_code": 2,
-							"state_reason": "PodUninitialized",
-							"resource_requests_millicpu_units": int64(100),
-							"resource_limits_millicpu_units": int64(100),
-						},
-						Tags: map[string]string{
-							"namespace": "ns1",
-							"container_name": "waiting",
-							"node_name": "node1",
-							"pod_name": "pod1",
-							"state": "waiting",
-							"readiness": "unready",
-						},
-					},
-				},
+			output: []telegraf.Metric{
+				testutil.MustMetric(
+					podContainerMeasurement,
+					map[string]string{
+						"namespace": "ns1",
+						"container_name": "running",
+						"node_name": "node1",
+						"pod_name": "pod1",
+						"phase": "Running",
+						"state": "running",
+						"readiness": "ready",
+						"node_selector_select1": "s1",
+						"node_selector_select2": "s2",
+					},
+					map[string]interface{}{
+						"restarts_total": int32(3),
+						"state_code": 0,
+						"resource_requests_millicpu_units": int64(100),
+						"resource_limits_millicpu_units": int64(100),
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					podContainerMeasurement,
+					map[string]string{
+						"namespace": "ns1",
+						"container_name": "completed",
+						"node_name": "node1",
+						"pod_name": "pod1",
+						"phase": "Running",
+						"state": "terminated",
+						"readiness": "unready",
+						"node_selector_select1": "s1",
+						"node_selector_select2": "s2",
+					},
+					map[string]interface{}{
+						"restarts_total": int32(3),
+						"state_code": 1,
+						"state_reason": "Completed",
+						"resource_requests_millicpu_units": int64(100),
+						"resource_limits_millicpu_units": int64(100),
+						"terminated_reason": "Completed",
+					},
+					time.Unix(0, 0),
+				),
+				testutil.MustMetric(
+					podContainerMeasurement,
+					map[string]string{
+						"namespace": "ns1",
+						"container_name": "waiting",
+						"node_name": "node1",
+						"pod_name": "pod1",
+						"phase": "Running",
+						"state": "waiting",
+						"readiness": "unready",
+						"node_selector_select1": "s1",
+						"node_selector_select2": "s2",
+					},
+					map[string]interface{}{
+						"restarts_total": int32(3),
+						"state_code": 2,
+						"state_reason": "PodUninitialized",
+						"resource_requests_millicpu_units": int64(100),
+						"resource_limits_millicpu_units": int64(100),
+					},
+					time.Unix(0, 0),
+				),
 			},
 			hasError: false,
 		},
@@ -278,37 +289,23 @@ func TestPod(t *testing.T) {
 			SelectorInclude: selectInclude,
 			SelectorExclude: selectExclude,
 		}
-		ks.createSelectorFilters()
+		require.NoError(t, ks.createSelectorFilters())
 		acc := new(testutil.Accumulator)
-		for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items {
-			err := ks.gatherPod(*pod, acc)
-			if err != nil {
-				t.Errorf("Failed to gather pod - %s", err.Error())
-			}
+		for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items {
+			ks.gatherPod(pod, acc)
 		}
 
 		err := acc.FirstError()
-		if err == nil && v.hasError {
-			t.Fatalf("%s failed, should have error", v.name)
-		} else if err != nil && !v.hasError {
-			t.Fatalf("%s failed, err: %v", v.name, err)
-		}
-		if v.output == nil && len(acc.Metrics) > 0 {
-			t.Fatalf("%s: collected extra data", v.name)
-		} else if v.output != nil && len(v.output.Metrics) > 0 {
-			for i := range v.output.Metrics {
-				for k, m := range v.output.Metrics[i].Tags {
-					if acc.Metrics[i].Tags[k] != m {
-						t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, i %d\n", v.name, k, m, acc.Metrics[i].Tags[k], i)
-					}
-				}
-				for k, m := range v.output.Metrics[i].Fields {
-					if acc.Metrics[i].Fields[k] != m {
-						t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), i %d\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k], i)
-					}
-				}
-			}
+		if v.hasError {
+			require.Errorf(t, err, "%s failed, should have error", v.name)
+			continue
 		}
+
+		// No error case
+		require.NoErrorf(t, err, "%s failed, err: %v", v.name, err)
+
+		require.Len(t, acc.Metrics, len(v.output))
+		testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime())
 	}
 }
@@ -321,43 +318,43 @@ func TestPodSelectorFilter(t *testing.T) {
 	cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location())
 
 	responseMap := map[string]interface{}{
-		"/pods/": &v1.PodList{
-			Items: []*v1.Pod{
+		"/pods/": &corev1.PodList{
+			Items: []corev1.Pod{
 				{
-					Spec: &v1.PodSpec{
-						NodeName: toStrPtr("node1"),
-						Containers: []*v1.Container{
+					Spec: corev1.PodSpec{
+						NodeName: "node1",
+						Containers: []corev1.Container{
 							{
-								Name: toStrPtr("forwarder"),
-								Image: toStrPtr("image1"),
-								Ports: []*v1.ContainerPort{
+								Name: "forwarder",
+								Image: "image1",
+								Ports: []corev1.ContainerPort{
 									{
-										ContainerPort: toInt32Ptr(8080),
-										Protocol: toStrPtr("TCP"),
+										ContainerPort: 8080,
+										Protocol: "TCP",
 									},
 								},
-								Resources: &v1.ResourceRequirements{
-									Limits: map[string]*resource.Quantity{
-										"cpu": {String_: toStrPtr("100m")},
+								Resources: corev1.ResourceRequirements{
+									Limits: corev1.ResourceList{
+										"cpu": resource.MustParse("100m"),
 									},
-									Requests: map[string]*resource.Quantity{
-										"cpu": {String_: toStrPtr("100m")},
+									Requests: corev1.ResourceList{
+										"cpu": resource.MustParse("100m"),
 									},
 								},
 							},
 						},
-						Volumes: []*v1.Volume{
+						Volumes: []corev1.Volume{
 							{
-								Name: toStrPtr("vol1"),
-								VolumeSource: &v1.VolumeSource{
-									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-										ClaimName: toStrPtr("pc1"),
-										ReadOnly: toBoolPtr(true),
+								Name: "vol1",
+								VolumeSource: corev1.VolumeSource{
+									PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+										ClaimName: "pc1",
+										ReadOnly: true,
 									},
 								},
 							},
 							{
-								Name: toStrPtr("vol2"),
+								Name: "vol2",
 							},
 						},
 						NodeSelector: map[string]string{
@@ -365,61 +362,61 @@ func TestPodSelectorFilter(t *testing.T) {
 							"select2": "s2",
 						},
 					},
-					Status: &v1.PodStatus{
-						Phase: toStrPtr("Running"),
-						HostIP: toStrPtr("180.12.10.18"),
-						PodIP: toStrPtr("10.244.2.15"),
-						StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())},
-						Conditions: []*v1.PodCondition{
+					Status: corev1.PodStatus{
+						Phase: "Running",
+						HostIP: "180.12.10.18",
+						PodIP: "10.244.2.15",
+						StartTime: &metav1.Time{Time: started},
+						Conditions: []corev1.PodCondition{
 							{
-								Type: toStrPtr("Initialized"),
-								Status: toStrPtr("True"),
-								LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())},
+								Type: "Initialized",
+								Status: "True",
+								LastTransitionTime: metav1.Time{Time: cond1},
 							},
 							{
-								Type: toStrPtr("Ready"),
-								Status: toStrPtr("True"),
-								LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
+								Type: "Ready",
+								Status: "True",
+								LastTransitionTime: metav1.Time{Time: cond2},
 							},
 							{
-								Type: toStrPtr("Scheduled"),
-								Status: toStrPtr("True"),
-								LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())},
+								Type: "Scheduled",
+								Status: "True",
+								LastTransitionTime: metav1.Time{Time: cond1},
 							},
 						},
-						ContainerStatuses: []*v1.ContainerStatus{
+						ContainerStatuses: []corev1.ContainerStatus{
 							{
-								Name: toStrPtr("forwarder"),
-								State: &v1.ContainerState{
-									Running: &v1.ContainerStateRunning{
-										StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())},
+								Name: "forwarder",
+								State: corev1.ContainerState{
+									Running: &corev1.ContainerStateRunning{
+										StartedAt: metav1.Time{Time: now},
 									},
 								},
-								Ready: toBoolPtr(true),
-								RestartCount: toInt32Ptr(3),
-								Image: toStrPtr("image1"),
-								ImageID: toStrPtr("image_id1"),
-								ContainerID: toStrPtr("docker://54abe32d0094479d3d"),
+								Ready: true,
+								RestartCount: 3,
+								Image: "image1",
+								ImageID: "image_id1",
+								ContainerID: "docker://54abe32d0094479d3d",
 							},
 						},
 					},
-					Metadata: &metav1.ObjectMeta{
-						OwnerReferences: []*metav1.OwnerReference{
+					ObjectMeta: metav1.ObjectMeta{
+						OwnerReferences: []metav1.OwnerReference{
 							{
-								ApiVersion: toStrPtr("apps/v1"),
-								Kind: toStrPtr("DaemonSet"),
-								Name: toStrPtr("forwarder"),
+								APIVersion: "apps/v1",
+								Kind: "DaemonSet",
+								Name: "forwarder",
 								Controller: toBoolPtr(true),
 							},
 						},
-						Generation: toInt64Ptr(11232),
-						Namespace: toStrPtr("ns1"),
-						Name: toStrPtr("pod1"),
+						Generation: 11232,
+						Namespace: "ns1",
+						Name: "pod1",
 						Labels: map[string]string{
 							"lab1": "v1",
 							"lab2": "v2",
 						},
-						CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())},
+						CreationTimestamp: metav1.Time{Time: created},
 					},
 				},
 			},
@@ -527,13 +524,10 @@ func TestPodSelectorFilter(t *testing.T) {
 		}
 		ks.SelectorInclude = v.include
 		ks.SelectorExclude = v.exclude
-		ks.createSelectorFilters()
+		require.NoError(t, ks.createSelectorFilters())
 		acc := new(testutil.Accumulator)
-		for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items {
-			err := ks.gatherPod(*pod, acc)
-			if err != nil {
-				t.Errorf("Failed to gather pod - %s", err.Error())
-			}
+		for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items {
+			ks.gatherPod(pod, acc)
 		}
 
 		// Grab selector tags
@@ -546,8 +540,212 @@ func TestPodSelectorFilter(t *testing.T) {
 			}
 		}
 
-		if !reflect.DeepEqual(v.expected, actual) {
-			t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
+		require.Equalf(t, v.expected, actual,
+			"actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
+	}
+}
+
+func TestPodPendingContainers(t *testing.T) {
+	cli := &client{}
+	selectInclude := []string{}
+	selectExclude := []string{}
+	now := time.Now()
+	started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location())
+	created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location())
+	cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location())
+	cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location())
+
+	tests := []struct {
+		name     string
+		handler  *mockHandler
+		output   []telegraf.Metric
+		hasError bool
+	}{
+		{
+			name: "collect pods",
+			handler: &mockHandler{
+				responseMap: map[string]interface{}{
+					"/pods/": &corev1.PodList{
+						Items: []corev1.Pod{
+							{
+								Spec: corev1.PodSpec{
+									NodeName: "node1",
+									Containers: []corev1.Container{
+										{
+											Name: "waiting",
+											Image: "image1",
+											Ports: []corev1.ContainerPort{
+												{
+													ContainerPort: 8080,
+													Protocol: "TCP",
+												},
+											},
+											Resources: corev1.ResourceRequirements{
+												Limits: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
+												},
+												Requests: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
+												},
+											},
+										},
+										{
+											Name: "terminated",
+											Image: "image1",
+											Ports: []corev1.ContainerPort{
+												{
+													ContainerPort: 8080,
+													Protocol: "TCP",
+												},
+											},
+											Resources: corev1.ResourceRequirements{
+												Limits: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
+												},
+												Requests: corev1.ResourceList{
+													"cpu": resource.MustParse("100m"),
+												},
+											},
+										},
+									},
+									Volumes: []corev1.Volume{
+										{
+											Name: "vol1",
+											VolumeSource: corev1.VolumeSource{
+												PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+													ClaimName: "pc1",
+													ReadOnly: true,
+												},
+											},
+										},
+										{
+											Name: "vol2",
+										},
+									},
+									NodeSelector: map[string]string{
+										"select1": "s1",
+										"select2": "s2",
+									},
+								},
+								Status: corev1.PodStatus{
+									Phase: "Pending",
+									Reason: "NetworkNotReady",
+									HostIP: "180.12.10.18",
+									PodIP: "10.244.2.15",
+									StartTime: &metav1.Time{Time: started},
+									Conditions: []corev1.PodCondition{
+										{
+											Type: "Initialized",
+											Status: "True",
+											LastTransitionTime: metav1.Time{Time: cond1},
+										},
+										{
+											Type: "Ready",
+											Status: "True",
+											LastTransitionTime: metav1.Time{Time: cond2},
+										},
+										{
+											Type: "Scheduled",
+											Status: "True",
+											LastTransitionTime: metav1.Time{Time: cond1},
+										},
+									},
+									ContainerStatuses: []corev1.ContainerStatus{},
+								},
+								ObjectMeta: metav1.ObjectMeta{
+									OwnerReferences: []metav1.OwnerReference{
+										{
+											APIVersion: "apps/v1",
+											Kind: "DaemonSet",
+											Name: "forwarder",
+											Controller: toBoolPtr(true),
+										},
+									},
+									Generation: 11232,
+									Namespace: "ns1",
+									Name: "pod1",
+									Labels: map[string]string{
+										"lab1": "v1",
+										"lab2": "v2",
+									},
+									CreationTimestamp: metav1.Time{Time: created},
+								},
+							},
+						},
+					},
+				},
+			},
+			output: []telegraf.Metric{
+				testutil.MustMetric(
+					podContainerMeasurement,
+					map[string]string{
+						"namespace": "ns1",
+						"container_name": "waiting",
+						"node_name": "node1",
+						"pod_name": "pod1",
+						"phase": "Pending",
+						"state": "unknown",
+						"readiness": "unready",
+						"node_selector_select1": "s1",
+						"node_selector_select2": "s2",
+					},
+					map[string]interface{}{
+						"phase_reason": "NetworkNotReady",
+						"restarts_total": int32(0),
+						"state_code": 3,
+						"resource_requests_millicpu_units": int64(100),
"resource_limits_millicpu_units": int64(100), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + podContainerMeasurement, + map[string]string{ + "namespace": "ns1", + "container_name": "terminated", + "node_name": "node1", + "pod_name": "pod1", + "phase": "Pending", + "state": "unknown", + "readiness": "unready", + "node_selector_select1": "s1", + "node_selector_select2": "s2", + }, + map[string]interface{}{ + "phase_reason": "NetworkNotReady", + "restarts_total": int32(0), + "state_code": 3, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + SelectorInclude: selectInclude, + SelectorExclude: selectExclude, } + require.NoError(t, ks.createSelectorFilters()) + acc := new(testutil.Accumulator) + for _, pod := range ((v.handler.responseMap["/pods/"]).(*corev1.PodList)).Items { + ks.gatherPod(pod, acc) + } + + err := acc.FirstError() + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue + } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } diff --git a/plugins/inputs/kube_inventory/sample.conf b/plugins/inputs/kube_inventory/sample.conf new file mode 100644 index 0000000000000..d9c508a08bad1 --- /dev/null +++ b/plugins/inputs/kube_inventory/sample.conf @@ -0,0 +1,48 @@ +# Read metrics from the Kubernetes api +[[inputs.kube_inventory]] + ## URL for the Kubernetes API + url = "https://127.0.0.1" + + ## Namespace to use. Set to "" to use all namespaces. + # namespace = "default" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional Resources to exclude from gathering + ## Leave them with blank with try to gather everything available. + ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes", + ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" + # resource_exclude = [ "deployments", "nodes", "statefulsets" ] + + ## Optional Resources to include when gathering + ## Overrides resource_exclude if both set. + # resource_include = [ "deployments", "nodes", "statefulsets" ] + + ## selectors to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all selectors as tags + ## selector_exclude overrides selector_include if both set. + # selector_include = [] + # selector_exclude = ["*"] + + ## Optional TLS Config + ## Trusted root certificates for server + # tls_ca = "/path/to/cafile" + ## Used for TLS client certificate authentication + # tls_cert = "/path/to/certfile" + ## Used for TLS client certificate authentication + # tls_key = "/path/to/keyfile" + ## Send the specified TLS server name via SNI + # tls_server_name = "kubernetes.example.com" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Uncomment to remove deprecated metrics. 
+ # fielddrop = ["terminated_reason"] diff --git a/plugins/inputs/kube_inventory/service.go b/plugins/inputs/kube_inventory/service.go index 0c749ea8ac3fc..d589188605c85 100644 --- a/plugins/inputs/kube_inventory/service.go +++ b/plugins/inputs/kube_inventory/service.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/core/v1" + corev1 "k8s.io/api/core/v1" "github.com/influxdata/telegraf" ) @@ -16,53 +15,51 @@ func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *Kubernet return } for _, i := range list.Items { - if err = ki.gatherService(*i, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherService(i, acc) } } -func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumulator) error { - if s.Metadata.CreationTimestamp.GetSeconds() == 0 && s.Metadata.CreationTimestamp.GetNanos() == 0 { - return nil +func (ki *KubernetesInventory) gatherService(s corev1.Service, acc telegraf.Accumulator) { + creationTs := s.GetCreationTimestamp() + if creationTs.IsZero() { + return } fields := map[string]interface{}{ - "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": s.Metadata.GetGeneration(), + "created": s.GetCreationTimestamp().UnixNano(), + "generation": s.Generation, } tags := map[string]string{ - "service_name": s.Metadata.GetName(), - "namespace": s.Metadata.GetNamespace(), + "service_name": s.Name, + "namespace": s.Namespace, } - for key, val := range s.GetSpec().GetSelector() { + for key, val := range s.Spec.Selector { if ki.selectorFilter.Match(key) { tags["selector_"+key] = val } } var getPorts = func() { - for _, port := range s.GetSpec().GetPorts() { - fields["port"] = port.GetPort() - fields["target_port"] = port.GetTargetPort().GetIntVal() + for _, port := range s.Spec.Ports { + fields["port"] = port.Port + fields["target_port"] = port.TargetPort.IntVal - tags["port_name"] = port.GetName() - tags["port_protocol"] = port.GetProtocol() + tags["port_name"] = port.Name + tags["port_protocol"] = string(port.Protocol) - if s.GetSpec().GetType() == "ExternalName" { - tags["external_name"] = s.GetSpec().GetExternalName() + if s.Spec.Type == "ExternalName" { + tags["external_name"] = s.Spec.ExternalName } else { - tags["cluster_ip"] = s.GetSpec().GetClusterIP() + tags["cluster_ip"] = s.Spec.ClusterIP } acc.AddFields(serviceMeasurement, fields, tags) } } - if externIPs := s.GetSpec().GetExternalIPs(); externIPs != nil { + if externIPs := s.Spec.ExternalIPs; externIPs != nil { for _, ip := range externIPs { tags["ip"] = ip @@ -71,6 +68,4 @@ func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumula } else { getPorts() } - - return nil } diff --git a/plugins/inputs/kube_inventory/service_test.go b/plugins/inputs/kube_inventory/service_test.go index 3b1089130fbf7..b89a45a45dd5c 100644 --- a/plugins/inputs/kube_inventory/service_test.go +++ b/plugins/inputs/kube_inventory/service_test.go @@ -1,16 +1,17 @@ package kube_inventory import ( - "reflect" - + "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" - "github.com/influxdata/telegraf/testutil" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" - "strings" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestService(t *testing.T) { @@ 
-21,7 +22,7 @@ func TestService(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool include []string exclude []string @@ -30,7 +31,7 @@ func TestService(t *testing.T) { name: "no service", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/service/": &v1.ServiceList{}, + "/service/": &corev1.ServiceList{}, }, }, hasError: false, @@ -39,30 +40,32 @@ func TestService(t *testing.T) { name: "collect service", handler: &mockHandler{ responseMap: map[string]interface{}{ - "/service/": &v1.ServiceList{ - Items: []*v1.Service{ + "/service/": &corev1.ServiceList{ + Items: []corev1.Service{ { - Spec: &v1.ServiceSpec{ - Ports: []*v1.ServicePort{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { - Port: toInt32Ptr(8080), - TargetPort: toIntStrPtrI(1234), - Name: toStrPtr("diagnostic"), - Protocol: toStrPtr("TCP"), + Port: 8080, + TargetPort: intstr.IntOrString{ + IntVal: 1234, + }, + Name: "diagnostic", + Protocol: "TCP", }, }, ExternalIPs: []string{"1.0.0.127"}, - ClusterIP: toStrPtr("127.0.0.1"), + ClusterIP: "127.0.0.1", Selector: map[string]string{ "select1": "s1", "select2": "s2", }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("checker"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "checker", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -70,27 +73,27 @@ func TestService(t *testing.T) { }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "port": int32(8080), - "target_port": int32(1234), - "generation": int64(12), - "created": now.UnixNano(), - }, - Tags: map[string]string{ - "service_name": "checker", - "namespace": "ns1", - "port_name": "diagnostic", - "port_protocol": "TCP", - "cluster_ip": "127.0.0.1", - "ip": "1.0.0.127", - "selector_select1": "s1", - "selector_select2": "s2", - }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_service", + map[string]string{ + "service_name": "checker", + "namespace": "ns1", + "port_name": "diagnostic", + "port_protocol": "TCP", + "cluster_ip": "127.0.0.1", + "ip": "1.0.0.127", + "selector_select1": "s1", + "selector_select2": "s2", }, - }, + map[string]interface{}{ + "port": int32(8080), + "target_port": int32(1234), + "generation": int64(12), + "created": now.UnixNano(), + }, + time.Unix(0, 0), + ), }, hasError: false, }, @@ -102,37 +105,23 @@ func TestService(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { - err := ks.gatherService(*service, acc) - if err != nil { - t.Errorf("Failed to gather service - %s", err.Error()) - } + for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items { + ks.gatherService(service, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range 
v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -142,30 +131,32 @@ func TestServiceSelectorFilter(t *testing.T) { now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) responseMap := map[string]interface{}{ - "/service/": &v1.ServiceList{ - Items: []*v1.Service{ + "/service/": &corev1.ServiceList{ + Items: []corev1.Service{ { - Spec: &v1.ServiceSpec{ - Ports: []*v1.ServicePort{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { - Port: toInt32Ptr(8080), - TargetPort: toIntStrPtrI(1234), - Name: toStrPtr("diagnostic"), - Protocol: toStrPtr("TCP"), + Port: 8080, + TargetPort: intstr.IntOrString{ + IntVal: 1234, + }, + Name: "diagnostic", + Protocol: "TCP", }, }, ExternalIPs: []string{"1.0.0.127"}, - ClusterIP: toStrPtr("127.0.0.1"), + ClusterIP: "127.0.0.1", Selector: map[string]string{ "select1": "s1", "select2": "s2", }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(12), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("checker"), - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 12, + Namespace: "ns1", + Name: "checker", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -273,13 +264,10 @@ func TestServiceSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) - for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { - err := ks.gatherService(*service, acc) - if err != nil { - t.Errorf("Failed to gather service - %s", err.Error()) - } + for _, service := range ((v.handler.responseMap["/service/"]).(*corev1.ServiceList)).Items { + ks.gatherService(service, acc) } // Grab selector tags @@ -292,8 +280,7 @@ func TestServiceSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go index fe25f19f08440..06335fc612a1e 100644 --- a/plugins/inputs/kube_inventory/statefulset.go +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -2,9 +2,8 @@ package kube_inventory import ( "context" - "time" - "github.com/ericchiang/k8s/apis/apps/v1" + v1 "k8s.io/api/apps/v1" "github.com/influxdata/telegraf" ) @@ -16,36 +15,35 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube return } for _, s := range list.Items { - if err = ki.gatherStatefulSet(*s, acc); err != nil { - acc.AddError(err) - return - } + ki.gatherStatefulSet(s, acc) } } 
-func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error { +func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) { status := s.Status fields := map[string]interface{}{ - "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), - "generation": *s.Metadata.Generation, - "replicas": *status.Replicas, - "replicas_current": *status.CurrentReplicas, - "replicas_ready": *status.ReadyReplicas, - "replicas_updated": *status.UpdatedReplicas, - "spec_replicas": *s.Spec.Replicas, - "observed_generation": *s.Status.ObservedGeneration, + "created": s.GetCreationTimestamp().UnixNano(), + "generation": s.Generation, + "replicas": status.Replicas, + "replicas_current": status.CurrentReplicas, + "replicas_ready": status.ReadyReplicas, + "replicas_updated": status.UpdatedReplicas, + "observed_generation": s.Status.ObservedGeneration, + } + if s.Spec.Replicas != nil { + fields["spec_replicas"] = *s.Spec.Replicas } tags := map[string]string{ - "statefulset_name": *s.Metadata.Name, - "namespace": *s.Metadata.Namespace, + "statefulset_name": s.Name, + "namespace": s.Namespace, } - for key, val := range s.GetSpec().GetSelector().GetMatchLabels() { - if ki.selectorFilter.Match(key) { - tags["selector_"+key] = val + if s.Spec.Selector != nil { + for key, val := range s.Spec.Selector.MatchLabels { + if ki.selectorFilter.Match(key) { + tags["selector_"+key] = val + } } } acc.AddFields(statefulSetMeasurement, fields, tags) - - return nil } diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go index 689cbadbc4b8d..6f30acc8b7435 100644 --- a/plugins/inputs/kube_inventory/statefulset_test.go +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -1,15 +1,16 @@ package kube_inventory import ( - "reflect" "strings" "testing" "time" - "github.com/ericchiang/k8s/apis/apps/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestStatefulSet(t *testing.T) { @@ -21,7 +22,7 @@ func TestStatefulSet(t *testing.T) { tests := []struct { name string handler *mockHandler - output *testutil.Accumulator + output []telegraf.Metric hasError bool }{ { @@ -38,16 +39,16 @@ func TestStatefulSet(t *testing.T) { handler: &mockHandler{ responseMap: map[string]interface{}{ "/statefulsets/": &v1.StatefulSetList{ - Items: []*v1.StatefulSet{ + Items: []v1.StatefulSet{ { - Status: &v1.StatefulSetStatus{ - Replicas: toInt32Ptr(2), - CurrentReplicas: toInt32Ptr(4), - ReadyReplicas: toInt32Ptr(1), - UpdatedReplicas: toInt32Ptr(3), - ObservedGeneration: toInt64Ptr(119), + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, }, - Spec: &v1.StatefulSetSpec{ + Spec: v1.StatefulSetSpec{ Replicas: toInt32Ptr(3), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -56,43 +57,147 @@ func TestStatefulSet(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(332), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("sts1"), - Labels: map[string]string{ - "lab1": "v1", - "lab2": "v2", - }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: 
"sts1", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, }, }, }, - output: &testutil.Accumulator{ - Metrics: []*testutil.Metric{ - { - Fields: map[string]interface{}{ - "generation": int64(332), - "observed_generation": int64(119), - "created": now.UnixNano(), - "spec_replicas": int32(3), - "replicas": int32(2), - "replicas_current": int32(4), - "replicas_ready": int32(1), - "replicas_updated": int32(3), + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "spec_replicas": int32(3), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no label selector", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1.StatefulSetList{ + Items: []v1.StatefulSet{ + { + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, + }, + Spec: v1.StatefulSetSpec{ + Replicas: toInt32Ptr(3), + Selector: nil, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, - Tags: map[string]string{ - "namespace": "ns1", - "statefulset_name": "sts1", - "selector_select1": "s1", - "selector_select2": "s2", + }, + }, + }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "spec_replicas": int32(3), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), + }, + hasError: false, + }, + { + name: "no desired number of replicas", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1.StatefulSetList{ + Items: []v1.StatefulSet{ + { + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, + }, + Spec: v1.StatefulSetSpec{ + Replicas: nil, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "select1": "s1", + "select2": "s2", + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, + }, + }, }, }, }, }, + output: []telegraf.Metric{ + testutil.MustMetric( + "kubernetes_statefulset", + map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + "selector_select1": "s1", + "selector_select2": "s2", + }, + map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + time.Unix(0, 0), + ), + }, hasError: false, }, } @@ -103,37 +208,23 @@ func TestStatefulSet(t *testing.T) { SelectorInclude: selectInclude, SelectorExclude: selectExclude, } - ks.createSelectorFilters() - acc := new(testutil.Accumulator) + require.NoError(t, ks.createSelectorFilters()) + acc := &testutil.Accumulator{} for _, 
ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { - err := ks.gatherStatefulSet(*ss, acc) - if err != nil { - t.Errorf("Failed to gather ss - %s", err.Error()) - } + ks.gatherStatefulSet(ss, acc) } err := acc.FirstError() - if err == nil && v.hasError { - t.Fatalf("%s failed, should have error", v.name) - } else if err != nil && !v.hasError { - t.Fatalf("%s failed, err: %v", v.name, err) - } - if v.output == nil && len(acc.Metrics) > 0 { - t.Fatalf("%s: collected extra data", v.name) - } else if v.output != nil && len(v.output.Metrics) > 0 { - for i := range v.output.Metrics { - for k, m := range v.output.Metrics[i].Tags { - if acc.Metrics[i].Tags[k] != m { - t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) - } - } - for k, m := range v.output.Metrics[i].Fields { - if acc.Metrics[i].Fields[k] != m { - t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) - } - } - } + if v.hasError { + require.Errorf(t, err, "%s failed, should have error", v.name) + continue } + + // No error case + require.NoErrorf(t, err, "%s failed, err: %v", v.name, err) + + require.Len(t, acc.Metrics, len(v.output)) + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), v.output, testutil.IgnoreTime()) } } @@ -144,16 +235,16 @@ func TestStatefulSetSelectorFilter(t *testing.T) { responseMap := map[string]interface{}{ "/statefulsets/": &v1.StatefulSetList{ - Items: []*v1.StatefulSet{ + Items: []v1.StatefulSet{ { - Status: &v1.StatefulSetStatus{ - Replicas: toInt32Ptr(2), - CurrentReplicas: toInt32Ptr(4), - ReadyReplicas: toInt32Ptr(1), - UpdatedReplicas: toInt32Ptr(3), - ObservedGeneration: toInt64Ptr(119), + Status: v1.StatefulSetStatus{ + Replicas: 2, + CurrentReplicas: 4, + ReadyReplicas: 1, + UpdatedReplicas: 3, + ObservedGeneration: 119, }, - Spec: &v1.StatefulSetSpec{ + Spec: v1.StatefulSetSpec{ Replicas: toInt32Ptr(3), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -162,15 +253,11 @@ func TestStatefulSetSelectorFilter(t *testing.T) { }, }, }, - Metadata: &metav1.ObjectMeta{ - Generation: toInt64Ptr(332), - Namespace: toStrPtr("ns1"), - Name: toStrPtr("sts1"), - Labels: map[string]string{ - "lab1": "v1", - "lab2": "v2", - }, - CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + ObjectMeta: metav1.ObjectMeta{ + Generation: 332, + Namespace: "ns1", + Name: "sts1", + CreationTimestamp: metav1.Time{Time: now}, }, }, }, @@ -278,13 +365,10 @@ func TestStatefulSetSelectorFilter(t *testing.T) { } ks.SelectorInclude = v.include ks.SelectorExclude = v.exclude - ks.createSelectorFilters() + require.NoError(t, ks.createSelectorFilters()) acc := new(testutil.Accumulator) for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { - err := ks.gatherStatefulSet(*ss, acc) - if err != nil { - t.Errorf("Failed to gather ss - %s", err.Error()) - } + ks.gatherStatefulSet(ss, acc) } // Grab selector tags @@ -297,8 +381,7 @@ func TestStatefulSetSelectorFilter(t *testing.T) { } } - if !reflect.DeepEqual(v.expected, actual) { - t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) - } + require.Equalf(t, v.expected, actual, + "actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected) } } diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index a574bed06ffe4..6a5bda4fe86aa 100644 --- 
a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -6,13 +6,15 @@ is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet. -To find the ip address of the host you are running on you can issue a command like the following: +To find the ip address of the host you are running on you can issue a command +like the following: -``` -$ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' +```sh +curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' ``` -In this case we used the downward API to pass in the `$POD_NAMESPACE` and `$HOSTNAME` is the hostname of the pod which is set by the kubernetes API. +In this case we used the downward API to pass in the `$POD_NAMESPACE` and +`$HOSTNAME` is the hostname of the pod which is set by the kubernetes API. Kubernetes is a fast moving project, with a new minor release every 3 months. As such, we will aim to maintain support only for versions that are supported by @@ -20,7 +22,7 @@ the major cloud providers; this is roughly 4 release / 2 years. **This plugin supports Kubernetes 1.11 and later.** -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -28,15 +30,14 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. 
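For reference, a minimal Go sketch of the host-IP lookup shown in the curl example earlier, using only the standard library; the environment variable names mirror that example, and `panic` stands in for real error handling:

```go
package main

import (
	"crypto/tls"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Env vars mirror the curl example: API_URL, POD_NAMESPACE, HOSTNAME, TOKEN.
	url := fmt.Sprintf("%s/api/v1/namespaces/%s/pods/%s",
		os.Getenv("API_URL"), os.Getenv("POD_NAMESPACE"), os.Getenv("HOSTNAME"))

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("TOKEN"))

	// InsecureSkipVerify mirrors curl's --insecure flag; avoid it in production.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Equivalent of the jq filter '.status.hostIP'.
	var pod struct {
		Status struct {
			HostIP string `json:"hostIP"`
		} `json:"status"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&pod); err != nil {
		panic(err)
	}
	fmt.Println(pod.Status.HostIP)
}
```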
-### Configuration +## Configuration -```toml +```toml @sample.conf +# Read metrics from the kubernetes kubelet api [[inputs.kubernetes]] ## URL for the kubelet url = "http://127.0.0.1:10255" @@ -64,17 +65,17 @@ avoid cardinality issues: # insecure_skip_verify = false ``` -### DaemonSet +## DaemonSet -For recommendations on running Telegraf as a DaemonSet see [Monitoring Kubernetes -Architecture][k8s-telegraf] or view the Helm charts: +For recommendations on running Telegraf as a DaemonSet see [Monitoring +Kubernetes Architecture][k8s-telegraf] or view the Helm charts: - [Telegraf][] - [InfluxDB][] - [Chronograf][] - [Kapacitor][] -### Metrics +## Metrics - kubernetes_node - tags: @@ -99,7 +100,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - runtime_image_fs_capacity_bytes - runtime_image_fs_used_bytes -* kubernetes_pod_container +- kubernetes_pod_container - tags: - container_name - namespace @@ -131,7 +132,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - capacity_bytes - used_bytes -* kubernetes_pod_network +- kubernetes_pod_network - tags: - namespace - node_name @@ -142,9 +143,9 @@ Architecture][k8s-telegraf] or view the Helm charts: - tx_bytes - tx_errors -### Example Output +## Example Output -``` +```shell kubernetes_node kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_available_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 kubernetes_pod_network,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 @@ -154,8 +155,6 @@ kubernetes_system_container [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index a9bb6ef4850d8..8563cf5549fa8 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -1,21 +1,26 @@ +//go:generate ../../../tools/readme_config_includer/generator package kubernetes import ( + _ "embed" "encoding/json" "fmt" - "io/ioutil" "net/http" - "net/url" + "os" "strings" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + // Kubernetes represents the config object for the plugin type Kubernetes struct { URL string @@ -30,42 +35,14 @@ type Kubernetes struct { labelFilter filter.Filter // HTTP Timeout specified as a string - 3s, 1m, 1h - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig RoundTripper http.RoundTripper } -var sampleConfig = ` - ## URL for the kubelet - url = "http://127.0.0.1:10255" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - ## If both of these are empty, we'll use the default serviceaccount: - ## at: /run/secrets/kubernetes.io/serviceaccount/token - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## Pod labels to be added as tags. An empty array for both include and - ## exclude will include all labels. - # label_include = [] - # label_exclude = ["*"] - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - const ( - summaryEndpoint = `%s/stats/summary` defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token" ) @@ -78,25 +55,18 @@ func init() { }) } -//SampleConfig returns a sample config -func (k *Kubernetes) SampleConfig() string { +func (*Kubernetes) SampleConfig() string { return sampleConfig } -//Description returns the description of this plugin -func (k *Kubernetes) Description() string { - return "Read metrics from the kubernetes kubelet api" -} - func (k *Kubernetes) Init() error { - // If neither are provided, use the default service account. if k.BearerToken == "" && k.BearerTokenString == "" { k.BearerToken = defaultServiceAccountPath } if k.BearerToken != "" { - token, err := ioutil.ReadFile(k.BearerToken) + token, err := os.ReadFile(k.BearerToken) if err != nil { return err } @@ -118,18 +88,9 @@ func (k *Kubernetes) Gather(acc telegraf.Accumulator) error { return nil } -func buildURL(endpoint string, base string) (*url.URL, error) { - u := fmt.Sprintf(endpoint, base) - addr, err := url.Parse(u) - if err != nil { - return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) - } - return addr, nil -} - func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error { summaryMetrics := &SummaryMetrics{} - err := k.LoadJson(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) + err := k.LoadJSON(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) if err != nil { return err } @@ -140,7 +101,7 @@ func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) err } buildSystemContainerMetrics(summaryMetrics, acc) buildNodeMetrics(summaryMetrics, acc) - buildPodMetrics(baseURL, summaryMetrics, podInfos, k.labelFilter, acc) + buildPodMetrics(summaryMetrics, podInfos, k.labelFilter, acc) return nil } @@ -193,19 +154,19 @@ func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) } func (k *Kubernetes) gatherPodInfo(baseURL string) ([]Metadata, error) { - var podApi Pods - err := k.LoadJson(fmt.Sprintf("%s/pods", baseURL), &podApi) + var podAPI Pods + err := k.LoadJSON(fmt.Sprintf("%s/pods", baseURL), &podAPI) if err != nil { return nil, err } var podInfos []Metadata - for _, podMetadata := range podApi.Items { + for _, podMetadata := range podAPI.Items { podInfos = append(podInfos, podMetadata.Metadata) } return 
podInfos, nil } -func (k *Kubernetes) LoadJson(url string, v interface{}) error { +func (k *Kubernetes) LoadJSON(url string, v interface{}) error { var req, err = http.NewRequest("GET", url, nil) if err != nil { return err @@ -216,13 +177,13 @@ func (k *Kubernetes) LoadJson(url string, v interface{}) error { return err } if k.RoundTripper == nil { - if k.ResponseTimeout.Duration < time.Second { - k.ResponseTimeout.Duration = time.Second * 5 + if k.ResponseTimeout < config.Duration(time.Second) { + k.ResponseTimeout = config.Duration(time.Second * 5) } k.RoundTripper = &http.Transport{ TLSHandshakeTimeout: 5 * time.Second, TLSClientConfig: tlsCfg, - ResponseHeaderTimeout: k.ResponseTimeout.Duration, + ResponseHeaderTimeout: time.Duration(k.ResponseTimeout), } } req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) @@ -244,8 +205,19 @@ func (k *Kubernetes) LoadJson(url string, v interface{}) error { return nil } -func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { +func buildPodMetrics(summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { for _, pod := range summaryMetrics.Pods { + podLabels := make(map[string]string) + for _, info := range podInfo { + if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace { + for k, v := range info.Labels { + if labelFilter.Match(k) { + podLabels[k] = v + } + } + } + } + for _, container := range pod.Containers { tags := map[string]string{ "node_name": summaryMetrics.Node.NodeName, @@ -253,16 +225,9 @@ func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []M "container_name": container.Name, "pod_name": pod.PodRef.Name, } - for _, info := range podInfo { - if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace { - for k, v := range info.Labels { - if labelFilter.Match(k) { - tags[k] = v - } - } - } + for k, v := range podLabels { + tags[k] = v } - fields := make(map[string]interface{}) fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds @@ -287,6 +252,9 @@ func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []M "namespace": pod.PodRef.Namespace, "volume_name": volume.Name, } + for k, v := range podLabels { + tags[k] = v + } fields := make(map[string]interface{}) fields["available_bytes"] = volume.AvailableBytes fields["capacity_bytes"] = volume.CapacityBytes @@ -299,6 +267,9 @@ func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []M "pod_name": pod.PodRef.Name, "namespace": pod.PodRef.Namespace, } + for k, v := range podLabels { + tags[k] = v + } fields := make(map[string]interface{}) fields["rx_bytes"] = pod.Network.RXBytes fields["rx_errors"] = pod.Network.RXErrors diff --git a/plugins/inputs/kubernetes/kubernetes_pods.go b/plugins/inputs/kubernetes/kubernetes_pods.go index 672608e54fe25..29d5e77895266 100644 --- a/plugins/inputs/kubernetes/kubernetes_pods.go +++ b/plugins/inputs/kubernetes/kubernetes_pods.go @@ -2,7 +2,7 @@ package kubernetes type Pods struct { Kind string `json:"kind"` - ApiVersion string `json:"apiVersion"` + APIVersion string `json:"apiVersion"` Items []Item `json:"items"` } diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index faf40be3e1000..864905448780d 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ 
b/plugins/inputs/kubernetes/kubernetes_test.go @@ -15,13 +15,14 @@ func TestKubernetesStats(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.RequestURI == "/stats/summary" { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responseStatsSummery) + _, err := fmt.Fprintln(w, responseStatsSummery) + require.NoError(t, err) } if r.RequestURI == "/pods" { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responsePods) + _, err := fmt.Fprintln(w, responsePods) + require.NoError(t, err) } - })) defer ts.Close() @@ -140,6 +141,8 @@ func TestKubernetesStats(t *testing.T) { "volume_name": "volume1", "namespace": "foons", "pod_name": "foopod", + "app": "foo", + "superkey": "foobar", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_volume", fields, tags) @@ -153,9 +156,10 @@ func TestKubernetesStats(t *testing.T) { "node_name": "node1", "namespace": "foons", "pod_name": "foopod", + "app": "foo", + "superkey": "foobar", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_network", fields, tags) - } var responsePods = ` diff --git a/plugins/inputs/kubernetes/sample.conf b/plugins/inputs/kubernetes/sample.conf new file mode 100644 index 0000000000000..ef8c5bdf4aacf --- /dev/null +++ b/plugins/inputs/kubernetes/sample.conf @@ -0,0 +1,26 @@ +# Read metrics from the kubernetes kubelet api +[[inputs.kubernetes]] + ## URL for the kubelet + url = "http://127.0.0.1:10255" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## Pod labels to be added as tags. An empty array for both include and + ## exclude will include all labels. + # label_include = [] + # label_exclude = ["*"] + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/lanz/README.md b/plugins/inputs/lanz/README.md index c47b22fee1dd1..3c63aa596a97e 100644 --- a/plugins/inputs/lanz/README.md +++ b/plugins/inputs/lanz/README.md @@ -1,34 +1,39 @@ # Arista LANZ Consumer Input Plugin -This plugin provides a consumer for use with Arista Networks’ Latency Analyzer (LANZ) +This plugin provides a consumer for use with Arista Networks’ Latency Analyzer +(LANZ) Metrics are read from a stream of data via TCP through port 50001 on the -switches management IP. The data is in Protobuffers format. For more information on Arista LANZ +switch's management IP. The data is in Protobuffers format. For more information +on Arista LANZ -- https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz +- <https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz> This plugin uses Arista's sdk. -- https://github.com/aristanetworks/goarista +- <https://github.com/aristanetworks/goarista> -### Configuration +## Configuration -You will need to configure LANZ and enable streaming LANZ data. - -- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz -- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz#ww1149292 -```toml +```toml @sample.conf +# Read metrics off Arista LANZ, via socket [[inputs.lanz]] + ## URL to Arista LANZ endpoint servers = [ "tcp://switch1.int.example.com:50001", "tcp://switch2.int.example.com:50001", ] ``` -### Metrics +You will need to configure LANZ and enable streaming LANZ data.
+ +- <https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz> +- <https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz#ww1149292> +## Metrics -For more details on the metrics see https://github.com/aristanetworks/goarista/blob/master/lanz/proto/lanz.proto +For more details on the metrics see +<https://github.com/aristanetworks/goarista/blob/master/lanz/proto/lanz.proto> - lanz_congestion_record: - tags: @@ -47,7 +52,7 @@ For more details on the metrics see https://github.com/aristanetworks/goarista/b - tx_latency (integer) - q_drop_count (integer) -+ lanz_global_buffer_usage_record +- lanz_global_buffer_usage_record - tags: - entry_type - source @@ -57,31 +62,31 @@ For more details on the metrics see https://github.com/aristanetworks/goarista/b - buffer_size (integer) - duration (integer) - - -### Sample Queries +## Sample Queries Get the max tx_latency for the last hour for all interfaces on all switches. + ```sql SELECT max("tx_latency") AS "max_tx_latency" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name" ``` Get the max queue_size for the last hour for all interfaces on all switches. + ```sql SELECT max("queue_size") AS "max_queue_size" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name" ``` Get the max buffer_size over the last hour for all switches. + ```sql SELECT max("buffer_size") AS "max_buffer_size" FROM "global_buffer_usage_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname" ``` -### Example output -``` +## Example output + +```shell lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=505i,duration=0i 1583341058300643815 lanz_congestion_record,entry_type=2,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 time_of_max_qlen=0i,tx_latency=564480i,q_drop_count=0i,timestamp=158334105824919i,queue_size=225i 1583341058300636045 lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=589i,duration=0i 1583341058300457464 lanz_congestion_record,entry_type=1,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 q_drop_count=0i,timestamp=158334105824919i,queue_size=232i,time_of_max_qlen=0i,tx_latency=584640i 1583341058300450302 ``` - - diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go index 7553c33c777b2..af0d530454070 100644 --- a/plugins/inputs/lanz/lanz.go +++ b/plugins/inputs/lanz/lanz.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package lanz import ( + _ "embed" "net/url" "strconv" "sync" @@ -8,16 +10,14 @@ import ( "github.com/aristanetworks/goarista/lanz" pb "github.com/aristanetworks/goarista/lanz/proto" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) -var sampleConfig = ` - ## URL to Arista LANZ endpoint - servers = [ - "tcp://127.0.0.1:50001" - ] -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string func init() { inputs.Add("lanz", func() telegraf.Input { @@ -35,31 +35,26 @@ func NewLanz() *Lanz { return &Lanz{} } -func (l *Lanz) SampleConfig() string { +func (*Lanz) SampleConfig() string { return sampleConfig } -func (l *Lanz) Description() string { - return "Read metrics off Arista LANZ, via socket" -} - -func (l *Lanz) Gather(acc telegraf.Accumulator) error { +func (l *Lanz) Gather(_ telegraf.Accumulator) error { return nil } func (l *Lanz) Start(acc telegraf.Accumulator) error { - if len(l.Servers) == 0 { l.Servers = append(l.Servers, "tcp://127.0.0.1:50001") } for _, server := range l.Servers { - deviceUrl, err := url.Parse(server) + deviceURL, err := url.Parse(server) if err != nil { return err } client := lanz.New( - lanz.WithAddr(deviceUrl.Host), + lanz.WithAddr(deviceURL.Host), lanz.WithBackoff(1*time.Second), lanz.WithTimeout(10*time.Second), ) @@ -72,7 +67,7 @@ func (l *Lanz) Start(acc telegraf.Accumulator) error { l.wg.Add(1) go func() { l.wg.Done() - receive(acc, in, deviceUrl) + receive(acc, in, deviceURL) }() } return nil @@ -85,19 +80,20 @@ func (l *Lanz) Stop() { l.wg.Wait() } -func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceUrl *url.URL) { +func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceURL *url.URL) { + //nolint:gosimple // for-select used on purpose for { select { case msg, ok := <-in: if !ok { return } - msgToAccumulator(acc, msg, deviceUrl) + msgToAccumulator(acc, msg, deviceURL) } } } -func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *url.URL) { +func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceURL *url.URL) { cr := msg.GetCongestionRecord() if cr != nil { vals := map[string]interface{}{ @@ -114,8 +110,8 @@ func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *u "entry_type": strconv.FormatInt(int64(cr.GetEntryType()), 10), "traffic_class": strconv.FormatInt(int64(cr.GetTrafficClass()), 10), "fabric_peer_intf_name": cr.GetFabricPeerIntfName(), - "source": deviceUrl.Hostname(), - "port": deviceUrl.Port(), + "source": deviceURL.Hostname(), + "port": deviceURL.Port(), } acc.AddFields("lanz_congestion_record", vals, tags) } @@ -129,8 +125,8 @@ func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *u } tags := map[string]string{ "entry_type": strconv.FormatInt(int64(gbur.GetEntryType()), 10), - "source": deviceUrl.Hostname(), - "port": deviceUrl.Port(), + "source": deviceURL.Hostname(), + "port": deviceURL.Port(), } acc.AddFields("lanz_global_buffer_usage_record", vals, tags) } diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go index 5f9c7ab24cb40..f2a8b5815e36d 100644 --- a/plugins/inputs/lanz/lanz_test.go +++ b/plugins/inputs/lanz/lanz_test.go @@ -6,7 +6,8 @@ import ( "testing" pb "github.com/aristanetworks/goarista/lanz/proto" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" + "github.com/influxdata/telegraf/testutil" ) @@ -51,23 +52,22 @@ var testProtoBufGlobalBufferUsageRecord = &pb.LanzRecord{ } func TestLanzGeneratesMetrics(t *testing.T) { - var acc testutil.Accumulator l := NewLanz() l.Servers = append(l.Servers, "tcp://switch01.int.example.com:50001") l.Servers = append(l.Servers, "tcp://switch02.int.example.com:50001") - deviceUrl1, err := url.Parse(l.Servers[0]) + deviceURL1, err := url.Parse(l.Servers[0]) if err != nil { t.Fail() } - deviceUrl2, err := url.Parse(l.Servers[1]) + deviceURL2, err := 
url.Parse(l.Servers[1]) if err != nil { t.Fail() } - msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceUrl1) + msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceURL1) acc.Wait(1) vals1 := map[string]interface{}{ @@ -92,7 +92,7 @@ func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals1, tags1) acc.ClearMetrics() - msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceUrl2) + msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceURL2) acc.Wait(1) vals2 := map[string]interface{}{ @@ -117,7 +117,7 @@ func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals2, tags2) acc.ClearMetrics() - msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceUrl1) + msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceURL1) acc.Wait(1) gburVals1 := map[string]interface{}{ @@ -133,5 +133,4 @@ func TestLanzGeneratesMetrics(t *testing.T) { acc.AssertContainsFields(t, "lanz_global_buffer_usage_record", gburVals1) acc.AssertContainsTaggedFields(t, "lanz_global_buffer_usage_record", gburVals1, gburTags1) - } diff --git a/plugins/inputs/lanz/sample.conf b/plugins/inputs/lanz/sample.conf new file mode 100644 index 0000000000000..9fcc5adb8fcac --- /dev/null +++ b/plugins/inputs/lanz/sample.conf @@ -0,0 +1,7 @@ +# Read metrics off Arista LANZ, via socket +[[inputs.lanz]] + ## URL to Arista LANZ endpoint + servers = [ + "tcp://switch1.int.example.com:50001", + "tcp://switch2.int.example.com:50001", + ] diff --git a/plugins/inputs/leofs/README.md b/plugins/inputs/leofs/README.md index bd028e65ab048..078145519afb1 100644 --- a/plugins/inputs/leofs/README.md +++ b/plugins/inputs/leofs/README.md @@ -1,67 +1,73 @@ # LeoFS Input Plugin -The LeoFS plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using SNMP. See [LeoFS Documentation / System Administration / System Monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/). +The LeoFS plugin gathers metrics of LeoGateway, LeoManager, and LeoStorage using +SNMP. See [LeoFS Documentation / System Administration / System +Monitoring](https://leo-project.net/leofs/docs/admin/system_admin/monitoring/). 
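Internally the LeoFS plugin shells out to `snmpwalk` and scans its output line by line. A rough standalone sketch of that approach, assuming `snmpwalk` is on `PATH`; the flags and community string here are illustrative assumptions, only the OID prefix comes from the plugin source:

```go
package main

import (
	"bufio"
	"fmt"
	"os/exec"
)

func main() {
	// The OID prefix matches the constant in leofs.go; the snmpwalk
	// flags and community string are illustrative assumptions.
	cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", "127.0.0.1:4010", ".1.3.6.1.4.1.35450")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Stream and print each OID/value line, much as the plugin's scanner does.
	scanner := bufio.NewScanner(stdout)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	if err := cmd.Wait(); err != nil {
		panic(err)
	}
}
```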
-## Configuration: - -```toml -# Sample Config: +## Configuration +```toml @sample.conf +# Read metrics from a LeoFS Server via SNMP [[inputs.leofs]] - servers = ["127.0.0.1:4010"] + ## An array of URLs of the form: + ## host [ ":" port] + servers = ["127.0.0.1:4010"] ``` -## Measurements & Fields: +## Measurements & Fields + ### Statistics specific to the internals of LeoManager -#### Erlang VM + +#### Erlang VM of LeoManager - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min ### Statistics specific to the internals of LeoStorage -#### Erlang VM + +### Erlang VM of LeoStorage - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min -#### Total Number of Requests +### Total Number of Requests for LeoStorage - 1 min Statistics - - num_of_writes - - num_of_reads - - num_of_deletes + - num_of_writes + - num_of_reads + - num_of_deletes - 5 min Statistics - - num_of_writes_5min - - num_of_reads_5min - - num_of_deletes_5min + - num_of_writes_5min + - num_of_reads_5min + - num_of_deletes_5min #### Total Number of Objects and Total Size of Objects @@ -103,35 +109,36 @@ Note: The following items are available since LeoFS v1.4.0: Note: The all items are available since LeoFS v1.4.0. 
### Statistics specific to the internals of LeoGateway -#### Erlang VM + +#### Erlang VM of LeoGateway - 1 min Statistics - - num_of_processes - - total_memory_usage - - system_memory_usage - - processes_memory_usage - - ets_memory_usage - - used_allocated_memory - - allocated_memory + - num_of_processes + - total_memory_usage + - system_memory_usage + - processes_memory_usage + - ets_memory_usage + - used_allocated_memory + - allocated_memory - 5 min Statistics - - num_of_processes_5min - - total_memory_usage_5min - - system_memory_usage_5min - - processes_memory_usage_5min - - ets_memory_usage_5min - - used_allocated_memory_5min - - allocated_memory_5min + - num_of_processes_5min + - total_memory_usage_5min + - system_memory_usage_5min + - processes_memory_usage_5min + - ets_memory_usage_5min + - used_allocated_memory_5min + - allocated_memory_5min -#### Total Number of Requests +#### Total Number of Requests for LeoGateway - 1 min Statistics - - num_of_writes - - num_of_reads - - num_of_deletes + - num_of_writes + - num_of_reads + - num_of_deletes - 5 min Statistics - - num_of_writes_5min - - num_of_reads_5min - - num_of_deletes_5min + - num_of_writes_5min + - num_of_reads_5min + - num_of_deletes_5min #### Object Cache @@ -140,15 +147,13 @@ Note: The all items are available since LeoFS v1.4.0. - total_of_files - total_cached_size - -### Tags: +### Tags All measurements have the following tags: - node - -### Example output: +### Example output #### LeoManager @@ -221,7 +226,7 @@ $ ./telegraf --config ./plugins/inputs/leofs/leo_storage.conf --input-filter leo #### LeoGateway -``` +```shell $ ./telegraf --config ./plugins/inputs/leofs/leo_gateway.conf --input-filter leofs --test > leofs, host=gateway_0, node=gateway_0@127.0.0.1 allocated_memory=87941120, diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index 7e5ae25d4743d..fea0f99af16d1 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package leofs import ( "bufio" + _ "embed" "fmt" "os/exec" "strconv" @@ -14,6 +16,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + const oid = ".1.3.6.1.4.1.35450" // For Manager Master @@ -146,24 +152,13 @@ var serverTypeMapping = map[string]ServerType{ "4001": ServerTypeGateway, } -var sampleConfig = ` - ## An array of URLs of the form: - ## host [ ":" port] - servers = ["127.0.0.1:4020"] -` - -func (l *LeoFS) SampleConfig() string { +func (*LeoFS) SampleConfig() string { return sampleConfig } -func (l *LeoFS) Description() string { - return "Read metrics from a LeoFS Server via SNMP" -} - func (l *LeoFS) Gather(acc telegraf.Accumulator) error { if len(l.Servers) == 0 { - l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc) - return nil + return l.gatherServer(defaultEndpoint, ServerTypeManagerMaster, acc) } var wg sync.WaitGroup for _, endpoint := range l.Servers { @@ -206,7 +201,11 @@ func (l *LeoFS) gatherServer( if err != nil { return err } - cmd.Start() + if err := cmd.Start(); err != nil { + return err + } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive defer internal.WaitTimeout(cmd, time.Second*5) scanner := bufio.NewScanner(stdout) if !scanner.Scan() { diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index f456a998e73a6..b0825b7e46ca0 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -1,14 +1,13 @@ package leofs import ( - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "io/ioutil" - "log" "os" "os/exec" + "runtime" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var fakeSNMP4Manager = ` @@ -124,31 +123,23 @@ func main() { } ` -func makeFakeSNMPSrc(code string) string { - path := os.TempDir() + "/test.go" - err := ioutil.WriteFile(path, []byte(code), 0600) - if err != nil { - log.Fatalln(err) - } - return path -} - -func buildFakeSNMPCmd(src string) { - err := exec.Command("go", "build", "-o", "snmpwalk", src).Run() - if err != nil { - log.Fatalln(err) +func testMain(t *testing.T, code string, endpoint string, serverType ServerType) { + executable := "snmpwalk" + if runtime.GOOS == "windows" { + executable = "snmpwalk.exe" } -} -func testMain(t *testing.T, code string, endpoint string, serverType ServerType) { // Build the fake snmpwalk for test - src := makeFakeSNMPSrc(code) + src := os.TempDir() + "/test.go" + require.NoError(t, os.WriteFile(src, []byte(code), 0600)) defer os.Remove(src) - buildFakeSNMPCmd(src) - defer os.Remove("./snmpwalk") + + require.NoError(t, exec.Command("go", "build", "-o", executable, src).Run()) + defer os.Remove("./" + executable) + envPathOrigin := os.Getenv("PATH") // Refer to the fake snmpwalk - os.Setenv("PATH", ".") + require.NoError(t, os.Setenv("PATH", ".")) defer os.Setenv("PATH", envPathOrigin) l := &LeoFS{ @@ -164,22 +155,38 @@ func testMain(t *testing.T, code string, endpoint string, serverType ServerType) floatMetrics := KeyMapping[serverType] for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("leofs", metric), metric) + require.True(t, acc.HasFloatField("leofs", metric), metric) } } -func TestLeoFSManagerMasterMetrics(t *testing.T) { +func TestLeoFSManagerMasterMetricsIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testMain(t, fakeSNMP4Manager, "localhost:4020", ServerTypeManagerMaster) } -func TestLeoFSManagerSlaveMetrics(t *testing.T) { +func 
TestLeoFSManagerSlaveMetricsIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testMain(t, fakeSNMP4Manager, "localhost:4021", ServerTypeManagerSlave) } -func TestLeoFSStorageMetrics(t *testing.T) { +func TestLeoFSStorageMetricsIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testMain(t, fakeSNMP4Storage, "localhost:4010", ServerTypeStorage) } -func TestLeoFSGatewayMetrics(t *testing.T) { +func TestLeoFSGatewayMetricsIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testMain(t, fakeSNMP4Gateway, "localhost:4000", ServerTypeGateway) } diff --git a/plugins/inputs/leofs/sample.conf b/plugins/inputs/leofs/sample.conf new file mode 100644 index 0000000000000..1a02688ada8b2 --- /dev/null +++ b/plugins/inputs/leofs/sample.conf @@ -0,0 +1,5 @@ +# Read metrics from a LeoFS Server via SNMP +[[inputs.leofs]] + ## An array of URLs of the form: + ## host [ ":" port] + servers = ["127.0.0.1:4010"] diff --git a/plugins/inputs/linux_sysctl_fs/README.md b/plugins/inputs/linux_sysctl_fs/README.md index d6598e16ff30a..84d79ee73ed7a 100644 --- a/plugins/inputs/linux_sysctl_fs/README.md +++ b/plugins/inputs/linux_sysctl_fs/README.md @@ -1,9 +1,19 @@ # Linux Sysctl FS Input Plugin -The linux_sysctl_fs input provides Linux system level file metrics. The documentation on these fields can be found at https://www.kernel.org/doc/Documentation/sysctl/fs.txt. +The linux_sysctl_fs input provides Linux system level file metrics. The +documentation on these fields can be found at +<https://www.kernel.org/doc/Documentation/sysctl/fs.txt>. Example output: -``` +```shell > linux_sysctl_fs,host=foo dentry-want-pages=0i,file-max=44222i,aio-max-nr=65536i,inode-preshrink-nr=0i,dentry-nr=64340i,dentry-unused-nr=55274i,file-nr=1568i,aio-nr=0i,inode-nr=35952i,inode-free-nr=12957i,dentry-age-limit=45i 1490982022000000000 ``` + +## Configuration + +```toml @sample.conf +# Provides Linux sysctl fs metrics +[[inputs.linux_sysctl_fs]] + # no configuration +``` diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index ed24963404fc2..936843935ccc8 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -1,17 +1,22 @@ +//go:generate ../../../tools/readme_config_includer/generator package linux_sysctl_fs import ( "bytes" - "io/ioutil" + _ "embed" + "errors" "os" - "strconv" - "path" + "strconv" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + // https://www.kernel.org/doc/Documentation/sysctl/fs.txt type SysctlFS struct { path string @@ -20,16 +25,13 @@ type SysctlFS struct { var sysctlFSDescription = `Provides Linux sysctl fs metrics` var sysctlFSSampleConfig = `` -func (_ SysctlFS) Description() string { - return sysctlFSDescription -} -func (_ SysctlFS) SampleConfig() string { - return sysctlFSSampleConfig -} - func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fieldNames ...string) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + file) + bs, err := os.ReadFile(sfs.path + "/" + file) if err != nil { + // Ignore non-existing entries + if errors.Is(err, os.ErrNotExist) { + return nil + } return err } @@ -53,8 +55,12 @@ func (sfs *SysctlFS) gatherList(file string, fields map[string]interface{}, fiel } func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error { - bs, err := ioutil.ReadFile(sfs.path + "/" + name) + bs, err := os.ReadFile(sfs.path + "/" + name) if err != nil { + // Ignore non-existing entries + if errors.Is(err, os.ErrNotExist) { + return nil + } return err } @@ -67,16 +73,31 @@ func (sfs *SysctlFS) gatherOne(name string, fields map[string]interface{}) error return nil } +func (*SysctlFS) SampleConfig() string { + return sampleConfig +} + func (sfs *SysctlFS) Gather(acc telegraf.Accumulator) error { fields := map[string]interface{}{} for _, n := range []string{"aio-nr", "aio-max-nr", "dquot-nr", "dquot-max", "super-nr", "super-max"} { - sfs.gatherOne(n, fields) + if err := sfs.gatherOne(n, fields); err != nil { + return err + } } - sfs.gatherList("inode-state", fields, "inode-nr", "inode-free-nr", "inode-preshrink-nr") - sfs.gatherList("dentry-state", fields, "dentry-nr", "dentry-unused-nr", "dentry-age-limit", "dentry-want-pages") - sfs.gatherList("file-nr", fields, "file-nr", "", "file-max") + err := sfs.gatherList("inode-state", fields, "inode-nr", "inode-free-nr", "inode-preshrink-nr") + if err != nil { + return err + } + err = sfs.gatherList("dentry-state", fields, "dentry-nr", "dentry-unused-nr", "dentry-age-limit", "dentry-want-pages") + if err != nil { + return err + } + err = sfs.gatherList("file-nr", fields, "file-nr", "", "file-max") + if err != nil { + return err + } acc.AddFields("linux_sysctl_fs", fields, nil) return nil @@ -91,7 +112,6 @@ func GetHostProc() string { } func init() { - inputs.Add("linux_sysctl_fs", func() telegraf.Input { return &SysctlFS{ path: path.Join(GetHostProc(), "/sys/fs"), diff --git a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go index 78011e288b962..8928b1de1a41c 100644 --- a/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go @@ -1,7 +1,6 @@ package linux_sysctl_fs import ( - "io/ioutil" "os" "testing" @@ -10,16 +9,14 @@ import ( ) func TestSysctlFSGather(t *testing.T) { - td, err := ioutil.TempDir("", "") - require.NoError(t, err) - defer os.RemoveAll(td) + td := t.TempDir() - require.NoError(t, ioutil.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/super-max", []byte("103\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) - require.NoError(t, ioutil.WriteFile(td+"/inode-state", 
[]byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-nr", []byte("100\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/aio-max-nr", []byte("101\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-nr", []byte("102\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/super-max", []byte("103\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/file-nr", []byte("104\t0\t106\n"), 0644)) + require.NoError(t, os.WriteFile(td+"/inode-state", []byte("107\t108\t109\t0\t0\t0\t0\n"), 0644)) sfs := &SysctlFS{ path: td, diff --git a/plugins/inputs/linux_sysctl_fs/sample.conf b/plugins/inputs/linux_sysctl_fs/sample.conf new file mode 100644 index 0000000000000..ea0dfeb2ee994 --- /dev/null +++ b/plugins/inputs/linux_sysctl_fs/sample.conf @@ -0,0 +1,3 @@ +# Provides Linux sysctl fs metrics +[[inputs.linux_sysctl_fs]] + # no configuration diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 0abdba2c972df..5063dd0669cb0 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -1,19 +1,27 @@ # Logparser Input Plugin +**Deprecated in Telegraf 1.15: Please use the [tail][] plugin along with the +[`grok` data format][grok parser]** + The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports regex patterns. -**Deprecated in Telegraf 1.15**: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. - The `tail` plugin now provides all the functionality of the `logparser` plugin. Most options can be translated directly to the `tail` plugin: + - For options in the `[inputs.logparser.grok]` section, the equivalent option will have add the `grok_` prefix when using them in the `tail` input. - The grok `measurement` option can be replaced using the standard plugin `name_override` option. +This plugin also supports [metric filtering](CONFIGURATION.md#metric-filtering) +and some [additional common options](CONFIGURATION.md#processor-plugins). + +## Example + Migration Example: + ```diff - [[inputs.logparser]] - files = ["/var/log/apache/access.log"] @@ -38,9 +46,10 @@ Migration Example: + data_format = "grok" ``` -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read metrics off Arista LANZ, via socket [[inputs.logparser]] ## Log files to parse. ## These accept standard unix glob matching rules, but with the addition of @@ -88,17 +97,20 @@ Migration Example: ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones ## 3. UTC -- or blank/unspecified, will return timestamp in UTC # timezone = "Canada/Eastern" + + ## When set to "disable", timestamp will not incremented if there is a + ## duplicate. + # unique_timestamp = "auto" ``` -### Grok Parser +## Grok Parser Reference the [grok parser][] documentation to setup the grok section of the configuration. 
+## Additional Resources -### Additional Resources - -- https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/ +- <https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/> [tail]: /plugins/inputs/tail/README.md [grok parser]: /plugins/parsers/grok/README.md diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 4fbd2e90d921c..d297393a9e3d0 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -1,19 +1,29 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build !solaris // +build !solaris package logparser import ( + _ "embed" "fmt" "strings" "sync" "github.com/influxdata/tail" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/grok" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( defaultWatchMethod = "inotify" ) @@ -74,76 +84,17 @@ func NewLogParser() *LogParserPlugin { } } -const sampleConfig = ` - ## Log files to parse. - ## These accept standard unix glob matching rules, but with the addition of - ## ** as a "super asterisk". ie: - ## /var/log/**.log -> recursively find all .log files in /var/log - ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log - ## /var/log/apache.log -> only tail the apache log file - files = ["/var/log/apache/access.log"] - - ## Read files that currently exist from the beginning. Files that are created - ## while telegraf is running (and that match the "files" globs) will always - ## be read from the beginning. - from_beginning = false - - ## Method used to watch for file updates. Can be either "inotify" or "poll". - # watch_method = "inotify" - - ## Parse logstash-style "grok" patterns: - [inputs.logparser.grok] - ## This is a list of patterns to check the given log file(s) for. - ## Note that adding patterns here increases processing time. The most - ## efficient configuration is to have one pattern per logparser. - ## Other common built-in patterns are: - ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) - ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) - patterns = ["%{COMBINED_LOG_FORMAT}"] - - ## Name of the outputted measurement name. - measurement = "apache_access_log" - - ## Full path(s) to custom pattern files. - custom_pattern_files = [] - - ## Custom patterns can also be defined here. Put one pattern per line. - custom_patterns = ''' - ''' - - ## Timezone allows you to provide an override for timestamps that - ## don't already include an offset - ## e.g. 04/06/2016 12:41:45 data one two 5.43µs - ## - ## Default: "" which renders UTC - ## Options are as follows: - ## 1. Local -- interpret based on machine localtime - ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones - ## 3. UTC -- or blank/unspecified, will return timestamp in UTC - # timezone = "Canada/Eastern" - - ## When set to "disable", timestamp will not incremented if there is a
- # unique_timestamp = "auto" -` - -// SampleConfig returns the sample configuration for the plugin -func (l *LogParserPlugin) SampleConfig() string { +func (*LogParserPlugin) SampleConfig() string { return sampleConfig } -// Description returns the human readable description for the plugin -func (l *LogParserPlugin) Description() string { - return "Stream and parse log file(s)." -} - func (l *LogParserPlugin) Init() error { l.Log.Warnf(`The logparser plugin is deprecated; please use the 'tail' input with the 'grok' data_format`) return nil } // Gather is the primary function to collect the metrics for the plugin -func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error { +func (l *LogParserPlugin) Gather(_ telegraf.Accumulator) error { l.Lock() defer l.Unlock() @@ -167,22 +118,21 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { } // Looks for fields which implement LogParser interface - config := &parsers.Config{ - MetricName: mName, - GrokPatterns: l.GrokConfig.Patterns, - GrokNamedPatterns: l.GrokConfig.NamedPatterns, - GrokCustomPatterns: l.GrokConfig.CustomPatterns, - GrokCustomPatternFiles: l.GrokConfig.CustomPatternFiles, - GrokTimezone: l.GrokConfig.Timezone, - GrokUniqueTimestamp: l.GrokConfig.UniqueTimestamp, - DataFormat: "grok", + parser := grok.Parser{ + Measurement: mName, + Patterns: l.GrokConfig.Patterns, + NamedPatterns: l.GrokConfig.NamedPatterns, + CustomPatterns: l.GrokConfig.CustomPatterns, + CustomPatternFiles: l.GrokConfig.CustomPatternFiles, + Timezone: l.GrokConfig.Timezone, + UniqueTimestamp: l.GrokConfig.UniqueTimestamp, } - - var err error - l.GrokParser, err = parsers.NewParser(config) + err := parser.Init() if err != nil { return err } + l.GrokParser = &parser + models.SetLoggerOnPlugin(l.GrokParser, l.Log) l.wg.Add(1) go l.parser() @@ -271,7 +221,6 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { var line *tail.Line for line = range tailer.Lines { - if line.Err != nil { l.Log.Errorf("Error tailing file %s, Error: %s", tailer.Filename, line.Err) @@ -321,7 +270,6 @@ func (l *LogParserPlugin) parser() { } else { l.Log.Errorf("Error parsing log line: %s", err.Error()) } - } } diff --git a/plugins/inputs/logparser/logparser_solaris.go b/plugins/inputs/logparser/logparser_solaris.go index 28afe26772846..da482b97d27be 100644 --- a/plugins/inputs/logparser/logparser_solaris.go +++ b/plugins/inputs/logparser/logparser_solaris.go @@ -1,3 +1,4 @@ +//go:build solaris // +build solaris package logparser diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 142f78d464963..58d74cb09fd4a 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -1,60 +1,58 @@ package logparser import ( - "io/ioutil" "os" - "runtime" - "strings" + "path/filepath" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" +) + +var ( + testdataDir = getTestdataDir() ) func TestStartNoParsers(t *testing.T) { logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{"testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, } acc := testutil.Accumulator{} - assert.Error(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) } func TestGrokParseLogFilesNonExistPattern(t *testing.T) { - thisdir := getCurrentDir() - logparser := 
&LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{thisdir + "testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, GrokConfig: GrokConfig{ Patterns: []string{"%{FOOBAR}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } acc := testutil.Accumulator{} err := logparser.Start(&acc) - assert.Error(t, err) + require.Error(t, err) } func TestGrokParseLogFiles(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}", "%{TEST_LOG_C}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, FromBeginning: true, - Files: []string{thisdir + "testdata/*.log"}, + Files: []string{filepath.Join(testdataDir, "*.log")}, } acc := testutil.Accumulator{} @@ -68,7 +66,7 @@ func TestGrokParseLogFiles(t *testing.T) { "logparser_grok", map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_a.log", + "path": filepath.Join(testdataDir, "test_a.log"), }, map[string]interface{}{ "clientip": "192.168.1.1", @@ -81,7 +79,7 @@ func TestGrokParseLogFiles(t *testing.T) { testutil.MustMetric( "logparser_grok", map[string]string{ - "path": thisdir + "testdata/test_b.log", + "path": filepath.Join(testdataDir, "test_b.log"), }, map[string]interface{}{ "myfloat": 1.25, @@ -93,7 +91,7 @@ func TestGrokParseLogFiles(t *testing.T) { testutil.MustMetric( "logparser_grok", map[string]string{ - "path": thisdir + "testdata/test_c.log", + "path": filepath.Join(testdataDir, "test_c.log"), "response_code": "200", }, map[string]interface{}{ @@ -111,30 +109,41 @@ func TestGrokParseLogFiles(t *testing.T) { } func TestGrokParseLogFilesAppearLater(t *testing.T) { - emptydir, err := ioutil.TempDir("", "TestGrokParseLogFilesAppearLater") + // TODO: t.TempDir will fail on Windows because it could not remove + // test.a.log file. This seems like an issue with the tail package, it + // is not closing the os.File properly on Stop. + // === RUN TestGrokParseLogFilesAppearLater + //2022/04/16 11:05:13 D! [] Tail added for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log + //2022/04/16 11:05:13 D! [] Tail dropped for file: C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001\test_a.log + // testing.go:1090: TempDir RemoveAll cleanup: CreateFile C:\Users\circleci\AppData\Local\Temp\TestGrokParseLogFilesAppearLater3687440534\001: Access is denied. 
+ //--- FAIL: TestGrokParseLogFilesAppearLater (1.68s) + emptydir, err := os.MkdirTemp("", "TestGrokParseLogFilesAppearLater") + require.NoError(t, err) defer os.RemoveAll(emptydir) - assert.NoError(t, err) - - thisdir := getCurrentDir() logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{emptydir + "/*.log"}, + Files: []string{filepath.Join(emptydir, "*.log")}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } acc := testutil.Accumulator{} - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) + + require.Equal(t, acc.NFields(), 0) + + input, err := os.ReadFile(filepath.Join(testdataDir, "test_a.log")) + require.NoError(t, err) - assert.Equal(t, acc.NFields(), 0) + err = os.WriteFile(filepath.Join(emptydir, "test_a.log"), input, 0644) + require.NoError(t, err) - _ = os.Symlink(thisdir+"testdata/test_a.log", emptydir+"/test_a.log") - assert.NoError(t, acc.GatherError(logparser.Gather)) + require.NoError(t, acc.GatherError(logparser.Gather)) acc.Wait(1) logparser.Stop() @@ -148,29 +157,27 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": emptydir + "/test_a.log", + "path": filepath.Join(emptydir, "test_a.log"), }) } // Test that test_a.log line gets parsed even though we don't have the correct // pattern available for test_b.log func TestGrokParseLogFilesOneBad(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, FromBeginning: true, - Files: []string{thisdir + "testdata/test_a.log"}, + Files: []string{filepath.Join(testdataDir, "test_a.log")}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, } acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() @@ -184,27 +191,25 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_a.log", + "path": filepath.Join(testdataDir, "test_a.log"), }) } func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { - thisdir := getCurrentDir() - logparser := &LogParserPlugin{ Log: testutil.Logger{}, GrokConfig: GrokConfig{ MeasurementName: "logparser_grok", Patterns: []string{"%{TEST_LOG_C}"}, - CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + CustomPatternFiles: []string{filepath.Join(testdataDir, "test-patterns")}, }, FromBeginning: true, - Files: []string{thisdir + "testdata/test_c.log"}, + Files: []string{filepath.Join(testdataDir, "test_c.log")}, } acc := testutil.Accumulator{} acc.SetDebug(true) - assert.NoError(t, logparser.Start(&acc)) + require.NoError(t, logparser.Start(&acc)) acc.Wait(1) logparser.Stop() @@ -218,11 +223,16 @@ func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "testdata/test_c.log", + "path": filepath.Join(testdataDir, "test_c.log"), }) } -func getCurrentDir() string { - _, filename, _, _ := runtime.Caller(1) - return strings.Replace(filename, 
"logparser_test.go", "", 1) +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + return filepath.Join(dir, "testdata") } diff --git a/plugins/inputs/logparser/sample.conf b/plugins/inputs/logparser/sample.conf new file mode 100644 index 0000000000000..7ac3ac93ea113 --- /dev/null +++ b/plugins/inputs/logparser/sample.conf @@ -0,0 +1,52 @@ +# Read metrics off Arista LANZ, via socket +[[inputs.logparser]] + ## Log files to parse. + ## These accept standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk". ie: + ## /var/log/**.log -> recursively find all .log files in /var/log + ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log + ## /var/log/apache.log -> only tail the apache log file + files = ["/var/log/apache/access.log"] + + ## Read files that currently exist from the beginning. Files that are created + ## while telegraf is running (and that match the "files" globs) will always + ## be read from the beginning. + from_beginning = false + + ## Method used to watch for file updates. Can be either "inotify" or "poll". + # watch_method = "inotify" + + ## Parse logstash-style "grok" patterns: + [inputs.logparser.grok] + ## This is a list of patterns to check the given log file(s) for. + ## Note that adding patterns here increases processing time. The most + ## efficient configuration is to have one pattern per logparser. + ## Other common built-in patterns are: + ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) + ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) + patterns = ["%{COMBINED_LOG_FORMAT}"] + + ## Name of the outputted measurement name. + measurement = "apache_access_log" + + ## Full path(s) to custom pattern files. + custom_pattern_files = [] + + ## Custom patterns can also be defined here. Put one pattern per line. + custom_patterns = ''' + ''' + + ## Timezone allows you to provide an override for timestamps that + ## don't already include an offset + ## e.g. 04/06/2016 12:41:45 data one two 5.43µs + ## + ## Default: "" which renders UTC + ## Options are as follows: + ## 1. Local -- interpret based on machine localtime + ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + ## 3. UTC -- or blank/unspecified, will return timestamp in UTC + # timezone = "Canada/Eastern" + + ## When set to "disable", timestamp will not incremented if there is a + ## duplicate. + # unique_timestamp = "auto" diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md index 9571de5fd8873..15963430050b3 100644 --- a/plugins/inputs/logstash/README.md +++ b/plugins/inputs/logstash/README.md @@ -1,13 +1,14 @@ # Logstash Input Plugin -This plugin reads metrics exposed by -[Logstash Monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html). +This plugin reads metrics exposed by [Logstash Monitoring +API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html). Logstash 5 and later is supported. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read metrics exposed by Logstash [[inputs.logstash]] ## The URL of the exposed Logstash API endpoint. url = "http://127.0.0.1:9600" @@ -40,7 +41,10 @@ Logstash 5 and later is supported. 
# "X-Special-Header" = "Special-Value" ``` -### Metrics +## Metrics + +Additional plugin stats may be collected (because logstash doesn't consistently +expose all stats) - logstash_jvm - tags: @@ -78,7 +82,7 @@ Logstash 5 and later is supported. - gc_collectors_young_collection_count - uptime_in_millis -+ logstash_process +- logstash_process - tags: - node_id - node_name @@ -110,7 +114,7 @@ Logstash 5 and later is supported. - filtered - out -+ logstash_plugins +- logstash_plugins - tags: - node_id - node_name @@ -125,6 +129,10 @@ Logstash 5 and later is supported. - duration_in_millis - in - out + - bulk_requests_failures (for Logstash 7+) + - bulk_requests_with_errors (for Logstash 7+) + - documents_successes (for logstash 7+) + - documents_retryable_failures (for logstash 7+) - logstash_queue - tags: @@ -142,9 +150,9 @@ Logstash 5 and later is supported. - page_capacity_in_bytes - queue_size_in_bytes -### Example Output +## Example Output -``` +```shell logstash_jvm,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt gc_collectors_old_collection_count=2,gc_collectors_old_collection_time_in_millis=100,gc_collectors_young_collection_count=26,gc_collectors_young_collection_time_in_millis=1028,mem_heap_committed_in_bytes=1056309248,mem_heap_max_in_bytes=1056309248,mem_heap_used_in_bytes=207216328,mem_heap_used_percent=19,mem_non_heap_committed_in_bytes=160878592,mem_non_heap_used_in_bytes=140838184,mem_pools_old_committed_in_bytes=899284992,mem_pools_old_max_in_bytes=899284992,mem_pools_old_peak_max_in_bytes=899284992,mem_pools_old_peak_used_in_bytes=189468088,mem_pools_old_used_in_bytes=189468088,mem_pools_survivor_committed_in_bytes=17432576,mem_pools_survivor_max_in_bytes=17432576,mem_pools_survivor_peak_max_in_bytes=17432576,mem_pools_survivor_peak_used_in_bytes=17432576,mem_pools_survivor_used_in_bytes=12572640,mem_pools_young_committed_in_bytes=139591680,mem_pools_young_max_in_bytes=139591680,mem_pools_young_peak_max_in_bytes=139591680,mem_pools_young_peak_used_in_bytes=139591680,mem_pools_young_used_in_bytes=5175600,threads_count=20,threads_peak_count=24,uptime_in_millis=739089 1566425244000000000 logstash_process,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt cpu_load_average_15m=0.03,cpu_load_average_1m=0.01,cpu_load_average_5m=0.04,cpu_percent=0,cpu_total_in_millis=83230,max_file_descriptors=16384,mem_total_virtual_in_bytes=3689132032,open_file_descriptors=118,peak_open_file_descriptors=118 1566425244000000000 logstash_events,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,source=debian-stretch-logstash6.virt duration_in_millis=0,filtered=0,in=0,out=0,queue_push_duration_in_millis=0 1566425244000000000 diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go index e360ba032ff35..98b94f6783d15 100644 --- a/plugins/inputs/logstash/logstash.go +++ b/plugins/inputs/logstash/logstash.go @@ -1,54 +1,27 @@ +//go:generate ../../../tools/readme_config_includer/generator package logstash import ( + _ "embed" "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/choice" "github.com/influxdata/telegraf/plugins/common/tls" 
"github.com/influxdata/telegraf/plugins/inputs" jsonParser "github.com/influxdata/telegraf/plugins/parsers/json" ) -const sampleConfig = ` - ## The URL of the exposed Logstash API endpoint. - url = "http://127.0.0.1:9600" - - ## Use Logstash 5 single pipeline API, set to true when monitoring - ## Logstash 5. - # single_pipeline = false - - ## Enable optional collection components. Can contain - ## "pipelines", "process", and "jvm". - # collect = ["pipelines", "process", "jvm"] - - ## Timeout for HTTP requests. - # timeout = "5s" - - ## Optional HTTP Basic Auth credentials. - # username = "username" - # password = "pa$$word" - - ## Optional TLS Config. - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Use TLS but skip chain & host verification. - # insecure_skip_verify = false - - ## Optional HTTP headers. - # [inputs.logstash.headers] - # "X-Special-Header" = "Special-Value" -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string type Logstash struct { URL string `toml:"url"` @@ -59,7 +32,7 @@ type Logstash struct { Username string `toml:"username"` Password string `toml:"password"` Headers map[string]string `toml:"headers"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -72,20 +45,10 @@ func NewLogstash() *Logstash { SinglePipeline: false, Collect: []string{"pipelines", "process", "jvm"}, Headers: make(map[string]string), - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } -// Description returns short info about plugin -func (logstash *Logstash) Description() string { - return "Read metrics exposed by Logstash" -} - -// SampleConfig returns details how to configure plugin -func (logstash *Logstash) SampleConfig() string { - return sampleConfig -} - type ProcessStats struct { ID string `json:"id"` Process interface{} `json:"process"` @@ -126,9 +89,11 @@ type Pipeline struct { } type Plugin struct { - ID string `json:"id"` - Events interface{} `json:"events"` - Name string `json:"name"` + ID string `json:"id"` + Events interface{} `json:"events"` + Name string `json:"name"` + BulkRequests map[string]interface{} `json:"bulk_requests"` + Documents map[string]interface{} `json:"documents"` } type PipelinePlugins struct { @@ -138,10 +103,13 @@ type PipelinePlugins struct { } type PipelineQueue struct { - Events float64 `json:"events"` - Type string `json:"type"` - Capacity interface{} `json:"capacity"` - Data interface{} `json:"data"` + Events float64 `json:"events"` + EventsCount *float64 `json:"events_count"` + Type string `json:"type"` + Capacity interface{} `json:"capacity"` + Data interface{} `json:"data"` + QueueSizeInBytes *float64 `json:"queue_size_in_bytes"` + MaxQueueSizeInBytes *float64 `json:"max_queue_size_in_bytes"` } const jvmStats = "/_node/stats/jvm" @@ -149,16 +117,20 @@ const processStats = "/_node/stats/process" const pipelinesStats = "/_node/stats/pipelines" const pipelineStats = "/_node/stats/pipeline" -func (i *Logstash) Init() error { - err := choice.CheckSlice(i.Collect, []string{"pipelines", "process", "jvm"}) +func (*Logstash) SampleConfig() string { + return sampleConfig +} + +func (logstash *Logstash) Init() error { + err := choice.CheckSlice(logstash.Collect, []string{"pipelines", "process", "jvm"}) if err != nil { return fmt.Errorf(`cannot verify "collect" setting: %v`, err) } 
return nil } -// createHttpClient create a clients to access API -func (logstash *Logstash) createHttpClient() (*http.Client, error) { +// createHTTPClient creates a client to access the API +func (logstash *Logstash) createHTTPClient() (*http.Client, error) { tlsConfig, err := logstash.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -168,15 +140,15 @@ func (logstash *Logstash) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: logstash.Timeout.Duration, + Timeout: time.Duration(logstash.Timeout), } return client, nil } -// gatherJsonData query the data source and parse the response JSON -func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { - request, err := http.NewRequest("GET", url, nil) +// gatherJSONData queries the data source and parses the response JSON +func (logstash *Logstash) gatherJSONData(address string, value interface{}) error { + request, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -201,8 +173,8 @@ func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -214,10 +186,10 @@ func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { } // gatherJVMStats gather the JVM metrics and add results to the accumulator -func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherJVMStats(address string, accumulator telegraf.Accumulator) error { jvmStats := &JVMStats{} - err := logstash.gatherJsonData(url, jvmStats) + err := logstash.gatherJSONData(address, jvmStats) if err != nil { return err } @@ -240,10 +212,10 @@ func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumu } // gatherJVMStats gather the Process metrics and add results to the accumulator -func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherProcessStats(address string, accumulator telegraf.Accumulator) error { processStats := &ProcessStats{} - err := logstash.gatherJsonData(url, processStats) + err := logstash.gatherJSONData(address, processStats) if err != nil { return err } @@ -270,8 +242,8 @@ func (logstash *Logstash) gatherPluginsStats( plugins []Plugin, pluginType string, tags map[string]string, - accumulator telegraf.Accumulator) error { - + accumulator telegraf.Accumulator, +) error { for _, plugin := range plugins { pluginTags := map[string]string{ "plugin_name": plugin.Name, @@ -287,6 +259,63 @@ func (logstash *Logstash) gatherPluginsStats( return err } accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + /* + The elasticsearch output produces additional stats around + bulk requests and document writes (that are Elasticsearch-specific). 
+ Collect those here + */ + if pluginType == "output" && plugin.Name == "elasticsearch" { + /* + The "bulk_requests" section has details about batch writes + into Elasticsearch + + "bulk_requests" : { + "successes" : 2870, + "responses" : { + "200" : 2870 + }, + "failures": 262, + "with_errors": 9089 + }, + */ + flattener := jsonParser.JSONFlattener{} + err := flattener.FlattenJSON("", plugin.BulkRequests) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "bulk_requests") { + continue + } + newKey := fmt.Sprintf("bulk_requests_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + + /* + The "documents" section has counts of individual documents + written/retried/etc. + "documents" : { + "successes" : 2665549, + "retryable_failures": 13733 + } + */ + flattener = jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", plugin.Documents) + if err != nil { + return err + } + for k, v := range flattener.Fields { + if strings.HasPrefix(k, "documents") { + continue + } + newKey := fmt.Sprintf("documents_%s", k) + flattener.Fields[newKey] = v + delete(flattener.Fields, k) + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + } } return nil @@ -295,9 +324,8 @@ func (logstash *Logstash) gatherPluginsStats( func (logstash *Logstash) gatherQueueStats( queue *PipelineQueue, tags map[string]string, - accumulator telegraf.Accumulator) error { - - var err error + accumulator telegraf.Accumulator, +) error { queueTags := map[string]string{ "queue_type": queue.Type, } @@ -305,13 +333,18 @@ func (logstash *Logstash) gatherQueueStats( queueTags[tag] = value } + events := queue.Events + if queue.EventsCount != nil { + events = *queue.EventsCount + } + queueFields := map[string]interface{}{ - "events": queue.Events, + "events": events, } if queue.Type != "memory" { flattener := jsonParser.JSONFlattener{} - err = flattener.FlattenJSON("", queue.Capacity) + err := flattener.FlattenJSON("", queue.Capacity) if err != nil { return err } @@ -322,6 +355,14 @@ func (logstash *Logstash) gatherQueueStats( for field, value := range flattener.Fields { queueFields[field] = value } + + if queue.MaxQueueSizeInBytes != nil { + queueFields["max_queue_size_in_bytes"] = *queue.MaxQueueSizeInBytes + } + + if queue.QueueSizeInBytes != nil { + queueFields["queue_size_in_bytes"] = *queue.QueueSizeInBytes + } } accumulator.AddFields("logstash_queue", queueFields, queueTags) @@ -330,10 +371,10 @@ func (logstash *Logstash) gatherQueueStats( } // gatherJVMStats gather the Pipeline metrics and add results to the accumulator (for Logstash < 6) -func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelineStats(address string, accumulator telegraf.Accumulator) error { pipelineStats := &PipelineStats{} - err := logstash.gatherJsonData(url, pipelineStats) + err := logstash.gatherJSONData(address, pipelineStats) if err != nil { return err } @@ -374,10 +415,10 @@ func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.A } // gatherJVMStats gather the Pipelines metrics and add results to the accumulator (for Logstash >= 6) -func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error { +func (logstash *Logstash) gatherPipelinesStats(address string, accumulator telegraf.Accumulator) error { pipelinesStats := &PipelinesStats{} - err := 
logstash.gatherJsonData(url, pipelinesStats) + err := logstash.gatherJSONData(address, pipelinesStats) if err != nil { return err } @@ -423,7 +464,7 @@ func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf. // Gather ask this plugin to start gathering metrics func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { if logstash.client == nil { - client, err := logstash.createHttpClient() + client, err := logstash.createHTTPClient() if err != nil { return err @@ -432,40 +473,40 @@ func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { } if choice.Contains("jvm", logstash.Collect) { - jvmUrl, err := url.Parse(logstash.URL + jvmStats) + jvmURL, err := url.Parse(logstash.URL + jvmStats) if err != nil { return err } - if err := logstash.gatherJVMStats(jvmUrl.String(), accumulator); err != nil { + if err := logstash.gatherJVMStats(jvmURL.String(), accumulator); err != nil { return err } } if choice.Contains("process", logstash.Collect) { - processUrl, err := url.Parse(logstash.URL + processStats) + processURL, err := url.Parse(logstash.URL + processStats) if err != nil { return err } - if err := logstash.gatherProcessStats(processUrl.String(), accumulator); err != nil { + if err := logstash.gatherProcessStats(processURL.String(), accumulator); err != nil { return err } } if choice.Contains("pipelines", logstash.Collect) { if logstash.SinglePipeline { - pipelineUrl, err := url.Parse(logstash.URL + pipelineStats) + pipelineURL, err := url.Parse(logstash.URL + pipelineStats) if err != nil { return err } - if err := logstash.gatherPipelineStats(pipelineUrl.String(), accumulator); err != nil { + if err := logstash.gatherPipelineStats(pipelineURL.String(), accumulator); err != nil { return err } } else { - pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats) + pipelinesURL, err := url.Parse(logstash.URL + pipelinesStats) if err != nil { return err } - if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil { + if err := logstash.gatherPipelinesStats(pipelinesURL.String(), accumulator); err != nil { return err } } diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go index aeb4e46f8dbb6..6059d4eac9b1b 100644 --- a/plugins/inputs/logstash/logstash_test.go +++ b/plugins/inputs/logstash/logstash_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var logstashTest = NewLogstash() @@ -16,6 +17,7 @@ var logstashTest = NewLogstash() var ( logstash5accPipelineStats testutil.Accumulator logstash6accPipelinesStats testutil.Accumulator + logstash7accPipelinesStats testutil.Accumulator logstash5accProcessStats testutil.Accumulator logstash6accProcessStats testutil.Accumulator logstash5accJVMStats testutil.Accumulator @@ -25,28 +27,24 @@ var ( func Test_Logstash5GatherProcessStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } - fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(test, err, "Can't connect 
to: %s", logstashTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoError(test, err) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats); err != nil { - test.Logf("Can't gather Process stats") - } + err = logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats) + require.NoError(test, err, "Can't gather Process stats") logstash5accProcessStats.AssertContainsTaggedFields( test, @@ -74,28 +72,24 @@ func Test_Logstash5GatherProcessStats(test *testing.T) { func Test_Logstash6GatherProcessStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } - fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoError(test, err) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats); err != nil { - test.Logf("Can't gather Process stats") - } + err = logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats) + require.NoError(test, err, "Can't gather Process stats") logstash6accProcessStats.AssertContainsTaggedFields( test, @@ -124,28 +118,24 @@ func Test_Logstash5GatherPipelineStats(test *testing.T) { //logstash5accPipelineStats.SetDebug(true) fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } - fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoError(test, err) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't 
createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats); err != nil { - test.Logf("Can't gather Pipeline stats") - } + err = logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats) + require.NoError(test, err, "Can't gather Pipeline stats") logstash5accPipelineStats.AssertContainsTaggedFields( test, @@ -226,28 +216,24 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { //logstash6accPipelinesStats.SetDebug(true) fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } - fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoError(test, err) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats); err != nil { - test.Logf("Can't gather Pipeline stats") - } + err = logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats) + require.NoError(test, err, "Can't gather Pipeline stats") fields := make(map[string]interface{}) fields["duration_in_millis"] = float64(8540751.0) @@ -549,34 +535,29 @@ func Test_Logstash6GatherPipelinesStats(test *testing.T) { "queue_type": string("persisted"), }, ) - } func Test_Logstash5GatherJVMStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash5JvmJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash5JvmJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } - fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoError(test, err) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats); err 
!= nil { - test.Logf("Can't gather JVM stats") - } + err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats) + require.NoError(test, err, "Can't gather JVM stats") logstash5accJVMStats.AssertContainsTaggedFields( test, @@ -618,34 +599,29 @@ func Test_Logstash5GatherJVMStats(test *testing.T) { "node_version": string("5.3.0"), }, ) - } func Test_Logstash6GatherJVMStats(test *testing.T) { fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { writer.Header().Set("Content-Type", "application/json") - fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + _, err := fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + require.NoError(test, err) })) requestURL, err := url.Parse(logstashTest.URL) - if err != nil { - test.Logf("Can't connect to: %s", logstashTest.URL) - } - fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoError(test, err) fakeServer.Start() defer fakeServer.Close() if logstashTest.client == nil { - client, err := logstashTest.createHttpClient() - - if err != nil { - test.Logf("Can't createHttpClient") - } + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") logstashTest.client = client } - if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats); err != nil { - test.Logf("Can't gather JVM stats") - } + err = logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats) + require.NoError(test, err, "Can't gather JVM stats") logstash6accJVMStats.AssertContainsTaggedFields( test, @@ -687,5 +663,131 @@ func Test_Logstash6GatherJVMStats(test *testing.T) { "node_version": string("6.4.2"), }, ) +} + +func Test_Logstash7GatherPipelinesQueueStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + _, err := fmt.Fprintf(writer, "%s", string(logstash7PipelinesJSON)) + require.NoError(test, err) + })) + requestURL, err := url.Parse(logstashTest.URL) + require.NoErrorf(test, err, "Can't connect to: %s", logstashTest.URL) + fakeServer.Listener, err = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + require.NoError(test, err) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHTTPClient() + require.NoError(test, err, "Can't createHTTPClient") + logstashTest.client = client + } + + err = logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash7accPipelinesStats) + require.NoError(test, err, "Can't gather Pipeline stats") + + fields := make(map[string]interface{}) + fields["duration_in_millis"] = float64(3032875.0) + fields["queue_push_duration_in_millis"] = float64(13300.0) + fields["in"] = float64(2665549.0) + fields["filtered"] = float64(2665549.0) + fields["out"] = float64(2665549.0) + + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_events", + fields, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": 
string("7.4.2"), + "pipeline": string("infra"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2802177.0), + "in": float64(2665549.0), + "out": float64(2665549.0), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "bulk_requests_successes": float64(2870), + "bulk_requests_responses_200": float64(2870), + "bulk_requests_failures": float64(262), + "bulk_requests_with_errors": float64(9089), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "documents_successes": float64(2665549), + "documents_retryable_failures": float64(13733), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3"), + "plugin_type": string("output"), + }, + ) + + logstash7accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_queue", + map[string]interface{}{ + "events": float64(0), + "max_queue_size_in_bytes": float64(4294967296), + "queue_size_in_bytes": float64(32028566), + }, + map[string]string{ + "node_id": string("28580380-ad2c-4032-934b-76359125edca"), + "node_name": string("HOST01.local"), + "source": string("HOST01.local"), + "node_version": string("7.4.2"), + "pipeline": string("infra"), + "queue_type": string("persisted"), + }, + ) } diff --git a/plugins/inputs/logstash/sample.conf b/plugins/inputs/logstash/sample.conf new file mode 100644 index 0000000000000..ed62dce13e4ad --- /dev/null +++ b/plugins/inputs/logstash/sample.conf @@ -0,0 +1,31 @@ +# Read metrics exposed by Logstash +[[inputs.logstash]] + ## The URL of the exposed Logstash API endpoint. + url = "http://127.0.0.1:9600" + + ## Use Logstash 5 single pipeline API, set to true when monitoring + ## Logstash 5. + # single_pipeline = false + + ## Enable optional collection components. Can contain + ## "pipelines", "process", and "jvm". + # collect = ["pipelines", "process", "jvm"] + + ## Timeout for HTTP requests. + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials. + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Use TLS but skip chain & host verification. + # insecure_skip_verify = false + + ## Optional HTTP headers. 
+ # [inputs.logstash.headers] + # "X-Special-Header" = "Special-Value" diff --git a/plugins/inputs/logstash/samples_logstash7.go b/plugins/inputs/logstash/samples_logstash7.go new file mode 100644 index 0000000000000..e04bb4319a27a --- /dev/null +++ b/plugins/inputs/logstash/samples_logstash7.go @@ -0,0 +1,140 @@ +package logstash + +const logstash7PipelinesJSON = ` +{ + "host" : "HOST01.local", + "version" : "7.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "28580380-ad2c-4032-934b-76359125edca", + "name" : "HOST01.local", + "ephemeral_id" : "bd95ff6b-3fa8-42ae-be32-098a4e4ea1ec", + "status" : "green", + "snapshot" : true, + "pipeline" : { + "workers" : 8, + "batch_size" : 125, + "batch_delay" : 50 + }, + "pipelines" : { + "infra" : { + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 3032875, + "filtered" : 2665549, + "queue_push_duration_in_millis" : 13300 + }, + "plugins" : { + "inputs" : [ { + "id" : "8526dc80bc2257ab08f96018f96b0c68dd03abc5695bb22fb9e96339a8dfb4f86", + "events" : { + "out" : 2665549, + "queue_push_duration_in_millis" : 13300 + }, + "peak_connections" : 1, + "name" : "beats", + "current_connections" : 1 + } ], + "codecs" : [ { + "id" : "plain_7312c097-1e7f-41db-983b-4f5a87a9eba2", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "plain", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + }, { + "id" : "rubydebug_e958e3dc-10f6-4dd6-b7c5-ae3de2892afb", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "rubydebug", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + }, { + "id" : "plain_addb97be-fb77-4cbc-b45c-0424cd5d0ac7", + "encode" : { + "duration_in_millis" : 0, + "writes_in" : 0 + }, + "name" : "plain", + "decode" : { + "out" : 0, + "duration_in_millis" : 0, + "writes_in" : 0 + } + } ], + "filters" : [ { + "id" : "9e8297a6ee7b61864f77853317dccde83d29952ef869010c385dcfc9064ab8b8", + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 8648 + }, + "name" : "date", + "matches" : 2665549 + }, { + "id" : "bec0c77b3f53a78c7878449c72ec59f97be31c1f12f9621f61ed2d4563bad869", + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 195138 + }, + "name" : "fingerprint" + } ], + "outputs" : [ { + "id" : "df59066a933f038354c1845ba44de692f70dbd0d2009ab07a12b98b776be7e3f", + "events" : { + "in" : 0, + "out" : 0, + "duration_in_millis" : 25 + }, + "name" : "stdout" + }, { + "id" : "38967f09bbd2647a95aa00702b6b557bdbbab31da6a04f991d38abe5629779e3", + "events" : { + "in" : 2665549, + "out" : 2665549, + "duration_in_millis" : 2802177 + }, + "name" : "elasticsearch", + "bulk_requests" : { + "successes" : 2870, + "responses" : { + "200" : 2870 + }, + "failures": 262, + "with_errors": 9089 + }, + "documents" : { + "successes" : 2665549, + "retryable_failures": 13733 + } + } ] + }, + "reloads" : { + "successes" : 4, + "last_error" : null, + "failures" : 0, + "last_success_timestamp" : "2020-06-05T08:06:12.538Z", + "last_failure_timestamp" : null + }, + "queue" : { + "type" : "persisted", + "events_count" : 0, + "queue_size_in_bytes" : 32028566, + "max_queue_size_in_bytes" : 4294967296 + }, + "hash" : "5bc589ae4b02cb3e436626429b50928b9d99360639c84dc7fc69268ac01a9fd0", + "ephemeral_id" : "4bcacefa-6cbf-461e-b14e-184edd9ebdf3" + } + } +}` diff --git a/plugins/inputs/lustre2/README.md b/plugins/inputs/lustre2/README.md index dbdf58f73b257..8cd3ed9139064 100644 --- a/plugins/inputs/lustre2/README.md +++ 
b/plugins/inputs/lustre2/README.md @@ -1,13 +1,14 @@ # Lustre Input Plugin -The [Lustre][]® file system is an open-source, parallel file system that supports -many requirements of leadership class HPC simulation environments. +The [Lustre][]® file system is an open-source, parallel file system that +supports many requirements of leadership class HPC simulation environments. -This plugin monitors the Lustre file system using its entries in the proc filesystem. +This plugin monitors the Lustre file system using its entries in the proc +filesystem. -### Configuration +## Configuration -```toml +```toml @sample.conf # Read metrics from local Lustre service on OST, MDS [[inputs.lustre2]] ## An array of /proc globs to search for Lustre stats @@ -17,16 +18,19 @@ This plugin monitors the Lustre file system using its entries in the proc filesy # "/proc/fs/lustre/obdfilter/*/stats", # "/proc/fs/lustre/osd-ldiskfs/*/stats", # "/proc/fs/lustre/obdfilter/*/job_stats", + # "/proc/fs/lustre/obdfilter/*/exports/*/stats", # ] # mds_procfiles = [ # "/proc/fs/lustre/mdt/*/md_stats", # "/proc/fs/lustre/mdt/*/job_stats", + # "/proc/fs/lustre/mdt/*/exports/*/stats", # ] ``` -### Metrics +## Metrics -From `/proc/fs/lustre/obdfilter/*/stats` and `/proc/fs/lustre/osd-ldiskfs/*/stats`: +From `/proc/fs/lustre/obdfilter/*/stats` and +`/proc/fs/lustre/osd-ldiskfs/*/stats`: - lustre2 - tags: @@ -40,6 +44,18 @@ From `/proc/fs/lustre/obdfilter/*/stats` and `/proc/fs/lustre/osd-ldiskfs/*/stat - cache_miss - cache_access +From `/proc/fs/lustre/obdfilter/*/exports/*/stats`: + +- lustre2 + - tags: + - name + - client + - fields: + - write_bytes + - write_calls + - read_bytes + - read_calls + From `/proc/fs/lustre/obdfilter/*/job_stats`: - lustre2 @@ -89,6 +105,30 @@ From `/proc/fs/lustre/mdt/*/md_stats`: - samedir_rename - crossdir_rename +From `/proc/fs/lustre/mdt/*/exports/*/stats`: + +- lustre2 + - tags: + - name + - client + - fields: + - open + - close + - mknod + - link + - unlink + - mkdir + - rmdir + - rename + - getattr + - setattr + - getxattr + - setxattr + - statfs + - sync + - samedir_rename + - crossdir_rename + From `/proc/fs/lustre/mdt/*/job_stats`: - lustre2 @@ -113,17 +153,16 @@ From `/proc/fs/lustre/mdt/*/job_stats`: - jobstats_sync - jobstats_unlink - -### Troubleshooting +## Troubleshooting Check for the default or custom procfiles in the proc filesystem, and reference the [Lustre Monitoring and Statistics Guide][guide]. This plugin does not report all information from these files, only a limited set of items corresponding to the above metric fields. 
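For troubleshooting reference, an OST `job_stats` procfile entry has the shape shown below; this excerpt mirrors the fixture used by this patch's own tests, and real files contain one such block per job_id:

```text
job_stats:
- job_id:          cluster-testjob1
  snapshot_time:   1461772761
  read_bytes:      { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 }
  write_bytes:     { samples: 25, unit: bytes, min: 1048576, max:16777216, sum: 26214400 }
  getattr:         { samples: 0, unit: reqs }
  setattr:         { samples: 0, unit: reqs }
  punch:           { samples: 1, unit: reqs }
```

Note the `max:16777216` token with no space after the colon: this is the case the patch's new regexp field splitter (`[ :]+`) is designed to handle.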
-### Example Output +## Example Output -``` +```shell lustre2,host=oss2,jobid=42990218,name=wrk-OST0041 jobstats_ost_setattr=0i,jobstats_ost_sync=0i,jobstats_punch=0i,jobstats_read_bytes=4096i,jobstats_read_calls=1i,jobstats_read_max_size=4096i,jobstats_read_min_size=4096i,jobstats_write_bytes=310206488i,jobstats_write_calls=7423i,jobstats_write_max_size=53048i,jobstats_write_min_size=8820i 1556525847000000000 lustre2,host=mds1,jobid=42992017,name=wrk-MDT0000 jobstats_close=31798i,jobstats_crossdir_rename=0i,jobstats_getattr=34146i,jobstats_getxattr=15i,jobstats_link=0i,jobstats_mkdir=658i,jobstats_mknod=0i,jobstats_open=31797i,jobstats_rename=0i,jobstats_rmdir=0i,jobstats_samedir_rename=0i,jobstats_setattr=1788i,jobstats_setxattr=0i,jobstats_statfs=0i,jobstats_sync=0i,jobstats_unlink=0i 1556525828000000000 diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 611ba294dbc5c..8444649b94e1c 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -1,16 +1,18 @@ -/* -Lustre 2.x telegraf plugin +//go:generate ../../../tools/readme_config_includer/generator +//go:build !windows +// +build !windows -Lustre (http://lustre.org/) is an open-source, parallel file system -for HPC environments. It stores statistics about its activity in -/proc - -*/ +// Package lustre2 (not supported on Windows) +// Lustre 2.x Telegraf plugin +// Lustre (http://lustre.org/) is an open-source, parallel file system +// for HPC environments. It stores statistics about its activity in /proc package lustre2 import ( - "io/ioutil" + _ "embed" + "os" "path/filepath" + "regexp" "strconv" "strings" @@ -18,35 +20,24 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type tags struct { - name, job string + name, job, client string } // Lustre proc files can change between versions, so we want to future-proof // by letting people choose what to look at. 
type Lustre2 struct { - Ost_procfiles []string `toml:"ost_procfiles"` - Mds_procfiles []string `toml:"mds_procfiles"` + OstProcfiles []string `toml:"ost_procfiles"` + MdsProcfiles []string `toml:"mds_procfiles"` // allFields maps an OST name to the metric fields associated with that OST allFields map[tags]map[string]interface{} } -var sampleConfig = ` - ## An array of /proc globs to search for Lustre stats - ## If not specified, the default will work on Lustre 2.5.x - ## - # ost_procfiles = [ - # "/proc/fs/lustre/obdfilter/*/stats", - # "/proc/fs/lustre/osd-ldiskfs/*/stats", - # "/proc/fs/lustre/obdfilter/*/job_stats", - # ] - # mds_procfiles = [ - # "/proc/fs/lustre/mdt/*/md_stats", - # "/proc/fs/lustre/mdt/*/job_stats", - # ] -` - /* The wanted fields would be a []string if not for the lines that start with read_bytes/write_bytes and contain both the byte count and the function call count @@ -55,10 +46,9 @@ type mapping struct { inProc string // What to look for at the start of a line in /proc/fs/lustre/* field uint32 // which field to extract from that line reportAs string // What measurement name to use - tag string // Additional tag to add for this metric } -var wanted_ost_fields = []*mapping{ +var wantedOstFields = []*mapping{ { inProc: "write_bytes", field: 6, @@ -90,7 +80,7 @@ var wanted_ost_fields = []*mapping{ }, } -var wanted_ost_jobstats_fields = []*mapping{ +var wantedOstJobstatsFields = []*mapping{ { // The read line has several fields, so we need to differentiate what they are inProc: "read", field: 3, @@ -223,7 +213,7 @@ var wanted_ost_jobstats_fields = []*mapping{ }, } -var wanted_mds_fields = []*mapping{ +var wantedMdsFields = []*mapping{ { inProc: "open", }, @@ -274,7 +264,7 @@ var wanted_mds_fields = []*mapping{ }, } -var wanted_mdt_jobstats_fields = []*mapping{ +var wantedMdtJobstatsFields = []*mapping{ { inProc: "open", field: 3, @@ -357,29 +347,48 @@ var wanted_mdt_jobstats_fields = []*mapping{ }, } -func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, acc telegraf.Accumulator) error { +func (*Lustre2) SampleConfig() string { + return sampleConfig +} + +func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping) error { files, err := filepath.Glob(fileglob) if err != nil { return err } + fieldSplitter := regexp.MustCompile(`[ :]+`) + for _, file := range files { - /* Turn /proc/fs/lustre/obdfilter/<ost_name>/stats and similar - * into just the object store target name - * Assumption: the target name is always second to last, - * which is true in Lustre 2.1->2.8 + + /* From /proc/fs/lustre/obdfilter/<ost_name>/stats and similar + * extract the object store target name, + * and for per-client files under + * /proc/fs/lustre/obdfilter/<ost_name>/exports/<client_nid>/stats + * and similar the client NID + * Assumption: the target name is fourth to last + * for per-client files and second to last otherwise + * and the client NID is always second to last, + * which is true in Lustre 2.1->2.14 */ path := strings.Split(file, "/") - name := path[len(path)-2] + var name, client string + if strings.Contains(file, "/exports/") { + name = path[len(path)-4] + client = path[len(path)-2] + } else { + name = path[len(path)-2] + client = "" + } //lines, err := internal.ReadLines(file) - wholeFile, err := ioutil.ReadFile(file) + wholeFile, err := os.ReadFile(file) if err != nil { return err } jobs := strings.Split(string(wholeFile), "- ") for _, job := range jobs { - lines := strings.Split(string(job), "\n") + lines := strings.Split(job, "\n") jobid := "" // figure out if the data 
should be tagged with job_id here @@ -393,25 +402,29 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a if len(line) < 1 { continue } - parts := strings.Fields(line) + + parts := fieldSplitter.Split(line, -1) + if len(parts[0]) == 0 { + parts = parts[1:] + } var fields map[string]interface{} - fields, ok := l.allFields[tags{name, jobid}] + fields, ok := l.allFields[tags{name, jobid, client}] if !ok { fields = make(map[string]interface{}) - l.allFields[tags{name, jobid}] = fields + l.allFields[tags{name, jobid, client}] = fields } for _, wanted := range wantedFields { var data uint64 - if strings.TrimSuffix(parts[0], ":") == wanted.inProc { + if parts[0] == wanted.inProc { wantedField := wanted.field // if not set, assume field[1]. Shouldn't be field[0], as // that's a string if wantedField == 0 { wantedField = 1 } - data, err = strconv.ParseUint(strings.TrimSuffix((parts[wantedField]), ","), 10, 64) + data, err = strconv.ParseUint(strings.TrimSuffix(parts[wantedField], ","), 10, 64) if err != nil { return err } @@ -428,87 +441,74 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a return nil } -// SampleConfig returns sample configuration message -func (l *Lustre2) SampleConfig() string { - return sampleConfig -} - -// Description returns description of Lustre2 plugin -func (l *Lustre2) Description() string { - return "Read metrics from local Lustre service on OST, MDS" -} - // Gather reads stats from all lustre targets func (l *Lustre2) Gather(acc telegraf.Accumulator) error { //l.allFields = make(map[string]map[string]interface{}) l.allFields = make(map[tags]map[string]interface{}) - if len(l.Ost_procfiles) == 0 { + if len(l.OstProcfiles) == 0 { // read/write bytes are in obdfilter//stats - err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", - wanted_ost_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/stats", wantedOstFields) if err != nil { return err } // cache counters are in osd-ldiskfs//stats - err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", - wanted_ost_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/osd-ldiskfs/*/stats", wantedOstFields) if err != nil { return err } // per job statistics are in obdfilter//job_stats - err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", - wanted_ost_jobstats_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/obdfilter/*/job_stats", wantedOstJobstatsFields) if err != nil { return err } } - if len(l.Mds_procfiles) == 0 { + if len(l.MdsProcfiles) == 0 { // Metadata server stats - err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", - wanted_mds_fields, acc) + err := l.GetLustreProcStats("/proc/fs/lustre/mdt/*/md_stats", wantedMdsFields) if err != nil { return err } // Metadata target job stats - err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", - wanted_mdt_jobstats_fields, acc) + err = l.GetLustreProcStats("/proc/fs/lustre/mdt/*/job_stats", wantedMdtJobstatsFields) if err != nil { return err } } - for _, procfile := range l.Ost_procfiles { - ost_fields := wanted_ost_fields + for _, procfile := range l.OstProcfiles { + ostFields := wantedOstFields if strings.HasSuffix(procfile, "job_stats") { - ost_fields = wanted_ost_jobstats_fields + ostFields = wantedOstJobstatsFields } - err := l.GetLustreProcStats(procfile, ost_fields, acc) + err := l.GetLustreProcStats(procfile, ostFields) if err != nil { return err } } - for _, procfile := range l.Mds_procfiles { - mdt_fields := 
wanted_mds_fields + for _, procfile := range l.MdsProcfiles { + mdtFields := wantedMdsFields if strings.HasSuffix(procfile, "job_stats") { - mdt_fields = wanted_mdt_jobstats_fields + mdtFields = wantedMdtJobstatsFields } - err := l.GetLustreProcStats(procfile, mdt_fields, acc) + err := l.GetLustreProcStats(procfile, mdtFields) if err != nil { return err } } for tgs, fields := range l.allFields { - tags := map[string]string{ "name": tgs.name, } if len(tgs.job) > 0 { tags["jobid"] = tgs.job } + if len(tgs.client) > 0 { + tags["client"] = tgs.client + } acc.AddFields("lustre2", fields, tags) } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 8e93da8e81726..3da003ee6d7db 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -1,15 +1,17 @@ +//go:build !windows +// +build !windows + package lustre2 import ( - "io/ioutil" "os" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // Set config file variables to point to fake directory structure instead of /proc? @@ -45,7 +47,7 @@ const obdfilterJobStatsContents = `job_stats: - job_id: cluster-testjob1 snapshot_time: 1461772761 read_bytes: { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 } - write_bytes: { samples: 25, unit: bytes, min: 1048576, max: 1048576, sum: 26214400 } + write_bytes: { samples: 25, unit: bytes, min: 1048576, max:16777216, sum: 26214400 } getattr: { samples: 0, unit: reqs } setattr: { samples: 0, unit: reqs } punch: { samples: 1, unit: reqs } @@ -131,35 +133,34 @@ const mdtJobStatsContents = `job_stats: ` func TestLustre2GeneratesMetrics(t *testing.T) { - tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" - ost_name := "OST0001" + ostName := "OST0001" mdtdir := tempdir + "/mdt/" - err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) + err := os.MkdirAll(mdtdir+"/"+ostName, 0755) require.NoError(t, err) osddir := tempdir + "/osd-ldiskfs/" - err = os.MkdirAll(osddir+"/"+ost_name, 0755) + err = os.MkdirAll(osddir+"/"+ostName, 0755) require.NoError(t, err) obddir := tempdir + "/obdfilter/" - err = os.MkdirAll(obddir+"/"+ost_name, 0755) + err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/md_stats", []byte(mdtProcContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/md_stats", []byte(mdtProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(osddir+"/"+ost_name+"/stats", []byte(osdldiskfsProcContents), 0644) + err = os.WriteFile(osddir+"/"+ostName+"/stats", []byte(osdldiskfsProcContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ost_name+"/stats", []byte(obdfilterProcContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/stats", []byte(obdfilterProcContents), 0644) require.NoError(t, err) // Begin by testing standard Lustre stats m := &Lustre2{ - Ost_procfiles: []string{obddir + "/*/stats", osddir + "/*/stats"}, - Mds_procfiles: []string{mdtdir + "/*/md_stats"}, + OstProcfiles: []string{obddir + "/*/stats", osddir + "/*/stats"}, + MdsProcfiles: []string{mdtdir + "/*/md_stats"}, } var acc testutil.Accumulator @@ -168,7 +169,7 @@ func TestLustre2GeneratesMetrics(t *testing.T) { require.NoError(t, err) tags := map[string]string{ - "name": ost_name, + "name": ostName, } fields := map[string]interface{}{ @@ -203,30 +204,92 @@ func 
TestLustre2GeneratesMetrics(t *testing.T) { require.NoError(t, err) } -func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { +func TestLustre2GeneratesClientMetrics(t *testing.T) { + tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" + ostName := "OST0001" + clientName := "10.2.4.27@o2ib1" + mdtdir := tempdir + "/mdt/" + err := os.MkdirAll(mdtdir+"/"+ostName+"/exports/"+clientName, 0755) + require.NoError(t, err) + + obddir := tempdir + "/obdfilter/" + err = os.MkdirAll(obddir+"/"+ostName+"/exports/"+clientName, 0755) + require.NoError(t, err) + + err = os.WriteFile(mdtdir+"/"+ostName+"/exports/"+clientName+"/stats", []byte(mdtProcContents), 0644) + require.NoError(t, err) + + err = os.WriteFile(obddir+"/"+ostName+"/exports/"+clientName+"/stats", []byte(obdfilterProcContents), 0644) + require.NoError(t, err) + + // Begin by testing standard Lustre stats + m := &Lustre2{ + OstProcfiles: []string{obddir + "/*/exports/*/stats"}, + MdsProcfiles: []string{mdtdir + "/*/exports/*/stats"}, + } + + var acc testutil.Accumulator + + err = m.Gather(&acc) + require.NoError(t, err) + + tags := map[string]string{ + "name": ostName, + "client": clientName, + } + + fields := map[string]interface{}{ + "close": uint64(873243496), + "crossdir_rename": uint64(369571), + "getattr": uint64(1503663097), + "getxattr": uint64(6145349681), + "link": uint64(445), + "mkdir": uint64(705499), + "mknod": uint64(349042), + "open": uint64(1024577037), + "read_bytes": uint64(78026117632000), + "read_calls": uint64(203238095), + "rename": uint64(629196), + "rmdir": uint64(227434), + "samedir_rename": uint64(259625), + "setattr": uint64(1898364), + "setxattr": uint64(83969), + "statfs": uint64(2916320), + "sync": uint64(434081), + "unlink": uint64(3549417), + "write_bytes": uint64(15201500833981), + "write_calls": uint64(71893382), + } + acc.AssertContainsTaggedFields(t, "lustre2", fields, tags) + + err = os.RemoveAll(os.TempDir() + "/telegraf") + require.NoError(t, err) +} + +func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" - ost_name := "OST0001" - job_names := []string{"cluster-testjob1", "testjob2"} + ostName := "OST0001" + jobNames := []string{"cluster-testjob1", "testjob2"} mdtdir := tempdir + "/mdt/" - err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) + err := os.MkdirAll(mdtdir+"/"+ostName, 0755) require.NoError(t, err) obddir := tempdir + "/obdfilter/" - err = os.MkdirAll(obddir+"/"+ost_name, 0755) + err = os.MkdirAll(obddir+"/"+ostName, 0755) require.NoError(t, err) - err = ioutil.WriteFile(mdtdir+"/"+ost_name+"/job_stats", []byte(mdtJobStatsContents), 0644) + err = os.WriteFile(mdtdir+"/"+ostName+"/job_stats", []byte(mdtJobStatsContents), 0644) require.NoError(t, err) - err = ioutil.WriteFile(obddir+"/"+ost_name+"/job_stats", []byte(obdfilterJobStatsContents), 0644) + err = os.WriteFile(obddir+"/"+ostName+"/job_stats", []byte(obdfilterJobStatsContents), 0644) require.NoError(t, err) // Test Lustre Jobstats m := &Lustre2{ - Ost_procfiles: []string{obddir + "/*/job_stats"}, - Mds_procfiles: []string{mdtdir + "/*/job_stats"}, + OstProcfiles: []string{obddir + "/*/job_stats"}, + MdsProcfiles: []string{mdtdir + "/*/job_stats"}, } var acc testutil.Accumulator @@ -238,12 +301,12 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { // and even further make this dependent on summing per OST tags := []map[string]string{ { - "name": ost_name, - "jobid": job_names[0], + "name": ostName, + "jobid": jobNames[0], }, { - "name": ost_name, - "jobid": 
job_names[1], + "name": ostName, + "jobid": jobNames[1], }, } @@ -257,7 +320,7 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { "jobstats_read_bytes": uint64(4096), "jobstats_write_calls": uint64(25), "jobstats_write_min_size": uint64(1048576), - "jobstats_write_max_size": uint64(1048576), + "jobstats_write_max_size": uint64(16777216), "jobstats_write_bytes": uint64(26214400), "jobstats_ost_getattr": uint64(0), "jobstats_ost_setattr": uint64(0), @@ -345,7 +408,7 @@ func TestLustre2CanParseConfiguration(t *testing.T) { "/proc/fs/lustre/mdt/*/md_stats", ]`) - table, err := toml.Parse([]byte(config)) + table, err := toml.Parse(config) require.NoError(t, err) inputs, ok := table.Fields["inputs"] @@ -358,12 +421,12 @@ func TestLustre2CanParseConfiguration(t *testing.T) { require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin)) - assert.Equal(t, Lustre2{ - Ost_procfiles: []string{ + require.Equal(t, Lustre2{ + OstProcfiles: []string{ "/proc/fs/lustre/obdfilter/*/stats", "/proc/fs/lustre/osd-ldiskfs/*/stats", }, - Mds_procfiles: []string{ + MdsProcfiles: []string{ "/proc/fs/lustre/mdt/*/md_stats", }, }, plugin) diff --git a/plugins/inputs/lustre2/lustre2_windows.go b/plugins/inputs/lustre2/lustre2_windows.go new file mode 100644 index 0000000000000..cd3aea1b534f1 --- /dev/null +++ b/plugins/inputs/lustre2/lustre2_windows.go @@ -0,0 +1,4 @@ +//go:build windows +// +build windows + +package lustre2 diff --git a/plugins/inputs/lustre2/sample.conf b/plugins/inputs/lustre2/sample.conf new file mode 100644 index 0000000000000..02a3e3133c08c --- /dev/null +++ b/plugins/inputs/lustre2/sample.conf @@ -0,0 +1,16 @@ +# Read metrics from local Lustre service on OST, MDS +[[inputs.lustre2]] + ## An array of /proc globs to search for Lustre stats + ## If not specified, the default will work on Lustre 2.5.x + ## + # ost_procfiles = [ + # "/proc/fs/lustre/obdfilter/*/stats", + # "/proc/fs/lustre/osd-ldiskfs/*/stats", + # "/proc/fs/lustre/obdfilter/*/job_stats", + # "/proc/fs/lustre/obdfilter/*/exports/*/stats", + # ] + # mds_procfiles = [ + # "/proc/fs/lustre/mdt/*/md_stats", + # "/proc/fs/lustre/mdt/*/job_stats", + # "/proc/fs/lustre/mdt/*/exports/*/stats", + # ] diff --git a/plugins/inputs/lvm/README.md b/plugins/inputs/lvm/README.md new file mode 100644 index 0000000000000..bc07c37cb208e --- /dev/null +++ b/plugins/inputs/lvm/README.md @@ -0,0 +1,79 @@ +# LVM Input Plugin + +The Logical Volume Management (LVM) input plugin collects information about +physical volumes, volume groups, and logical volumes. + +## Configuration + +```toml @sample.conf +# Read metrics about LVM physical volumes, volume groups, logical volumes. +[[inputs.lvm]] + ## Use sudo to run LVM commands + use_sudo = false +``` + +The `lvm` command requires elevated permissions. If the user has configured sudo +with the ability to run these commands, then set `use_sudo` to true. + +### Using sudo + +If your account does not already have the ability to run commands +with passwordless sudo then updates to the sudoers file are required. Below +is an example to allow the required LVM commands: + +First, use the `visudo` command to start editing the sudoers file.
Then add +the following content, where `<username>` is the username of the user that +needs this access: + +```text +Cmnd_Alias LVM = /usr/sbin/pvs *, /usr/sbin/vgs *, /usr/sbin/lvs * +<username> ALL=(root) NOPASSWD: LVM +Defaults!LVM !logfile, !syslog, !pam_session +``` + +## Metrics + +Metrics are broken out by physical volume (pv), volume group (vg), and logical +volume (lv): + +- lvm_physical_vol + - tags + - path + - vol_group + - fields + - size + - free + - used + - used_percent +- lvm_vol_group + - tags + - name + - fields + - size + - free + - used_percent + - physical_volume_count + - logical_volume_count + - snapshot_count +- lvm_logical_vol + - tags + - name + - vol_group + - fields + - size + - data_percent + - metadata_percent + +## Example Output + +The following example shows a system with the root partition on an LVM group +as well as with a Docker thin-provisioned LVM group on a second drive: + +```shell +> lvm_physical_vol,path=/dev/sda2,vol_group=vgroot free=0i,size=249510756352i,used=249510756352i,used_percent=100 1631823026000000000 +> lvm_physical_vol,path=/dev/sdb,vol_group=docker free=3858759680i,size=128316342272i,used=124457582592i,used_percent=96.99277612525741 1631823026000000000 +> lvm_vol_group,name=vgroot free=0i,logical_volume_count=1i,physical_volume_count=1i,size=249510756352i,snapshot_count=0i,used_percent=100 1631823026000000000 +> lvm_vol_group,name=docker free=3858759680i,logical_volume_count=1i,physical_volume_count=1i,size=128316342272i,snapshot_count=0i,used_percent=96.99277612525741 1631823026000000000 +> lvm_logical_vol,name=lvroot,vol_group=vgroot data_percent=0,metadata_percent=0,size=249510756352i 1631823026000000000 +> lvm_logical_vol,name=thinpool,vol_group=docker data_percent=0.36000001430511475,metadata_percent=1.3300000429153442,size=121899057152i 1631823026000000000 +``` diff --git a/plugins/inputs/lvm/lvm.go b/plugins/inputs/lvm/lvm.go new file mode 100644 index 0000000000000..e77dab460962f --- /dev/null +++ b/plugins/inputs/lvm/lvm.go @@ -0,0 +1,290 @@ +//go:generate ../../../tools/readme_config_includer/generator +package lvm + +import ( + _ "embed" + "encoding/json" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + +var ( + execCommand = exec.Command +) + +type LVM struct { + UseSudo bool `toml:"use_sudo"` +} + +func (*LVM) SampleConfig() string { + return sampleConfig +} + +func (lvm *LVM) Init() error { + return nil +} + +func (lvm *LVM) Gather(acc telegraf.Accumulator) error { + if err := lvm.gatherPhysicalVolumes(acc); err != nil { + return err + } else if err := lvm.gatherVolumeGroups(acc); err != nil { + return err + } else if err := lvm.gatherLogicalVolumes(acc); err != nil { + return err + } + + return nil +} + +func (lvm *LVM) gatherPhysicalVolumes(acc telegraf.Accumulator) error { + pvsCmd := "/usr/sbin/pvs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "pv_name,vg_name,pv_size,pv_free,pv_used", + } + out, err := lvm.runCmd(pvsCmd, args) + if err != nil { + return err + } + + var report pvsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal physical volume JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, pv := range report.Report[0].Pv { + tags := map[string]string{ + "path": pv.Name, + "vol_group": pv.VolGroup, + } + + size, err := strconv.ParseUint(pv.Size, 10, 64) + if err != nil { + return err + } + + free, err := strconv.ParseUint(pv.Free, 10, 64) + if err != nil { + return err + } + + used, err := strconv.ParseUint(pv.Used, 10, 64) + if err != nil { + return err + } + + usedPercent := float64(used) / float64(size) * 100 + + fields := map[string]interface{}{ + "size": size, + "free": free, + "used": used, + "used_percent": usedPercent, + } + + acc.AddFields("lvm_physical_vol", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) gatherVolumeGroups(acc telegraf.Accumulator) error { + cmd := "/usr/sbin/vgs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "vg_name,pv_count,lv_count,snap_count,vg_size,vg_free", + } + out, err := lvm.runCmd(cmd, args) + if err != nil { + return err + } + + var report vgsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal vol group JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, vg := range report.Report[0].Vg { + tags := map[string]string{ + "name": vg.Name, + } + + size, err := strconv.ParseUint(vg.Size, 10, 64) + if err != nil { + return err + } + + free, err := strconv.ParseUint(vg.Free, 10, 64) + if err != nil { + return err + } + + pvCount, err := strconv.ParseUint(vg.PvCount, 10, 64) + if err != nil { + return err + } + lvCount, err := strconv.ParseUint(vg.LvCount, 10, 64) + if err != nil { + return err + } + snapCount, err := strconv.ParseUint(vg.SnapCount, 10, 64) + if err != nil { + return err + } + + usedPercent := (float64(size) - float64(free)) / float64(size) * 100 + + fields := map[string]interface{}{ + "size": size, + "free": free, + "used_percent": usedPercent, + "physical_volume_count": pvCount, + "logical_volume_count": lvCount, + "snapshot_count": snapCount, + } + + acc.AddFields("lvm_vol_group", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) gatherLogicalVolumes(acc telegraf.Accumulator) error { + cmd := "/usr/sbin/lvs" + args := []string{ + "--reportformat", "json", "--units", "b", "--nosuffix", + "-o", "lv_name,vg_name,lv_size,data_percent,metadata_percent", + } + out, err := lvm.runCmd(cmd, args) + if err != nil { + return err + } + + var report lvsReport + err = json.Unmarshal(out, &report) + if err != nil { + return fmt.Errorf("failed to unmarshal logical vol 
JSON: %s", err) + } + + if len(report.Report) > 0 { + for _, lv := range report.Report[0].Lv { + tags := map[string]string{ + "name": lv.Name, + "vol_group": lv.VolGroup, + } + + size, err := strconv.ParseUint(lv.Size, 10, 64) + if err != nil { + return err + } + + // Does not apply to all logical volumes, set default value + if lv.DataPercent == "" { + lv.DataPercent = "0.0" + } + dataPercent, err := strconv.ParseFloat(lv.DataPercent, 32) + if err != nil { + return err + } + + // Does not apply to all logical volumes, set default value + if lv.MetadataPercent == "" { + lv.MetadataPercent = "0.0" + } + metadataPercent, err := strconv.ParseFloat(lv.MetadataPercent, 32) + if err != nil { + return err + } + + fields := map[string]interface{}{ + "size": size, + "data_percent": dataPercent, + "metadata_percent": metadataPercent, + } + + acc.AddFields("lvm_logical_vol", fields, tags) + } + } + + return nil +} + +func (lvm *LVM) runCmd(cmd string, args []string) ([]byte, error) { + execCmd := execCommand(cmd, args...) + if lvm.UseSudo { + execCmd = execCommand("sudo", append([]string{"-n", cmd}, args...)...) + } + + out, err := internal.StdOutputTimeout(execCmd, 5*time.Second) + if err != nil { + return nil, fmt.Errorf( + "failed to run command %s: %s - %s", + strings.Join(execCmd.Args, " "), err, string(out), + ) + } + + return out, nil +} + +// Represents info about physical volume command, pvs, output +type pvsReport struct { + Report []struct { + Pv []struct { + Name string `json:"pv_name"` + VolGroup string `json:"vg_name"` + Size string `json:"pv_size"` + Free string `json:"pv_free"` + Used string `json:"pv_used"` + } `json:"pv"` + } `json:"report"` +} + +// Represents info about volume group command, vgs, output +type vgsReport struct { + Report []struct { + Vg []struct { + Name string `json:"vg_name"` + Size string `json:"vg_size"` + Free string `json:"vg_free"` + LvCount string `json:"lv_count"` + PvCount string `json:"pv_count"` + SnapCount string `json:"snap_count"` + } `json:"vg"` + } `json:"report"` +} + +// Represents info about logical volume command, lvs, output +type lvsReport struct { + Report []struct { + Lv []struct { + Name string `json:"lv_name"` + VolGroup string `json:"vg_name"` + Size string `json:"lv_size"` + DataPercent string `json:"data_percent"` + MetadataPercent string `json:"metadata_percent"` + } `json:"lv"` + } `json:"report"` +} + +func init() { + inputs.Add("lvm", func() telegraf.Input { + return &LVM{} + }) +} diff --git a/plugins/inputs/lvm/lvm_test.go b/plugins/inputs/lvm/lvm_test.go new file mode 100644 index 0000000000000..c48eff5c039b1 --- /dev/null +++ b/plugins/inputs/lvm/lvm_test.go @@ -0,0 +1,211 @@ +package lvm + +import ( + "fmt" + "os" + "os/exec" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGather(t *testing.T) { + var lvm LVM = LVM{UseSudo: false} + var acc testutil.Accumulator + + // overwriting exec commands with mock commands + execCommand = fakeExecCommand + err := lvm.Gather(&acc) + require.NoError(t, err) + + pvsTags := map[string]string{ + "path": "/dev/sdb", + "vol_group": "docker", + } + pvsFields := map[string]interface{}{ + "size": uint64(128316342272), + "free": uint64(3858759680), + "used": uint64(124457582592), + "used_percent": 96.99277612525741, + } + acc.AssertContainsTaggedFields(t, "lvm_physical_vol", pvsFields, pvsTags) + + vgsTags := map[string]string{ + "name": "docker", + } + vgsFields := map[string]interface{}{ + "size": uint64(128316342272), + "free": 
uint64(3858759680), + "used_percent": 96.99277612525741, + "physical_volume_count": uint64(1), + "logical_volume_count": uint64(1), + "snapshot_count": uint64(0), + } + acc.AssertContainsTaggedFields(t, "lvm_vol_group", vgsFields, vgsTags) + + lvsTags := map[string]string{ + "name": "thinpool", + "vol_group": "docker", + } + lvsFields := map[string]interface{}{ + "size": uint64(121899057152), + "data_percent": 0.36000001430511475, + "metadata_percent": 1.3300000429153442, + } + acc.AssertContainsTaggedFields(t, "lvm_logical_vol", lvsFields, lvsTags) +} + +// Used as a helper function that mock the exec.Command call +func fakeExecCommand(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// Used to mock exec.Command output +func TestHelperProcess(_ *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + mockPVSData := `{ + "report": [ + { + "pv": [ + {"pv_name":"/dev/sdb", "vg_name":"docker", "pv_size":"128316342272", "pv_free":"3858759680", "pv_used":"124457582592"} + ] + } + ] + } +` + + mockVGSData := `{ + "report": [ + { + "vg": [ + {"vg_name":"docker", "pv_count":"1", "lv_count":"1", "snap_count":"0", "vg_size":"128316342272", "vg_free":"3858759680"} + ] + } + ] + } +` + + mockLVSData := `{ + "report": [ + { + "lv": [ + {"lv_name":"thinpool", "vg_name":"docker", "lv_size":"121899057152", "data_percent":"0.36", "metadata_percent":"1.33"} + ] + } + ] + } +` + + // Previous arguments are tests stuff, that looks like : + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + args := os.Args + cmd := args[3] + if cmd == "/usr/sbin/pvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockPVSData) + } else if cmd == "/usr/sbin/vgs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockVGSData) + } else if cmd == "/usr/sbin/lvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockLVSData) + } else { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" + os.Exit(1) + } + //nolint:revive // error code is important for this "test" + os.Exit(0) +} + +// test when no lvm devices exist +func TestGatherNoLVM(t *testing.T) { + var noLVM LVM = LVM{UseSudo: false} + var acc testutil.Accumulator + + // overwriting exec commands with mock commands + execCommand = fakeExecCommandNoLVM + err := noLVM.Gather(&acc) + require.NoError(t, err) + + acc.AssertDoesNotContainMeasurement(t, "lvm_physical_vol") + acc.AssertDoesNotContainMeasurement(t, "lvm_vol_group") + acc.AssertDoesNotContainMeasurement(t, "lvm_logical_vol") +} + +// Used as a helper function that mock the exec.Command call +func fakeExecCommandNoLVM(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcessNoLVM", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) 
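+	// Re-exec this test binary, running only TestHelperProcessNoLVM; the
+	// GO_WANT_HELPER_PROCESS variable set below gates that helper so it emits
+	// canned LVM output instead of acting as a regular test.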
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// Used to mock exec.Command output +func TestHelperProcessNoLVM(_ *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + mockPVSData := `{ + "report": [ + { + "pv": [ + ] + } + ] + } +` + + mockVGSData := `{ + "report": [ + { + "vg": [ + ] + } + ] + } +` + + mockLVSData := `{ + "report": [ + { + "lv": [ + ] + } + ] + } +` + + // Previous arguments are tests stuff, that looks like : + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + args := os.Args + cmd := args[3] + if cmd == "/usr/sbin/pvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockPVSData) + } else if cmd == "/usr/sbin/vgs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockVGSData) + } else if cmd == "/usr/sbin/lvs" { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, mockLVSData) + } else { + //nolint:errcheck,revive // test will fail anyway + fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" + os.Exit(1) + } + //nolint:revive // error code is important for this "test" + os.Exit(0) +} diff --git a/plugins/inputs/lvm/sample.conf b/plugins/inputs/lvm/sample.conf new file mode 100644 index 0000000000000..ec2b7be83b0b4 --- /dev/null +++ b/plugins/inputs/lvm/sample.conf @@ -0,0 +1,4 @@ +# Read metrics about LVM physical volumes, volume groups, logical volumes. +[[inputs.lvm]] + ## Use sudo to run LVM commands + use_sudo = false diff --git a/plugins/inputs/mailchimp/README.md b/plugins/inputs/mailchimp/README.md index 46750f6fc5efa..b1f4be9dd8467 100644 --- a/plugins/inputs/mailchimp/README.md +++ b/plugins/inputs/mailchimp/README.md @@ -1,27 +1,27 @@ # Mailchimp Input Plugin -Pulls campaign reports from the [Mailchimp API](https://developer.mailchimp.com/). +Pulls campaign reports from the [Mailchimp API][1]. -### Configuration +[1]: https://developer.mailchimp.com/ -This section contains the default TOML to configure the plugin. You can -generate it using `telegraf --usage mailchimp`. +## Configuration -```toml +```toml @sample.conf +# Gathers metrics from the /3.0/reports MailChimp API [[inputs.mailchimp]] ## MailChimp API key ## get from https://admin.mailchimp.com/account/api/ api_key = "" # required - + ## Reports for campaigns sent more than days_old ago will not be collected. ## 0 means collect all and is the default value. 
days_old = 0 - + ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old # campaign_id = "" ``` -### Metrics +## Metrics - mailchimp - tags: diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index a40614b1d0f7e..71e7bcea6d535 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -5,29 +5,30 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" - "log" "net/http" "net/url" "regexp" "sync" "time" + + "github.com/influxdata/telegraf" ) const ( - reports_endpoint string = "/3.0/reports" - reports_endpoint_campaign string = "/3.0/reports/%s" + reportsEndpoint string = "/3.0/reports" + reportsEndpointCampaign string = "/3.0/reports/%s" ) -var mailchimp_datacenter = regexp.MustCompile("[a-z]+[0-9]+$") +var mailchimpDatacenter = regexp.MustCompile("[a-z]+[0-9]+$") type ChimpAPI struct { Transport http.RoundTripper - Debug bool + debug bool sync.Mutex url *url.URL + log telegraf.Logger } type ReportsParams struct { @@ -54,12 +55,12 @@ func (p *ReportsParams) String() string { return v.Encode() } -func NewChimpAPI(apiKey string) *ChimpAPI { +func NewChimpAPI(apiKey string, log telegraf.Logger) *ChimpAPI { u := &url.URL{} u.Scheme = "https" - u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimp_datacenter.FindString(apiKey)) + u.Host = fmt.Sprintf("%s.api.mailchimp.com", mailchimpDatacenter.FindString(apiKey)) u.User = url.UserPassword("", apiKey) - return &ChimpAPI{url: u} + return &ChimpAPI{url: u, log: log} } type APIError struct { @@ -76,7 +77,9 @@ func (e APIError) Error() string { func chimpErrorCheck(body []byte) error { var e APIError - json.Unmarshal(body, &e) + if err := json.Unmarshal(body, &e); err != nil { + return err + } if e.Title != "" || e.Status != 0 { return e } @@ -86,10 +89,10 @@ func chimpErrorCheck(body []byte) error { func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) { a.Lock() defer a.Unlock() - a.url.Path = reports_endpoint + a.url.Path = reportsEndpoint var response ReportsResponse - rawjson, err := runChimp(a, params) + rawjson, err := a.runChimp(params) if err != nil { return response, err } @@ -105,10 +108,10 @@ func (a *ChimpAPI) GetReports(params ReportsParams) (ReportsResponse, error) { func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { a.Lock() defer a.Unlock() - a.url.Path = fmt.Sprintf(reports_endpoint_campaign, campaignID) + a.url.Path = fmt.Sprintf(reportsEndpointCampaign, campaignID) var response Report - rawjson, err := runChimp(a, ReportsParams{}) + rawjson, err := a.runChimp(ReportsParams{}) if err != nil { return response, err } @@ -121,21 +124,21 @@ func (a *ChimpAPI) GetReport(campaignID string) (Report, error) { return response, nil } -func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { +func (a *ChimpAPI) runChimp(params ReportsParams) ([]byte, error) { client := &http.Client{ - Transport: api.Transport, - Timeout: time.Duration(4 * time.Second), + Transport: a.Transport, + Timeout: 4 * time.Second, } var b bytes.Buffer - req, err := http.NewRequest("GET", api.url.String(), &b) + req, err := http.NewRequest("GET", a.url.String(), &b) if err != nil { return nil, err } req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") - if api.Debug { - log.Printf("D! 
[inputs.mailchimp] request URL: %s", req.URL.String()) + if a.debug { + a.log.Debugf("request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -146,16 +149,16 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) - return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", a.url.String(), resp.Status, body) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } - if api.Debug { - log.Printf("D! [inputs.mailchimp] response Body: %q", string(body)) + if a.debug { + a.log.Debugf("response Body: %q", string(body)) } if err = chimpErrorCheck(body); err != nil { diff --git a/plugins/inputs/mailchimp/mailchimp.go b/plugins/inputs/mailchimp/mailchimp.go index d7255191ab724..65b286b7e59af 100644 --- a/plugins/inputs/mailchimp/mailchimp.go +++ b/plugins/inputs/mailchimp/mailchimp.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package mailchimp import ( + _ "embed" "fmt" "time" @@ -8,40 +10,32 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type MailChimp struct { api *ChimpAPI - ApiKey string - DaysOld int - CampaignId string -} + APIKey string `toml:"api_key"` + DaysOld int `toml:"days_old"` + CampaignID string `toml:"campaign_id"` -var sampleConfig = ` - ## MailChimp API key - ## get from https://admin.mailchimp.com/account/api/ - api_key = "" # required - ## Reports for campaigns sent more than days_old ago will not be collected. - ## 0 means collect all. 
- days_old = 0 - ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old - # campaign_id = "" -` + Log telegraf.Logger `toml:"-"` +} -func (m *MailChimp) SampleConfig() string { +func (*MailChimp) SampleConfig() string { return sampleConfig } -func (m *MailChimp) Description() string { - return "Gathers metrics from the /3.0/reports MailChimp API" +func (m *MailChimp) Init() error { + m.api = NewChimpAPI(m.APIKey, m.Log) + + return nil } func (m *MailChimp) Gather(acc telegraf.Accumulator) error { - if m.api == nil { - m.api = NewChimpAPI(m.ApiKey) - } - m.api.Debug = false - - if m.CampaignId == "" { + if m.CampaignID == "" { since := "" if m.DaysOld > 0 { now := time.Now() @@ -61,7 +55,7 @@ func (m *MailChimp) Gather(acc telegraf.Accumulator) error { gatherReport(acc, report, now) } } else { - report, err := m.api.GetReport(m.CampaignId) + report, err := m.api.GetReport(m.CampaignID) if err != nil { return err } diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index 0c4dab56d5d12..1df6c52cf6256 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -7,9 +7,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestMailChimpGatherReports(t *testing.T) { @@ -17,7 +17,8 @@ func TestMailChimpGatherReports(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleReports) + _, err := fmt.Fprintln(w, sampleReports) + require.NoError(t, err) }, )) defer ts.Close() @@ -27,7 +28,8 @@ func TestMailChimpGatherReports(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, @@ -42,22 +44,22 @@ func TestMailChimpGatherReports(t *testing.T) { tags["campaign_title"] = "Freddie's Jokes Vol. 
1" fields := map[string]interface{}{ - "emails_sent": int(200), - "abuse_reports": int(0), - "unsubscribed": int(2), - "hard_bounces": int(0), - "soft_bounces": int(2), - "syntax_errors": int(0), - "forwards_count": int(0), - "forwards_opens": int(0), - "opens_total": int(186), - "unique_opens": int(100), - "clicks_total": int(42), - "unique_clicks": int(400), - "unique_subscriber_clicks": int(42), - "facebook_recipient_likes": int(5), - "facebook_unique_likes": int(8), - "facebook_likes": int(42), + "emails_sent": 200, + "abuse_reports": 0, + "unsubscribed": 2, + "hard_bounces": 0, + "soft_bounces": 2, + "syntax_errors": 0, + "forwards_count": 0, + "forwards_opens": 0, + "opens_total": 186, + "unique_opens": 100, + "clicks_total": 42, + "unique_clicks": 400, + "unique_subscriber_clicks": 42, + "facebook_recipient_likes": 5, + "facebook_unique_likes": 8, + "facebook_likes": 42, "open_rate": float64(42), "click_rate": float64(42), "industry_open_rate": float64(0.17076777144396), @@ -80,7 +82,8 @@ func TestMailChimpGatherReport(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleReport) + _, err := fmt.Fprintln(w, sampleReport) + require.NoError(t, err) }, )) defer ts.Close() @@ -90,11 +93,12 @@ func TestMailChimpGatherReport(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, - CampaignId: "test", + CampaignID: "test", } var acc testutil.Accumulator @@ -137,7 +141,6 @@ func TestMailChimpGatherReport(t *testing.T) { "industry_type": "Social Networks and Online Communities", } acc.AssertContainsTaggedFields(t, "mailchimp", fields, tags) - } func TestMailChimpGatherError(t *testing.T) { @@ -145,7 +148,8 @@ func TestMailChimpGatherError(t *testing.T) { http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, sampleError) + _, err := fmt.Fprintln(w, sampleError) + require.NoError(t, err) }, )) defer ts.Close() @@ -155,11 +159,12 @@ func TestMailChimpGatherError(t *testing.T) { api := &ChimpAPI{ url: u, - Debug: true, + debug: true, + log: testutil.Logger{}, } m := MailChimp{ api: api, - CampaignId: "test", + CampaignID: "test", } var acc testutil.Accumulator diff --git a/plugins/inputs/mailchimp/sample.conf b/plugins/inputs/mailchimp/sample.conf new file mode 100644 index 0000000000000..09d2afc637876 --- /dev/null +++ b/plugins/inputs/mailchimp/sample.conf @@ -0,0 +1,12 @@ +# Gathers metrics from the /3.0/reports MailChimp API +[[inputs.mailchimp]] + ## MailChimp API key + ## get from https://admin.mailchimp.com/account/api/ + api_key = "" # required + + ## Reports for campaigns sent more than days_old ago will not be collected. + ## 0 means collect all and is the default value. + days_old = 0 + + ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old + # campaign_id = "" diff --git a/plugins/inputs/marklogic/README.md b/plugins/inputs/marklogic/README.md index 7feb4a10d9d04..fded2513a6502 100644 --- a/plugins/inputs/marklogic/README.md +++ b/plugins/inputs/marklogic/README.md @@ -1,10 +1,12 @@ # MarkLogic Input Plugin -The MarkLogic Telegraf plugin gathers health status metrics from one or more host. +The MarkLogic Telegraf plugin gathers health status metrics from one or more +host. 
-### Configuration: +## Configuration -```toml +```toml @sample.conf +# Retrieves information on a specific host in a MarkLogic Cluster [[inputs.marklogic]] ## Base URL of the MarkLogic HTTP Server. url = "http://localhost:8002" @@ -24,7 +26,7 @@ The MarkLogic Telegraf plugin gathers health status metrics from one or more hos # insecure_skip_verify = false ``` -### Metrics +## Metrics - marklogic - tags: @@ -56,9 +58,9 @@ The MarkLogic Telegraf plugin gathers health status metrics from one or more hos - http_server_receive_bytes - http_server_send_bytes -### Example Output: +## Example Output -``` +```shell $> marklogic,host=localhost,id=2592913110757471141,source=ml1.local total_cpu_stat_iowait=0.0125649003311992,memory_process_swap_size=0i,host_size=380i,data_dir_space=28216i,query_read_load=0i,ncpus=1i,log_device_space=28216i,query_read_bytes=13947332i,merge_write_load=0i,http_server_receive_bytes=225893i,online=true,ncores=4i,total_cpu_stat_user=0.150778993964195,total_cpu_stat_system=0.598927974700928,total_cpu_stat_idle=99.2210006713867,memory_system_total=3947i,memory_system_free=2669i,memory_size=4096i,total_rate=14.7697010040283,http_server_send_bytes=0i,memory_process_size=903i,memory_process_rss=486i,merge_read_load=0i,total_load=0.00502600101754069 1566373000000000000 ``` diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go index b350466122dc7..cbf09f1d1360b 100644 --- a/plugins/inputs/marklogic/marklogic.go +++ b/plugins/inputs/marklogic/marklogic.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package marklogic import ( + _ "embed" "encoding/json" "fmt" "net/http" @@ -14,6 +16,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Marklogic configuration toml type Marklogic struct { URL string `toml:"url"` @@ -82,33 +88,12 @@ type MlHost struct { } `json:"host-status"` } -// Description of plugin returned -func (c *Marklogic) Description() string { - return "Retrieves information on a specific host in a MarkLogic Cluster" +func (*Marklogic) SampleConfig() string { + return sampleConfig } -var sampleConfig = ` - ## Base URL of the MarkLogic HTTP Server. - url = "http://localhost:8002" - - ## List of specific hostnames to retrieve information. At least (1) required. - # hosts = ["hostname1", "hostname2"] - - ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges - # username = "myuser" - # password = "mypassword" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - // Init parse all source URLs and place on the Marklogic struct func (c *Marklogic) Init() error { - if len(c.URL) == 0 { c.URL = "http://localhost:8002/" } @@ -129,11 +114,6 @@ func (c *Marklogic) Init() error { return nil } -// SampleConfig to gather stats from localhost, default port. -func (c *Marklogic) SampleConfig() string { - return sampleConfig -} - // Gather metrics from HTTP Server. 
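 // Each URL in c.Sources is fetched in its own goroutine below; per-host
 // errors are reported through the accumulator so a single unreachable host
 // does not abort the whole collection.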
func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error { var wg sync.WaitGroup @@ -164,9 +144,9 @@ func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error { return nil } -func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, url string) error { +func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, address string) error { ml := &MlHost{} - if err := c.gatherJSONData(url, ml); err != nil { + if err := c.gatherJSONData(address, ml); err != nil { return err } @@ -220,14 +200,14 @@ func (c *Marklogic) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: time.Duration(5 * time.Second), + Timeout: 5 * time.Second, } return client, nil } -func (c *Marklogic) gatherJSONData(url string, v interface{}) error { - req, err := http.NewRequest("GET", url, nil) +func (c *Marklogic) gatherJSONData(address string, v interface{}) error { + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -246,11 +226,7 @@ func (c *Marklogic) gatherJSONData(url string, v interface{}) error { response.StatusCode, http.StatusOK) } - if err = json.NewDecoder(response.Body).Decode(v); err != nil { - return err - } - - return nil + return json.NewDecoder(response.Body).Decode(v) } func init() { diff --git a/plugins/inputs/marklogic/marklogic_test.go b/plugins/inputs/marklogic/marklogic_test.go index 34e4bbd6bb7e9..5c39fac19051d 100644 --- a/plugins/inputs/marklogic/marklogic_test.go +++ b/plugins/inputs/marklogic/marklogic_test.go @@ -15,7 +15,8 @@ func TestMarklogic(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() @@ -27,7 +28,7 @@ func TestMarklogic(t *testing.T) { ml := &Marklogic{ Hosts: []string{"example1"}, - URL: string(ts.URL), + URL: ts.URL, //Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"}, } @@ -76,7 +77,6 @@ func TestMarklogic(t *testing.T) { } acc.AssertContainsTaggedFields(t, "marklogic", expectFields, expectTags) - } var response = ` diff --git a/plugins/inputs/marklogic/sample.conf b/plugins/inputs/marklogic/sample.conf new file mode 100644 index 0000000000000..04eee2da1ec76 --- /dev/null +++ b/plugins/inputs/marklogic/sample.conf @@ -0,0 +1,18 @@ +# Retrieves information on a specific host in a MarkLogic Cluster +[[inputs.marklogic]] + ## Base URL of the MarkLogic HTTP Server. + url = "http://localhost:8002" + + ## List of specific hostnames to retrieve information. At least (1) required. + # hosts = ["hostname1", "hostname2"] + + ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges + # username = "myuser" + # password = "mypassword" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/mcrouter/README.md b/plugins/inputs/mcrouter/README.md index 05c2597869e05..bc8891b896e59 100644 --- a/plugins/inputs/mcrouter/README.md +++ b/plugins/inputs/mcrouter/README.md @@ -2,9 +2,9 @@ This plugin gathers statistics data from a Mcrouter server. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Read metrics from one or many mcrouter servers. 
[[inputs.mcrouter]] ## An array of address to gather stats about. Specify an ip or hostname @@ -15,11 +15,12 @@ This plugin gathers statistics data from a Mcrouter server. # timeout = "5s" ``` -### Measurements & Fields: +## Metrics The fields from this plugin are gathered in the *mcrouter* measurement. -Description of gathered fields can be found [here](https://github.com/facebook/mcrouter/wiki/Stats-list). +Description of gathered fields can be found +[here](https://github.com/facebook/mcrouter/wiki/Stats-list). Fields: @@ -88,16 +89,14 @@ Fields: * cmd_delete_out_all * cmd_lease_set_out_all -### Tags: +## Tags * Mcrouter measurements have the following tags: - - server (the host name from which metrics are gathered) + * server (the host name from which metrics are gathered) +## Example Output - -### Example Output: - -``` +```shell $ ./telegraf --config telegraf.conf --input-filter mcrouter --test mcrouter,server=localhost:11211 uptime=166,num_servers=1,num_servers_new=1,num_servers_up=0,num_servers_down=0,num_servers_closed=0,num_clients=1,num_suspect_servers=0,destination_batches_sum=0,destination_requests_sum=0,outstanding_route_get_reqs_queued=0,outstanding_route_update_reqs_queued=0,outstanding_route_get_avg_queue_size=0,outstanding_route_update_avg_queue_size=0,outstanding_route_get_avg_wait_time_sec=0,outstanding_route_update_avg_wait_time_sec=0,retrans_closed_connections=0,destination_pending_reqs=0,destination_inflight_reqs=0,destination_batch_size=0,asynclog_requests=0,proxy_reqs_processing=1,proxy_reqs_waiting=0,client_queue_notify_period=0,rusage_system=0.040966,rusage_user=0.020483,ps_num_minor_faults=2490,ps_num_major_faults=11,ps_user_time_sec=0.02,ps_system_time_sec=0.04,ps_vsize=697741312,ps_rss=10563584,fibers_allocated=0,fibers_pool_size=0,fibers_stack_high_watermark=0,successful_client_connections=18,duration_us=0,destination_max_pending_reqs=0,destination_max_inflight_reqs=0,retrans_per_kbyte_max=0,cmd_get_count=0,cmd_delete_out=0,cmd_lease_get=0,cmd_set=0,cmd_get_out_all=0,cmd_get_out=0,cmd_lease_set_count=0,cmd_other_out_all=0,cmd_lease_get_out=0,cmd_set_count=0,cmd_lease_set_out=0,cmd_delete_count=0,cmd_other=0,cmd_delete=0,cmd_get=0,cmd_lease_set=0,cmd_set_out=0,cmd_lease_get_count=0,cmd_other_out=0,cmd_lease_get_out_all=0,cmd_set_out_all=0,cmd_other_count=0,cmd_delete_out_all=0,cmd_lease_set_out_all=0 1453831884664956455 ``` diff --git a/plugins/inputs/mcrouter/mcrouter.go b/plugins/inputs/mcrouter/mcrouter.go index d6303c87758e4..956230ae7bd89 100644 --- a/plugins/inputs/mcrouter/mcrouter.go +++ b/plugins/inputs/mcrouter/mcrouter.go @@ -1,8 +1,10 @@ +//go:generate ../../../tools/readme_config_includer/generator package mcrouter import ( "bufio" "context" + _ "embed" "fmt" "net" "net/url" @@ -11,14 +13,18 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Mcrouter is a mcrouter plugin type Mcrouter struct { Servers []string - Timeout internal.Duration + Timeout config.Duration } // enum for statType @@ -29,15 +35,6 @@ const ( typeFloat statType = iota ) -var sampleConfig = ` - ## An array of address to gather stats about. Specify an ip or hostname - ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. 
- servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] - - ## Timeout for metric collections from all servers. Minimum timeout is "1s". - # timeout = "5s" -` - var defaultTimeout = 5 * time.Second var defaultServerURL = url.URL{ @@ -113,25 +110,19 @@ var sendMetrics = map[string]statType{ "cmd_lease_set_out_all": typeInt, } -// SampleConfig returns sample configuration message -func (m *Mcrouter) SampleConfig() string { +func (*Mcrouter) SampleConfig() string { return sampleConfig } -// Description returns description of Mcrouter plugin -func (m *Mcrouter) Description() string { - return "Read metrics from one or many mcrouter servers" -} - // Gather reads stats from all configured servers accumulates stats func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { ctx := context.Background() - if m.Timeout.Duration < 1*time.Second { - m.Timeout.Duration = defaultTimeout + if m.Timeout < config.Duration(1*time.Second) { + m.Timeout = config.Duration(defaultTimeout) } - ctx, cancel := context.WithTimeout(ctx, m.Timeout.Duration) + ctx, cancel := context.WithTimeout(ctx, time.Duration(m.Timeout)) defer cancel() if len(m.Servers) == 0 { @@ -146,32 +137,33 @@ func (m *Mcrouter) Gather(acc telegraf.Accumulator) error { } // ParseAddress parses an address string into 'host:port' and 'protocol' parts -func (m *Mcrouter) ParseAddress(address string) (string, string, error) { - var protocol string +func (m *Mcrouter) ParseAddress(address string) (parsedAddress string, protocol string, err error) { var host string var port string - u, parseError := url.Parse(address) + parsedAddress = address + + u, parseError := url.Parse(parsedAddress) if parseError != nil { - return "", "", fmt.Errorf("Invalid server address") + return "", "", fmt.Errorf("invalid server address") } if u.Scheme != "tcp" && u.Scheme != "unix" { - return "", "", fmt.Errorf("Invalid server protocol") + return "", "", fmt.Errorf("invalid server protocol") } protocol = u.Scheme if protocol == "unix" { if u.Path == "" { - return "", "", fmt.Errorf("Invalid unix socket path") + return "", "", fmt.Errorf("invalid unix socket path") } - address = u.Path + parsedAddress = u.Path } else { if u.Host == "" { - return "", "", fmt.Errorf("Invalid host") + return "", "", fmt.Errorf("invalid host") } host = u.Hostname() @@ -185,10 +177,10 @@ func (m *Mcrouter) ParseAddress(address string) (string, string, error) { port = defaultServerURL.Port() } - address = host + ":" + port + parsedAddress = host + ":" + port } - return address, protocol, nil + return parsedAddress, protocol, nil } func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegraf.Accumulator) error { @@ -213,7 +205,9 @@ func (m *Mcrouter) gatherServer(ctx context.Context, address string, acc telegra deadline, ok := ctx.Deadline() if ok { - conn.SetDeadline(deadline) + if err := conn.SetDeadline(deadline); err != nil { + return err + } } // Read and write buffer diff --git a/plugins/inputs/mcrouter/mcrouter_test.go b/plugins/inputs/mcrouter/mcrouter_test.go index e17c13b6d6655..5e5adbc53d12e 100644 --- a/plugins/inputs/mcrouter/mcrouter_test.go +++ b/plugins/inputs/mcrouter/mcrouter_test.go @@ -2,12 +2,15 @@ package mcrouter import ( "bufio" + "fmt" "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/docker/go-connections/nat" "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf/testutil" ) func 
TestAddressParsing(t *testing.T) { @@ -30,61 +33,124 @@ func TestAddressParsing(t *testing.T) { for _, args := range acceptTests { address, protocol, err := m.ParseAddress(args[0]) - assert.Nil(t, err, args[0]) - assert.True(t, address == args[1], args[0]) - assert.True(t, protocol == args[2], args[0]) + require.Nil(t, err, args[0]) + require.Equal(t, args[1], address, args[0]) + require.Equal(t, args[2], protocol, args[0]) } for _, addr := range rejectTests { address, protocol, err := m.ParseAddress(addr) - assert.NotNil(t, err, addr) - assert.Empty(t, address, addr) - assert.Empty(t, protocol, addr) + require.NotNil(t, err, addr) + require.Empty(t, address, addr) + require.Empty(t, protocol, addr) } } -func TestMcrouterGeneratesMetrics(t *testing.T) { +func TestMcrouterGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + servicePort := "11211" + container := testutil.Container{ + Image: "memcached", + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForListeningPort(nat.Port(servicePort)), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + m := &Mcrouter{ - Servers: []string{"tcp://" + testutil.GetLocalHost()}, + Servers: []string{ + fmt.Sprintf("tcp://%s:%s", container.Address, container.Ports[servicePort]), + }, } var acc testutil.Accumulator - err := acc.GatherError(m.Gather) + err = acc.GatherError(m.Gather) require.NoError(t, err) - intMetrics := []string{"uptime", "num_servers", "num_servers_new", "num_servers_up", - "num_servers_down", "num_servers_closed", "num_clients", - "num_suspect_servers", "destination_batches_sum", "destination_requests_sum", - "outstanding_route_get_reqs_queued", "outstanding_route_update_reqs_queued", - "outstanding_route_get_avg_queue_size", "outstanding_route_update_avg_queue_size", - "outstanding_route_get_avg_wait_time_sec", "outstanding_route_update_avg_wait_time_sec", - "retrans_closed_connections", "destination_pending_reqs", "destination_inflight_reqs", - "destination_batch_size", "asynclog_requests", "proxy_reqs_processing", - "proxy_reqs_waiting", "client_queue_notify_period", - "ps_num_minor_faults", "ps_num_major_faults", - "ps_vsize", "ps_rss", "fibers_allocated", "fibers_pool_size", "fibers_stack_high_watermark", - "successful_client_connections", "duration_us", "destination_max_pending_reqs", - "destination_max_inflight_reqs", "retrans_per_kbyte_max", "cmd_get_count", "cmd_delete_out", - "cmd_lease_get", "cmd_set", "cmd_get_out_all", "cmd_get_out", "cmd_lease_set_count", - "cmd_other_out_all", "cmd_lease_get_out", "cmd_set_count", "cmd_lease_set_out", - "cmd_delete_count", "cmd_other", "cmd_delete", "cmd_get", "cmd_lease_set", "cmd_set_out", - "cmd_lease_get_count", "cmd_other_out", "cmd_lease_get_out_all", "cmd_set_out_all", - "cmd_other_count", "cmd_delete_out_all", "cmd_lease_set_out_all"} - - floatMetrics := []string{"rusage_system", "rusage_user", "ps_user_time_sec", "ps_system_time_sec"} + intMetrics := []string{ + "uptime", + // "num_servers", + // "num_servers_new", + // "num_servers_up", + // "num_servers_down", + // "num_servers_closed", + // "num_clients", + // "num_suspect_servers", + // "destination_batches_sum", + // "destination_requests_sum", + // "outstanding_route_get_reqs_queued", + // "outstanding_route_update_reqs_queued", + // "outstanding_route_get_avg_queue_size", + // "outstanding_route_update_avg_queue_size", 
+ // "outstanding_route_get_avg_wait_time_sec", + // "outstanding_route_update_avg_wait_time_sec", + // "retrans_closed_connections", + // "destination_pending_reqs", + // "destination_inflight_reqs", + // "destination_batch_size", + // "asynclog_requests", + // "proxy_reqs_processing", + // "proxy_reqs_waiting", + // "client_queue_notify_period", + // "ps_num_minor_faults", + // "ps_num_major_faults", + // "ps_vsize", + // "ps_rss", + // "fibers_allocated", + // "fibers_pool_size", + // "fibers_stack_high_watermark", + // "successful_client_connections", + // "duration_us", + // "destination_max_pending_reqs", + // "destination_max_inflight_reqs", + // "retrans_per_kbyte_max", + // "cmd_get_count", + // "cmd_delete_out", + // "cmd_lease_get", + "cmd_set", + // "cmd_get_out_all", + // "cmd_get_out", + // "cmd_lease_set_count", + // "cmd_other_out_all", + // "cmd_lease_get_out", + // "cmd_set_count", + // "cmd_lease_set_out", + // "cmd_delete_count", + // "cmd_other", + // "cmd_delete", + "cmd_get", + // "cmd_lease_set", + // "cmd_set_out", + // "cmd_lease_get_count", + // "cmd_other_out", + // "cmd_lease_get_out_all", + // "cmd_set_out_all", + // "cmd_other_count", + // "cmd_delete_out_all", + // "cmd_lease_set_out_all" + } + + floatMetrics := []string{ + "rusage_system", + "rusage_user", + // "ps_user_time_sec", + // "ps_system_time_sec", + } for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("mcrouter", metric), metric) + require.True(t, acc.HasInt64Field("mcrouter", metric), metric) } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("mcrouter", metric), metric) + require.True(t, acc.HasFloatField("mcrouter", metric), metric) } } diff --git a/plugins/inputs/mcrouter/sample.conf b/plugins/inputs/mcrouter/sample.conf new file mode 100644 index 0000000000000..91aa9023d6a94 --- /dev/null +++ b/plugins/inputs/mcrouter/sample.conf @@ -0,0 +1,8 @@ +# Read metrics from one or many mcrouter servers. +[[inputs.mcrouter]] + ## An array of address to gather stats about. Specify an ip or hostname + ## with port. ie tcp://localhost:11211, tcp://10.0.0.1:11211, etc. + servers = ["tcp://localhost:11211", "unix:///var/run/mcrouter.sock"] + + ## Timeout for metric collections from all servers. Minimum timeout is "1s". + # timeout = "5s" diff --git a/plugins/inputs/mdstat/README.md b/plugins/inputs/mdstat/README.md new file mode 100644 index 0000000000000..610f73a08a5b6 --- /dev/null +++ b/plugins/inputs/mdstat/README.md @@ -0,0 +1,53 @@ +# mdstat Input Plugin + +The mdstat plugin gathers statistics about any Linux MD RAID arrays configured +on the host by reading /proc/mdstat. For a full list of available fields see the +/proc/mdstat section of the [proc man page][man-proc]. For a better idea of +what each field represents, see the [mdstat man page][man-mdstat]. + +Stat collection based on Prometheus' [mdstat collection library][prom-lib]. 
+ +[man-proc]: http://man7.org/linux/man-pages/man5/proc.5.html + +[man-mdstat]: https://raid.wiki.kernel.org/index.php/Mdstat + +[prom-lib]: https://github.com/prometheus/procfs/blob/master/mdstat.go + +## Configuration + +```toml @sample.conf +# Get kernel statistics from /proc/mdstat +[[inputs.mdstat]] + ## Sets file path + ## If not specified, then default is /proc/mdstat + # file_name = "/proc/mdstat" +``` + +## Metrics + +- mdstat + - BlocksSynced (if the array is rebuilding/checking, this is the count of blocks that have been scanned) + - BlocksSyncedFinishTime (the expected finish time of the rebuild scan, listed in minutes remaining) + - BlocksSyncedPct (the percentage of the rebuild scan left) + - BlocksSyncedSpeed (the current speed the rebuild is running at, listed in K/sec) + - BlocksTotal (the total count of blocks in the array) + - DisksActive (the number of disks that are currently considered healthy in the array) + - DisksFailed (the current count of failed disks in the array) + - DisksSpare (the current count of "spare" disks in the array) + - DisksTotal (total count of disks in the array) + +## Tags + +- mdstat + - ActivityState (`active` or `inactive`) + - Devices (comma separated list of devices that make up the array) + - Name (name of the array) + +## Example Output + +```shell +$ telegraf --config ~/ws/telegraf.conf --input-filter mdstat --test +* Plugin: mdstat, Collection 1 +> mdstat,ActivityState=active,Devices=sdm1\,sdn1,Name=md1 BlocksSynced=231299072i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=231299072i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 +> mdstat,ActivityState=active,Devices=sdm5\,sdn5,Name=md2 BlocksSynced=2996224i,BlocksSyncedFinishTime=0,BlocksSyncedPct=0,BlocksSyncedSpeed=0,BlocksTotal=2996224i,DisksActive=2i,DisksFailed=0i,DisksSpare=0i,DisksTotal=2i,DisksDown=0i 1617814276000000000 +``` diff --git a/plugins/inputs/mdstat/mdstat.go b/plugins/inputs/mdstat/mdstat.go new file mode 100644 index 0000000000000..0d1eb940988ad --- /dev/null +++ b/plugins/inputs/mdstat/mdstat.go @@ -0,0 +1,309 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux +// +build linux + +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code has been changed since initial import. + +package mdstat + +import ( + _ "embed" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +const ( + defaultHostProc = "/proc" + envProc = "HOST_PROC" +) + +var ( + statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLinePctRE = regexp.MustCompile(`= +(.+)%`) + recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) + recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) +) + +type statusLine struct { + active int64 + total int64 + size int64 + down int64 +} + +type recoveryLine struct { + syncedBlocks int64 + pct float64 + finish float64 + speed float64 +} + +type MdstatConf struct { + FileName string `toml:"file_name"` +} + +func evalStatusLine(deviceLine, statusLineStr string) (statusLine, error) { + sizeFields := strings.Fields(statusLineStr) + if len(sizeFields) < 1 { + return statusLine{active: 0, total: 0, down: 0, size: 0}, + fmt.Errorf("statusLine empty? %q", statusLineStr) + } + sizeStr := sizeFields[0] + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return statusLine{active: 0, total: 0, down: 0, size: 0}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + + if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { + // In the device deviceLine, only disks have a number associated with them in []. + total := int64(strings.Count(deviceLine, "[")) + return statusLine{active: total, total: total, down: 0, size: size}, nil + } + + if strings.Contains(deviceLine, "inactive") { + return statusLine{active: 0, total: 0, down: 0, size: size}, nil + } + + matches := statusLineRE.FindStringSubmatch(statusLineStr) + if len(matches) != 5 { + return statusLine{active: 0, total: 0, down: 0, size: size}, + fmt.Errorf("couldn't find all the substring matches: %s", statusLineStr) + } + total, err := strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return statusLine{active: 0, total: 0, down: 0, size: size}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + active, err := strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return statusLine{active: 0, total: total, down: 0, size: size}, + fmt.Errorf("unexpected statusLine %q: %w", statusLineStr, err) + } + down := int64(strings.Count(matches[4], "_")) + + return statusLine{active: active, total: total, size: size, down: down}, nil +} + +func evalRecoveryLine(recoveryLineStr string) (recoveryLine, error) { + // Get count of completed vs. 
total blocks + matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: 0, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching syncedBlocks: %s", recoveryLineStr) + } + syncedBlocks, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return recoveryLine{syncedBlocks: 0, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get percentage complete + matches = recoveryLinePctRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLineStr) + } + pct, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: 0, finish: 0, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get time expected left to complete + matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: 0, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLineStr) + } + finish, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: 0, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + + // Get recovery speed + matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLineStr) + if len(matches) != 2 { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: 0}, + fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLineStr) + } + speed, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: 0}, + fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLineStr, err) + } + return recoveryLine{syncedBlocks: syncedBlocks, pct: pct, finish: finish, speed: speed}, nil +} + +func evalComponentDevices(deviceFields []string) string { + mdComponentDevices := make([]string, 0) + if len(deviceFields) > 3 { + for _, field := range deviceFields[4:] { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + mdComponentDevices = append(mdComponentDevices, match[1]) + } + } + + // Ensure no churn on tag ordering change + sort.Strings(mdComponentDevices) + return strings.Join(mdComponentDevices, ",") +} + +func (*MdstatConf) SampleConfig() string { + return sampleConfig +} + +func (k *MdstatConf) Gather(acc telegraf.Accumulator) error { + data, err := k.getProcMdstat() + if err != nil { + return err + } + lines := strings.Split(string(data), "\n") + // empty file should return nothing + if len(lines) < 3 { + return nil + } + for i, line := range lines { + if strings.TrimSpace(line) == "" || line[0] == ' ' || strings.HasPrefix(line, "Personalities") || strings.HasPrefix(line, "unused") { + continue + } + deviceFields := strings.Fields(line) + if len(deviceFields) < 3 || len(lines) <= i+3 { + return fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + } + mdName := deviceFields[0] // mdx + state := deviceFields[2] // active or inactive + + /* + Failed disks have the suffix (F) & Spare disks have the suffix (S). + Failed disks may also not be marked separately... 
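+		A failed member shows up on the device line itself, e.g. (illustrative
+		sample, not taken from a live system):
+		    md0 : active raid1 sdb1[1](F) sda1[0]
+		which is why the counts below simply scan the line for "(F)" and "(S)".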
+ */ + fail := int64(strings.Count(line, "(F)")) + spare := int64(strings.Count(line, "(S)")) + + sts, err := evalStatusLine(lines[i], lines[i+1]) + if err != nil { + return fmt.Errorf("error parsing md device lines: %w", err) + } + + syncLineIdx := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + syncLineIdx++ + } + + var rcvry recoveryLine + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. + rcvry.syncedBlocks = sts.size + recovering := strings.Contains(lines[syncLineIdx], "recovery") + resyncing := strings.Contains(lines[syncLineIdx], "resync") + checking := strings.Contains(lines[syncLineIdx], "check") + + // Append recovery and resyncing state info. + if recovering || resyncing || checking { + if recovering { + state = "recovering" + } else if checking { + state = "checking" + } else { + state = "resyncing" + } + + // Handle case when resync=PENDING or resync=DELAYED. + if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { + rcvry.syncedBlocks = 0 + } else { + var err error + rcvry, err = evalRecoveryLine(lines[syncLineIdx]) + if err != nil { + return fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + } + } + } + fields := map[string]interface{}{ + "DisksActive": sts.active, + "DisksFailed": fail, + "DisksSpare": spare, + "DisksTotal": sts.total, + "DisksDown": sts.down, + "BlocksTotal": sts.size, + "BlocksSynced": rcvry.syncedBlocks, + "BlocksSyncedPct": rcvry.pct, + "BlocksSyncedFinishTime": rcvry.finish, + "BlocksSyncedSpeed": rcvry.speed, + } + tags := map[string]string{ + "Name": mdName, + "ActivityState": state, + "Devices": evalComponentDevices(deviceFields), + } + acc.AddFields("mdstat", fields, tags) + } + + return nil +} + +func (k *MdstatConf) getProcMdstat() ([]byte, error) { + var mdStatFile string + if k.FileName == "" { + mdStatFile = proc(envProc, defaultHostProc) + "/mdstat" + } else { + mdStatFile = k.FileName + } + if _, err := os.Stat(mdStatFile); os.IsNotExist(err) { + return nil, fmt.Errorf("mdstat: %s does not exist", mdStatFile) + } else if err != nil { + return nil, err + } + + data, err := os.ReadFile(mdStatFile) + if err != nil { + return nil, err + } + + return data, nil +} + +func init() { + inputs.Add("mdstat", func() telegraf.Input { return &MdstatConf{} }) +} + +// proc can be used to read file paths from env +func proc(env, path string) string { + // try to read full file path + if p := os.Getenv(env); p != "" { + return p + } + // return default path + return path +} diff --git a/plugins/inputs/mdstat/mdstat_notlinux.go b/plugins/inputs/mdstat/mdstat_notlinux.go new file mode 100644 index 0000000000000..409ae776102b0 --- /dev/null +++ b/plugins/inputs/mdstat/mdstat_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux +// +build !linux + +package mdstat diff --git a/plugins/inputs/mdstat/mdstat_test.go b/plugins/inputs/mdstat/mdstat_test.go new file mode 100644 index 0000000000000..9f76021dcd237 --- /dev/null +++ b/plugins/inputs/mdstat/mdstat_test.go @@ -0,0 +1,191 @@ +//go:build linux +// +build linux + +package mdstat + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestFullMdstatProcFile(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileFull)) + defer os.Remove(filename) + k := MdstatConf{ + FileName: filename, + } + acc := testutil.Accumulator{} + err := k.Gather(&acc) + 
require.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(10620027200), + "BlocksSyncedFinishTime": float64(101.6), + "BlocksSyncedPct": float64(94.3), + "BlocksSyncedSpeed": float64(103517), + "BlocksTotal": int64(11251451904), + "DisksActive": int64(12), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(12), + "DisksDown": int64(0), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestMdstatSyncStart(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatSyncStart)) + defer os.Remove(filename) + k := MdstatConf{ + FileName: filename, + } + acc := testutil.Accumulator{} + err := k.Gather(&acc) + require.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(10620027200), + "BlocksSyncedFinishTime": float64(101.6), + "BlocksSyncedPct": float64(1.5), + "BlocksSyncedSpeed": float64(103517), + "BlocksTotal": int64(11251451904), + "DisksActive": int64(12), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(12), + "DisksDown": int64(0), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestFailedDiskMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileFailedDisk)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + require.NoError(t, err) + + fields := map[string]interface{}{ + "BlocksSynced": int64(5860144128), + "BlocksSyncedFinishTime": float64(0), + "BlocksSyncedPct": float64(0), + "BlocksSyncedSpeed": float64(0), + "BlocksTotal": int64(5860144128), + "DisksActive": int64(3), + "DisksFailed": int64(0), + "DisksSpare": int64(0), + "DisksTotal": int64(4), + "DisksDown": int64(1), + } + acc.AssertContainsFields(t, "mdstat", fields) +} + +func TestEmptyMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileEmpty)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + require.NoError(t, err) +} + +func TestInvalidMdStatProcFile1(t *testing.T) { + filename := makeFakeMDStatFile([]byte(mdStatFileInvalid)) + defer os.Remove(filename) + + k := MdstatConf{ + FileName: filename, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + require.Error(t, err) +} + +const mdStatFileFull = ` +Personalities : [raid1] [raid10] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] +md2 : active raid10 sde[2] sdl[9] sdf[3] sdk[8] sdh[5] sdd[1] sdg[4] sdn[11] sdm[10] sdj[7] sdc[0] sdi[6] + 11251451904 blocks super 1.2 512K chunks 2 near-copies [12/12] [UUUUUUUUUUUU] + [==================>..] check = 94.3% (10620027200/11251451904) finish=101.6min speed=103517K/sec + bitmap: 35/84 pages [140KB], 65536KB chunk + +md1 : active raid1 sdb2[2] sda2[0] + 5909504 blocks super 1.2 [2/2] [UU] + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: +` + +const mdStatSyncStart = ` +Personalities : [raid1] [raid10] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] +md2 : active raid10 sde[2] sdl[9] sdf[3] sdk[8] sdh[5] sdd[1] sdg[4] sdn[11] sdm[10] sdj[7] sdc[0] sdi[6] + 11251451904 blocks super 1.2 512K chunks 2 near-copies [12/12] [UUUUUUUUUUUU] + [>....................] 
check = 1.5% (10620027200/11251451904) finish=101.6min speed=103517K/sec + bitmap: 35/84 pages [140KB], 65536KB chunk + +md1 : active raid1 sdb2[2] sda2[0] + 5909504 blocks super 1.2 [2/2] [UU] + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: +` + +const mdStatFileFailedDisk = ` +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md0 : active raid5 sdd1[3] sdb1[1] sda1[0] + 5860144128 blocks super 1.2 level 5, 64k chunk, algorithm 2 [4/3] [UUU_] + bitmap: 8/15 pages [32KB], 65536KB chunk + +unused devices: +` + +const mdStatFileEmpty = ` +Personalities : +unused devices: +` + +const mdStatFileInvalid = ` +Personalities : + +mdf1: testman actve + +md0 : active raid1 sdb1[2] sda1[0] + 244005888 blocks super 1.2 [2/2] [UU] + bitmap: 1/2 pages [4KB], 65536KB chunk + +unused devices: +` + +func makeFakeMDStatFile(content []byte) (filename string) { + fileobj, err := os.CreateTemp("", "mdstat") + if err != nil { + panic(err) + } + + if _, err = fileobj.Write(content); err != nil { + panic(err) + } + if err := fileobj.Close(); err != nil { + panic(err) + } + return fileobj.Name() +} diff --git a/plugins/inputs/mdstat/sample.conf b/plugins/inputs/mdstat/sample.conf new file mode 100644 index 0000000000000..040d1d690c2c2 --- /dev/null +++ b/plugins/inputs/mdstat/sample.conf @@ -0,0 +1,5 @@ +# Get kernel statistics from /proc/mdstat +[[inputs.mdstat]] + ## Sets file path + ## If not specified, then default is /proc/mdstat + # file_name = "/proc/mdstat" diff --git a/plugins/inputs/mem/README.md b/plugins/inputs/mem/README.md index 9122b885a09e1..625e991738473 100644 --- a/plugins/inputs/mem/README.md +++ b/plugins/inputs/mem/README.md @@ -5,14 +5,15 @@ The mem plugin collects system memory metrics. For a more complete explanation of the difference between *used* and *actual_used* RAM, see [Linux ate my ram](http://www.linuxatemyram.com/). -### Configuration: -```toml +## Configuration + +```toml @sample.conf # Read metrics about memory usage [[inputs.mem]] # no configuration ``` -### Metrics: +## Metrics Available fields are dependent on platform. @@ -55,7 +56,8 @@ Available fields are dependent on platform. 
- write_back (integer, Linux) - write_back_tmp (integer, Linux) -### Example Output: -``` +## Example Output + +```shell mem active=9299595264i,available=16818249728i,available_percent=80.41654254645131,buffered=2383761408i,cached=13316689920i,commit_limit=14751920128i,committed_as=11781156864i,dirty=122880i,free=1877688320i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=7549939712i,low_free=0i,low_total=0i,mapped=416763904i,page_tables=19787776i,shared=670679040i,slab=2081071104i,sreclaimable=1923395584i,sunreclaim=157675520i,swap_cached=1302528i,swap_free=4286128128i,swap_total=4294963200i,total=20913917952i,used=3335778304i,used_percent=15.95004011996231,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1574712869000000000 ``` diff --git a/plugins/inputs/mem/memory.go b/plugins/inputs/mem/mem.go similarity index 74% rename from plugins/inputs/mem/memory.go rename to plugins/inputs/mem/mem.go index c8dbd0c2a43b5..961a02ed10022 100644 --- a/plugins/inputs/mem/memory.go +++ b/plugins/inputs/mem/mem.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package mem import ( + _ "embed" "fmt" "runtime" @@ -9,24 +11,26 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type MemStats struct { ps system.PS platform string } -func (_ *MemStats) Description() string { - return "Read metrics about memory usage" +func (*MemStats) SampleConfig() string { + return sampleConfig } -func (_ *MemStats) SampleConfig() string { return "" } - -func (m *MemStats) Init() error { - m.platform = runtime.GOOS +func (ms *MemStats) Init() error { + ms.platform = runtime.GOOS return nil } -func (s *MemStats) Gather(acc telegraf.Accumulator) error { - vm, err := s.ps.VMStat() +func (ms *MemStats) Gather(acc telegraf.Accumulator) error { + vm, err := ms.ps.VMStat() if err != nil { return fmt.Errorf("error getting virtual memory info: %s", err) } @@ -39,7 +43,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { "available_percent": 100 * float64(vm.Available) / float64(vm.Total), } - switch s.platform { + switch ms.platform { case "darwin": fields["active"] = vm.Active fields["free"] = vm.Free @@ -79,16 +83,16 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { fields["page_tables"] = vm.PageTables fields["shared"] = vm.Shared fields["slab"] = vm.Slab - fields["sreclaimable"] = vm.SReclaimable - fields["sunreclaim"] = vm.SUnreclaim + fields["sreclaimable"] = vm.Sreclaimable + fields["sunreclaim"] = vm.Sunreclaim fields["swap_cached"] = vm.SwapCached fields["swap_free"] = vm.SwapFree fields["swap_total"] = vm.SwapTotal - fields["vmalloc_chunk"] = vm.VMallocChunk - fields["vmalloc_total"] = vm.VMallocTotal - fields["vmalloc_used"] = vm.VMallocUsed - fields["write_back_tmp"] = vm.WritebackTmp - fields["write_back"] = vm.Writeback + fields["vmalloc_chunk"] = vm.VmallocChunk + fields["vmalloc_total"] = vm.VmallocTotal + fields["vmalloc_used"] = vm.VmallocUsed + fields["write_back_tmp"] = vm.WriteBackTmp + fields["write_back"] = vm.WriteBack } acc.AddGauge("mem", fields, nil) diff --git a/plugins/inputs/mem/memory_test.go b/plugins/inputs/mem/mem_test.go similarity index 92% rename from plugins/inputs/mem/memory_test.go rename to plugins/inputs/mem/mem_test.go index 626a1806c4055..06561875753c9 100644 --- 
a/plugins/inputs/mem/memory_test.go +++ b/plugins/inputs/mem/mem_test.go @@ -7,7 +7,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/mem" + "github.com/shirou/gopsutil/v3/mem" "github.com/stretchr/testify/require" ) @@ -42,16 +42,16 @@ func TestMemStats(t *testing.T) { Mapped: 42236, PageTables: 1236, Shared: 0, - SReclaimable: 1923022848, - SUnreclaim: 157728768, + Sreclaimable: 1923022848, + Sunreclaim: 157728768, SwapCached: 0, SwapFree: 524280, SwapTotal: 524280, - VMallocChunk: 3872908, - VMallocTotal: 3874808, - VMallocUsed: 1416, - Writeback: 0, - WritebackTmp: 0, + VmallocChunk: 3872908, + VmallocTotal: 3874808, + VmallocUsed: 1416, + WriteBack: 0, + WriteBackTmp: 0, } mps.On("VMStat").Return(vms, nil) diff --git a/plugins/inputs/mem/sample.conf b/plugins/inputs/mem/sample.conf new file mode 100644 index 0000000000000..2fa26d07b3ee2 --- /dev/null +++ b/plugins/inputs/mem/sample.conf @@ -0,0 +1,3 @@ +# Read metrics about memory usage +[[inputs.mem]] + # no configuration diff --git a/plugins/inputs/memcached/README.md b/plugins/inputs/memcached/README.md index 721be913054a7..2a27f906aac46 100644 --- a/plugins/inputs/memcached/README.md +++ b/plugins/inputs/memcached/README.md @@ -2,9 +2,9 @@ This plugin gathers statistics data from a Memcached server. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Read metrics from one or many memcached servers. [[inputs.memcached]] # An array of address to gather stats about. Specify an ip on hostname @@ -12,9 +12,17 @@ This plugin gathers statistics data from a Memcached server. servers = ["localhost:11211"] # An array of unix memcached sockets to gather stats about. # unix_sockets = ["/var/run/memcached.sock"] + + ## Optional TLS Config + # enable_tls = true + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## If false, skip chain & host verification + # insecure_skip_verify = true ``` -### Measurements & Fields: +## Metrics The fields from this plugin are gathered in the *memcached* measurement. 
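The raw fields listed below come straight from memcached's text protocol: the plugin opens a connection, sends a `stats` command, and parses each returned `STAT <name> <value>` line until the terminating `END` (the test fixture later in this patch shows the exact wire format). As a minimal standalone sketch of that exchange, assuming only a reachable server on the default port 11211 (the address and timeout here are illustrative, not plugin API):

```go
package main

import (
	"bufio"
	"fmt"
	"net"
	"strings"
	"time"
)

func main() {
	// Dial the way the plugin does, with a bounded timeout.
	conn, err := net.DialTimeout("tcp", "localhost:11211", 5*time.Second)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Bound the whole exchange, mirroring the plugin's SetDeadline call.
	if err := conn.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
		panic(err)
	}
	// The stats command returns "STAT <name> <value>" lines followed by "END".
	fmt.Fprint(conn, "stats\r\n")
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		line := scanner.Text()
		if line == "END" {
			break
		}
		fields := strings.Fields(line)
		if len(fields) >= 3 && fields[0] == "STAT" {
			fmt.Printf("%s = %s\n", fields[1], strings.Join(fields[2:], " "))
		}
	}
}
```

The plugin performs the same exchange, optionally over TLS (`enable_tls`) or a unix socket, and converts the numeric values into the fields below.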
@@ -41,9 +49,12 @@ Fields:
 * decr_misses - Number of decr reqs against missing keys
 * delete_hits - Number of deletion reqs resulting in an item being removed
 * delete_misses - Number of deletion reqs for missing keys
+* evicted_active - Items evicted from LRU that had been hit recently but did not jump to top of LRU
 * evicted_unfetched - Items evicted from LRU that were never touched by get/incr/append/etc
 * evictions - Number of valid items removed from cache to free memory for new items
 * expired_unfetched - Items pulled from LRU that were never touched by get/incr/append/etc before expiring
+* get_expired - Number of items that have been requested but had already expired
+* get_flushed - Number of items that have been requested but have been flushed via flush_all
 * get_hits - Number of keys that have been requested and found present
 * get_misses - Number of items that have been requested and not found
 * hash_bytes - Bytes currently used by hash tables
@@ -53,7 +64,11 @@ Fields:
 * incr_misses - Number of incr reqs against missing keys
 * limit_maxbytes - Number of bytes this server is allowed to use for storage
 * listen_disabled_num - Number of times server has stopped accepting new connections (maxconns)
+* max_connections - Max number of simultaneous connections
 * reclaimed - Number of times an entry was stored using memory from an expired entry
+* rejected_connections - Connections rejected in maxconns_fast mode
+* store_no_memory - Number of rejected storage requests caused by exhaustion of the memory limit when evictions are disabled
+* store_too_large - Number of rejected storage requests caused by attempting to write a value larger than the item size limit
 * threads - Number of worker threads requested
 * total_connections - Total number of connections opened since the server started running
 * total_items - Total number of items stored since the server started
@@ -61,24 +76,28 @@ Fields:
 * touch_misses - Number of items that have been touched and not found
 * uptime - Number of secs since the server started
 
-Description of gathered fields taken from [here](https://github.com/memcached/memcached/blob/master/doc/protocol.txt).
+Description of gathered fields taken from [memcached protocol docs][protocol].
+
+[protocol]: https://github.com/memcached/memcached/blob/master/doc/protocol.txt
 
-### Tags:
+## Tags
 
 * Memcached measurements have the following tags:
-  - server (the host name from which metrics are gathered)
+  * server (the host name from which metrics are gathered)
 
-### Sample Queries:
+## Sample Queries
 
-You can use the following query to get the average get hit and miss ratio, as well as the total average size of cached items, number of cached items and average connection counts per server.
+You can use the following query to get the average get hit and miss ratio, as
+well as the total average size of cached items, number of cached items and
+average connection counts per server.
-``` +```sql SELECT mean(get_hits) / mean(cmd_get) as get_ratio, mean(get_misses) / mean(cmd_get) as get_misses_ratio, mean(bytes), mean(curr_items), mean(curr_connections) FROM memcached WHERE time > now() - 1h GROUP BY server ``` -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter memcached --test -memcached,server=localhost:11211 get_hits=1,get_misses=2,evictions=0,limit_maxbytes=0,bytes=10,uptime=3600,curr_items=2,total_items=2,curr_connections=1,total_connections=2,connection_structures=1,cmd_get=2,cmd_set=1,delete_hits=0,delete_misses=0,incr_hits=0,incr_misses=0,decr_hits=0,decr_misses=0,cas_hits=0,cas_misses=0,bytes_read=10,bytes_written=10,threads=1,conn_yields=0 1453831884664956455 +memcached,server=localhost:11211 accepting_conns=1i,auth_cmds=0i,auth_errors=0i,bytes=0i,bytes_read=7i,bytes_written=0i,cas_badval=0i,cas_hits=0i,cas_misses=0i,cmd_flush=0i,cmd_get=0i,cmd_set=0i,cmd_touch=0i,conn_yields=0i,connection_structures=3i,curr_connections=2i,curr_items=0i,decr_hits=0i,decr_misses=0i,delete_hits=0i,delete_misses=0i,evicted_active=0i,evicted_unfetched=0i,evictions=0i,expired_unfetched=0i,get_expired=0i,get_flushed=0i,get_hits=0i,get_misses=0i,hash_bytes=524288i,hash_is_expanding=0i,hash_power_level=16i,incr_hits=0i,incr_misses=0i,limit_maxbytes=67108864i,listen_disabled_num=0i,max_connections=1024i,reclaimed=0i,rejected_connections=0i,store_no_memory=0i,store_too_large=0i,threads=4i,total_connections=3i,total_items=0i,touch_hits=0i,touch_misses=0i,uptime=3i 1644771989000000000 ``` diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 99128263ade10..b52d5e7b448ff 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -1,30 +1,35 @@ +//go:generate ../../../tools/readme_config_includer/generator package memcached import ( "bufio" "bytes" + "crypto/tls" + _ "embed" "fmt" "net" "strconv" "time" + "golang.org/x/net/proxy" + "github.com/influxdata/telegraf" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Memcached is a memcached plugin type Memcached struct { - Servers []string - UnixSockets []string + Servers []string `toml:"servers"` + UnixSockets []string `toml:"unix_sockets"` + EnableTLS bool `toml:"enable_tls"` + tlsint.ClientConfig } -var sampleConfig = ` - ## An array of address to gather stats about. Specify an ip on hostname - ## with optional port. ie localhost, 10.0.0.1:11211, etc. 
- servers = ["localhost:11211"] - # unix_sockets = ["/var/run/memcached.sock"] -` - var defaultTimeout = 5 * time.Second // The list of metrics that should be sent @@ -50,9 +55,12 @@ var sendMetrics = []string{ "decr_misses", "delete_hits", "delete_misses", + "evicted_active", "evicted_unfetched", "evictions", "expired_unfetched", + "get_expired", + "get_flushed", "get_hits", "get_misses", "hash_bytes", @@ -62,7 +70,11 @@ var sendMetrics = []string{ "incr_misses", "limit_maxbytes", "listen_disabled_num", + "max_connections", "reclaimed", + "rejected_connections", + "store_no_memory", + "store_too_large", "threads", "total_connections", "total_items", @@ -71,16 +83,10 @@ var sendMetrics = []string{ "uptime", } -// SampleConfig returns sample configuration message -func (m *Memcached) SampleConfig() string { +func (*Memcached) SampleConfig() string { return sampleConfig } -// Description returns description of Memcached plugin -func (m *Memcached) Description() string { - return "Read metrics from one or many memcached servers" -} - // Gather reads stats from all configured servers accumulates stats func (m *Memcached) Gather(acc telegraf.Accumulator) error { if len(m.Servers) == 0 && len(m.UnixSockets) == 0 { @@ -105,8 +111,23 @@ func (m *Memcached) gatherServer( ) error { var conn net.Conn var err error + var dialer proxy.Dialer + + dialer = &net.Dialer{Timeout: defaultTimeout} + if m.EnableTLS { + tlsCfg, err := m.ClientConfig.TLSConfig() + if err != nil { + return err + } + + dialer = &tls.Dialer{ + NetDialer: dialer.(*net.Dialer), + Config: tlsCfg, + } + } + if unix { - conn, err = net.DialTimeout("unix", address, defaultTimeout) + conn, err = dialer.Dial("unix", address) if err != nil { return err } @@ -117,7 +138,7 @@ func (m *Memcached) gatherServer( address = address + ":11211" } - conn, err = net.DialTimeout("tcp", address, defaultTimeout) + conn, err = dialer.Dial("tcp", address) if err != nil { return err } @@ -129,7 +150,9 @@ func (m *Memcached) gatherServer( } // Extend connection - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) diff --git a/plugins/inputs/memcached/memcached_test.go b/plugins/inputs/memcached/memcached_test.go index 3c8a239f06d73..ba1aa97f6b357 100644 --- a/plugins/inputs/memcached/memcached_test.go +++ b/plugins/inputs/memcached/memcached_test.go @@ -2,26 +2,41 @@ package memcached import ( "bufio" + "fmt" "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/docker/go-connections/nat" "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf/testutil" ) -func TestMemcachedGeneratesMetrics(t *testing.T) { +func TestMemcachedGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + servicePort := "11211" + container := testutil.Container{ + Image: "memcached", + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForListeningPort(nat.Port(servicePort)), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + m := &Memcached{ - Servers: []string{testutil.GetLocalHost()}, + Servers: []string{fmt.Sprintf("%s:%s", container.Address, 
container.Ports[servicePort])}, } var acc testutil.Accumulator - err := acc.GatherError(m.Gather) + err = acc.GatherError(m.Gather) require.NoError(t, err) intMetrics := []string{"get_hits", "get_misses", "evictions", @@ -32,7 +47,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) { "bytes_read", "bytes_written", "threads", "conn_yields"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("memcached", metric), metric) + require.True(t, acc.HasInt64Field("memcached", metric), metric) } } @@ -45,24 +60,36 @@ func TestMemcachedParseMetrics(t *testing.T) { key string value string }{ - {"pid", "23235"}, - {"uptime", "194"}, - {"time", "1449174679"}, - {"version", "1.4.14 (Ubuntu)"}, - {"libevent", "2.0.21-stable"}, + {"pid", "5619"}, + {"uptime", "11"}, + {"time", "1644765868"}, + {"version", "1.6.14_5_ge03751b"}, + {"libevent", "2.1.11-stable"}, {"pointer_size", "64"}, - {"rusage_user", "0.000000"}, - {"rusage_system", "0.007566"}, - {"curr_connections", "5"}, - {"total_connections", "6"}, - {"connection_structures", "6"}, + {"rusage_user", "0.080905"}, + {"rusage_system", "0.059330"}, + {"max_connections", "1024"}, + {"curr_connections", "2"}, + {"total_connections", "3"}, + {"rejected_connections", "0"}, + {"connection_structures", "3"}, + {"response_obj_oom", "0"}, + {"response_obj_count", "1"}, + {"response_obj_bytes", "16384"}, + {"read_buf_count", "2"}, + {"read_buf_bytes", "32768"}, + {"read_buf_bytes_free", "0"}, + {"read_buf_oom", "0"}, {"reserved_fds", "20"}, {"cmd_get", "0"}, {"cmd_set", "0"}, {"cmd_flush", "0"}, {"cmd_touch", "0"}, + {"cmd_meta", "0"}, {"get_hits", "0"}, {"get_misses", "0"}, + {"get_expired", "0"}, + {"get_flushed", "0"}, {"delete_misses", "0"}, {"delete_hits", "0"}, {"incr_misses", "0"}, @@ -74,25 +101,57 @@ func TestMemcachedParseMetrics(t *testing.T) { {"cas_badval", "0"}, {"touch_hits", "0"}, {"touch_misses", "0"}, + {"store_too_large", "0"}, + {"store_no_memory", "0"}, {"auth_cmds", "0"}, {"auth_errors", "0"}, - {"bytes_read", "7"}, + {"bytes_read", "6"}, {"bytes_written", "0"}, {"limit_maxbytes", "67108864"}, {"accepting_conns", "1"}, {"listen_disabled_num", "0"}, + {"time_in_listen_disabled_us", "0"}, {"threads", "4"}, {"conn_yields", "0"}, {"hash_power_level", "16"}, {"hash_bytes", "524288"}, {"hash_is_expanding", "0"}, - {"expired_unfetched", "0"}, - {"evicted_unfetched", "0"}, + {"slab_reassign_rescues", "0"}, + {"slab_reassign_chunk_rescues", "0"}, + {"slab_reassign_evictions_nomem", "0"}, + {"slab_reassign_inline_reclaim", "0"}, + {"slab_reassign_busy_items", "0"}, + {"slab_reassign_busy_deletes", "0"}, + {"slab_reassign_running", "0"}, + {"slabs_moved", "0"}, + {"lru_crawler_running", "0"}, + {"lru_crawler_starts", "1"}, + {"lru_maintainer_juggles", "60"}, + {"malloc_fails", "0"}, + {"log_worker_dropped", "0"}, + {"log_worker_written", "0"}, + {"log_watcher_skipped", "0"}, + {"log_watcher_sent", "0"}, + {"log_watchers", "0"}, + {"unexpected_napi_ids", "0"}, + {"round_robin_fallback", "0"}, {"bytes", "0"}, {"curr_items", "0"}, {"total_items", "0"}, + {"slab_global_page_pool", "0"}, + {"expired_unfetched", "0"}, + {"evicted_unfetched", "0"}, + {"evicted_active", "0"}, {"evictions", "0"}, {"reclaimed", "0"}, + {"crawler_reclaimed", "0"}, + {"crawler_items_checked", "0"}, + {"lrutail_reflocked", "0"}, + {"moves_to_cold", "0"}, + {"moves_to_warm", "0"}, + {"moves_within_lru", "0"}, + {"direct_reclaims", "0"}, + {"lru_bumps_dropped", "0"}, } for _, test := range tests { @@ -108,24 +167,36 @@ func TestMemcachedParseMetrics(t *testing.T) 
{ } } -var memcachedStats = `STAT pid 23235 -STAT uptime 194 -STAT time 1449174679 -STAT version 1.4.14 (Ubuntu) -STAT libevent 2.0.21-stable +var memcachedStats = `STAT pid 5619 +STAT uptime 11 +STAT time 1644765868 +STAT version 1.6.14_5_ge03751b +STAT libevent 2.1.11-stable STAT pointer_size 64 -STAT rusage_user 0.000000 -STAT rusage_system 0.007566 -STAT curr_connections 5 -STAT total_connections 6 -STAT connection_structures 6 +STAT rusage_user 0.080905 +STAT rusage_system 0.059330 +STAT max_connections 1024 +STAT curr_connections 2 +STAT total_connections 3 +STAT rejected_connections 0 +STAT connection_structures 3 +STAT response_obj_oom 0 +STAT response_obj_count 1 +STAT response_obj_bytes 16384 +STAT read_buf_count 2 +STAT read_buf_bytes 32768 +STAT read_buf_bytes_free 0 +STAT read_buf_oom 0 STAT reserved_fds 20 STAT cmd_get 0 STAT cmd_set 0 STAT cmd_flush 0 STAT cmd_touch 0 +STAT cmd_meta 0 STAT get_hits 0 STAT get_misses 0 +STAT get_expired 0 +STAT get_flushed 0 STAT delete_misses 0 STAT delete_hits 0 STAT incr_misses 0 @@ -137,24 +208,56 @@ STAT cas_hits 0 STAT cas_badval 0 STAT touch_hits 0 STAT touch_misses 0 +STAT store_too_large 0 +STAT store_no_memory 0 STAT auth_cmds 0 STAT auth_errors 0 -STAT bytes_read 7 +STAT bytes_read 6 STAT bytes_written 0 STAT limit_maxbytes 67108864 STAT accepting_conns 1 STAT listen_disabled_num 0 +STAT time_in_listen_disabled_us 0 STAT threads 4 STAT conn_yields 0 STAT hash_power_level 16 STAT hash_bytes 524288 STAT hash_is_expanding 0 -STAT expired_unfetched 0 -STAT evicted_unfetched 0 +STAT slab_reassign_rescues 0 +STAT slab_reassign_chunk_rescues 0 +STAT slab_reassign_evictions_nomem 0 +STAT slab_reassign_inline_reclaim 0 +STAT slab_reassign_busy_items 0 +STAT slab_reassign_busy_deletes 0 +STAT slab_reassign_running 0 +STAT slabs_moved 0 +STAT lru_crawler_running 0 +STAT lru_crawler_starts 1 +STAT lru_maintainer_juggles 60 +STAT malloc_fails 0 +STAT log_worker_dropped 0 +STAT log_worker_written 0 +STAT log_watcher_skipped 0 +STAT log_watcher_sent 0 +STAT log_watchers 0 +STAT unexpected_napi_ids 0 +STAT round_robin_fallback 0 STAT bytes 0 STAT curr_items 0 STAT total_items 0 +STAT slab_global_page_pool 0 +STAT expired_unfetched 0 +STAT evicted_unfetched 0 +STAT evicted_active 0 STAT evictions 0 STAT reclaimed 0 +STAT crawler_reclaimed 0 +STAT crawler_items_checked 0 +STAT lrutail_reflocked 0 +STAT moves_to_cold 0 +STAT moves_to_warm 0 +STAT moves_within_lru 0 +STAT direct_reclaims 0 +STAT lru_bumps_dropped 0 END ` diff --git a/plugins/inputs/memcached/sample.conf b/plugins/inputs/memcached/sample.conf new file mode 100644 index 0000000000000..b13c2985c0a7f --- /dev/null +++ b/plugins/inputs/memcached/sample.conf @@ -0,0 +1,15 @@ +# Read metrics from one or many memcached servers. +[[inputs.memcached]] + # An array of address to gather stats about. Specify an ip on hostname + # with optional port. ie localhost, 10.0.0.1:11211, etc. + servers = ["localhost:11211"] + # An array of unix memcached sockets to gather stats about. 
+ # unix_sockets = ["/var/run/memcached.sock"] + + ## Optional TLS Config + # enable_tls = true + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## If false, skip chain & host verification + # insecure_skip_verify = true diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md index 2845881880d95..361e2d9a78bc4 100644 --- a/plugins/inputs/mesos/README.md +++ b/plugins/inputs/mesos/README.md @@ -1,11 +1,13 @@ # Mesos Input Plugin -This input plugin gathers metrics from Mesos. -For more information, please check the [Mesos Observability Metrics](http://mesos.apache.org/documentation/latest/monitoring/) page. +This input plugin gathers metrics from Mesos. For more information, please +check the [Mesos Observability Metrics][1] page. -### Configuration: +[1]: http://mesos.apache.org/documentation/latest/monitoring/ -```toml +## Configuration + +```toml @sample.conf # Telegraf plugin for gathering metrics from N Mesos masters [[inputs.mesos]] ## Timeout, in ms. @@ -50,283 +52,287 @@ For more information, please check the [Mesos Observability Metrics](http://meso # insecure_skip_verify = false ``` -By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default -values. User needs to specify master/slave nodes this plugin will gather metrics from. +By default this plugin is not configured to gather metrics from mesos. Since a +mesos cluster can be deployed in numerous ways it does not provide any default +values. User needs to specify master/slave nodes this plugin will gather metrics +from. -### Measurements & Fields: +## Metrics Mesos master metric groups - resources - - master/cpus_percent - - master/cpus_used - - master/cpus_total - - master/cpus_revocable_percent - - master/cpus_revocable_total - - master/cpus_revocable_used - - master/disk_percent - - master/disk_used - - master/disk_total - - master/disk_revocable_percent - - master/disk_revocable_total - - master/disk_revocable_used - - master/gpus_percent - - master/gpus_used - - master/gpus_total - - master/gpus_revocable_percent - - master/gpus_revocable_total - - master/gpus_revocable_used - - master/mem_percent - - master/mem_used - - master/mem_total - - master/mem_revocable_percent - - master/mem_revocable_total - - master/mem_revocable_used + - master/cpus_percent + - master/cpus_used + - master/cpus_total + - master/cpus_revocable_percent + - master/cpus_revocable_total + - master/cpus_revocable_used + - master/disk_percent + - master/disk_used + - master/disk_total + - master/disk_revocable_percent + - master/disk_revocable_total + - master/disk_revocable_used + - master/gpus_percent + - master/gpus_used + - master/gpus_total + - master/gpus_revocable_percent + - master/gpus_revocable_total + - master/gpus_revocable_used + - master/mem_percent + - master/mem_used + - master/mem_total + - master/mem_revocable_percent + - master/mem_revocable_total + - master/mem_revocable_used - master - - master/elected - - master/uptime_secs + - master/elected + - master/uptime_secs - system - - system/cpus_total - - system/load_15min - - system/load_5min - - system/load_1min - - system/mem_free_bytes - - system/mem_total_bytes + - system/cpus_total + - system/load_15min + - system/load_5min + - system/load_1min + - system/mem_free_bytes + - system/mem_total_bytes - slaves - - master/slave_registrations - - master/slave_removals - - master/slave_reregistrations - - 
master/slave_shutdowns_scheduled - - master/slave_shutdowns_canceled - - master/slave_shutdowns_completed - - master/slaves_active - - master/slaves_connected - - master/slaves_disconnected - - master/slaves_inactive - - master/slave_unreachable_canceled - - master/slave_unreachable_completed - - master/slave_unreachable_scheduled - - master/slaves_unreachable + - master/slave_registrations + - master/slave_removals + - master/slave_reregistrations + - master/slave_shutdowns_scheduled + - master/slave_shutdowns_canceled + - master/slave_shutdowns_completed + - master/slaves_active + - master/slaves_connected + - master/slaves_disconnected + - master/slaves_inactive + - master/slave_unreachable_canceled + - master/slave_unreachable_completed + - master/slave_unreachable_scheduled + - master/slaves_unreachable - frameworks - - master/frameworks_active - - master/frameworks_connected - - master/frameworks_disconnected - - master/frameworks_inactive - - master/outstanding_offers + - master/frameworks_active + - master/frameworks_connected + - master/frameworks_disconnected + - master/frameworks_inactive + - master/outstanding_offers - framework offers - - master/frameworks/subscribed - - master/frameworks/calls_total - - master/frameworks/calls - - master/frameworks/events_total - - master/frameworks/events - - master/frameworks/operations_total - - master/frameworks/operations - - master/frameworks/tasks/active - - master/frameworks/tasks/terminal - - master/frameworks/offers/sent - - master/frameworks/offers/accepted - - master/frameworks/offers/declined - - master/frameworks/offers/rescinded - - master/frameworks/roles/suppressed + - master/frameworks/subscribed + - master/frameworks/calls_total + - master/frameworks/calls + - master/frameworks/events_total + - master/frameworks/events + - master/frameworks/operations_total + - master/frameworks/operations + - master/frameworks/tasks/active + - master/frameworks/tasks/terminal + - master/frameworks/offers/sent + - master/frameworks/offers/accepted + - master/frameworks/offers/declined + - master/frameworks/offers/rescinded + - master/frameworks/roles/suppressed - tasks - - master/tasks_error - - master/tasks_failed - - master/tasks_finished - - master/tasks_killed - - master/tasks_lost - - master/tasks_running - - master/tasks_staging - - master/tasks_starting - - master/tasks_dropped - - master/tasks_gone - - master/tasks_gone_by_operator - - master/tasks_killing - - master/tasks_unreachable + - master/tasks_error + - master/tasks_failed + - master/tasks_finished + - master/tasks_killed + - master/tasks_lost + - master/tasks_running + - master/tasks_staging + - master/tasks_starting + - master/tasks_dropped + - master/tasks_gone + - master/tasks_gone_by_operator + - master/tasks_killing + - master/tasks_unreachable - messages - - master/invalid_executor_to_framework_messages - - master/invalid_framework_to_executor_messages - - master/invalid_status_update_acknowledgements - - master/invalid_status_updates - - master/dropped_messages - - master/messages_authenticate - - master/messages_deactivate_framework - - master/messages_decline_offers - - master/messages_executor_to_framework - - master/messages_exited_executor - - master/messages_framework_to_executor - - master/messages_kill_task - - master/messages_launch_tasks - - master/messages_reconcile_tasks - - master/messages_register_framework - - master/messages_register_slave - - master/messages_reregister_framework - - master/messages_reregister_slave - - 
master/messages_resource_request - - master/messages_revive_offers - - master/messages_status_update - - master/messages_status_update_acknowledgement - - master/messages_unregister_framework - - master/messages_unregister_slave - - master/messages_update_slave - - master/recovery_slave_removals - - master/slave_removals/reason_registered - - master/slave_removals/reason_unhealthy - - master/slave_removals/reason_unregistered - - master/valid_framework_to_executor_messages - - master/valid_status_update_acknowledgements - - master/valid_status_updates - - master/task_lost/source_master/reason_invalid_offers - - master/task_lost/source_master/reason_slave_removed - - master/task_lost/source_slave/reason_executor_terminated - - master/valid_executor_to_framework_messages - - master/invalid_operation_status_update_acknowledgements - - master/messages_operation_status_update_acknowledgement - - master/messages_reconcile_operations - - master/messages_suppress_offers - - master/valid_operation_status_update_acknowledgements + - master/invalid_executor_to_framework_messages + - master/invalid_framework_to_executor_messages + - master/invalid_status_update_acknowledgements + - master/invalid_status_updates + - master/dropped_messages + - master/messages_authenticate + - master/messages_deactivate_framework + - master/messages_decline_offers + - master/messages_executor_to_framework + - master/messages_exited_executor + - master/messages_framework_to_executor + - master/messages_kill_task + - master/messages_launch_tasks + - master/messages_reconcile_tasks + - master/messages_register_framework + - master/messages_register_slave + - master/messages_reregister_framework + - master/messages_reregister_slave + - master/messages_resource_request + - master/messages_revive_offers + - master/messages_status_update + - master/messages_status_update_acknowledgement + - master/messages_unregister_framework + - master/messages_unregister_slave + - master/messages_update_slave + - master/recovery_slave_removals + - master/slave_removals/reason_registered + - master/slave_removals/reason_unhealthy + - master/slave_removals/reason_unregistered + - master/valid_framework_to_executor_messages + - master/valid_status_update_acknowledgements + - master/valid_status_updates + - master/task_lost/source_master/reason_invalid_offers + - master/task_lost/source_master/reason_slave_removed + - master/task_lost/source_slave/reason_executor_terminated + - master/valid_executor_to_framework_messages + - master/invalid_operation_status_update_acknowledgements + - master/messages_operation_status_update_acknowledgement + - master/messages_reconcile_operations + - master/messages_suppress_offers + - master/valid_operation_status_update_acknowledgements - evqueue - - master/event_queue_dispatches - - master/event_queue_http_requests - - master/event_queue_messages - - master/operator_event_stream_subscribers + - master/event_queue_dispatches + - master/event_queue_http_requests + - master/event_queue_messages + - master/operator_event_stream_subscribers - registrar - - registrar/state_fetch_ms - - registrar/state_store_ms - - registrar/state_store_ms/max - - registrar/state_store_ms/min - - registrar/state_store_ms/p50 - - registrar/state_store_ms/p90 - - registrar/state_store_ms/p95 - - registrar/state_store_ms/p99 - - registrar/state_store_ms/p999 - - registrar/state_store_ms/p9999 - - registrar/state_store_ms/count - - registrar/log/ensemble_size - - registrar/log/recovered - - registrar/queued_operations - - 
registrar/registry_size_bytes + - registrar/state_fetch_ms + - registrar/state_store_ms + - registrar/state_store_ms/max + - registrar/state_store_ms/min + - registrar/state_store_ms/p50 + - registrar/state_store_ms/p90 + - registrar/state_store_ms/p95 + - registrar/state_store_ms/p99 + - registrar/state_store_ms/p999 + - registrar/state_store_ms/p9999 + - registrar/state_store_ms/count + - registrar/log/ensemble_size + - registrar/log/recovered + - registrar/queued_operations + - registrar/registry_size_bytes - allocator - - allocator/allocation_run_ms - - allocator/allocation_run_ms/count - - allocator/allocation_run_ms/max - - allocator/allocation_run_ms/min - - allocator/allocation_run_ms/p50 - - allocator/allocation_run_ms/p90 - - allocator/allocation_run_ms/p95 - - allocator/allocation_run_ms/p99 - - allocator/allocation_run_ms/p999 - - allocator/allocation_run_ms/p9999 - - allocator/allocation_runs - - allocator/allocation_run_latency_ms - - allocator/allocation_run_latency_ms/count - - allocator/allocation_run_latency_ms/max - - allocator/allocation_run_latency_ms/min - - allocator/allocation_run_latency_ms/p50 - - allocator/allocation_run_latency_ms/p90 - - allocator/allocation_run_latency_ms/p95 - - allocator/allocation_run_latency_ms/p99 - - allocator/allocation_run_latency_ms/p999 - - allocator/allocation_run_latency_ms/p9999 - - allocator/roles/shares/dominant - - allocator/event_queue_dispatches - - allocator/offer_filters/roles/active - - allocator/quota/roles/resources/offered_or_allocated - - allocator/quota/roles/resources/guarantee - - allocator/resources/cpus/offered_or_allocated - - allocator/resources/cpus/total - - allocator/resources/disk/offered_or_allocated - - allocator/resources/disk/total - - allocator/resources/mem/offered_or_allocated - - allocator/resources/mem/total + - allocator/allocation_run_ms + - allocator/allocation_run_ms/count + - allocator/allocation_run_ms/max + - allocator/allocation_run_ms/min + - allocator/allocation_run_ms/p50 + - allocator/allocation_run_ms/p90 + - allocator/allocation_run_ms/p95 + - allocator/allocation_run_ms/p99 + - allocator/allocation_run_ms/p999 + - allocator/allocation_run_ms/p9999 + - allocator/allocation_runs + - allocator/allocation_run_latency_ms + - allocator/allocation_run_latency_ms/count + - allocator/allocation_run_latency_ms/max + - allocator/allocation_run_latency_ms/min + - allocator/allocation_run_latency_ms/p50 + - allocator/allocation_run_latency_ms/p90 + - allocator/allocation_run_latency_ms/p95 + - allocator/allocation_run_latency_ms/p99 + - allocator/allocation_run_latency_ms/p999 + - allocator/allocation_run_latency_ms/p9999 + - allocator/roles/shares/dominant + - allocator/event_queue_dispatches + - allocator/offer_filters/roles/active + - allocator/quota/roles/resources/offered_or_allocated + - allocator/quota/roles/resources/guarantee + - allocator/resources/cpus/offered_or_allocated + - allocator/resources/cpus/total + - allocator/resources/disk/offered_or_allocated + - allocator/resources/disk/total + - allocator/resources/mem/offered_or_allocated + - allocator/resources/mem/total Mesos slave metric groups + - resources - - slave/cpus_percent - - slave/cpus_used - - slave/cpus_total - - slave/cpus_revocable_percent - - slave/cpus_revocable_total - - slave/cpus_revocable_used - - slave/disk_percent - - slave/disk_used - - slave/disk_total - - slave/disk_revocable_percent - - slave/disk_revocable_total - - slave/disk_revocable_used - - slave/gpus_percent - - slave/gpus_used - - slave/gpus_total, - 
- slave/gpus_revocable_percent - - slave/gpus_revocable_total - - slave/gpus_revocable_used - - slave/mem_percent - - slave/mem_used - - slave/mem_total - - slave/mem_revocable_percent - - slave/mem_revocable_total - - slave/mem_revocable_used + - slave/cpus_percent + - slave/cpus_used + - slave/cpus_total + - slave/cpus_revocable_percent + - slave/cpus_revocable_total + - slave/cpus_revocable_used + - slave/disk_percent + - slave/disk_used + - slave/disk_total + - slave/disk_revocable_percent + - slave/disk_revocable_total + - slave/disk_revocable_used + - slave/gpus_percent + - slave/gpus_used + - slave/gpus_total, + - slave/gpus_revocable_percent + - slave/gpus_revocable_total + - slave/gpus_revocable_used + - slave/mem_percent + - slave/mem_used + - slave/mem_total + - slave/mem_revocable_percent + - slave/mem_revocable_total + - slave/mem_revocable_used - agent - - slave/registered - - slave/uptime_secs + - slave/registered + - slave/uptime_secs - system - - system/cpus_total - - system/load_15min - - system/load_5min - - system/load_1min - - system/mem_free_bytes - - system/mem_total_bytes + - system/cpus_total + - system/load_15min + - system/load_5min + - system/load_1min + - system/mem_free_bytes + - system/mem_total_bytes - executors - - containerizer/mesos/container_destroy_errors - - slave/container_launch_errors - - slave/executors_preempted - - slave/frameworks_active - - slave/executor_directory_max_allowed_age_secs - - slave/executors_registering - - slave/executors_running - - slave/executors_terminated - - slave/executors_terminating - - slave/recovery_errors + - containerizer/mesos/container_destroy_errors + - slave/container_launch_errors + - slave/executors_preempted + - slave/frameworks_active + - slave/executor_directory_max_allowed_age_secs + - slave/executors_registering + - slave/executors_running + - slave/executors_terminated + - slave/executors_terminating + - slave/recovery_errors - tasks - - slave/tasks_failed - - slave/tasks_finished - - slave/tasks_killed - - slave/tasks_lost - - slave/tasks_running - - slave/tasks_staging - - slave/tasks_starting + - slave/tasks_failed + - slave/tasks_finished + - slave/tasks_killed + - slave/tasks_lost + - slave/tasks_running + - slave/tasks_staging + - slave/tasks_starting - messages - - slave/invalid_framework_messages - - slave/invalid_status_updates - - slave/valid_framework_messages - - slave/valid_status_updates + - slave/invalid_framework_messages + - slave/invalid_status_updates + - slave/valid_framework_messages + - slave/valid_status_updates -### Tags: +## Tags - All master/slave measurements have the following tags: - - server (network location of server: `host:port`) - - url (URL origin of server: `scheme://host:port`) - - role (master/slave) + - server (network location of server: `host:port`) + - url (URL origin of server: `scheme://host:port`) + - role (master/slave) - All master measurements have the extra tags: - - state (leader/follower) + - state (leader/follower) -### Example Output: -``` +## Example Output + +```shell $ telegraf --config ~/mesos.conf --input-filter mesos --test * Plugin: mesos, Collection 1 mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101 @@ -347,4 +353,3 @@ master/mem_revocable_used=0,master/mem_total=1002, master/mem_used=0,master/messages_authenticate=0, master/messages_deactivate_framework=0 ... 
``` - diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index acc836cba34bb..f0f0f102d16fb 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -1,10 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package mesos import ( + _ "embed" "encoding/json" "errors" - "io/ioutil" - "log" + "io" "net" "net/http" "net/url" @@ -19,11 +20,15 @@ import ( jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Role string const ( MASTER Role = "master" - SLAVE = "slave" + SLAVE Role = "slave" ) type Mesos struct { @@ -47,60 +52,7 @@ var allMetrics = map[Role][]string{ SLAVE: {"resources", "agent", "system", "executors", "tasks", "messages"}, } -var sampleConfig = ` - ## Timeout, in ms. - timeout = 100 - - ## A list of Mesos masters. - masters = ["http://localhost:5050"] - - ## Master metrics groups to be collected, by default, all enabled. - master_collections = [ - "resources", - "master", - "system", - "agents", - "frameworks", - "framework_offers", - "tasks", - "messages", - "evqueue", - "registrar", - "allocator", - ] - - ## A list of Mesos slaves, default is [] - # slaves = [] - - ## Slave metrics groups to be collected, by default, all enabled. - # slave_collections = [ - # "resources", - # "agent", - # "system", - # "executors", - # "tasks", - # "messages", - # ] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -// SampleConfig returns a sample configuration block -func (m *Mesos) SampleConfig() string { - return sampleConfig -} - -// Description just returns a short description of the Mesos plugin -func (m *Mesos) Description() string { - return "Telegraf plugin for gathering metrics from N Mesos masters" -} - -func parseURL(s string, role Role) (*url.URL, error) { +func (m *Mesos) parseURL(s string, role Role) (*url.URL, error) { if !strings.HasPrefix(s, "http://") && !strings.HasPrefix(s, "https://") { host, port, err := net.SplitHostPort(s) // no port specified @@ -115,7 +67,7 @@ func parseURL(s string, role Role) (*url.URL, error) { } s = "http://" + host + ":" + port - log.Printf("W! 
[inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s) + m.Log.Warnf("using %q as connection URL; please update your configuration to use an URL", s) } return url.Parse(s) @@ -139,7 +91,7 @@ func (m *Mesos) initialize() error { m.masterURLs = make([]*url.URL, 0, len(m.Masters)) for _, master := range m.Masters { - u, err := parseURL(master, MASTER) + u, err := m.parseURL(master, MASTER) if err != nil { return err } @@ -150,7 +102,7 @@ func (m *Mesos) initialize() error { m.slaveURLs = make([]*url.URL, 0, len(m.Slaves)) for _, slave := range m.Slaves { - u, err := parseURL(slave, SLAVE) + u, err := m.parseURL(slave, SLAVE) if err != nil { return err } @@ -159,7 +111,7 @@ func (m *Mesos) initialize() error { m.slaveURLs = append(m.slaveURLs, u) } - client, err := m.createHttpClient() + client, err := m.createHTTPClient() if err != nil { return err } @@ -168,6 +120,10 @@ func (m *Mesos) initialize() error { return nil } +func (*Mesos) SampleConfig() string { + return sampleConfig +} + // Gather() metrics from given list of Mesos Masters func (m *Mesos) Gather(acc telegraf.Accumulator) error { if !m.initialized { @@ -185,7 +141,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { go func(master *url.URL) { acc.AddError(m.gatherMainMetrics(master, MASTER, acc)) wg.Done() - return }(master) } @@ -194,7 +149,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { go func(slave *url.URL) { acc.AddError(m.gatherMainMetrics(slave, SLAVE, acc)) wg.Done() - return }(slave) } @@ -203,7 +157,7 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { return nil } -func (m *Mesos) createHttpClient() (*http.Client, error) { +func (m *Mesos) createHTTPClient() (*http.Client, error) { tlsCfg, err := m.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -243,13 +197,11 @@ func metricsDiff(role Role, w []string) []string { } // masterBlocks serves as kind of metrics registry grouping them in sets -func getMetrics(role Role, group string) []string { - var m map[string][]string - - m = make(map[string][]string) +func (m *Mesos) getMetrics(role Role, group string) []string { + metrics := make(map[string][]string) if role == MASTER { - m["resources"] = []string{ + metrics["resources"] = []string{ "master/cpus_percent", "master/cpus_used", "master/cpus_total", @@ -276,12 +228,12 @@ func getMetrics(role Role, group string) []string { "master/mem_revocable_used", } - m["master"] = []string{ + metrics["master"] = []string{ "master/elected", "master/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -290,7 +242,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["agents"] = []string{ + metrics["agents"] = []string{ "master/slave_registrations", "master/slave_removals", "master/slave_reregistrations", @@ -307,7 +259,7 @@ func getMetrics(role Role, group string) []string { "master/slaves_unreachable", } - m["frameworks"] = []string{ + metrics["frameworks"] = []string{ "master/frameworks_active", "master/frameworks_connected", "master/frameworks_disconnected", @@ -318,10 +270,10 @@ func getMetrics(role Role, group string) []string { // framework_offers and allocator metrics have unpredictable names, so they can't be listed here. // These empty groups are included to prevent the "unknown metrics group" info log below. // filterMetrics() filters these metrics by looking for names with the corresponding prefix. 
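		// For instance, the concrete names carry the "master/frameworks/" and
		// "allocator/" prefixes respectively; filterMetrics() below matches on
		// exactly those prefixes.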
- m["framework_offers"] = []string{} - m["allocator"] = []string{} + metrics["framework_offers"] = []string{} + metrics["allocator"] = []string{} - m["tasks"] = []string{ + metrics["tasks"] = []string{ "master/tasks_error", "master/tasks_failed", "master/tasks_finished", @@ -337,7 +289,7 @@ func getMetrics(role Role, group string) []string { "master/tasks_unreachable", } - m["messages"] = []string{ + metrics["messages"] = []string{ "master/invalid_executor_to_framework_messages", "master/invalid_framework_to_executor_messages", "master/invalid_status_update_acknowledgements", @@ -381,14 +333,14 @@ func getMetrics(role Role, group string) []string { "master/valid_operation_status_update_acknowledgements", } - m["evqueue"] = []string{ + metrics["evqueue"] = []string{ "master/event_queue_dispatches", "master/event_queue_http_requests", "master/event_queue_messages", "master/operator_event_stream_subscribers", } - m["registrar"] = []string{ + metrics["registrar"] = []string{ "registrar/state_fetch_ms", "registrar/state_store_ms", "registrar/state_store_ms/max", @@ -406,7 +358,7 @@ func getMetrics(role Role, group string) []string { "registrar/state_store_ms/count", } } else if role == SLAVE { - m["resources"] = []string{ + metrics["resources"] = []string{ "slave/cpus_percent", "slave/cpus_used", "slave/cpus_total", @@ -433,12 +385,12 @@ func getMetrics(role Role, group string) []string { "slave/mem_revocable_used", } - m["agent"] = []string{ + metrics["agent"] = []string{ "slave/registered", "slave/uptime_secs", } - m["system"] = []string{ + metrics["system"] = []string{ "system/cpus_total", "system/load_15min", "system/load_5min", @@ -447,7 +399,7 @@ func getMetrics(role Role, group string) []string { "system/mem_total_bytes", } - m["executors"] = []string{ + metrics["executors"] = []string{ "containerizer/mesos/container_destroy_errors", "slave/container_launch_errors", "slave/executors_preempted", @@ -460,7 +412,7 @@ func getMetrics(role Role, group string) []string { "slave/recovery_errors", } - m["tasks"] = []string{ + metrics["tasks"] = []string{ "slave/tasks_failed", "slave/tasks_finished", "slave/tasks_killed", @@ -470,7 +422,7 @@ func getMetrics(role Role, group string) []string { "slave/tasks_starting", } - m["messages"] = []string{ + metrics["messages"] = []string{ "slave/invalid_framework_messages", "slave/invalid_status_updates", "slave/valid_framework_messages", @@ -478,10 +430,10 @@ func getMetrics(role Role, group string) []string { } } - ret, ok := m[group] + ret, ok := metrics[group] if !ok { - log.Printf("I! [inputs.mesos] unknown role %q metrics group: %s", role, group) + m.Log.Infof("unknown role %q metrics group: %s", role, group) return []string{} } @@ -504,21 +456,21 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { case "allocator": for m := range *metrics { if strings.HasPrefix(m, "allocator/") { - delete((*metrics), m) + delete(*metrics, m) } } case "framework_offers": for m := range *metrics { if strings.HasPrefix(m, "master/frameworks/") || strings.HasPrefix(m, "frameworks/") { - delete((*metrics), m) + delete(*metrics, m) } } // All other metrics have predictable names. We can use getMetrics() to retrieve them. 
default: - for _, v := range getMetrics(role, k) { + for _, v := range m.getMetrics(role, k) { if _, ok = (*metrics)[v]; ok { - delete((*metrics), v) + delete(*metrics, v) } } } @@ -532,49 +484,6 @@ type TaskStats struct { Statistics map[string]interface{} `json:"statistics"` } -func (m *Mesos) gatherSlaveTaskMetrics(u *url.URL, acc telegraf.Accumulator) error { - var metrics []TaskStats - - tags := map[string]string{ - "server": u.Hostname(), - "url": urlTag(u), - } - - resp, err := m.client.Get(withPath(u, "/monitor/statistics").String()) - - if err != nil { - return err - } - - data, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return err - } - - if err = json.Unmarshal([]byte(data), &metrics); err != nil { - return errors.New("Error decoding JSON response") - } - - for _, task := range metrics { - tags["framework_id"] = task.FrameworkID - - jf := jsonparser.JSONFlattener{} - err = jf.FlattenJSON("", task.Statistics) - - if err != nil { - return err - } - - timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0) - jf.Fields["executor_id"] = task.ExecutorID - - acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp) - } - - return nil -} - func withPath(u *url.URL, path string) *url.URL { c := *u c.Path = path @@ -605,14 +514,16 @@ func (m *Mesos) gatherMainMetrics(u *url.URL, role Role, acc telegraf.Accumulato return err } - data, err := ioutil.ReadAll(resp.Body) + data, err := io.ReadAll(resp.Body) + // Ignore the returned error to not shadow the initial one + //nolint:errcheck,revive resp.Body.Close() if err != nil { return err } - if err = json.Unmarshal([]byte(data), &jsonOut); err != nil { - return errors.New("Error decoding JSON response") + if err = json.Unmarshal(data, &jsonOut); err != nil { + return errors.New("error decoding JSON response") } m.filterMetrics(role, &jsonOut) diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index e25f250c8f8d4..2605ddd4678c2 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -2,7 +2,6 @@ package mesos import ( "encoding/json" - "fmt" "math/rand" "net/http" "net/http/httptest" @@ -11,25 +10,19 @@ import ( "strings" "testing" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var masterMetrics map[string]interface{} var masterTestServer *httptest.Server var slaveMetrics map[string]interface{} -// var slaveTaskMetrics map[string]interface{} var slaveTestServer *httptest.Server -func randUUID() string { - b := make([]byte, 16) - rand.Read(b) - return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) -} - // master metrics that will be returned by generateMetrics() -var masterMetricNames []string = []string{ +var masterMetricNames = []string{ // resources "master/cpus_percent", "master/cpus_used", @@ -214,7 +207,7 @@ var masterMetricNames []string = []string{ } // slave metrics that will be returned by generateMetrics() -var slaveMetricNames []string = []string{ +var slaveMetricNames = []string{ // resources "slave/cpus_percent", "slave/cpus_used", @@ -286,32 +279,6 @@ func generateMetrics() { for _, k := range slaveMetricNames { slaveMetrics[k] = rand.Float64() } - - // slaveTaskMetrics = map[string]interface{}{ - // "executor_id": fmt.Sprintf("task_name.%s", randUUID()), - // "executor_name": "Some task description", - // "framework_id": randUUID(), - // "source": fmt.Sprintf("task_source.%s", randUUID()), - // "statistics": 
map[string]interface{}{ - // "cpus_limit": rand.Float64(), - // "cpus_system_time_secs": rand.Float64(), - // "cpus_user_time_secs": rand.Float64(), - // "mem_anon_bytes": float64(rand.Int63()), - // "mem_cache_bytes": float64(rand.Int63()), - // "mem_critical_pressure_counter": float64(rand.Int63()), - // "mem_file_bytes": float64(rand.Int63()), - // "mem_limit_bytes": float64(rand.Int63()), - // "mem_low_pressure_counter": float64(rand.Int63()), - // "mem_mapped_file_bytes": float64(rand.Int63()), - // "mem_medium_pressure_counter": float64(rand.Int63()), - // "mem_rss_bytes": float64(rand.Int63()), - // "mem_swap_bytes": float64(rand.Int63()), - // "mem_total_bytes": float64(rand.Int63()), - // "mem_total_memsw_bytes": float64(rand.Int63()), - // "mem_unevictable_bytes": float64(rand.Int63()), - // "timestamp": rand.Float64(), - // }, - // } } func TestMain(m *testing.M) { @@ -321,6 +288,8 @@ func TestMain(m *testing.M) { masterRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive json.NewEncoder(w).Encode(masterMetrics) }) masterTestServer = httptest.NewServer(masterRouter) @@ -329,13 +298,10 @@ func TestMain(m *testing.M) { slaveRouter.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) w.Header().Set("Content-Type", "application/json") + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive json.NewEncoder(w).Encode(slaveMetrics) }) - // slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) { - // w.WriteHeader(http.StatusOK) - // w.Header().Set("Content-Type", "application/json") - // json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics}) - // }) slaveTestServer = httptest.NewServer(slaveRouter) rc := m.Run() @@ -354,11 +320,7 @@ func TestMesosMaster(t *testing.T) { Timeout: 10, } - err := acc.GatherError(m.Gather) - - if err != nil { - t.Errorf(err.Error()) - } + require.NoError(t, acc.GatherError(m.Gather)) acc.AssertContainsFields(t, "mesos", masterMetrics) } @@ -379,10 +341,9 @@ func TestMasterFilter(t *testing.T) { // Assert expected metrics are present. for _, v := range m.MasterCols { - for _, x := range getMetrics(MASTER, v) { - if _, ok := masterMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should present.", x) - } + for _, x := range m.getMetrics(MASTER, v) { + _, ok := masterMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should be present.", x) } } // m.MasterCols includes "allocator", so allocator metrics should be present. @@ -390,18 +351,16 @@ func TestMasterFilter(t *testing.T) { // getMetrics(). We have to find them by checking name prefixes. for _, x := range masterMetricNames { if strings.HasPrefix(x, "allocator/") { - if _, ok := masterMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should be present.", x) - } + _, ok := masterMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should be present.", x) } } // Assert unexpected metrics are not present.
for _, v := range b { - for _, x := range getMetrics(MASTER, v) { - if _, ok := masterMetrics[x]; ok { - t.Errorf("Found key %s, it should be gone.", x) - } + for _, x := range m.getMetrics(MASTER, v) { + _, ok := masterMetrics[x] + require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } // m.MasterCols does not include "framework_offers", so framework_offers metrics should not be present. @@ -409,7 +368,7 @@ func TestMasterFilter(t *testing.T) { // getMetrics(). We have to find them by checking name prefixes. for k := range masterMetrics { if strings.HasPrefix(k, "master/frameworks/") || strings.HasPrefix(k, "frameworks/") { - t.Errorf("Found key %s, it should be gone.", k) + require.Failf(t, "Found unexpected key", "Found key %s, it should be gone.", k) } } } @@ -425,11 +384,7 @@ func TestMesosSlave(t *testing.T) { Timeout: 10, } - err := acc.GatherError(m.Gather) - - if err != nil { - t.Errorf(err.Error()) - } + require.NoError(t, acc.GatherError(m.Gather)) acc.AssertContainsFields(t, "mesos", slaveMetrics) } @@ -448,17 +403,15 @@ func TestSlaveFilter(t *testing.T) { m.filterMetrics(SLAVE, &slaveMetrics) for _, v := range b { - for _, x := range getMetrics(SLAVE, v) { - if _, ok := slaveMetrics[x]; ok { - t.Errorf("Found key %s, it should be gone.", x) - } + for _, x := range m.getMetrics(SLAVE, v) { + _, ok := slaveMetrics[x] + require.Falsef(t, ok, "Found key %s, it should be gone.", x) } } for _, v := range m.MasterCols { - for _, x := range getMetrics(SLAVE, v) { - if _, ok := slaveMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should present.", x) - } + for _, x := range m.getMetrics(SLAVE, v) { + _, ok := slaveMetrics[x] + require.Truef(t, ok, "Didn't find key %s, it should be present.", x) } } } diff --git a/plugins/inputs/mesos/sample.conf b/plugins/inputs/mesos/sample.conf new file mode 100644 index 0000000000000..e860160dbff3c --- /dev/null +++ b/plugins/inputs/mesos/sample.conf @@ -0,0 +1,42 @@ +# Telegraf plugin for gathering metrics from N Mesos masters +[[inputs.mesos]] + ## Timeout, in ms. + timeout = 100 + + ## A list of Mesos masters. + masters = ["http://localhost:5050"] + + ## Master metrics groups to be collected, by default, all enabled. + master_collections = [ + "resources", + "master", + "system", + "agents", + "frameworks", + "framework_offers", + "tasks", + "messages", + "evqueue", + "registrar", + "allocator", + ] + + ## A list of Mesos slaves, default is [] + # slaves = [] + + ## Slave metrics groups to be collected, by default, all enabled. + # slave_collections = [ + # "resources", + # "agent", + # "system", + # "executors", + # "tasks", + # "messages", + # ] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/minecraft/README.md b/plugins/inputs/minecraft/README.md index 026c9e3b3fb99..e1ce6a1014be7 100644 --- a/plugins/inputs/minecraft/README.md +++ b/plugins/inputs/minecraft/README.md @@ -7,7 +7,7 @@ This plugin is known to support Minecraft Java Edition versions 1.11 - 1.14. When using a version of Minecraft earlier than 1.13, be aware that the values for some criteria have changed and may need to be modified. -#### Server Setup +## Server Setup Enable [RCON][] on the Minecraft server, add this to your server configuration in the [server.properties][] file: @@ -24,24 +24,28 @@ from the server console, or over an RCON connection.
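For orientation, the round trip the plugin performs for such commands looks roughly like the sketch below. This is a minimal sketch using the plugin-internal rcon package shown later in this patch; the host, port and password values are assumptions, and since the package lives under `internal/` the snippet only compiles when placed inside the minecraft plugin tree:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon"
)

func main() {
	// Connect to the RCON port configured in server.properties.
	client, err := rcon.NewClient("localhost", 25575)
	if err != nil {
		panic(err)
	}
	// Authorize with the rcon.password value.
	if _, err := client.Authorize("hunter2"); err != nil {
		panic(err)
	}
	// Run the same command you would type into the server console.
	resp, err := client.Execute("scoreboard players list")
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Body)
}
```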
When getting started pick an easy to test objective. This command will add an objective that counts the number of times a player has jumped: -``` + +```sh /scoreboard objectives add jumps minecraft.custom:minecraft.jump ``` Once a player has triggered the event they will be added to the scoreboard, you can then list all players with recorded scores: -``` + +```sh /scoreboard players list ``` View the current scores with a command, substituting your player name: -``` + +```sh /scoreboard players list Etho ``` -### Configuration +## Configuration -```toml +```toml @sample.conf +# Collects scores from a Minecraft server's scoreboard using the RCON protocol [[inputs.minecraft]] ## Address of the Minecraft server. # server = "localhost" @@ -51,9 +55,12 @@ View the current scores with a command, substituting your player name: ## Server RCON Password. password = "" + + ## Uncomment to remove deprecated metric components. + # tagdrop = ["server"] ``` -### Metrics +## Metrics - minecraft - tags: @@ -64,15 +71,17 @@ View the current scores with a command, substituting your player name: - fields: - `` (integer, count) -### Sample Queries: +## Sample Queries Get the number of jumps per player in the last hour: + ```sql SELECT SPREAD("jumps") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player" ``` -### Example Output: -``` +## Example Output + +```shell minecraft,player=notch,source=127.0.0.1,port=25575 jumps=178i 1498261397000000000 minecraft,player=dinnerbone,source=127.0.0.1,port=25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000 minecraft,player=jeb,source=127.0.0.1,port=25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000 diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go index 30f56213af345..4aa712d4b04f4 100644 --- a/plugins/inputs/minecraft/client.go +++ b/plugins/inputs/minecraft/client.go @@ -25,12 +25,12 @@ type Connector interface { Connect() (Connection, error) } -func NewConnector(hostname, port, password string) (*connector, error) { +func newConnector(hostname, port, password string) *connector { return &connector{ hostname: hostname, port: port, password: password, - }, nil + } } type connector struct { @@ -45,21 +45,21 @@ func (c *connector) Connect() (Connection, error) { return nil, err } - rcon, err := rcon.NewClient(c.hostname, p) + client, err := rcon.NewClient(c.hostname, p) if err != nil { return nil, err } - _, err = rcon.Authorize(c.password) + _, err = client.Authorize(c.password) if err != nil { return nil, err } - return &connection{rcon: rcon}, nil + return &connection{client: client}, nil } -func NewClient(connector Connector) (*client, error) { - return &client{connector: connector}, nil +func newClient(connector Connector) *client { + return &client{connector: connector} } type client struct { @@ -90,13 +90,7 @@ func (c *client) Players() ([]string, error) { return nil, err } - players, err := parsePlayers(resp) - if err != nil { - c.conn = nil - return nil, err - } - - return players, nil + return parsePlayers(resp), nil } func (c *client) Scores(player string) ([]Score, error) { @@ -113,31 +107,25 @@ func (c *client) Scores(player string) ([]Score, error) { return nil, err } - scores, err := parseScores(resp) - if err != nil { - c.conn = nil - return nil, err - } - - return scores, nil + return parseScores(resp), nil } type connection struct { - rcon *rcon.Client + client *rcon.Client } func (c *connection) Execute(command string) (string, 
error) { - packet, err := c.rcon.Execute(command) + packet, err := c.client.Execute(command) if err != nil { return "", err } return packet.Body, nil } -func parsePlayers(input string) ([]string, error) { +func parsePlayers(input string) []string { parts := strings.SplitAfterN(input, ":", 2) if len(parts) != 2 { - return []string{}, nil + return []string{} } names := strings.Split(parts[1], ",") @@ -157,9 +145,8 @@ func parsePlayers(input string) ([]string, error) { continue } players = append(players, name) - } - return players, nil + return players } // Score is an individual tracked scoreboard stat. @@ -168,9 +155,9 @@ type Score struct { Value int64 } -func parseScores(input string) ([]Score, error) { +func parseScores(input string) []Score { if strings.Contains(input, "has no scores") { - return []Score{}, nil + return []Score{} } // Detect Minecraft <= 1.12 @@ -201,5 +188,6 @@ func parseScores(input string) ([]Score, error) { } scores = append(scores, score) } - return scores, nil + + return scores } diff --git a/plugins/inputs/minecraft/client_test.go b/plugins/inputs/minecraft/client_test.go index 767a0c30ef5d3..59db9bf34a8d6 100644 --- a/plugins/inputs/minecraft/client_test.go +++ b/plugins/inputs/minecraft/client_test.go @@ -98,9 +98,7 @@ func TestClient_Player(t *testing.T) { conn: &MockConnection{commands: tt.commands}, } - client, err := NewClient(connector) - require.NoError(t, err) - + client := newClient(connector) actual, err := client.Players() require.NoError(t, err) @@ -183,9 +181,7 @@ func TestClient_Scores(t *testing.T) { conn: &MockConnection{commands: tt.commands}, } - client, err := NewClient(connector) - require.NoError(t, err) - + client := newClient(connector) actual, err := client.Scores(tt.player) require.NoError(t, err) diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index f9e49e6e62d4e..6efce2ba5c4b1 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -32,11 +32,11 @@ const ( // Rcon package errors. var ( - ErrInvalidWrite = errors.New("Failed to write the payload correctly to remote connection.") - ErrInvalidRead = errors.New("Failed to read the response correctly from remote connection.") - ErrInvalidChallenge = errors.New("Server failed to mirror request challenge.") - ErrUnauthorizedRequest = errors.New("Client not authorized to remote server.") - ErrFailedAuthorization = errors.New("Failed to authorize to the remote server.") + ErrInvalidWrite = errors.New("failed to write the payload correctly to remote connection") + ErrInvalidRead = errors.New("failed to read the response correctly from remote connection") + ErrInvalidChallenge = errors.New("server failed to mirror request challenge") + ErrUnauthorizedRequest = errors.New("client not authorized to remote server") + ErrFailedAuthorization = errors.New("failed to authorize to the remote server") ) type Client struct { @@ -62,20 +62,24 @@ type Packet struct { // Write method fails to write the header bytes in their little // endian byte order. 
func (p Packet) Compile() (payload []byte, err error) { - var size int32 = p.Header.Size + var size = p.Header.Size var buffer bytes.Buffer var padding [PacketPaddingSize]byte if err = binary.Write(&buffer, binary.LittleEndian, &size); nil != err { - return + return nil, err } else if err = binary.Write(&buffer, binary.LittleEndian, &p.Header.Challenge); nil != err { - return + return nil, err } else if err = binary.Write(&buffer, binary.LittleEndian, &p.Header.Type); nil != err { - return + return nil, err } - buffer.WriteString(p.Body) - buffer.Write(padding[:]) + if _, err = buffer.WriteString(p.Body); err != nil { + return nil, err + } + if _, err = buffer.Write(padding[:]); err != nil { + return nil, err + } return buffer.Bytes(), nil } @@ -91,16 +95,13 @@ func NewPacket(challenge, typ int32, body string) (packet *Packet) { // or a potential error. func (c *Client) Authorize(password string) (response *Packet, err error) { if response, err = c.Send(Auth, password); nil == err { - if response.Header.Type == AuthResponse { - c.Authorized = true - } else { - err = ErrFailedAuthorization - response = nil - return + if response.Header.Type != AuthResponse { + return nil, ErrFailedAuthorization } + c.Authorized = true } - return + return response, err } // Execute calls Send with the appropriate command type and the provided @@ -110,90 +111,95 @@ func (c *Client) Execute(command string) (response *Packet, err error) { return c.Send(Exec, command) } -// Sends accepts the commands type and its string to execute to the clients server, +// Send accepts the commands type and its string to execute to the clients server, // creating a packet with a random challenge id for the server to mirror, // and compiling its payload bytes in the appropriate order. The response is // decompiled from its bytes into a Packet type for return. An error is returned // if send fails. -func (c *Client) Send(typ int32, command string) (response *Packet, err error) { +func (c *Client) Send(typ int32, command string) (*Packet, error) { if typ != Auth && !c.Authorized { - err = ErrUnauthorizedRequest - return + return nil, ErrUnauthorizedRequest } // Create a random challenge for the server to mirror in its response. 
var challenge int32 - binary.Read(rand.Reader, binary.LittleEndian, &challenge) + if err := binary.Read(rand.Reader, binary.LittleEndian, &challenge); nil != err { + return nil, err + } // Create the packet from the challenge, typ and command // and compile it to its byte payload packet := NewPacket(challenge, typ, command) payload, err := packet.Compile() + if nil != err { + return nil, err + } - var n int - + n, err := c.Connection.Write(payload) if nil != err { - return - } else if n, err = c.Connection.Write(payload); nil != err { - return - } else if n != len(payload) { - err = ErrInvalidWrite - return + return nil, err + } + if n != len(payload) { + return nil, ErrInvalidWrite } var header Header - - if err = binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { - return + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { + return nil, err } if packet.Header.Type == Auth && header.Type == ResponseValue { // Discard, empty SERVERDATA_RESPONSE_VALUE from authorization. - c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))) + if _, err := c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))); nil != err { + return nil, err + } // Reread the packet header. - if err = binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { - return - } else if err = binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { - return + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Size); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Challenge); nil != err { + return nil, err + } + if err := binary.Read(c.Connection, binary.LittleEndian, &header.Type); nil != err { + return nil, err } } if header.Challenge != packet.Header.Challenge { - err = ErrInvalidChallenge - return + return nil, ErrInvalidChallenge } body := make([]byte, header.Size-int32(PacketHeaderSize)) n, err = c.Connection.Read(body) - for n < len(body) { var nBytes int nBytes, err = c.Connection.Read(body[n:]) if err != nil { - return + return nil, err } n += nBytes } + // Shouldn't this be moved up to the first read? 
if nil != err { - return - } else if n != len(body) { - err = ErrInvalidRead - return + return nil, err + } + if n != len(body) { + return nil, ErrInvalidRead } - response = new(Packet) + response := new(Packet) response.Header = header response.Body = strings.TrimRight(string(body), TerminationSequence) - return + return response, nil } // NewClient creates a new Client type, creating the connection @@ -204,5 +210,5 @@ func NewClient(host string, port int) (client *Client, err error) { client.Host = host client.Port = port client.Connection, err = net.Dial("tcp", fmt.Sprintf("%v:%v", client.Host, client.Port)) - return + return client, err } diff --git a/plugins/inputs/minecraft/minecraft.go b/plugins/inputs/minecraft/minecraft.go index 0de79d94a3c77..490bf188f06bd 100644 --- a/plugins/inputs/minecraft/minecraft.go +++ b/plugins/inputs/minecraft/minecraft.go @@ -1,23 +1,16 @@ +//go:generate ../../../tools/readme_config_includer/generator package minecraft import ( + _ "embed" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) -const sampleConfig = ` - ## Address of the Minecraft server. - # server = "localhost" - - ## Server RCON Port. - # port = "25575" - - ## Server RCON Password. - password = "" - - ## Uncomment to remove deprecated metric components. - # tagdrop = ["server"] -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string // Client is a client for the Minecraft server. type Client interface { @@ -40,27 +33,14 @@ type Minecraft struct { client Client } -func (s *Minecraft) Description() string { - return "Collects scores from a Minecraft server's scoreboard using the RCON protocol" -} - -func (s *Minecraft) SampleConfig() string { +func (*Minecraft) SampleConfig() string { return sampleConfig } func (s *Minecraft) Gather(acc telegraf.Accumulator) error { if s.client == nil { - connector, err := NewConnector(s.Server, s.Port, s.Password) - if err != nil { - return err - } - - client, err := NewClient(connector) - if err != nil { - return err - } - - s.client = client + connector := newConnector(s.Server, s.Port, s.Password) + s.client = newClient(connector) } players, err := s.client.Players() diff --git a/plugins/inputs/minecraft/sample.conf b/plugins/inputs/minecraft/sample.conf new file mode 100644 index 0000000000000..0e5e878b3e948 --- /dev/null +++ b/plugins/inputs/minecraft/sample.conf @@ -0,0 +1,13 @@ +# Collects scores from a Minecraft server's scoreboard using the RCON protocol +[[inputs.minecraft]] + ## Address of the Minecraft server. + # server = "localhost" + + ## Server RCON Port. + # port = "25575" + + ## Server RCON Password. + password = "" + + ## Uncomment to remove deprecated metric components. + # tagdrop = ["server"] diff --git a/plugins/inputs/mock/README.md b/plugins/inputs/mock/README.md new file mode 100644 index 0000000000000..eb889de1e9fbb --- /dev/null +++ b/plugins/inputs/mock/README.md @@ -0,0 +1,71 @@ +# Mock Data Input Plugin + +The mock input plugin generates random data based on a selection of different +algorithms. For example, it can produce random data between a set of values, +fake stock data, sine waves, and step-wise values. + +Additionally, users can set the measurement name and tags used to whatever is +required to mock their situation. 
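As an illustration of the stock algorithm mentioned above, here is a minimal, standalone sketch (price and volatility values are assumed) of the random walk that the plugin's generateStockPrice implementation, shown later in this patch, performs on every gather:

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	price, volatility := 50.0, 0.2
	for i := 0; i < 5; i++ {
		// Random walk: noise is uniform in [-1, 1).
		noise := 2 * (rand.Float64() - 0.5)
		price += price * volatility * noise
		// Mirrors the plugin's clamp that keeps prices positive.
		if price < 1.0 {
			price = 1.0
		}
		fmt.Printf("tick %d: %.2f\n", i, price)
	}
}
```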
+ +## Configuration + +```toml @sample.conf +# Generate metrics for test and demonstration purposes +[[inputs.mock]] + ## Set the metric name to use for reporting + metric_name = "mock" + + ## Optional string key-value pairs of tags to add to all metrics + # [inputs.mock.tags] + # "key" = "value" + + ## One or more mock data fields *must* be defined. + ## + ## [[inputs.mock.constant]] + ## name = "constant" + ## value = value_of_any_type + ## [[inputs.mock.random]] + ## name = "rand" + ## min = 1.0 + ## max = 6.0 + ## [[inputs.mock.sine_wave]] + ## name = "wave" + ## amplitude = 1.0 + ## period = 0.5 + ## [[inputs.mock.step]] + ## name = "plus_one" + ## start = 0.0 + ## step = 1.0 + ## [[inputs.mock.stock]] + ## name = "abc" + ## price = 50.00 + ## volatility = 0.2 +``` + +The mock plugin only requires that: + +1) Metric name is set +2) One of the data field algorithms is defined + +## Available Algorithms + +The available algorithms for generating mock data include: + +* Constant - generate a field with the given value of type string, float, int or bool +* Random Float - generate a random float, inclusive of min and max +* Sine Wave - produce a sine wave with a certain amplitude and period +* Step - always add the step value, negative values accepted +* Stock - generate fake, stock-like price values based on a volatility variable + +## Example Output + +The following example shows all available algorithms configured with an +additional two tags as well: + +```s +mock_sensors,building=5A,site=FTC random=4.875966794516125,abc=50,wave=0,plus_one=0 1632170840000000000 +mock_sensors,building=5A,site=FTC random=5.738651873834452,abc=45.095549448434774,wave=5.877852522924732,plus_one=1 1632170850000000000 +mock_sensors,building=5A,site=FTC random=1.0429328917205203,abc=51.928560083072924,wave=9.510565162951535,plus_one=2 1632170860000000000 +mock_sensors,building=5A,site=FTC random=5.290188595384418,abc=44.41090520217027,wave=9.510565162951536,plus_one=3 1632170870000000000 +mock_sensors,building=5A,site=FTC random=2.0724967227069135,abc=47.212167806890314,wave=5.877852522924733,plus_one=4 1632170880000000000 +``` diff --git a/plugins/inputs/mock/mock.go b/plugins/inputs/mock/mock.go new file mode 100644 index 0000000000000..3861a18c7dcb4 --- /dev/null +++ b/plugins/inputs/mock/mock.go @@ -0,0 +1,146 @@ +//go:generate ../../../tools/readme_config_includer/generator +package mock + +import ( + _ "embed" + "math" + "math/rand" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +type Mock struct { + counter int64 + + MetricName string `toml:"metric_name"` + Tags map[string]string `toml:"tags"` + + Constant []*constant `toml:"constant"` + Random []*random `toml:"random"` + Step []*step `toml:"step"` + Stock []*stock `toml:"stock"` + SineWave []*sineWave `toml:"sine_wave"` +} + +type constant struct { + Name string `toml:"name"` + Value interface{} `toml:"value"` +} + +type random struct { + Name string `toml:"name"` + Min float64 `toml:"min"` + Max float64 `toml:"max"` +} + +type sineWave struct { + Name string `toml:"name"` + Amplitude float64 `toml:"amplitude"` + Period float64 `toml:"period"` +} + +type step struct { + latest float64 + + Name string `toml:"name"` + Start float64 `toml:"start"` + Step float64 `toml:"step"` +} + +type stock struct { + latest float64 + + Name string `toml:"name"` + Price float64 `toml:"price"` + Volatility float64 `toml:"volatility"` +} + +func (*Mock) SampleConfig() string { + return sampleConfig +} + +func (m *Mock) Init() error { + rand.Seed(time.Now().UnixNano()) + return nil +} + +func (m *Mock) Gather(acc telegraf.Accumulator) error { + fields := make(map[string]interface{}) + m.generateRandomFloat64(fields) + m.generateStockPrice(fields) + m.generateSineWave(fields) + m.generateStep(fields) + + for _, c := range m.Constant { + fields[c.Name] = c.Value + } + + tags := make(map[string]string) + for key, value := range m.Tags { + tags[key] = value + } + + acc.AddFields(m.MetricName, fields, tags) + + m.counter++ + + return nil +} + +// Generate random value between min and max, inclusively +func (m *Mock) generateRandomFloat64(fields map[string]interface{}) { + for _, random := range m.Random { + fields[random.Name] = random.Min + rand.Float64()*(random.Max-random.Min) + } +} + +// Create sine waves +func (m *Mock) generateSineWave(fields map[string]interface{}) { + for _, field := range m.SineWave { + fields[field.Name] = math.Sin((float64(m.counter) * field.Period * math.Pi)) * field.Amplitude + } +} + +// Begin at start value and then add step value every tick +func (m *Mock) generateStep(fields map[string]interface{}) { + for _, step := range m.Step { + if m.counter == 0 { + step.latest = step.Start + } else { + step.latest += step.Step + } + + fields[step.Name] = step.latest + } +} + +// Begin at start price and then generate random value +func (m *Mock) generateStockPrice(fields map[string]interface{}) { + for _, stock := range m.Stock { + if stock.latest == 0.0 { + stock.latest = stock.Price + } else { + noise := 2 * (rand.Float64() - 0.5) + stock.latest = stock.latest + (stock.latest * stock.Volatility * noise) + + // avoid going below zero + if stock.latest < 1.0 { + stock.latest = 1.0 + } + } + + fields[stock.Name] = stock.latest + } +} + +func init() { + inputs.Add("mock", func() telegraf.Input { + return &Mock{} + }) +} diff --git a/plugins/inputs/mock/mock_test.go b/plugins/inputs/mock/mock_test.go new file mode 100644 index 0000000000000..070829bc1fd62 --- /dev/null +++ b/plugins/inputs/mock/mock_test.go @@ -0,0 +1,106 @@ +package mock + +import ( + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGather(t *testing.T) { + testConstantString := &constant{ + Name: "constant_string", + Value: "a string", + } + testConstantFloat := &constant{ + Name: "constant_float", + Value: 3.1415, + } + testConstantInt := &constant{ + Name: "constant_int", + Value: 42, + } + testConstantBool := &constant{ + Name: 
"constant_bool", + Value: true, + } + testRandom := &random{ + Name: "random", + Min: 1.0, + Max: 6.0, + } + testSineWave := &sineWave{ + Name: "sine", + Amplitude: 1.0, + Period: 0.5, + } + testStep := &step{ + Name: "step", + Start: 0.0, + Step: 1.0, + } + testStock := &stock{ + Name: "abc", + Price: 50.00, + Volatility: 0.2, + } + + tags := map[string]string{ + "buildling": "tbd", + "site": "nowhere", + } + + m := &Mock{ + MetricName: "test", + Tags: tags, + + Constant: []*constant{testConstantString, testConstantFloat, testConstantInt, testConstantBool}, + Random: []*random{testRandom}, + SineWave: []*sineWave{testSineWave}, + Step: []*step{testStep}, + Stock: []*stock{testStock}, + } + + var acc testutil.Accumulator + require.NoError(t, m.Gather(&acc)) + + require.Len(t, acc.Metrics, 1) + + metric := acc.Metrics[0] + require.Equal(t, "test", metric.Measurement) + require.Equal(t, tags, metric.Tags) + for k, v := range metric.Fields { + switch k { + case "abc": + require.Equal(t, 50.0, v) + case "constant_string": + require.Equal(t, testConstantString.Value, v) + case "constant_float": + require.Equal(t, testConstantFloat.Value, v) + case "constant_int": + require.Equal(t, testConstantInt.Value, v) + case "constant_bool": + require.Equal(t, testConstantBool.Value, v) + case "random": + require.GreaterOrEqual(t, 6.0, v) + require.LessOrEqual(t, 1.0, v) + case "sine": + require.Equal(t, 0.0, v) + case "step": + require.Equal(t, 0.0, v) + default: + require.Failf(t, "unexpected field %q", k) + } + } +} + +func TestGatherEmpty(t *testing.T) { + m := &Mock{ + MetricName: "test_empty", + } + + var acc testutil.Accumulator + require.NoError(t, m.Gather(&acc)) + + acc.AssertDoesNotContainMeasurement(t, "test_empty") +} diff --git a/plugins/inputs/mock/sample.conf b/plugins/inputs/mock/sample.conf new file mode 100644 index 0000000000000..03feadf79e3c9 --- /dev/null +++ b/plugins/inputs/mock/sample.conf @@ -0,0 +1,30 @@ +# Generate metrics for test and demonstration purposes +[[inputs.mock]] + ## Set the metric name to use for reporting + metric_name = "mock" + + ## Optional string key-value pairs of tags to add to all metrics + # [inputs.mock.tags] + # "key" = "value" + + ## One or more mock data fields *must* be defined. + ## + ## [[inputs.mock.constant]] + ## name = "constant" + ## value = value_of_any_type + ## [[inputs.mock.random]] + ## name = "rand" + ## min = 1.0 + ## max = 6.0 + ## [[inputs.mock.sine_wave]] + ## name = "wave" + ## amplitude = 1.0 + ## period = 0.5 + ## [[inputs.mock.step]] + ## name = "plus_one" + ## start = 0.0 + ## step = 1.0 + ## [[inputs.mock.stock]] + ## name = "abc" + ## price = 50.00 + ## volatility = 0.2 diff --git a/plugins/inputs/mock_Plugin.go b/plugins/inputs/mock_Plugin.go deleted file mode 100644 index 4dec121bc7b6f..0000000000000 --- a/plugins/inputs/mock_Plugin.go +++ /dev/null @@ -1,31 +0,0 @@ -package inputs - -import ( - "github.com/influxdata/telegraf" - - "github.com/stretchr/testify/mock" -) - -// MockPlugin struct should be named the same as the Plugin -type MockPlugin struct { - mock.Mock -} - -// Description will appear directly above the plugin definition in the config file -func (m *MockPlugin) Description() string { - return `This is an example plugin` -} - -// SampleConfig will populate the sample configuration portion of the plugin's configuration -func (m *MockPlugin) SampleConfig() string { - return ` sampleVar = 'foo'` -} - -// Gather defines what data the plugin will gather. 
-func (m *MockPlugin) Gather(_a0 telegraf.Accumulator) error { - ret := m.Called(_a0) - - r0 := ret.Error(0) - - return r0 -} diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md index 3c568b5e6e5e7..872823dc52f24 100644 --- a/plugins/inputs/modbus/README.md +++ b/plugins/inputs/modbus/README.md @@ -3,13 +3,14 @@ The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding Registers via Modbus TCP or Modbus RTU/ASCII. -### Configuration +## Configuration -```toml +```toml @sample_general_begin.conf @sample_register.conf @sample_request.conf @sample_general_end.conf +# Retrieve data from MODBUS slave devices [[inputs.modbus]] ## Connection Configuration ## - ## The plugin supports connections to PLCs via MODBUS/TCP or + ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or ## via serial line communication in binary (RTU) or readable (ASCII) encoding ## ## Device name @@ -36,8 +37,22 @@ Registers via Modbus TCP or Modbus RTU/ASCII. # data_bits = 8 # parity = "N" # stop_bits = 1 + + ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" + ## default behaviour is "TCP" if the controller is TCP + ## For Serial you can choose between "RTU" and "ASCII" # transmission_mode = "RTU" + ## Trace the connection to the modbus device as debug messages + ## Note: You have to enable telegraf's debug mode to see those messages! + # debug_connection = false + + ## Define the configuration schema + ## |---register -- define fields per register type in the original style (only supports one slave ID) + ## |---request -- define fields on a requests base + configuration_type = "register" + + ## --- "register" configuration style --- ## Measurements ## @@ -67,10 +82,11 @@ Registers via Modbus TCP or Modbus RTU/ASCII. ## |---BA, DCBA - Little Endian ## |---BADC - Mid-Big Endian ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) - ## FLOAT32 (deprecated), FIXED, UFIXED (fixed-point representation on input) - ## scale - the final numeric variable representation - ## address - variable address + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, + ## FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation) + ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) + ## scale - the final numeric variable representation + ## address - variable address holding_registers = [ { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, @@ -85,49 +101,365 @@ Registers via Modbus TCP or Modbus RTU/ASCII. { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, ] + + ## --- "request" configuration style --- + + ## Per request definition + ## + + ## Define a request sent to the device + ## Multiple of those requests can be defined. Data will be collated into metrics at the end of data collection. + [[inputs.modbus.request]] + ## ID of the modbus slave device to query. + ## If you need to query multiple slave-devices, create several "request" definitions. + slave_id = 1 + + ## Byte order of the data. 
+ ## |---ABCD -- Big Endian (Motorola) + ## |---DCBA -- Little Endian (Intel) + ## |---BADC -- Big Endian with byte swap + ## |---CDAB -- Little Endian with byte swap + byte_order = "ABCD" + + ## Type of the register for the request + ## Can be "coil", "discrete", "holding" or "input" + register = "coil" + + ## Name of the measurement. + ## Can be overridden by the individual field definitions. Defaults to "modbus" + # measurement = "modbus" + + ## Field definitions + ## Analog Variables, Input Registers and Holding Registers + ## address - address of the register to query. For coil and discrete inputs this is the bit address. + ## name *1 - field name + ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and + ## FLOAT32, FLOAT64 (IEEE 754 binary representation) + ## scale *1,2 - (optional) factor to scale the variable with + ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if + ## "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc). + ## measurement *1 - (optional) measurement name, defaults to the setting of the request + ## omit - (optional) omit this field. Useful to leave out single values when querying many registers + ## with a single request. Defaults to "false". + ## + ## *1: Those fields are ignored if field is omitted ("omit"=true) + ## + ## *2: These fields are ignored for both "coil" and "discrete"-input type of registers. For those register types + ## the fields are output as zero or one in UINT64 format by default. + + ## Coil / discrete input example + fields = [ + { address=0, name="motor1_run"}, + { address=1, name="jog", measurement="motor"}, + { address=2, name="motor1_stop", omit=true}, + { address=3, name="motor1_overheating"}, + ] + + [[inputs.modbus.request.tags]] + machine = "impresser" + location = "main building" + + [[inputs.modbus.request]] + ## Holding example + ## All of those examples will result in FLOAT64 field outputs + slave_id = 1 + byte_order = "DCBA" + register = "holding" + fields = [ + { address=0, name="voltage", type="INT16", scale=0.1 }, + { address=1, name="current", type="INT32", scale=0.001 }, + { address=3, name="power", type="UINT32", omit=true }, + { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" }, + { address=7, name="frequency", type="UINT32", scale=0.1 }, + { address=8, name="power_factor", type="INT64", scale=0.01 }, + ] + + [[inputs.modbus.request.tags]] + machine = "impresser" + location = "main building" + + [[inputs.modbus.request]] + ## Input example with type conversions + slave_id = 1 + byte_order = "ABCD" + register = "input" + fields = [ + { address=0, name="rpm", type="INT16" }, # will result in INT64 field + { address=1, name="temperature", type="INT16", scale=0.1 }, # will result in FLOAT64 field + { address=2, name="force", type="INT32", output="FLOAT64" }, # will result in FLOAT64 field + { address=4, name="hours", type="UINT32" }, # will result in UINT64 field + ] + + [[inputs.modbus.request.tags]] + machine = "impresser" + location = "main building" + + ## Enable workarounds required by some devices to work correctly + # [inputs.modbus.workarounds] + ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices. + # pause_between_requests = "0ms" + ## Close the connection after every gather cycle. 
Usually the plugin closes the connection after a certain + ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices) + ## from multiple instances you might want to only stay connected during gather and disconnect afterwards. + # close_connection_after_gather = false ``` -### Metrics +## Notes + +You can debug Modbus connection issues by enabling `debug_connection`. To see +those debug messages, Telegraf has to be started with debugging enabled +(i.e. with the `--debug` option). Please be aware that connection tracing will +produce a lot of messages and should __NOT__ be used in production environments. + +Please use `pause_between_requests` with care. Ensure the total gather time, +including the pause(s), does not exceed the configured collection interval. Note +that pauses add up if multiple requests are sent! + +## Configuration styles + +The modbus plugin supports multiple configuration styles that can be set using +the `configuration_type` setting. The different styles are described +below. Please note that styles cannot be mixed, i.e. only the settings belonging +to the configured `configuration_type` are used for constructing _modbus_ +requests and creation of metrics. + +Directly jump to the styles: + +- [original / register plugin style](#register-configuration-style) +- [per-request style](#request-configuration-style) + +--- + +### `register` configuration style + +This is the original style used by this plugin. It allows a per-register +configuration for a single slave-device. + +#### Usage of `data_type` + +The field `data_type` defines the representation of the data value on input from +the modbus registers. The input values are then converted from the given +`data_type` to a type that is appropriate when sending the value to the output +plugin. These output types are usually one of string, integer or +floating-point number. The size of the output type is assumed to be large enough +for all supported input types. The mapping from the input type to the output +type is fixed and cannot be configured. + +##### Integers: `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64`, `UINT64` + +These types are used for integer input values. Select the one that matches your +modbus data source. + +##### Floating Point: `FLOAT32-IEEE`, `FLOAT64-IEEE` + +Use these types if your modbus registers contain a value that is encoded in this +format. These types always include the sign, therefore no unsigned variant exists. + +##### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) + +These types are handled as an integer type on input, but are converted to +floating point representation for further processing (e.g. scaling). Use one of +these types when the input value is a decimal fixed point representation of a +non-integer value. + +Select the type `UFIXED` when the input type is declared to hold unsigned +integer values, which cannot be negative. The documentation of your modbus +device should indicate this by a term like 'uint16 containing fixed-point +representation with N decimal places'. + +Select the type `FIXED` when the input type is declared to hold signed integer +values. Your documentation of the modbus device should indicate this with a term +like 'int32 containing fixed-point representation with N decimal places'. + +(FLOAT32 is deprecated and should not be used. UFIXED provides the same +conversion from unsigned values). 
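To make the fixed-point conversion above concrete, here is a minimal sketch (the register value is hypothetical; `UFIXED` with two decimal places and `scale=0.01` assumed) of what the conversion amounts to:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Two raw bytes read from one 16-bit register, byte_order "AB".
	raw := []byte{0x04, 0xD2} // 1234 as big-endian uint16
	value := binary.BigEndian.Uint16(raw)
	// UFIXED with scale=0.01: the unsigned integer 1234 represents 12.34.
	fmt.Println(float64(value) * 0.01) // 12.34
}
```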
+ +--- -Metric are custom and configured using the `discrete_inputs`, `coils`, +### `request` configuration style + +This style can be used to specify the modbus requests directly. It enables +specifying multiple `[[inputs.modbus.request]]` sections including multiple +slave-devices. This way, _modbus_ gateway devices can be queried. Please note +that _requests_ might be split for non-consecutive addresses. If you want to +avoid this behavior please add _fields_ with the `omit` flag set, filling the +gaps between addresses. + +#### Slave device + +You can use the `slave_id` setting to specify the ID of the slave device to +query. It should be specified for each request, otherwise it defaults to +zero. Please note, only one `slave_id` can be specified per request. + +#### Byte order of the register + +The `byte_order` setting specifies the byte and word-order of the registers. It +can be set to `ABCD` for _big endian (Motorola)_ or `DCBA` for _little endian +(Intel)_ format as well as `BADC` and `CDAB` for _big endian_ or _little endian_ +with _byte swap_. + +#### Register type + +The `register` setting specifies the modbus register-set to query and can be set +to `coil`, `discrete`, `holding` or `input`. + +#### Per-request measurement setting + +You can specify the name of the measurement for the following field definitions +using the `measurement` setting. If the setting is omitted `modbus` is +used. Furthermore, the measurement value can be overridden by each field +individually. + +#### Field definitions + +Each `request` can contain a list of fields to collect from the modbus device. + +##### address + +A field is identified by an `address` that reflects the modbus register +address. You can usually find the address values for the different datapoints in +the datasheet of your modbus device. This is a mandatory setting. + +For _coil_ and _discrete input_ registers this setting specifies the __bit__ +containing the value of the field. + +##### name + +Using the `name` setting you can specify the field-name in the metric as output +by the plugin. This setting is ignored if the field's `omit` is set to `true` +and can be omitted in this case. + +__Please note:__ There cannot be multiple fields with the same `name` in one +metric identified by `measurement`, `slave_id` and `register`. + +##### register datatype + +The `type` setting specifies the datatype of the modbus register and can be +set to `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64` or `UINT64` for integer +types or `FLOAT32` and `FLOAT64` for IEEE 754 binary representations of floating +point values. Usually the datatype of the register is listed in the datasheet of +your modbus device in relation to the `address` described above. + + This setting is ignored if the field's `omit` is set to `true` or if the + `register` type is a bit-type (`coil` or `discrete`) and can be omitted in + these cases. + +##### scaling + +You can use the `scale` setting to scale the register values, e.g. if the +register contains a fixed-point value in `UINT32` format with two decimal +places. To convert the read register value to the actual value you can set +`scale=0.01`. The scale is used as a factor e.g. `field_value * scale`. + +This setting is ignored if the field's `omit` is set to `true` or if the +`register` type is a bit-type (`coil` or `discrete`) and can be omitted in these +cases. + +__Please note:__ The resulting field-type will be set to `FLOAT64` if no output +format is specified. 
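Tying the `byte_order` setting above to concrete bytes, a minimal sketch (the example bytes are assumed; only the plain big and little endian orders are shown) of how the same four register bytes decode differently:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Four bytes from two consecutive 16-bit registers.
	raw := []byte{0x00, 0x01, 0x00, 0x02}
	// byte_order = "ABCD", big endian (Motorola):
	fmt.Println(binary.BigEndian.Uint32(raw)) // 65538
	// byte_order = "DCBA", little endian (Intel):
	fmt.Println(binary.LittleEndian.Uint32(raw)) // 33554688
}
```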
+ +##### output datatype + +Using the `output` setting you can explicitly specify the output +field-datatype. The `output` type can be `INT64`, `UINT64` or `FLOAT64`. If not +set explicitly, the output type is guessed as follows: If `scale` is set to a +non-zero value, the output type is `FLOAT64`. Otherwise, the output type +corresponds to the register datatype _class_, i.e. `INT*` will result in +`INT64`, `UINT*` in `UINT64` and `FLOAT*` in `FLOAT64`. + +This setting is ignored if the field's `omit` is set to `true` or if the +`register` type is a bit-type (`coil` or `discrete`) and can be omitted in these +cases. For `coil` and `discrete` registers the field-value is output as zero or +one in `UINT16` format. + +#### per-field measurement setting + +The `measurement` setting can be used to override the measurement name on a +per-field basis. This might be useful if you want to split the fields in one +request to multiple measurements. If not specified, the value specified in the +[`request` section](#per-request-measurement-setting) or, if also omitted, +`modbus` is used. + +This setting is ignored if the field's `omit` is set to `true` and can be +omitted in this case. + +#### omitting a field + +When specifying `omit=true`, the corresponding field will be ignored when +collecting the metric but is taken into account when constructing the modbus +requests. This way, you can fill "holes" in the addresses to construct +consecutive address ranges resulting in a single request. Using a single modbus +request can be beneficial as the values are all collected at the same point in +time. + +#### Tags definitions + +Each `request` can be accompanied by tags valid for this request. + +__Please note:__ These tags take precedence over predefined tags such as `name`, +`type` or `slave_id`. + +--- + +## Metrics + +Metrics are custom and configured using the `discrete_inputs`, `coils`, `holding_register` and `input_registers` options. -### Usage of `data_type` +## Troubleshooting + +### Strange data + +Modbus documentation is often a mess. People confuse memory-address (starts at +one) and register address (starts at zero) or are unsure about the word-order +used. Furthermore, there are some non-standard implementations that also swap +the bytes within the register word (16-bit). -The field `data_type` defines the representation of the data value on input from the modbus registers. -The input values are then converted from the given `data_type` to a type that is apropriate when -sending the value to the output plugin. These output types are usually one of string, -integer or floating-point-number. The size of the output type is assumed to be large enough -for all supported input types. The mapping from the input type to the output type is fixed -and cannot be configured. +If you get an error or don't get the expected values from your device, you can +try the following steps (assuming a 32-bit value). -#### Integers: `INT16`, `UINT16`, `INT32`, `UINT32`, `INT64`, `UINT64` +If you are using a serial device and get a `permission denied` error, check the +permissions of your serial device and change them accordingly. -These types are used for integer input values. Select the one that matches your modbus data source. +In case you get an `exception '2' (illegal data address)` error you might try to +offset your `address` entries by minus one as it is very likely that there is +confusion between memory and register addresses. 
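As a concrete illustration of the off-by-one advice above (the register is hypothetical): if a datasheet documents a holding register at address `7` but reads fail with exception '2', configuring `address = 6` in the field definition is often the fix, because the datasheet counts from one while the plugin counts from zero.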
-#### Floating Point: `FLOAT32-IEEE` +If you see strange values, the `byte_order` might be wrong. You can either probe +all combinations (`ABCD`, `CDAB`, `BADC` or `DCBA`) or set `byte_order="ABCD" +data_type="UINT32"` and use the resulting value(s) in an online converter like +[this][online-converter]. This especially makes sense if you don't want to mess +with the device, deal with 64-bit values and/or don't know the `data_type` of +your register (e.g. fixed-point values vs. IEEE floating point). -Use this type if your modbus registers contain a value that is encoded in this format. This type -always includes the sign and therefore there exists no variant. +If your data still looks corrupted or nothing helps, please post your +configuration, error message and/or the output of `byte_order="ABCD" +data_type="UINT32"` to one of the telegraf support channels (forum, slack or as +an issue). -#### Fixed Point: `FIXED`, `UFIXED` (`FLOAT32`) +[online-converter]: https://www.scadacore.com/tools/programming-calculators/online-hex-converter/ -These types are handled as an integer type on input, but are converted to floating point representation -for further processing (e.g. scaling). Use one of these types when the input value is a decimal fixed point -representation of a non-integer value. +### Workarounds -Select the type `UFIXED` when the input type is declared to hold unsigned integer values, which cannot -be negative. The documentation of your modbus device should indicate this by a term like -'uint16 containing fixed-point representation with N decimal places'. +Some Modbus devices need special read characteristics when reading data and will +fail otherwise. For example, some serial devices need a pause between register +read requests. Others might only support a limited number of simultaneously +connected devices, like serial devices or some ModbusTCP devices. In case you +need to access those devices in parallel you might want to disconnect +immediately after the plugin finishes reading. -Select the type `FIXED` when the input type is declared to hold signed integer values. Your documentation -of the modbus device should indicate this with a term like 'int32 containing fixed-point representation -with N decimal places'. +To enable this plugin to also handle those "special" devices, there is the +`workarounds` configuration option. In case your documentation states certain read requirements or you get read timeouts or other read errors, you might want +to try one or more workaround options. If you find that other/more workarounds +are required for your device, please let us know. -(FLOAT32 is deprecated and should not be used any more. UFIXED provides the same conversion -from unsigned values). +In case your device needs a workaround that is not yet implemented, please open +an issue or submit a pull-request. 
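What the `pause_between_requests` workaround amounts to, as a minimal hypothetical sketch (the plugin handles this internally; `readRegisters` and the request list here are illustrative only):

```go
package main

import (
	"fmt"
	"time"
)

// readRegisters stands in for a single modbus read request.
func readRegisters(start, quantity uint16) {
	fmt.Printf("read %d registers starting at %d\n", quantity, start)
}

func main() {
	pause := 50 * time.Millisecond // pause_between_requests = "50ms"
	requests := [][2]uint16{{0, 10}, {100, 4}}
	for _, r := range requests {
		readRegisters(r[0], r[1])
		// Slow serial devices may need a breather before the next request.
		time.Sleep(pause)
	}
}
```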
-### Example Output +## Example Output ```sh $ ./telegraf -config telegraf.conf -input-filter modbus -test diff --git a/plugins/inputs/modbus/configuration.go b/plugins/inputs/modbus/configuration.go new file mode 100644 index 0000000000000..b15b51e499b62 --- /dev/null +++ b/plugins/inputs/modbus/configuration.go @@ -0,0 +1,62 @@ +package modbus + +import "fmt" + +const ( + maxQuantityDiscreteInput = uint16(2000) + maxQuantityCoils = uint16(2000) + maxQuantityInputRegisters = uint16(125) + maxQuantityHoldingRegisters = uint16(125) +) + +type Configuration interface { + Check() error + Process() (map[byte]requestSet, error) + SampleConfigPart() string +} + +func removeDuplicates(elements []uint16) []uint16 { + encountered := map[uint16]bool{} + result := []uint16{} + + for _, addr := range elements { + if !encountered[addr] { + encountered[addr] = true + result = append(result, addr) + } + } + + return result +} + +func normalizeInputDatatype(dataType string) (string, error) { + switch dataType { + case "INT16", "UINT16", "INT32", "UINT32", "INT64", "UINT64", "FLOAT32", "FLOAT64": + return dataType, nil + } + return "unknown", fmt.Errorf("unknown input type %q", dataType) +} + +func normalizeOutputDatatype(dataType string) (string, error) { + switch dataType { + case "", "native": + return "native", nil + case "INT64", "UINT64", "FLOAT64": + return dataType, nil + } + return "unknown", fmt.Errorf("unknown output type %q", dataType) +} + +func normalizeByteOrder(byteOrder string) (string, error) { + switch byteOrder { + case "ABCD", "MSW-BE", "MSW": // Big endian (Motorola) + return "ABCD", nil + case "BADC", "MSW-LE": // Big endian with bytes swapped + return "BADC", nil + case "CDAB", "LSW-BE": // Little endian with bytes swapped + return "CDAB", nil + case "DCBA", "LSW-LE", "LSW": // Little endian (Intel) + return "DCBA", nil + } + return "unknown", fmt.Errorf("unknown byte-order %q", byteOrder) +} diff --git a/plugins/inputs/modbus/configuration_register.go b/plugins/inputs/modbus/configuration_register.go new file mode 100644 index 0000000000000..87275675a0f0c --- /dev/null +++ b/plugins/inputs/modbus/configuration_register.go @@ -0,0 +1,254 @@ +package modbus + +import ( + _ "embed" + "fmt" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample_register.conf +var sampleConfigPartPerRegister string + +type fieldDefinition struct { + Measurement string `toml:"measurement"` + Name string `toml:"name"` + ByteOrder string `toml:"byte_order"` + DataType string `toml:"data_type"` + Scale float64 `toml:"scale"` + Address []uint16 `toml:"address"` +} + +type ConfigurationOriginal struct { + SlaveID byte `toml:"slave_id"` + DiscreteInputs []fieldDefinition `toml:"discrete_inputs"` + Coils []fieldDefinition `toml:"coils"` + HoldingRegisters []fieldDefinition `toml:"holding_registers"` + InputRegisters []fieldDefinition `toml:"input_registers"` +} + +func (c *ConfigurationOriginal) SampleConfigPart() string { + return sampleConfigPartPerRegister +} + +func (c *ConfigurationOriginal) Check() error { + if err := c.validateFieldDefinitions(c.DiscreteInputs, cDiscreteInputs); err != nil { + return err + } + + if err := c.validateFieldDefinitions(c.Coils, cCoils); err != nil { + return err + } + + if err := c.validateFieldDefinitions(c.HoldingRegisters, cHoldingRegisters); err != nil { + return err + } + + return c.validateFieldDefinitions(c.InputRegisters, cInputRegisters) +} + +func (c *ConfigurationOriginal) Process() (map[byte]requestSet, error) { + coil, err := c.initRequests(c.Coils, maxQuantityCoils) + if err != nil { + return nil, err + } + + discrete, err := c.initRequests(c.DiscreteInputs, maxQuantityDiscreteInput) + if err != nil { + return nil, err + } + + holding, err := c.initRequests(c.HoldingRegisters, maxQuantityHoldingRegisters) + if err != nil { + return nil, err + } + + input, err := c.initRequests(c.InputRegisters, maxQuantityInputRegisters) + if err != nil { + return nil, err + } + + return map[byte]requestSet{ + c.SlaveID: { + coil: coil, + discrete: discrete, + holding: holding, + input: input, + }, + }, nil +} + +func (c *ConfigurationOriginal) initRequests(fieldDefs []fieldDefinition, maxQuantity uint16) ([]request, error) { + fields, err := c.initFields(fieldDefs) + if err != nil { + return nil, err + } + return groupFieldsToRequests(fields, nil, maxQuantity), nil +} + +func (c *ConfigurationOriginal) initFields(fieldDefs []fieldDefinition) ([]field, error) { + // Construct the fields from the field definitions + fields := make([]field, 0, len(fieldDefs)) + for _, def := range fieldDefs { + f, err := c.newFieldFromDefinition(def) + if err != nil { + return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err) + } + fields = append(fields, f) + } + + return fields, nil +} + +func (c *ConfigurationOriginal) newFieldFromDefinition(def fieldDefinition) (field, error) { + // Check if the addresses are consecutive + expected := def.Address[0] + for _, current := range def.Address[1:] { + expected++ + if current != expected { + return field{}, fmt.Errorf("addresses of field %q are not consecutive", def.Name) + } + } + + // Initialize the field + f := field{ + measurement: def.Measurement, + name: def.Name, + address: def.Address[0], + length: uint16(len(def.Address)), + } + if def.DataType != "" { + inType, err := c.normalizeInputDatatype(def.DataType, len(def.Address)) + if err != nil { + return f, err + } + outType, err := c.normalizeOutputDatatype(def.DataType) + if err != nil { + return f, err + } + byteOrder, err := c.normalizeByteOrder(def.ByteOrder) + if err != nil { + return f, err + } + + f.converter, err = determineConverter(inType, byteOrder, outType, def.Scale) + if err != nil { + return f, err + } + } + + return f, nil +} + +func (c *ConfigurationOriginal) 
validateFieldDefinitions(fieldDefs []fieldDefinition, registerType string) error {
+	nameEncountered := map[string]bool{}
+	for _, item := range fieldDefs {
+		// check for an empty name
+		if item.Name == "" {
+			return fmt.Errorf("empty name in '%s'", registerType)
+		}
+
+		// check for duplicate names within the measurement
+		canonicalName := item.Measurement + "." + item.Name
+		if nameEncountered[canonicalName] {
+			return fmt.Errorf("name '%s' is duplicated in measurement '%s' of '%s'", item.Name, item.Measurement, registerType)
+		}
+		nameEncountered[canonicalName] = true
+
+		if registerType == cInputRegisters || registerType == cHoldingRegisters {
+			// check byte order
+			switch item.ByteOrder {
+			case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB":
+			default:
+				return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, registerType, item.Name)
+			}
+
+			// check data type
+			switch item.DataType {
+			case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT64-IEEE", "FLOAT32", "FIXED", "UFIXED":
+			default:
+				return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, registerType, item.Name)
+			}
+
+			// check scale
+			if item.Scale == 0.0 {
+				return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, registerType, item.Name)
+			}
+		}
+
+		// check address
+		if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 {
+			return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name)
+		}
+
+		if registerType == cInputRegisters || registerType == cHoldingRegisters {
+			if 2*len(item.Address) != len(item.ByteOrder) {
+				return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, registerType, item.Name)
+			}
+
+			// check for duplicate addresses
+			if len(item.Address) > len(removeDuplicates(item.Address)) {
+				return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, registerType, item.Name)
+			}
+		} else if len(item.Address) != 1 {
+			return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), registerType, item.Name)
+		}
+	}
+	return nil
+}
+
+func (c *ConfigurationOriginal) normalizeInputDatatype(dataType string, words int) (string, error) {
+	// Handle our special types by mapping them to the generic input types of
+	// the matching length
+	switch dataType {
+	case "FIXED":
+		switch words {
+		case 1:
+			return "INT16", nil
+		case 2:
+			return "INT32", nil
+		case 4:
+			return "INT64", nil
+		default:
+			return "unknown", fmt.Errorf("invalid length %d for type %q", words, dataType)
+		}
+	case "FLOAT32", "UFIXED":
+		switch words {
+		case 1:
+			return "UINT16", nil
+		case 2:
+			return "UINT32", nil
+		case 4:
+			return "UINT64", nil
+		default:
+			return "unknown", fmt.Errorf("invalid length %d for type %q", words, dataType)
+		}
+	case "FLOAT32-IEEE":
+		return "FLOAT32", nil
+	case "FLOAT64-IEEE":
+		return "FLOAT64", nil
+	}
+	return normalizeInputDatatype(dataType)
+}
+
+func (c *ConfigurationOriginal) normalizeOutputDatatype(dataType string) (string, error) {
+	// Handle our special types
+	switch dataType {
+	case "FIXED", "FLOAT32", "UFIXED":
+		return "FLOAT64", nil
+	}
+	return normalizeOutputDatatype("native")
+}
+
+func (c *ConfigurationOriginal) normalizeByteOrder(byteOrder string) (string, error) {
+	// Handle our special types
+	switch byteOrder {
+	case "AB", "ABCDEFGH":
+		return "ABCD", nil
+	case "BADCFEHG":
+		return "BADC", nil
+	case "GHEFCDAB":
+		return "CDAB", nil
+	case "BA", "HGFEDCBA":
+		return "DCBA", nil
+	}
+ return normalizeByteOrder(byteOrder) +} diff --git a/plugins/inputs/modbus/configuration_request.go b/plugins/inputs/modbus/configuration_request.go new file mode 100644 index 0000000000000..e9ab1f57bdfab --- /dev/null +++ b/plugins/inputs/modbus/configuration_request.go @@ -0,0 +1,306 @@ +package modbus + +import ( + _ "embed" + "fmt" + "hash/maphash" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample_request.conf +var sampleConfigPartPerRequest string + +type requestFieldDefinition struct { + Address uint16 `toml:"address"` + Name string `toml:"name"` + InputType string `toml:"type"` + Scale float64 `toml:"scale"` + OutputType string `toml:"output"` + Measurement string `toml:"measurement"` + Omit bool `toml:"omit"` +} + +type requestDefinition struct { + SlaveID byte `toml:"slave_id"` + ByteOrder string `toml:"byte_order"` + RegisterType string `toml:"register"` + Measurement string `toml:"measurement"` + Fields []requestFieldDefinition `toml:"fields"` + Tags map[string]string `toml:"tags"` +} + +type ConfigurationPerRequest struct { + Requests []requestDefinition `toml:"request"` +} + +func (c *ConfigurationPerRequest) SampleConfigPart() string { + return sampleConfigPartPerRequest +} + +func (c *ConfigurationPerRequest) Check() error { + seed := maphash.MakeSeed() + seenFields := make(map[uint64]bool) + + for _, def := range c.Requests { + // Check byte order of the data + switch def.ByteOrder { + case "": + def.ByteOrder = "ABCD" + case "ABCD", "DCBA", "BADC", "CDAB", "MSW-BE", "MSW-LE", "LSW-LE", "LSW-BE": + default: + return fmt.Errorf("unknown byte-order %q", def.ByteOrder) + } + + // Check register type + switch def.RegisterType { + case "": + def.RegisterType = "holding" + case "coil", "discrete", "holding", "input": + default: + return fmt.Errorf("unknown register-type %q", def.RegisterType) + } + + // Set the default for measurement if required + if def.Measurement == "" { + def.Measurement = "modbus" + } + + // Check the fields + for fidx, f := range def.Fields { + // Check the input type for all fields except the bit-field ones. + // We later need the type (even for omitted fields) to determine the length. 
+ if def.RegisterType == cHoldingRegisters || def.RegisterType == cInputRegisters { + switch f.InputType { + case "INT16", "UINT16", "INT32", "UINT32", "INT64", "UINT64", "FLOAT32", "FLOAT64": + default: + return fmt.Errorf("unknown register data-type %q for field %q", f.InputType, f.Name) + } + } + + // Other properties don't need to be checked for omitted fields + if f.Omit { + continue + } + + // Name is mandatory + if f.Name == "" { + return fmt.Errorf("empty field name in request for slave %d", def.SlaveID) + } + + // Check fields only relevant for non-bit register types + if def.RegisterType == cHoldingRegisters || def.RegisterType == cInputRegisters { + // Check output type + switch f.OutputType { + case "", "INT64", "UINT64", "FLOAT64": + default: + return fmt.Errorf("unknown output data-type %q for field %q", f.OutputType, f.Name) + } + } + + // Handle the default for measurement + if f.Measurement == "" { + f.Measurement = def.Measurement + } + def.Fields[fidx] = f + + // Check for duplicate field definitions + id, err := c.fieldID(seed, def.SlaveID, def.RegisterType, def.Measurement, f.Name) + if err != nil { + return fmt.Errorf("cannot determine field id for %q: %v", f.Name, err) + } + if seenFields[id] { + return fmt.Errorf("field %q duplicated in measurement %q (slave %d/%q)", f.Name, f.Measurement, def.SlaveID, def.RegisterType) + } + seenFields[id] = true + } + } + + return nil +} + +func (c *ConfigurationPerRequest) Process() (map[byte]requestSet, error) { + result := map[byte]requestSet{} + + for _, def := range c.Requests { + // Set default + if def.RegisterType == "" { + def.RegisterType = "holding" + } + + // Construct the fields + isTyped := def.RegisterType == "holding" || def.RegisterType == "input" + fields, err := c.initFields(def.Fields, isTyped, def.ByteOrder) + if err != nil { + return nil, err + } + + // Make sure we have a set to work with + set, found := result[def.SlaveID] + if !found { + set = requestSet{ + coil: []request{}, + discrete: []request{}, + holding: []request{}, + input: []request{}, + } + } + + switch def.RegisterType { + case "coil": + requests := groupFieldsToRequests(fields, def.Tags, maxQuantityCoils) + set.coil = append(set.coil, requests...) + case "discrete": + requests := groupFieldsToRequests(fields, def.Tags, maxQuantityDiscreteInput) + set.discrete = append(set.discrete, requests...) + case "holding": + requests := groupFieldsToRequests(fields, def.Tags, maxQuantityHoldingRegisters) + set.holding = append(set.holding, requests...) + case "input": + requests := groupFieldsToRequests(fields, def.Tags, maxQuantityInputRegisters) + set.input = append(set.input, requests...) 
+ default: + return nil, fmt.Errorf("unknown register type %q", def.RegisterType) + } + result[def.SlaveID] = set + } + + return result, nil +} + +func (c *ConfigurationPerRequest) initFields(fieldDefs []requestFieldDefinition, typed bool, byteOrder string) ([]field, error) { + // Construct the fields from the field definitions + fields := make([]field, 0, len(fieldDefs)) + for _, def := range fieldDefs { + f, err := c.newFieldFromDefinition(def, typed, byteOrder) + if err != nil { + return nil, fmt.Errorf("initializing field %q failed: %v", def.Name, err) + } + fields = append(fields, f) + } + + return fields, nil +} + +func (c *ConfigurationPerRequest) newFieldFromDefinition(def requestFieldDefinition, typed bool, byteOrder string) (field, error) { + var err error + + fieldLength := uint16(1) + if typed { + if fieldLength, err = c.determineFieldLength(def.InputType); err != nil { + return field{}, err + } + } + + // Initialize the field + f := field{ + measurement: def.Measurement, + name: def.Name, + address: def.Address, + length: fieldLength, + omit: def.Omit, + } + + // No more processing for un-typed (coil and discrete registers) or omitted fields + if !typed || def.Omit { + return f, nil + } + + // Automagically determine the output type... + if def.OutputType == "" { + if def.Scale == 0.0 { + // For non-scaling cases we should choose the output corresponding to the input class + // i.e. INT64 for INT*, UINT64 for UINT* etc. + var err error + if def.OutputType, err = c.determineOutputDatatype(def.InputType); err != nil { + return field{}, err + } + } else { + // For scaling cases we always want FLOAT64 by default + def.OutputType = "FLOAT64" + } + } + + // Setting default byte-order + if byteOrder == "" { + byteOrder = "ABCD" + } + + // Normalize the data relevant for determining the converter + inType, err := normalizeInputDatatype(def.InputType) + if err != nil { + return field{}, err + } + outType, err := normalizeOutputDatatype(def.OutputType) + if err != nil { + return field{}, err + } + order, err := normalizeByteOrder(byteOrder) + if err != nil { + return field{}, err + } + + f.converter, err = determineConverter(inType, order, outType, def.Scale) + if err != nil { + return field{}, err + } + + return f, nil +} + +func (c *ConfigurationPerRequest) fieldID(seed maphash.Seed, slave byte, register, measurement, name string) (uint64, error) { + var mh maphash.Hash + mh.SetSeed(seed) + + if err := mh.WriteByte(slave); err != nil { + return 0, err + } + if err := mh.WriteByte(0); err != nil { + return 0, err + } + if _, err := mh.WriteString(register); err != nil { + return 0, err + } + if err := mh.WriteByte(0); err != nil { + return 0, err + } + if _, err := mh.WriteString(measurement); err != nil { + return 0, err + } + if err := mh.WriteByte(0); err != nil { + return 0, err + } + if _, err := mh.WriteString(name); err != nil { + return 0, err + } + if err := mh.WriteByte(0); err != nil { + return 0, err + } + + return mh.Sum64(), nil +} + +func (c *ConfigurationPerRequest) determineOutputDatatype(input string) (string, error) { + // Handle our special types + switch input { + case "INT16", "INT32", "INT64": + return "INT64", nil + case "UINT16", "UINT32", "UINT64": + return "UINT64", nil + case "FLOAT32", "FLOAT64": + return "FLOAT64", nil + } + return "unknown", fmt.Errorf("invalid input datatype %q for determining output", input) +} + +func (c *ConfigurationPerRequest) determineFieldLength(input string) (uint16, error) { + // Handle our special types + switch input { + case 
"INT16", "UINT16": + return 1, nil + case "INT32", "UINT32", "FLOAT32": + return 2, nil + case "INT64", "UINT64", "FLOAT64": + return 4, nil + } + return 0, fmt.Errorf("invalid input datatype %q for determining field length", input) +} diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go index ec68890c5eb91..bf2537d4e35fb 100644 --- a/plugins/inputs/modbus/modbus.go +++ b/plugins/inputs/modbus/modbus.go @@ -1,22 +1,35 @@ +//go:generate ../../../tools/readme_config_includer/generator package modbus import ( - "encoding/binary" + _ "embed" "fmt" - "log" - "math" "net" "net/url" - "sort" + "strconv" "time" - mb "github.com/goburrow/modbus" + mb "github.com/grid-x/modbus" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample_general_begin.conf +var sampleConfigStart string + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample_general_end.conf +var sampleConfigEnd string + +type ModbusWorkarounds struct { + PollPause config.Duration `toml:"pause_between_requests"` + CloseAfterGather bool `toml:"close_connection_after_gather"` +} + // Modbus holds all data relevant to the plugin type Modbus struct { Name string `toml:"name"` @@ -26,43 +39,44 @@ type Modbus struct { DataBits int `toml:"data_bits"` Parity string `toml:"parity"` StopBits int `toml:"stop_bits"` - SlaveID int `toml:"slave_id"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` Retries int `toml:"busy_retries"` - RetriesWaitTime internal.Duration `toml:"busy_retries_wait"` - DiscreteInputs []fieldContainer `toml:"discrete_inputs"` - Coils []fieldContainer `toml:"coils"` - HoldingRegisters []fieldContainer `toml:"holding_registers"` - InputRegisters []fieldContainer `toml:"input_registers"` - registers []register - isConnected bool - tcpHandler *mb.TCPClientHandler - rtuHandler *mb.RTUClientHandler - asciiHandler *mb.ASCIIClientHandler - client mb.Client -} - -type register struct { - Type string - RegistersRange []registerRange - Fields []fieldContainer -} - -type fieldContainer struct { - Measurement string `toml:"measurement"` - Name string `toml:"name"` - ByteOrder string `toml:"byte_order"` - DataType string `toml:"data_type"` - Scale float64 `toml:"scale"` - Address []uint16 `toml:"address"` + RetriesWaitTime config.Duration `toml:"busy_retries_wait"` + DebugConnection bool `toml:"debug_connection"` + Workarounds ModbusWorkarounds `toml:"workarounds"` + Log telegraf.Logger `toml:"-"` + // Register configuration + ConfigurationType string `toml:"configuration_type"` + ConfigurationOriginal + ConfigurationPerRequest + + // Connection handling + client mb.Client + handler mb.ClientHandler + isConnected bool + // Request handling + requests map[byte]requestSet +} + +type fieldConverterFunc func(bytes []byte) interface{} + +type requestSet struct { + coil []request + discrete []request + holding []request + input []request +} + +type field struct { + measurement string + name string + address uint16 + length uint16 + omit bool + converter fieldConverterFunc value interface{} } -type registerRange struct { - address uint16 - length uint16 -} - const ( cDiscreteInputs = "discrete_input" cCoils = "coil" @@ -70,96 +84,20 @@ const ( cInputRegisters = "input_register" 
) -const description = `Retrieve data from MODBUS slave devices` -const sampleConfig = ` - ## Connection Configuration - ## - ## The plugin supports connections to PLCs via MODBUS/TCP or - ## via serial line communication in binary (RTU) or readable (ASCII) encoding - ## - ## Device name - name = "Device" - - ## Slave ID - addresses a MODBUS device on the bus - ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] - slave_id = 1 - - ## Timeout for each request - timeout = "1s" - - ## Maximum number of retries and the time to wait between retries - ## when a slave-device is busy. - # busy_retries = 0 - # busy_retries_wait = "100ms" - - # TCP - connect via Modbus/TCP - controller = "tcp://localhost:502" - - ## Serial (RS485; RS232) - # controller = "file:///dev/ttyUSB0" - # baud_rate = 9600 - # data_bits = 8 - # parity = "N" - # stop_bits = 1 - # transmission_mode = "RTU" - - - ## Measurements - ## - - ## Digital Variables, Discrete Inputs and Coils - ## measurement - the (optional) measurement name, defaults to "modbus" - ## name - the variable name - ## address - variable address - - discrete_inputs = [ - { name = "start", address = [0]}, - { name = "stop", address = [1]}, - { name = "reset", address = [2]}, - { name = "emergency_stop", address = [3]}, - ] - coils = [ - { name = "motor1_run", address = [0]}, - { name = "motor1_jog", address = [1]}, - { name = "motor1_stop", address = [2]}, - ] - - ## Analog Variables, Input Registers and Holding Registers - ## measurement - the (optional) measurement name, defaults to "modbus" - ## name - the variable name - ## byte_order - the ordering of bytes - ## |---AB, ABCD - Big Endian - ## |---BA, DCBA - Little Endian - ## |---BADC - Mid-Big Endian - ## |---CDAB - Mid-Little Endian - ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32-IEEE (the IEEE 754 binary representation) - ## FLOAT32, FIXED, UFIXED (fixed-point representation on input) - ## scale - the final numeric variable representation - ## address - variable address - - holding_registers = [ - { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]}, - { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]}, - { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]}, - { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]}, - { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]}, - { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]}, - ] - input_registers = [ - { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, - { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, - { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, - ] -` - // SampleConfig returns a basic configuration for the plugin func (m *Modbus) SampleConfig() string { - return sampleConfig -} - -// Description returns a short description of what the plugin does -func (m *Modbus) Description() string { - return description + configs := []Configuration{} + cfgOriginal := m.ConfigurationOriginal + cfgPerRequest := m.ConfigurationPerRequest + configs = append(configs, &cfgOriginal, &cfgPerRequest) + + totalConfig := sampleConfigStart + for _, c := range configs { + totalConfig += c.SampleConfigPart() + "\n" + } + totalConfig += "\n" + totalConfig += sampleConfigEnd + return totalConfig } func (m 
*Modbus) Init() error {
@@ -172,76 +110,90 @@ func (m *Modbus) Init() error {
 		return fmt.Errorf("retries cannot be negative")
 	}
 
-	err := m.InitRegister(m.DiscreteInputs, cDiscreteInputs)
-	if err != nil {
-		return err
+	// Determine the configuration style
+	var cfg Configuration
+	switch m.ConfigurationType {
+	case "", "register":
+		cfg = &m.ConfigurationOriginal
+	case "request":
+		cfg = &m.ConfigurationPerRequest
+	default:
+		return fmt.Errorf("unknown configuration type %q", m.ConfigurationType)
 	}
 
-	err = m.InitRegister(m.Coils, cCoils)
-	if err != nil {
-		return err
+	// Check and process the configuration
+	if err := cfg.Check(); err != nil {
+		return fmt.Errorf("configuration invalid: %v", err)
 	}
 
-	err = m.InitRegister(m.HoldingRegisters, cHoldingRegisters)
+	r, err := cfg.Process()
 	if err != nil {
-		return err
+		return fmt.Errorf("cannot process configuration: %v", err)
 	}
+	m.requests = r
 
-	err = m.InitRegister(m.InputRegisters, cInputRegisters)
-	if err != nil {
-		return err
+	// Setup client
+	if err := m.initClient(); err != nil {
+		return fmt.Errorf("initializing client failed: %v", err)
 	}
 
 	return nil
 }
 
-func (m *Modbus) InitRegister(fields []fieldContainer, name string) error {
-	if len(fields) == 0 {
-		return nil
-	}
-
-	err := validateFieldContainers(fields, name)
-	if err != nil {
-		return err
+// Gather implements the telegraf plugin interface method for data accumulation
+func (m *Modbus) Gather(acc telegraf.Accumulator) error {
+	if !m.isConnected {
+		if err := m.connect(); err != nil {
+			return err
+		}
 	}
 
-	addrs := []uint16{}
-	for _, field := range fields {
-		for _, a := range field.Address {
-			addrs = append(addrs, a)
+	timestamp := time.Now()
+	for retry := 0; retry <= m.Retries; retry++ {
+		timestamp = time.Now()
+		if err := m.gatherFields(); err != nil {
+			if mberr, ok := err.(*mb.Error); ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries {
+				m.Log.Infof("Device busy! 
Retrying %d more time(s)...", m.Retries-retry) + time.Sleep(time.Duration(m.RetriesWaitTime)) + continue + } + // Show the disconnect error this way to not shadow the initial error + if discerr := m.disconnect(); discerr != nil { + m.Log.Errorf("Disconnecting failed: %v", discerr) + } + return err } + // Reading was successful, leave the retry loop + break } - addrs = removeDuplicates(addrs) - sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] }) + for slaveID, requests := range m.requests { + tags := map[string]string{ + "name": m.Name, + "type": cCoils, + "slave_id": strconv.Itoa(int(slaveID)), + } + m.collectFields(acc, timestamp, tags, requests.coil) - ii := 0 - var registersRange []registerRange + tags["type"] = cDiscreteInputs + m.collectFields(acc, timestamp, tags, requests.discrete) - // Get range of consecutive integers - // [1, 2, 3, 5, 6, 10, 11, 12, 14] - // (1, 3) , (5, 2) , (10, 3), (14 , 1) - for range addrs { - if ii < len(addrs) { - start := addrs[ii] - end := start + tags["type"] = cHoldingRegisters + m.collectFields(acc, timestamp, tags, requests.holding) - for ii < len(addrs)-1 && addrs[ii+1]-addrs[ii] == 1 { - end = addrs[ii+1] - ii++ - } - ii++ - registersRange = append(registersRange, registerRange{start, end - start + 1}) - } + tags["type"] = cInputRegisters + m.collectFields(acc, timestamp, tags, requests.input) } - m.registers = append(m.registers, register{name, registersRange, fields}) + // Disconnect after read if configured + if m.Workarounds.CloseAfterGather { + return m.disconnect() + } return nil } -// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII] -func connect(m *Modbus) error { +func (m *Modbus) initClient() error { u, err := url.Parse(m.Controller) if err != nil { return err @@ -249,481 +201,245 @@ func connect(m *Modbus) error { switch u.Scheme { case "tcp": - var host, port string - host, port, err = net.SplitHostPort(u.Host) - if err != nil { - return err - } - m.tcpHandler = mb.NewTCPClientHandler(host + ":" + port) - m.tcpHandler.Timeout = m.Timeout.Duration - m.tcpHandler.SlaveId = byte(m.SlaveID) - m.client = mb.NewClient(m.tcpHandler) - err := m.tcpHandler.Connect() + host, port, err := net.SplitHostPort(u.Host) if err != nil { return err } - m.isConnected = true - return nil - case "file": - if m.TransmissionMode == "RTU" { - m.rtuHandler = mb.NewRTUClientHandler(u.Path) - m.rtuHandler.Timeout = m.Timeout.Duration - m.rtuHandler.SlaveId = byte(m.SlaveID) - m.rtuHandler.BaudRate = m.BaudRate - m.rtuHandler.DataBits = m.DataBits - m.rtuHandler.Parity = m.Parity - m.rtuHandler.StopBits = m.StopBits - m.client = mb.NewClient(m.rtuHandler) - err := m.rtuHandler.Connect() - if err != nil { - return err + switch m.TransmissionMode { + case "RTUoverTCP": + handler := mb.NewRTUOverTCPClientHandler(host + ":" + port) + handler.Timeout = time.Duration(m.Timeout) + if m.DebugConnection { + handler.Logger = m } - m.isConnected = true - return nil - } else if m.TransmissionMode == "ASCII" { - m.asciiHandler = mb.NewASCIIClientHandler(u.Path) - m.asciiHandler.Timeout = m.Timeout.Duration - m.asciiHandler.SlaveId = byte(m.SlaveID) - m.asciiHandler.BaudRate = m.BaudRate - m.asciiHandler.DataBits = m.DataBits - m.asciiHandler.Parity = m.Parity - m.asciiHandler.StopBits = m.StopBits - m.client = mb.NewClient(m.asciiHandler) - err := m.asciiHandler.Connect() - if err != nil { - return err + m.handler = handler + case "ASCIIoverTCP": + handler := mb.NewASCIIOverTCPClientHandler(host + ":" + port) + handler.Timeout = 
time.Duration(m.Timeout) + if m.DebugConnection { + handler.Logger = m } - m.isConnected = true - return nil - } else { - return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) + m.handler = handler + default: + handler := mb.NewTCPClientHandler(host + ":" + port) + handler.Timeout = time.Duration(m.Timeout) + if m.DebugConnection { + handler.Logger = m + } + m.handler = handler } - default: - return fmt.Errorf("invalid controller") - } -} - -func disconnect(m *Modbus) error { - u, err := url.Parse(m.Controller) - if err != nil { - return err - } - - switch u.Scheme { - case "tcp": - m.tcpHandler.Close() - return nil case "file": - if m.TransmissionMode == "RTU" { - m.rtuHandler.Close() - return nil - } else if m.TransmissionMode == "ASCII" { - m.asciiHandler.Close() - return nil - } else { + switch m.TransmissionMode { + case "RTU": + handler := mb.NewRTUClientHandler(u.Path) + handler.Timeout = time.Duration(m.Timeout) + handler.BaudRate = m.BaudRate + handler.DataBits = m.DataBits + handler.Parity = m.Parity + handler.StopBits = m.StopBits + if m.DebugConnection { + handler.Logger = m + } + m.handler = handler + case "ASCII": + handler := mb.NewASCIIClientHandler(u.Path) + handler.Timeout = time.Duration(m.Timeout) + handler.BaudRate = m.BaudRate + handler.DataBits = m.DataBits + handler.Parity = m.Parity + handler.StopBits = m.StopBits + if m.DebugConnection { + handler.Logger = m + } + m.handler = handler + default: return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) } default: - return fmt.Errorf("invalid controller") + return fmt.Errorf("invalid controller %q", m.Controller) } -} - -func validateFieldContainers(t []fieldContainer, n string) error { - nameEncountered := map[string]bool{} - for _, item := range t { - //check empty name - if item.Name == "" { - return fmt.Errorf("empty name in '%s'", n) - } - //search name duplicate - canonical_name := item.Measurement + "." 
+ item.Name - if nameEncountered[canonical_name] { - return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name) - } else { - nameEncountered[canonical_name] = true - } - - if n == cInputRegisters || n == cHoldingRegisters { - // search byte order - switch item.ByteOrder { - case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": - break - default: - return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, n, item.Name) - } + m.client = mb.NewClient(m.handler) + m.isConnected = false - // search data type - switch item.DataType { - case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT32", "FIXED", "UFIXED": - break - default: - return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, n, item.Name) - } - - // check scale - if item.Scale == 0.0 { - return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, n, item.Name) - } - } - - // check address - if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { - return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) - } - - if n == cInputRegisters || n == cHoldingRegisters { - if 2*len(item.Address) != len(item.ByteOrder) { - return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, n, item.Name) - } - - // search duplicated - if len(item.Address) > len(removeDuplicates(item.Address)) { - return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, n, item.Name) - } - } else if len(item.Address) != 1 { - return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) - } - } return nil } -func removeDuplicates(elements []uint16) []uint16 { - encountered := map[uint16]bool{} - result := []uint16{} - - for v := range elements { - if encountered[elements[v]] { - } else { - encountered[elements[v]] = true - result = append(result, elements[v]) - } - } - - return result +// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII] +func (m *Modbus) connect() error { + err := m.handler.Connect() + m.isConnected = err == nil + return err } -func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) { - if rt == cDiscreteInputs { - return m.client.ReadDiscreteInputs(uint16(rr.address), uint16(rr.length)) - } else if rt == cCoils { - return m.client.ReadCoils(uint16(rr.address), uint16(rr.length)) - } else if rt == cInputRegisters { - return m.client.ReadInputRegisters(uint16(rr.address), uint16(rr.length)) - } else if rt == cHoldingRegisters { - return m.client.ReadHoldingRegisters(uint16(rr.address), uint16(rr.length)) - } else { - return []byte{}, fmt.Errorf("not Valid function") - } +func (m *Modbus) disconnect() error { + err := m.handler.Close() + m.isConnected = false + return err } -func (m *Modbus) getFields() error { - for _, register := range m.registers { - rawValues := make(map[uint16][]byte) - bitRawValues := make(map[uint16]uint16) - for _, rr := range register.RegistersRange { - address := rr.address - readValues, err := readRegisterValues(m, register.Type, rr) - if err != nil { - return err - } - - // Raw Values - if register.Type == cDiscreteInputs || register.Type == cCoils { - for _, readValue := range readValues { - for bitPosition := 0; bitPosition < 8; bitPosition++ { - bitRawValues[address] = getBitValue(readValue, bitPosition) - address = 
address + 1 - if address+1 > rr.length { - break - } - } - } - } - - // Raw Values - if register.Type == cInputRegisters || register.Type == cHoldingRegisters { - batchSize := 2 - for batchSize < len(readValues) { - rawValues[address] = readValues[0:batchSize:batchSize] - address = address + 1 - readValues = readValues[batchSize:] - } - - rawValues[address] = readValues[0:batchSize:batchSize] - } +func (m *Modbus) gatherFields() error { + for slaveID, requests := range m.requests { + m.handler.SetSlave(slaveID) + if err := m.gatherRequestsCoil(requests.coil); err != nil { + return err } - - if register.Type == cDiscreteInputs || register.Type == cCoils { - for i := 0; i < len(register.Fields); i++ { - register.Fields[i].value = bitRawValues[register.Fields[i].Address[0]] - } + if err := m.gatherRequestsDiscrete(requests.discrete); err != nil { + return err } - - if register.Type == cInputRegisters || register.Type == cHoldingRegisters { - for i := 0; i < len(register.Fields); i++ { - var values_t []byte - - for j := 0; j < len(register.Fields[i].Address); j++ { - tempArray := rawValues[register.Fields[i].Address[j]] - for x := 0; x < len(tempArray); x++ { - values_t = append(values_t, tempArray[x]) - } - } - - register.Fields[i].value = convertDataType(register.Fields[i], values_t) - } - + if err := m.gatherRequestsHolding(requests.holding); err != nil { + return err + } + if err := m.gatherRequestsInput(requests.input); err != nil { + return err } } return nil } -func getBitValue(n byte, pos int) uint16 { - return uint16(n >> uint(pos) & 0x01) -} - -func convertDataType(t fieldContainer, bytes []byte) interface{} { - switch t.DataType { - case "UINT16": - e16 := convertEndianness16(t.ByteOrder, bytes) - return scaleUint16(t.Scale, e16) - case "INT16": - e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := int16(e16) - return scaleInt16(t.Scale, f16) - case "UINT32": - e32 := convertEndianness32(t.ByteOrder, bytes) - return scaleUint32(t.Scale, e32) - case "INT32": - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := int32(e32) - return scaleInt32(t.Scale, f32) - case "UINT64": - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := format64(t.DataType, e64).(uint64) - return scaleUint64(t.Scale, f64) - case "INT64": - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := format64(t.DataType, e64).(int64) - return scaleInt64(t.Scale, f64) - case "FLOAT32-IEEE": - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := math.Float32frombits(e32) - return scaleFloat32(t.Scale, f32) - case "FIXED": - if len(bytes) == 2 { - e16 := convertEndianness16(t.ByteOrder, bytes) - f16 := int16(e16) - return scale16toFloat(t.Scale, f16) - } else if len(bytes) == 4 { - e32 := convertEndianness32(t.ByteOrder, bytes) - f32 := int32(e32) - return scale32toFloat(t.Scale, f32) - } else { - e64 := convertEndianness64(t.ByteOrder, bytes) - f64 := int64(e64) - return scale64toFloat(t.Scale, f64) - } - case "FLOAT32", "UFIXED": - if len(bytes) == 2 { - e16 := convertEndianness16(t.ByteOrder, bytes) - return scale16UtoFloat(t.Scale, e16) - } else if len(bytes) == 4 { - e32 := convertEndianness32(t.ByteOrder, bytes) - return scale32UtoFloat(t.Scale, e32) - } else { - e64 := convertEndianness64(t.ByteOrder, bytes) - return scale64UtoFloat(t.Scale, e64) +func (m *Modbus) gatherRequestsCoil(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read coil@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadCoils(request.address, request.length) + if 
err != nil { + return err } - default: - return 0 - } -} + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) + m.Log.Debugf("got coil@%v[%v]: %v", request.address, request.length, bytes) -func convertEndianness16(o string, b []byte) uint16 { - switch o { - case "AB": - return binary.BigEndian.Uint16(b) - case "BA": - return binary.LittleEndian.Uint16(b) - default: - return 0 - } -} + // Bit value handling + for i, field := range request.fields { + offset := field.address - request.address + idx := offset / 8 + bit := offset % 8 -func convertEndianness32(o string, b []byte) uint32 { - switch o { - case "ABCD": - return binary.BigEndian.Uint32(b) - case "DCBA": - return binary.LittleEndian.Uint32(b) - case "BADC": - return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:])) - case "CDAB": - return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:])) - default: - return 0 - } -} - -func convertEndianness64(o string, b []byte) uint64 { - switch o { - case "ABCDEFGH": - return binary.BigEndian.Uint64(b) - case "HGFEDCBA": - return binary.LittleEndian.Uint64(b) - case "BADCFEHG": - return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:])) - case "GHEFCDAB": - return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:])) - default: - return 0 - } -} - -func format16(f string, r uint16) interface{} { - switch f { - case "UINT16": - return r - case "INT16": - return int16(r) - default: - return r - } -} - -func format32(f string, r uint32) interface{} { - switch f { - case "UINT32": - return r - case "INT32": - return int32(r) - case "FLOAT32-IEEE": - return math.Float32frombits(r) - default: - return r - } -} + request.fields[i].value = uint16((bytes[idx] >> bit) & 0x01) + m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) + } -func format64(f string, r uint64) interface{} { - switch f { - case "UINT64": - return r - case "INT64": - return int64(r) - default: - return r + // Some (serial) devices require a pause between requests... 
+ time.Sleep(time.Until(nextRequest)) } + return nil } -func scale16toFloat(s float64, v int16) float64 { - return float64(v) * s -} - -func scale32toFloat(s float64, v int32) float64 { - return float64(float64(v) * float64(s)) -} - -func scale64toFloat(s float64, v int64) float64 { - return float64(float64(v) * float64(s)) -} - -func scale16UtoFloat(s float64, v uint16) float64 { - return float64(v) * s -} - -func scale32UtoFloat(s float64, v uint32) float64 { - return float64(float64(v) * float64(s)) -} - -func scale64UtoFloat(s float64, v uint64) float64 { - return float64(float64(v) * float64(s)) -} - -func scaleInt16(s float64, v int16) int16 { - return int16(float64(v) * s) -} - -func scaleUint16(s float64, v uint16) uint16 { - return uint16(float64(v) * s) -} - -func scaleUint32(s float64, v uint32) uint32 { - return uint32(float64(v) * float64(s)) -} - -func scaleInt32(s float64, v int32) int32 { - return int32(float64(v) * float64(s)) -} +func (m *Modbus) gatherRequestsDiscrete(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read discrete@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadDiscreteInputs(request.address, request.length) + if err != nil { + return err + } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) + m.Log.Debugf("got discrete@%v[%v]: %v", request.address, request.length, bytes) -func scaleFloat32(s float64, v float32) float32 { - return float32(float64(v) * s) -} + // Bit value handling + for i, field := range request.fields { + offset := field.address - request.address + idx := offset / 8 + bit := offset % 8 -func scaleUint64(s float64, v uint64) uint64 { - return uint64(float64(v) * float64(s)) -} + request.fields[i].value = uint16((bytes[idx] >> bit) & 0x01) + m.Log.Debugf(" field %s with bit %d @ byte %d: %v --> %v", field.name, bit, idx, (bytes[idx]>>bit)&0x01, request.fields[i].value) + } -func scaleInt64(s float64, v int64) int64 { - return int64(float64(v) * float64(s)) + // Some (serial) devices require a pause between requests... + time.Sleep(time.Until(nextRequest)) + } + return nil } -// Gather implements the telegraf plugin interface method for data accumulation -func (m *Modbus) Gather(acc telegraf.Accumulator) error { - if !m.isConnected { - err := connect(m) +func (m *Modbus) gatherRequestsHolding(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read holding@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadHoldingRegisters(request.address, request.length) if err != nil { - m.isConnected = false return err } + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) + m.Log.Debugf("got holding@%v[%v]: %v", request.address, request.length, bytes) + + // Non-bit value handling + for i, field := range request.fields { + // Determine the offset of the field values in the read array + offset := 2 * (field.address - request.address) // registers are 16bit = 2 byte + length := 2 * field.length // field length is in registers a 16bit + + // Convert the actual value + request.fields[i].value = field.converter(bytes[offset : offset+length]) + m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) + } + + // Some (serial) devices require a pause between requests... 
+ time.Sleep(time.Until(nextRequest)) } + return nil +} - timestamp := time.Now() - for retry := 0; retry <= m.Retries; retry += 1 { - timestamp = time.Now() - err := m.getFields() +func (m *Modbus) gatherRequestsInput(requests []request) error { + for _, request := range requests { + m.Log.Debugf("trying to read input@%v[%v]...", request.address, request.length) + bytes, err := m.client.ReadInputRegisters(request.address, request.length) if err != nil { - mberr, ok := err.(*mb.ModbusError) - if ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries { - log.Printf("I! [inputs.modbus] device busy! Retrying %d more time(s)...", m.Retries-retry) - time.Sleep(m.RetriesWaitTime.Duration) - continue - } - disconnect(m) - m.isConnected = false return err } - // Reading was successful, leave the retry loop - break + nextRequest := time.Now().Add(time.Duration(m.Workarounds.PollPause)) + m.Log.Debugf("got input@%v[%v]: %v", request.address, request.length, bytes) + + // Non-bit value handling + for i, field := range request.fields { + // Determine the offset of the field values in the read array + offset := 2 * (field.address - request.address) // registers are 16bit = 2 byte + length := 2 * field.length // field length is in registers a 16bit + + // Convert the actual value + request.fields[i].value = field.converter(bytes[offset : offset+length]) + m.Log.Debugf(" field %s with offset %d with len %d: %v --> %v", field.name, offset, length, bytes[offset:offset+length], request.fields[i].value) + } + + // Some (serial) devices require a pause between requests... + time.Sleep(time.Until(nextRequest)) } + return nil +} +func (m *Modbus) collectFields(acc telegraf.Accumulator, timestamp time.Time, tags map[string]string, requests []request) { grouper := metric.NewSeriesGrouper() - for _, reg := range m.registers { - tags := map[string]string{ - "name": m.Name, - "type": reg.Type, + for _, request := range requests { + // Collect tags from global and per-request + rtags := map[string]string{} + for k, v := range tags { + rtags[k] = v + } + for k, v := range request.tags { + rtags[k] = v } - for _, field := range reg.Fields { + for _, field := range request.fields { // In case no measurement was specified we use "modbus" as default measurement := "modbus" - if field.Measurement != "" { - measurement = field.Measurement + if field.measurement != "" { + measurement = field.measurement } // Group the data by series - grouper.Add(measurement, tags, timestamp, field.Name, field.value) + if err := grouper.Add(measurement, rtags, timestamp, field.name, field.value); err != nil { + acc.AddError(fmt.Errorf("cannot add field %q for measurement %q: %v", field.name, measurement, err)) + continue + } } + } - // Add the metrics grouped by series to the accumulator - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) - } + // Add the metrics grouped by series to the accumulator + for _, x := range grouper.Metrics() { + acc.AddMetric(x) } +} - return nil +// Implement the logger interface of the modbus client +func (m *Modbus) Printf(format string, v ...interface{}) { + m.Log.Debugf(format, v...) 
} // Add this plugin to telegraf diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go index 8c5241dc2aaee..5a7a7570d7b7d 100644 --- a/plugins/inputs/modbus/modbus_test.go +++ b/plugins/inputs/modbus/modbus_test.go @@ -1,12 +1,16 @@ package modbus import ( + "fmt" + "strconv" "testing" + "time" - m "github.com/goburrow/modbus" - "github.com/stretchr/testify/assert" + mb "github.com/grid-x/modbus" + "github.com/stretchr/testify/require" "github.com/tbrandon/mbserver" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" ) @@ -77,43 +81,52 @@ func TestCoils(t *testing.T) { } serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() - assert.NoError(t, err) - handler := m.NewTCPClientHandler("localhost:1502") - err = handler.Connect() - assert.NoError(t, err) + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) defer handler.Close() - client := m.NewClient(handler) + client := mb.NewClient(handler) for _, ct := range coilTests { t.Run(ct.name, func(t *testing.T) { - _, err = client.WriteMultipleCoils(ct.address, ct.quantity, ct.write) - assert.NoError(t, err) + _, err := client.WriteMultipleCoils(ct.address, ct.quantity, ct.write) + require.NoError(t, err) modbus := Modbus{ Name: "TestCoils", Controller: "tcp://localhost:1502", - SlaveID: 1, - Coils: []fieldContainer{ - { - Name: ct.name, - Address: []uint16{ct.address}, - }, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: ct.name, + Address: []uint16{ct.address}, }, } - err = modbus.Init() - assert.NoError(t, err) + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + map[string]interface{}{ct.name: ct.read}, + time.Unix(0, 0), + ), + } + var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.NoError(t, err) - assert.NotEmpty(t, modbus.registers) + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, ct.read, coil.Fields[0].value) - } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -549,60 +562,404 @@ func TestHoldingRegisters(t *testing.T) { write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF}, read: uint64(18446742686322259968), }, + { + name: "register214_to_register217_abcdefgh_float64_ieee", + address: []uint16{214, 215, 216, 217}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0xBF, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(-0.02774907295123737), + }, + { + name: "register214_to_register217_abcdefgh_float64_ieee_scaled", + address: []uint16{214, 215, 216, 217}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 0.1, + write: []byte{0xBF, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(-0.002774907295123737), + }, + { + name: "register218_to_register221_abcdefgh_float64_ieee_pos", + address: []uint16{218, 219, 220, 221}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x3F, 0x9C, 0x6A, 0x40, 0xC3, 0x47, 0x8F, 0x55}, + read: float64(0.02774907295123737), + }, + { + name: 
"register222_to_register225_hgfecdba_float64_ieee", + address: []uint16{222, 223, 224, 225}, + quantity: 4, + byteOrder: "HGFEDCBA", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x55, 0x8F, 0x47, 0xC3, 0x40, 0x6A, 0x9C, 0xBF}, + read: float64(-0.02774907295123737), + }, + { + name: "register226_to_register229_badcfehg_float64_ieee", + address: []uint16{226, 227, 228, 229}, + quantity: 4, + byteOrder: "BADCFEHG", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x9C, 0xBF, 0x40, 0x6A, 0x47, 0xC3, 0x55, 0x8F}, + read: float64(-0.02774907295123737), + }, + { + name: "register230_to_register233_ghefcdab_float64_ieee", + address: []uint16{230, 231, 232, 233}, + quantity: 4, + byteOrder: "GHEFCDAB", + dataType: "FLOAT64-IEEE", + scale: 1, + write: []byte{0x8F, 0x55, 0xC3, 0x47, 0x6A, 0x40, 0xBF, 0x9C}, + read: float64(-0.02774907295123737), + }, } serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() - assert.NoError(t, err) - handler := m.NewTCPClientHandler("localhost:1502") - err = handler.Connect() - assert.NoError(t, err) + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) defer handler.Close() - client := m.NewClient(handler) + client := mb.NewClient(handler) for _, hrt := range holdingRegisterTests { t.Run(hrt.name, func(t *testing.T) { - _, err = client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write) - assert.NoError(t, err) + _, err := client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write) + require.NoError(t, err) modbus := Modbus{ Name: "TestHoldingRegisters", Controller: "tcp://localhost:1502", - SlaveID: 1, - HoldingRegisters: []fieldContainer{ - { - Name: hrt.name, - ByteOrder: hrt.byteOrder, - DataType: hrt.dataType, - Scale: hrt.scale, - Address: hrt.address, - }, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = []fieldDefinition{ + { + Name: hrt.name, + ByteOrder: hrt.byteOrder, + DataType: hrt.dataType, + Scale: hrt.scale, + Address: hrt.address, }, } - err = modbus.Init() - assert.NoError(t, err) + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + map[string]interface{}{hrt.name: hrt.read}, + time.Unix(0, 0), + ), + } + var acc testutil.Accumulator - modbus.Gather(&acc) - assert.NotEmpty(t, modbus.registers) + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, hrt.read, coil.Fields[0].value) - } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } +func TestReadMultipleCoilWithHole(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + writeValue := uint16(0) + readValue := uint16(0) + for i := 0; i < 14; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + 
expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + for i := 15; i < 18; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + for i := 24; i < 33; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + require.Len(t, expectedFields, len(fcs)) + + modbus := Modbus{ + Name: "TestReadMultipleCoilWithHole", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{Name: "modbus:MultipleCoilWithHole"}, + } + modbus.SlaveID = 1 + modbus.Coils = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestReadMultipleCoilLimit(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + writeValue := uint16(0) + readValue := uint16(0) + for i := 0; i < 4000; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("coil-%v", i) + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleCoil(fc.Address[0], writeValue) + require.NoError(t, err) + + expectedFields[fc.Name] = readValue + writeValue = 65280 - writeValue + readValue = 1 - readValue + } + require.Len(t, expectedFields, len(fcs)) + + modbus := Modbus{ + Name: "TestReadCoils", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestReadMultipleHoldingRegisterWithHole(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + for i := 0; i < 10; i++ { + fc := fieldDefinition{ + Name: fmt.Sprintf("HoldingRegister-%v", i), 
+ ByteOrder: "AB", + DataType: "INT16", + Scale: 1.0, + Address: []uint16{uint16(i)}, + } + fcs = append(fcs, fc) + + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) + } + for i := 20; i < 30; i++ { + fc := fieldDefinition{ + Name: fmt.Sprintf("HoldingRegister-%v", i), + ByteOrder: "AB", + DataType: "INT16", + Scale: 1.0, + Address: []uint16{uint16(i)}, + } + fcs = append(fcs, fc) + + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) + } + require.Len(t, expectedFields, len(fcs)) + + modbus := Modbus{ + Name: "TestHoldingRegister", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestReadMultipleHoldingRegisterLimit(t *testing.T) { + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + + fcs := []fieldDefinition{} + expectedFields := make(map[string]interface{}) + for i := 0; i <= 400; i++ { + fc := fieldDefinition{} + fc.Name = fmt.Sprintf("HoldingRegister-%v", i) + fc.ByteOrder = "AB" + fc.DataType = "INT16" + fc.Scale = 1.0 + fc.Address = []uint16{uint16(i)} + fcs = append(fcs, fc) + + _, err := client.WriteSingleRegister(fc.Address[0], uint16(i)) + require.NoError(t, err) + + expectedFields[fc.Name] = int64(i) + } + + modbus := Modbus{ + Name: "TestHoldingRegister", + Controller: "tcp://localhost:1502", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.HoldingRegisters = fcs + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, + }, + expectedFields, + time.Unix(0, 0), + ), + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + func TestRetrySuccessful(t *testing.T) { retries := 0 maxretries := 2 value := 1 serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") - assert.NoError(t, err) + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() // Make read on coil-registers fail for some trials by making the device @@ -617,44 +974,52 @@ func TestRetrySuccessful(t *testing.T) { if retries >= maxretries { except = &mbserver.Success } - retries += 1 + retries++ return data, except }) - t.Run("retry_success", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetry", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ - { - Name: "retry_success", - Address: []uint16{0}, - }, + modbus := Modbus{ + 
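// The device above is made to fail for the first maxretries attempts, so a
+	// Retries budget of maxretries lets Gather() succeed on the final attempt.
+	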
Name: "TestRetry", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_success", + Address: []uint16{0}, + }, + } + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cCoils, + "slave_id": strconv.Itoa(int(modbus.SlaveID)), + "name": modbus.Name, }, - } + map[string]interface{}{"retry_success": uint16(value)}, + time.Unix(0, 0), + ), + } - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.NoError(t, err) - assert.NotEmpty(t, modbus.registers) + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) - for _, coil := range modbus.registers { - assert.Equal(t, uint16(value), coil.Fields[0].value) - } - }) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } -func TestRetryFail(t *testing.T) { +func TestRetryFailExhausted(t *testing.T) { maxretries := 2 serv := mbserver.NewServer() - err := serv.ListenTCP("localhost:1502") - assert.NoError(t, err) + require.NoError(t, serv.ListenTCP("localhost:1502")) defer serv.Close() // Make the read on coils fail with busy @@ -667,32 +1032,41 @@ func TestRetryFail(t *testing.T) { return data, &mbserver.SlaveDeviceBusy }) - t.Run("retry_fail", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetryFail", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ - { - Name: "retry_fail", - Address: []uint16{0}, - }, - }, - } + modbus := Modbus{ + Name: "TestRetryFailExhausted", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + } - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.Error(t, err) - }) + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + + err := modbus.Gather(&acc) + require.Error(t, err) + require.Equal(t, "modbus: exception '6' (server device busy), function '129'", err.Error()) +} + +func TestRetryFailIllegal(t *testing.T) { + maxretries := 2 + + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() // Make the read on coils fail with illegal function preventing retry counter := 0 serv.RegisterFunctionHandler(1, func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { - counter += 1 + counter++ data := make([]byte, 2) data[0] = byte(1) data[1] = byte(0) @@ -700,25 +1074,853 @@ func TestRetryFail(t *testing.T) { return data, &mbserver.IllegalFunction }) - t.Run("retry_fail", func(t *testing.T) { - modbus := Modbus{ - Name: "TestRetryFail", - Controller: "tcp://localhost:1502", - SlaveID: 1, - Retries: maxretries, - Coils: []fieldContainer{ + modbus := Modbus{ + Name: "TestRetryFailExhausted", + Controller: "tcp://localhost:1502", + Retries: maxretries, + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + } + + var acc testutil.Accumulator + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + + err := modbus.Gather(&acc) + require.Error(t, err) + require.Equal(t, 
"modbus: exception '1' (illegal function), function '129'", err.Error()) + require.Equal(t, counter, 1) +} + +func TestConfigurationRegister(t *testing.T) { + modbus := Modbus{ + Name: "TestRetryFailExhausted", + Controller: "tcp://localhost:1502", + ConfigurationType: "register", + Log: testutil.Logger{}, + } + modbus.SlaveID = 1 + modbus.Coils = []fieldDefinition{ + { + Name: "coil", + Address: []uint16{0}, + }, + } + modbus.DiscreteInputs = []fieldDefinition{ + { + Name: "discrete", + Address: []uint16{0}, + }, + } + modbus.HoldingRegisters = []fieldDefinition{ + { + Name: "holding", + Address: []uint16{0}, + DataType: "INT16", + ByteOrder: "AB", + Scale: 1.0, + }, + } + modbus.InputRegisters = []fieldDefinition{ + { + Name: "input", + Address: []uint16{0}, + DataType: "INT16", + ByteOrder: "AB", + Scale: 1.0, + }, + } + + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NotNil(t, modbus.requests[1]) + require.Len(t, modbus.requests[1].coil, len(modbus.Coils)) + require.Len(t, modbus.requests[1].discrete, len(modbus.DiscreteInputs)) + require.Len(t, modbus.requests[1].holding, len(modbus.HoldingRegisters)) + require.Len(t, modbus.requests[1].input, len(modbus.InputRegisters)) +} + +func TestConfigurationPerRequest(t *testing.T) { + modbus := Modbus{ + Name: "Test", + Controller: "tcp://localhost:1502", + ConfigurationType: "request", + Log: testutil.Logger{}, + } + modbus.Requests = []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ { - Name: "retry_fail", - Address: []uint16{0}, + Name: "coil-0", + Address: uint16(0), + }, + { + Name: "coil-1", + Address: uint16(1), + Omit: true, + }, + { + Name: "coil-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", }, }, - } + }, + { + SlaveID: 1, + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-3", + Address: uint16(6), + }, + { + Name: "coil-4", + Address: uint16(7), + Omit: true, + }, + { + Name: "coil-5", + Address: uint16(8), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + }, + { + Name: "discrete-1", + Address: uint16(1), + Omit: true, + }, + { + Name: "discrete-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + InputType: "INT16", + }, + { + Name: "holding-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "holding-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + InputType: "INT16", + }, + { + Name: "input-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "input-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + }, + } + + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NotNil(t, modbus.requests[1]) + require.Len(t, modbus.requests[1].coil, 2) 
+ require.Len(t, modbus.requests[1].discrete, 1) + require.Len(t, modbus.requests[1].holding, 1) + require.Len(t, modbus.requests[1].input, 1) +} + +func TestConfigurationPerRequestWithTags(t *testing.T) { + modbus := Modbus{ + Name: "Test", + Controller: "tcp://localhost:1502", + ConfigurationType: "request", + Log: testutil.Logger{}, + } + modbus.Requests = []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + }, + { + Name: "coil-1", + Address: uint16(1), + Omit: true, + }, + { + Name: "coil-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + { + SlaveID: 1, + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-3", + Address: uint16(6), + }, + { + Name: "coil-4", + Address: uint16(7), + Omit: true, + }, + { + Name: "coil-5", + Address: uint16(8), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + }, + { + Name: "discrete-1", + Address: uint16(1), + Omit: true, + }, + { + Name: "discrete-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + InputType: "INT16", + }, + { + Name: "holding-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "holding-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + InputType: "INT16", + }, + { + Name: "input-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "input-2", + Address: uint16(2), + InputType: "INT64", + Scale: 1.2, + OutputType: "FLOAT64", + Measurement: "modbus", + }, + }, + Tags: map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + }, + }, + } + + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NotNil(t, modbus.requests[1]) + require.Len(t, modbus.requests[1].coil, 2) + require.Len(t, modbus.requests[1].discrete, 1) + require.Len(t, modbus.requests[1].holding, 1) + require.Len(t, modbus.requests[1].input, 1) + + expectedTags := map[string]string{ + "first": "a", + "second": "bb", + "third": "ccc", + } + require.Equal(t, expectedTags, modbus.requests[1].coil[0].tags) + require.Equal(t, expectedTags, modbus.requests[1].coil[1].tags) + require.Equal(t, expectedTags, modbus.requests[1].discrete[0].tags) + require.Equal(t, expectedTags, modbus.requests[1].holding[0].tags) + require.Equal(t, expectedTags, modbus.requests[1].input[0].tags) +} + +func TestConfigurationPerRequestFail(t *testing.T) { + tests := []struct { + name string + requests []requestDefinition + 
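// errormsg is compared verbatim against the error returned by Init(),
+		// so it preserves the plugin's own spelling ("configuraton" included).
+		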
errormsg string + }{ + { + name: "empty field name (coil)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Address: uint16(15), + }, + }, + }, + }, + errormsg: "configuraton invalid: empty field name in request for slave 1", + }, + { + name: "invalid byte-order (coil)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "AB", + RegisterType: "coil", + Fields: []requestFieldDefinition{}, + }, + }, + errormsg: "configuraton invalid: unknown byte-order \"AB\"", + }, + { + name: "duplicate fields (coil)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + }, + { + Name: "coil-0", + Address: uint16(1), + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"coil-0\" duplicated in measurement \"modbus\" (slave 1/\"coil\")", + }, + { + name: "duplicate fields multiple requests (coil)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "coil", + Fields: []requestFieldDefinition{ + { + Name: "coil-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"coil-0\" duplicated in measurement \"foo\" (slave 1/\"coil\")", + }, + { + name: "invalid byte-order (discrete)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "AB", + RegisterType: "discrete", + Fields: []requestFieldDefinition{}, + }, + }, + errormsg: "configuraton invalid: unknown byte-order \"AB\"", + }, + { + name: "duplicate fields (discrete)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + }, + { + Name: "discrete-0", + Address: uint16(1), + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"discrete-0\" duplicated in measurement \"modbus\" (slave 1/\"discrete\")", + }, + { + name: "duplicate fields multiple requests (discrete)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "discrete", + Fields: []requestFieldDefinition{ + { + Name: "discrete-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"discrete-0\" duplicated in measurement \"foo\" (slave 1/\"discrete\")", + }, + { + name: "invalid byte-order (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "AB", + RegisterType: "holding", + Fields: []requestFieldDefinition{}, + }, + }, + errormsg: "configuraton invalid: unknown byte-order \"AB\"", + }, + { + name: "invalid field name (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Address: uint16(0), + }, + }, + }, + }, + errormsg: "configuraton invalid: empty field name in request for slave 1", + }, + { + name: "invalid field input type (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + 
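// InputType is deliberately left unset so Init() fails with the
+							// "invalid input datatype" error expected below.
+							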
Address: uint16(0), + }, + }, + }, + }, + errormsg: "cannot process configuraton: initializing field \"holding-0\" failed: invalid input datatype \"\" for determining field length", + }, + { + name: "invalid field output type (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + InputType: "UINT16", + OutputType: "UINT8", + }, + }, + }, + }, + errormsg: "cannot process configuraton: initializing field \"holding-0\" failed: unknown output type \"UINT8\"", + }, + { + name: "duplicate fields (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + }, + { + Name: "holding-0", + Address: uint16(1), + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"holding-0\" duplicated in measurement \"modbus\" (slave 1/\"holding\")", + }, + { + name: "duplicate fields multiple requests (holding)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"holding-0\" duplicated in measurement \"foo\" (slave 1/\"holding\")", + }, + { + name: "invalid byte-order (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "AB", + RegisterType: "input", + Fields: []requestFieldDefinition{}, + }, + }, + errormsg: "configuraton invalid: unknown byte-order \"AB\"", + }, + { + name: "invalid field name (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Address: uint16(0), + }, + }, + }, + }, + errormsg: "configuraton invalid: empty field name in request for slave 1", + }, + { + name: "invalid field input type (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + }, + }, + }, + }, + errormsg: "cannot process configuraton: initializing field \"input-0\" failed: invalid input datatype \"\" for determining field length", + }, + { + name: "invalid field output type (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + InputType: "UINT16", + OutputType: "UINT8", + }, + }, + }, + }, + errormsg: "cannot process configuraton: initializing field \"input-0\" failed: unknown output type \"UINT8\"", + }, + { + name: "duplicate fields (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + }, + { + Name: "input-0", + Address: uint16(1), + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"input-0\" duplicated in measurement \"modbus\" (slave 1/\"input\")", + }, + { + name: "duplicate fields multiple requests (input)", + requests: []requestDefinition{ + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + Measurement: "foo", + }, + 
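// The next request repeats this exact field; Init() must detect the
+					// duplicate across requests, not only within a single request.
+					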
}, + }, + { + SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "input", + Fields: []requestFieldDefinition{ + { + Name: "input-0", + Address: uint16(0), + Measurement: "foo", + }, + }, + }, + }, + errormsg: "configuraton invalid: field \"input-0\" duplicated in measurement \"foo\" (slave 1/\"input\")", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin := Modbus{ + Name: "Test", + Controller: "tcp://localhost:1502", + ConfigurationType: "request", + Log: testutil.Logger{}, + } + plugin.Requests = tt.requests + + err := plugin.Init() + require.Error(t, err) + require.Equal(t, tt.errormsg, err.Error()) + require.Empty(t, plugin.requests) + }) + } +} + +func TestRequestsStartingWithOmits(t *testing.T) { + modbus := Modbus{ + Name: "Test", + Controller: "tcp://localhost:1502", + ConfigurationType: "request", + Log: testutil.Logger{}, + } + modbus.Requests = []requestDefinition{ + {SlaveID: 1, + ByteOrder: "ABCD", + RegisterType: "holding", + Fields: []requestFieldDefinition{ + { + Name: "holding-0", + Address: uint16(0), + InputType: "INT16", + Omit: true, + }, + { + Name: "holding-1", + Address: uint16(1), + InputType: "UINT16", + Omit: true, + }, + { + Name: "holding-2", + Address: uint16(2), + InputType: "INT16", + }, + }, + }, + } + require.NoError(t, modbus.Init()) + require.NotEmpty(t, modbus.requests) + require.NotNil(t, modbus.requests[1]) + require.Equal(t, uint16(0), modbus.requests[1].holding[0].address) + + serv := mbserver.NewServer() + require.NoError(t, serv.ListenTCP("localhost:1502")) + defer serv.Close() + + handler := mb.NewTCPClientHandler("localhost:1502") + require.NoError(t, handler.Connect()) + defer handler.Close() + client := mb.NewClient(handler) + _, err := client.WriteMultipleRegisters(uint16(0), 3, []byte{0x00, 0x01, 0x00, 0x02, 0x00, 0x03}) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "modbus", + map[string]string{ + "type": cHoldingRegisters, + "slave_id": strconv.Itoa(int(modbus.Requests[0].SlaveID)), + "name": modbus.Name, + }, + map[string]interface{}{"holding-2": int16(3)}, + time.Unix(0, 0), + ), + } - err = modbus.Init() - assert.NoError(t, err) - var acc testutil.Accumulator - err = modbus.Gather(&acc) - assert.Error(t, err) - assert.Equal(t, counter, 1) - }) + var acc testutil.Accumulator + require.NoError(t, modbus.Gather(&acc)) + acc.Wait(len(expected)) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } diff --git a/plugins/inputs/modbus/request.go b/plugins/inputs/modbus/request.go new file mode 100644 index 0000000000000..b3c5b62dc76ed --- /dev/null +++ b/plugins/inputs/modbus/request.go @@ -0,0 +1,69 @@ +package modbus + +import "sort" + +type request struct { + address uint16 + length uint16 + fields []field + tags map[string]string +} + +func newRequest(f field, tags map[string]string) request { + r := request{ + address: f.address, + length: f.length, + fields: []field{}, + tags: map[string]string{}, + } + if !f.omit { + r.fields = append(r.fields, f) + } + // Copy the tags + for k, v := range tags { + r.tags[k] = v + } + return r +} + +func groupFieldsToRequests(fields []field, tags map[string]string, maxBatchSize uint16) []request { + if len(fields) == 0 { + return nil + } + + // Sort the fields by address (ascending) and length + sort.Slice(fields, func(i, j int) bool { + addrI := fields[i].address + addrJ := fields[j].address + return addrI < addrJ || (addrI == addrJ && fields[i].length > fields[j].length) + }) + + // 
Construct the consecutive register chunks for the addresses and construct Modbus requests. + // For field addresses like [1, 2, 3, 5, 6, 10, 11, 12, 14] we should construct the following + // requests (1, 3) , (5, 2) , (10, 3), (14 , 1). Furthermore, we should respect field boundaries + // and the given maximum chunk sizes. + var requests []request + + current := newRequest(fields[0], tags) + for _, f := range fields[1:] { + // Check if we need to interrupt the current chunk and require a new one + needInterrupt := f.address != current.address+current.length // not consecutive + needInterrupt = needInterrupt || f.length+current.length > maxBatchSize // too large + + if !needInterrupt { + // Still safe to add the field to the current request + current.length += f.length + if !f.omit { + // Omit adding the field but use it for constructing the request. + current.fields = append(current.fields, f) + } + continue + } + + // Finish the current request, add it to the list and construct a new one + requests = append(requests, current) + current = newRequest(f, tags) + } + requests = append(requests, current) + return requests +} diff --git a/plugins/inputs/modbus/sample.conf b/plugins/inputs/modbus/sample.conf new file mode 100644 index 0000000000000..40cdf1eb043c6 --- /dev/null +++ b/plugins/inputs/modbus/sample.conf @@ -0,0 +1,196 @@ +# Retrieve data from MODBUS slave devices +[[inputs.modbus]] + ## Connection Configuration + ## + ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or + ## via serial line communication in binary (RTU) or readable (ASCII) encoding + ## + ## Device name + name = "Device" + + ## Slave ID - addresses a MODBUS device on the bus + ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] + slave_id = 1 + + ## Timeout for each request + timeout = "1s" + + ## Maximum number of retries and the time to wait between retries + ## when a slave-device is busy. + # busy_retries = 0 + # busy_retries_wait = "100ms" + + # TCP - connect via Modbus/TCP + controller = "tcp://localhost:502" + + ## Serial (RS485; RS232) + # controller = "file:///dev/ttyUSB0" + # baud_rate = 9600 + # data_bits = 8 + # parity = "N" + # stop_bits = 1 + + ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP" + ## default behaviour is "TCP" if the controller is TCP + ## For Serial you can choose between "RTU" and "ASCII" + # transmission_mode = "RTU" + + ## Trace the connection to the modbus device as debug messages + ## Note: You have to enable telegraf's debug mode to see those messages! 
+  # debug_connection = false
+
+  ## Define the configuration schema
+  ## |---register -- define fields per register type in the original style (only supports one slave ID)
+  ## |---request  -- define fields on a per-request basis
+  configuration_type = "register"
+
+  ## --- "register" configuration style ---
+
+  ## Measurements
+  ##
+
+  ## Digital Variables, Discrete Inputs and Coils
+  ## measurement - the (optional) measurement name, defaults to "modbus"
+  ## name        - the variable name
+  ## address     - variable address
+
+  discrete_inputs = [
+    { name = "start", address = [0]},
+    { name = "stop", address = [1]},
+    { name = "reset", address = [2]},
+    { name = "emergency_stop", address = [3]},
+  ]
+  coils = [
+    { name = "motor1_run", address = [0]},
+    { name = "motor1_jog", address = [1]},
+    { name = "motor1_stop", address = [2]},
+  ]
+
+  ## Analog Variables, Input Registers and Holding Registers
+  ## measurement - the (optional) measurement name, defaults to "modbus"
+  ## name        - the variable name
+  ## byte_order  - the ordering of bytes
+  ##  |---AB, ABCD - Big Endian
+  ##  |---BA, DCBA - Little Endian
+  ##  |---BADC     - Mid-Big Endian
+  ##  |---CDAB     - Mid-Little Endian
+  ## data_type   - INT16, UINT16, INT32, UINT32, INT64, UINT64,
+  ##               FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation)
+  ##               FLOAT32, FIXED, UFIXED (fixed-point representation on input)
+  ## scale       - the final numeric variable representation
+  ## address     - variable address
+
+  holding_registers = [
+    { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
+    { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]},
+    { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]},
+    { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
+    { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
+    { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
+  ]
+  input_registers = [
+    { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+    { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+    { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+  ]
+
+  ## --- "request" configuration style ---
+
+  ## Per request definition
+  ##
+
+  ## Define a request sent to the device
+  ## Multiple of these requests can be defined. Data will be collated into metrics at the end of data collection.
+  [[inputs.modbus.request]]
+    ## ID of the modbus slave device to query.
+    ## If you need to query multiple slave-devices, create several "request" definitions.
+    slave_id = 1
+
+    ## Byte order of the data.
+    ##  |---ABCD -- Big Endian (Motorola)
+    ##  |---DCBA -- Little Endian (Intel)
+    ##  |---BADC -- Big Endian with byte swap
+    ##  |---CDAB -- Little Endian with byte swap
+    byte_order = "ABCD"
+
+    ## Type of the register for the request
+    ## Can be "coil", "discrete", "holding" or "input"
+    register = "coil"
+
+    ## Name of the measurement.
+    ## Can be overridden by the individual field definitions. Defaults to "modbus"
+    # measurement = "modbus"
+
+    ## Field definitions
+    ## Analog Variables, Input Registers and Holding Registers
+    ## address - address of the register to query. For coil and discrete inputs this is the bit address.
+    ## name *1 - field name
+    ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and
+    ##            FLOAT32, FLOAT64 (IEEE 754 binary representation)
+    ## scale *1,2 - (optional) factor to scale the variable with
+    ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if
+    ##               "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc).
+    ## measurement *1 - (optional) measurement name, defaults to the setting of the request
+    ## omit - (optional) omit this field. Useful to leave out single values when querying many registers
+    ##        with a single request. Defaults to "false".
+    ##
+    ## *1: Those fields are ignored if the field is omitted ("omit"=true)
+    ##
+    ## *2: These fields are ignored for "coil" and "discrete" register types. For those register types
+    ##     the fields are output as zero or one in UINT64 format by default.
+
+    ## Coil / discrete input example
+    fields = [
+      { address=0, name="motor1_run"},
+      { address=1, name="jog", measurement="motor"},
+      { address=2, name="motor1_stop", omit=true},
+      { address=3, name="motor1_overheating"},
+    ]
+
+    [inputs.modbus.request.tags]
+      machine = "impresser"
+      location = "main building"
+
+  [[inputs.modbus.request]]
+    ## Holding example
+    ## All of those examples will result in FLOAT64 field outputs
+    slave_id = 1
+    byte_order = "DCBA"
+    register = "holding"
+    fields = [
+      { address=0, name="voltage", type="INT16", scale=0.1 },
+      { address=1, name="current", type="INT32", scale=0.001 },
+      { address=3, name="power", type="UINT32", omit=true },
+      { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" },
+      { address=7, name="frequency", type="UINT32", scale=0.1 },
+      { address=8, name="power_factor", type="INT64", scale=0.01 },
+    ]
+
+    [inputs.modbus.request.tags]
+      machine = "impresser"
+      location = "main building"
+
+  [[inputs.modbus.request]]
+    ## Input example with type conversions
+    slave_id = 1
+    byte_order = "ABCD"
+    register = "input"
+    fields = [
+      { address=0, name="rpm", type="INT16" },                      # will result in INT64 field
+      { address=1, name="temperature", type="INT16", scale=0.1 },   # will result in FLOAT64 field
+      { address=2, name="force", type="INT32", output="FLOAT64" },  # will result in FLOAT64 field
+      { address=4, name="hours", type="UINT32" },                   # will result in UINT64 field
+    ]
+
+    [inputs.modbus.request.tags]
+      machine = "impresser"
+      location = "main building"
+
+  ## Enable workarounds required by some devices to work correctly
+  # [inputs.modbus.workarounds]
+    ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices.
+    # pause_between_requests = "0ms"
+    ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain
+    ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices)
+    ## from multiple instances you might want to only stay connected during gather and disconnect afterwards.
+    # close_connection_after_gather = false
diff --git a/plugins/inputs/modbus/sample_general_begin.conf b/plugins/inputs/modbus/sample_general_begin.conf
new file mode 100644
index 0000000000000..b3c5e677e22b2
--- /dev/null
+++ b/plugins/inputs/modbus/sample_general_begin.conf
@@ -0,0 +1,46 @@
+# Retrieve data from MODBUS slave devices
+[[inputs.modbus]]
+  ## Connection Configuration
+  ##
+  ## The plugin supports connections to PLCs via MODBUS/TCP, RTU over TCP, ASCII over TCP or
+  ## via serial line communication in binary (RTU) or readable (ASCII) encoding
+  ##
+  ## Device name
+  name = "Device"
+
+  ## Slave ID - addresses a MODBUS device on the bus
+  ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved]
+  slave_id = 1
+
+  ## Timeout for each request
+  timeout = "1s"
+
+  ## Maximum number of retries and the time to wait between retries
+  ## when a slave-device is busy.
+  # busy_retries = 0
+  # busy_retries_wait = "100ms"
+
+  # TCP - connect via Modbus/TCP
+  controller = "tcp://localhost:502"
+
+  ## Serial (RS485; RS232)
+  # controller = "file:///dev/ttyUSB0"
+  # baud_rate = 9600
+  # data_bits = 8
+  # parity = "N"
+  # stop_bits = 1
+
+  ## For Modbus over TCP you can choose between "TCP", "RTUoverTCP" and "ASCIIoverTCP"
+  ## default behaviour is "TCP" if the controller is TCP
+  ## For Serial you can choose between "RTU" and "ASCII"
+  # transmission_mode = "RTU"
+
+  ## Trace the connection to the modbus device as debug messages
+  ## Note: You have to enable telegraf's debug mode to see those messages!
+  # debug_connection = false
+
+  ## Define the configuration schema
+  ## |---register -- define fields per register type in the original style (only supports one slave ID)
+  ## |---request  -- define fields on a per-request basis
+  configuration_type = "register"
+
diff --git a/plugins/inputs/modbus/sample_general_end.conf b/plugins/inputs/modbus/sample_general_end.conf
new file mode 100644
index 0000000000000..f6245caed9473
--- /dev/null
+++ b/plugins/inputs/modbus/sample_general_end.conf
@@ -0,0 +1,8 @@
+  ## Enable workarounds required by some devices to work correctly
+  # [inputs.modbus.workarounds]
+    ## Pause between read requests sent to the device. This might be necessary for (slow) serial devices.
+    # pause_between_requests = "0ms"
+    ## Close the connection after every gather cycle. Usually the plugin closes the connection after a certain
+    ## idle-timeout, however, if you query a device with limited simultaneous connectivity (e.g. serial devices)
+    ## from multiple instances you might want to only stay connected during gather and disconnect afterwards.
+    # close_connection_after_gather = false
diff --git a/plugins/inputs/modbus/sample_register.conf b/plugins/inputs/modbus/sample_register.conf
new file mode 100644
index 0000000000000..ebfaf636d1e0a
--- /dev/null
+++ b/plugins/inputs/modbus/sample_register.conf
@@ -0,0 +1,50 @@
+  ## --- "register" configuration style ---
+
+  ## Measurements
+  ##
+
+  ## Digital Variables, Discrete Inputs and Coils
+  ## measurement - the (optional) measurement name, defaults to "modbus"
+  ## name        - the variable name
+  ## address     - variable address
+
+  discrete_inputs = [
+    { name = "start", address = [0]},
+    { name = "stop", address = [1]},
+    { name = "reset", address = [2]},
+    { name = "emergency_stop", address = [3]},
+  ]
+  coils = [
+    { name = "motor1_run", address = [0]},
+    { name = "motor1_jog", address = [1]},
+    { name = "motor1_stop", address = [2]},
+  ]
+
+  ## Analog Variables, Input Registers and Holding Registers
+  ## measurement - the (optional) measurement name, defaults to "modbus"
+  ## name        - the variable name
+  ## byte_order  - the ordering of bytes
+  ##  |---AB, ABCD - Big Endian
+  ##  |---BA, DCBA - Little Endian
+  ##  |---BADC     - Mid-Big Endian
+  ##  |---CDAB     - Mid-Little Endian
+  ## data_type   - INT16, UINT16, INT32, UINT32, INT64, UINT64,
+  ##               FLOAT32-IEEE, FLOAT64-IEEE (the IEEE 754 binary representation)
+  ##               FLOAT32, FIXED, UFIXED (fixed-point representation on input)
+  ## scale       - the final numeric variable representation
+  ## address     - variable address
+
+  holding_registers = [
+    { name = "power_factor", byte_order = "AB", data_type = "FIXED", scale=0.01, address = [8]},
+    { name = "voltage", byte_order = "AB", data_type = "FIXED", scale=0.1, address = [0]},
+    { name = "energy", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [5,6]},
+    { name = "current", byte_order = "ABCD", data_type = "FIXED", scale=0.001, address = [1,2]},
+    { name = "frequency", byte_order = "AB", data_type = "UFIXED", scale=0.1, address = [7]},
+    { name = "power", byte_order = "ABCD", data_type = "UFIXED", scale=0.1, address = [3,4]},
+  ]
+  input_registers = [
+    { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]},
+    { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]},
+    { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]},
+  ]
+
diff --git a/plugins/inputs/modbus/sample_request.conf b/plugins/inputs/modbus/sample_request.conf
new file mode 100644
index 0000000000000..717b04e2de22e
--- /dev/null
+++ b/plugins/inputs/modbus/sample_request.conf
@@ -0,0 +1,92 @@
+  ## --- "request" configuration style ---
+
+  ## Per request definition
+  ##
+
+  ## Define a request sent to the device
+  ## Multiple of these requests can be defined. Data will be collated into metrics at the end of data collection.
+  [[inputs.modbus.request]]
+    ## ID of the modbus slave device to query.
+    ## If you need to query multiple slave-devices, create several "request" definitions.
+    slave_id = 1
+
+    ## Byte order of the data.
+    ##  |---ABCD -- Big Endian (Motorola)
+    ##  |---DCBA -- Little Endian (Intel)
+    ##  |---BADC -- Big Endian with byte swap
+    ##  |---CDAB -- Little Endian with byte swap
+    byte_order = "ABCD"
+
+    ## Type of the register for the request
+    ## Can be "coil", "discrete", "holding" or "input"
+    register = "coil"
+
+    ## Name of the measurement.
+    ## Can be overridden by the individual field definitions. Defaults to "modbus"
+    # measurement = "modbus"
+
+    ## Field definitions
+    ## Analog Variables, Input Registers and Holding Registers
+    ## address - address of the register to query. For coil and discrete inputs this is the bit address.
+    ## name *1 - field name
+    ## type *1,2 - type of the modbus field, can be INT16, UINT16, INT32, UINT32, INT64, UINT64 and
+    ##            FLOAT32, FLOAT64 (IEEE 754 binary representation)
+    ## scale *1,2 - (optional) factor to scale the variable with
+    ## output *1,2 - (optional) type of resulting field, can be INT64, UINT64 or FLOAT64. Defaults to FLOAT64 if
+    ##               "scale" is provided and to the input "type" class otherwise (i.e. INT* -> INT64, etc).
+    ## measurement *1 - (optional) measurement name, defaults to the setting of the request
+    ## omit - (optional) omit this field. Useful to leave out single values when querying many registers
+    ##        with a single request. Defaults to "false".
+    ##
+    ## *1: Those fields are ignored if the field is omitted ("omit"=true)
+    ##
+    ## *2: These fields are ignored for "coil" and "discrete" register types. For those register types
+    ##     the fields are output as zero or one in UINT64 format by default.
+
+    ## Coil / discrete input example
+    fields = [
+      { address=0, name="motor1_run"},
+      { address=1, name="jog", measurement="motor"},
+      { address=2, name="motor1_stop", omit=true},
+      { address=3, name="motor1_overheating"},
+    ]
+
+    [inputs.modbus.request.tags]
+      machine = "impresser"
+      location = "main building"
+
+  [[inputs.modbus.request]]
+    ## Holding example
+    ## All of those examples will result in FLOAT64 field outputs
+    slave_id = 1
+    byte_order = "DCBA"
+    register = "holding"
+    fields = [
+      { address=0, name="voltage", type="INT16", scale=0.1 },
+      { address=1, name="current", type="INT32", scale=0.001 },
+      { address=3, name="power", type="UINT32", omit=true },
+      { address=5, name="energy", type="FLOAT32", scale=0.001, measurement="W" },
+      { address=7, name="frequency", type="UINT32", scale=0.1 },
+      { address=8, name="power_factor", type="INT64", scale=0.01 },
+    ]
+
+    [inputs.modbus.request.tags]
+      machine = "impresser"
+      location = "main building"
+
+  [[inputs.modbus.request]]
+    ## Input example with type conversions
+    slave_id = 1
+    byte_order = "ABCD"
+    register = "input"
+    fields = [
+      { address=0, name="rpm", type="INT16" },                      # will result in INT64 field
+      { address=1, name="temperature", type="INT16", scale=0.1 },   # will result in FLOAT64 field
+      { address=2, name="force", type="INT32", output="FLOAT64" },  # will result in FLOAT64 field
+      { address=4, name="hours", type="UINT32" },                   # will result in UINT64 field
+    ]
+
+    [inputs.modbus.request.tags]
+      machine = "impresser"
+      location = "main building"
+
diff --git a/plugins/inputs/modbus/type_conversions.go b/plugins/inputs/modbus/type_conversions.go
new file mode 100644
index 0000000000000..556f7b423c13d
--- /dev/null
+++ b/plugins/inputs/modbus/type_conversions.go
@@ -0,0 +1,54 @@
+package modbus
+
+import "fmt"
+
+func determineConverter(inType, byteOrder, outType string, scale float64) (fieldConverterFunc, error) {
+	if scale != 0.0 {
+		return determineConverterScale(inType, byteOrder, outType, scale)
+	}
+	return determineConverterNoScale(inType, byteOrder, outType)
+}
+
+func determineConverterScale(inType, byteOrder, outType string, scale float64) (fieldConverterFunc, error) {
+	switch inType {
+	case "INT16":
+		return determineConverterI16Scale(outType, byteOrder, scale)
+	case "UINT16":
+		return determineConverterU16Scale(outType, byteOrder, scale)
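+	// For example, inType "UINT16" with byteOrder "ABCD", outType "FLOAT64" and
+	// scale 0.1 resolves to a converter that reads two big-endian bytes and
+	// returns float64(binary.BigEndian.Uint16(b)) * 0.1.
+	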
case "INT32": + return determineConverterI32Scale(outType, byteOrder, scale) + case "UINT32": + return determineConverterU32Scale(outType, byteOrder, scale) + case "INT64": + return determineConverterI64Scale(outType, byteOrder, scale) + case "UINT64": + return determineConverterU64Scale(outType, byteOrder, scale) + case "FLOAT32": + return determineConverterF32Scale(outType, byteOrder, scale) + case "FLOAT64": + return determineConverterF64Scale(outType, byteOrder, scale) + } + return nil, fmt.Errorf("invalid input data-type: %s", inType) +} + +func determineConverterNoScale(inType, byteOrder, outType string) (fieldConverterFunc, error) { + switch inType { + case "INT16": + return determineConverterI16(outType, byteOrder) + case "UINT16": + return determineConverterU16(outType, byteOrder) + case "INT32": + return determineConverterI32(outType, byteOrder) + case "UINT32": + return determineConverterU32(outType, byteOrder) + case "INT64": + return determineConverterI64(outType, byteOrder) + case "UINT64": + return determineConverterU64(outType, byteOrder) + case "FLOAT32": + return determineConverterF32(outType, byteOrder) + case "FLOAT64": + return determineConverterF64(outType, byteOrder) + } + return nil, fmt.Errorf("invalid input data-type: %s", inType) +} diff --git a/plugins/inputs/modbus/type_conversions16.go b/plugins/inputs/modbus/type_conversions16.go new file mode 100644 index 0000000000000..0887291a649e8 --- /dev/null +++ b/plugins/inputs/modbus/type_conversions16.go @@ -0,0 +1,138 @@ +package modbus + +import ( + "encoding/binary" + "fmt" +) + +type convert16 func([]byte) uint16 + +func endianessConverter16(byteOrder string) (convert16, error) { + switch byteOrder { + case "ABCD", "CDAB": // Big endian (Motorola) + return binary.BigEndian.Uint16, nil + case "DCBA", "BADC": // Little endian (Intel) + return binary.LittleEndian.Uint16, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I16 - no scale +func determineConverterI16(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return int16(tohost(b)) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(int16(tohost(b))) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(int16(tohost(b))) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(int16(tohost(b))) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U16 - no scale +func determineConverterU16(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(tohost(b)) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I16 - scale +func determineConverterI16Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} 
{ + in := int16(tohost(b)) + return int16(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int16(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U16 - scale +func determineConverterU16Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter16(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := tohost(b) + return uint16(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} diff --git a/plugins/inputs/modbus/type_conversions32.go b/plugins/inputs/modbus/type_conversions32.go new file mode 100644 index 0000000000000..1a0255ef3e8e0 --- /dev/null +++ b/plugins/inputs/modbus/type_conversions32.go @@ -0,0 +1,200 @@ +package modbus + +import ( + "encoding/binary" + "fmt" + "math" +) + +type convert32 func([]byte) uint32 + +func binaryMSWLEU32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:])) +} + +func binaryLSWBEU32(b []byte) uint32 { + _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:])) +} + +func endianessConverter32(byteOrder string) (convert32, error) { + switch byteOrder { + case "ABCD": // Big endian (Motorola) + return binary.BigEndian.Uint32, nil + case "BADC": // Big endian with bytes swapped + return binaryMSWLEU32, nil + case "CDAB": // Little endian with bytes swapped + return binaryLSWBEU32, nil + case "DCBA": // Little endian (Intel) + return binary.LittleEndian.Uint32, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I32 - no scale +func determineConverterI32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return int32(tohost(b)) + }, nil + case "INT64": + return func(b []byte) interface{} { + return int64(int32(tohost(b))) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(int32(tohost(b))) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(int32(tohost(b))) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U32 - no scale +func determineConverterU32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "INT64": + return func(b []byte) 
interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + return uint64(tohost(b)) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F32 - no scale +func determineConverterF32(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + raw := tohost(b) + return math.Float32frombits(raw) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float64(in) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I32 - scale +func determineConverterI32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return int32(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int32(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U32 - scale +func determineConverterU32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := tohost(b) + return uint32(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F32 - scale +func determineConverterF32Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter32(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float32(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float32frombits(raw) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} diff --git a/plugins/inputs/modbus/type_conversions64.go b/plugins/inputs/modbus/type_conversions64.go new file mode 100644 index 0000000000000..f72dfdf3af66d --- /dev/null +++ b/plugins/inputs/modbus/type_conversions64.go @@ -0,0 +1,182 @@ +package modbus + +import ( + "encoding/binary" + "fmt" + "math" +) + +type convert64 func([]byte) uint64 + +func binaryMSWLEU64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | 
uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:])) +} + +func binaryLSWBEU64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:])) +} + +func endianessConverter64(byteOrder string) (convert64, error) { + switch byteOrder { + case "ABCD": // Big endian (Motorola) + return binary.BigEndian.Uint64, nil + case "BADC": // Big endian with bytes swapped + return binaryMSWLEU64, nil + case "CDAB": // Little endian with bytes swapped + return binaryLSWBEU64, nil + case "DCBA": // Little endian (Intel) + return binary.LittleEndian.Uint64, nil + } + return nil, fmt.Errorf("invalid byte-order: %s", byteOrder) +} + +// I64 - no scale +func determineConverterI64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return uint64(in) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return float64(in) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U64 - no scale +func determineConverterU64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "INT64": + return func(b []byte) interface{} { + return int64(tohost(b)) + }, nil + case "native", "UINT64": + return func(b []byte) interface{} { + return tohost(b) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + return float64(tohost(b)) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F64 - no scale +func determineConverterF64(outType, byteOrder string) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + return math.Float64frombits(raw) + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// I64 - scale +func determineConverterI64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := int64(tohost(b)) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// U64 - scale +func determineConverterU64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native": 
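+		// Note: the scaled path routes the uint64 through float64, so values
+		// above 2^53 can lose precision before conversion back to an integer.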
+ return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "INT64": + return func(b []byte) interface{} { + in := tohost(b) + return int64(float64(in) * scale) + }, nil + case "UINT64": + return func(b []byte) interface{} { + in := tohost(b) + return uint64(float64(in) * scale) + }, nil + case "FLOAT64": + return func(b []byte) interface{} { + in := tohost(b) + return float64(in) * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} + +// F64 - scale +func determineConverterF64Scale(outType, byteOrder string, scale float64) (fieldConverterFunc, error) { + tohost, err := endianessConverter64(byteOrder) + if err != nil { + return nil, err + } + + switch outType { + case "native", "FLOAT64": + return func(b []byte) interface{} { + raw := tohost(b) + in := math.Float64frombits(raw) + return in * scale + }, nil + } + return nil, fmt.Errorf("invalid output data-type: %s", outType) +} diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index cce93dc07376a..545511c926cc7 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -1,15 +1,22 @@ # MongoDB Input Plugin -### Configuration: +All MongoDB server versions from 2.6 and higher are supported. -```toml +## Configuration + +```toml @sample.conf +# Read metrics from one or many MongoDB servers [[inputs.mongodb]] ## An array of URLs of the form: ## "mongodb://" [user ":" pass "@"] host [ ":" port] ## For example: ## mongodb://user:auth_key@10.10.3.30:27017, ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] + ## + ## If connecting to a cluster, users must include "?connect=direct" in + ## the URL to ensure that the connection goes directly to the specified node + ## rather than having all connections routed through the primary node. + servers = ["mongodb://127.0.0.1:27017/?connect=direct"] ## When true, collect cluster status. ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which @@ -22,6 +29,10 @@ ## When true, collect per collection stats # gather_col_stats = false + ## When true, collect usage statistics for each collection + ## (insert, update, queries, remove, getmore, commands, etc.). + # gather_top_stat = false + ## List of db where collections stats are collected ## If empty, all db are concerned # col_stats_dbs = ["local"] @@ -34,20 +45,22 @@ # insecure_skip_verify = false ``` -#### Permissions: +### Permissions If your MongoDB instance has access control enabled you will need to connect as a user with sufficient rights. With MongoDB 3.4 and higher, the `clusterMonitor` role can be used. In version 3.2 you may also need these additional permissions: -``` + +```shell > db.grantRolesToUser("user", [{role: "read", actions: "find", db: "local"}]) ``` If the user is missing required privileges you may see an error in the Telegraf logs similar to: -``` + +```shell Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 } ``` @@ -55,7 +68,7 @@ Some permission related errors are logged at debug level, you can check these messages by setting `debug = true` in the agent section of the configuration or by running Telegraf with the `--debug` argument. -### Metrics: +## Metrics - mongodb - tags: @@ -186,6 +199,8 @@ by running Telegraf with the `--debug` argument.
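Before moving on to the MongoDB changes: the 64-bit byte-order converters above are easiest to follow with concrete bytes. The standalone program below is a minimal sketch, not part of this patch; the eight input bytes are made up, and the BADC/CDAB expressions simply restate what binaryMSWLEU64 and binaryLSWBEU64 compute.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Four 16-bit registers A=0x0102, B=0x0304, C=0x0506, D=0x0708,
	// transmitted in "ABCD" (plain big-endian) order.
	b := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}

	// "ABCD": big endian (Motorola).
	fmt.Printf("ABCD: %#x\n", binary.BigEndian.Uint64(b))

	// "BADC": little-endian 16-bit words, most significant word first,
	// restating binaryMSWLEU64 from the patch.
	badc := uint64(binary.LittleEndian.Uint16(b[0:]))<<48 |
		uint64(binary.LittleEndian.Uint16(b[2:]))<<32 |
		uint64(binary.LittleEndian.Uint16(b[4:]))<<16 |
		uint64(binary.LittleEndian.Uint16(b[6:]))
	fmt.Printf("BADC: %#x\n", badc)

	// "CDAB": big-endian 16-bit words, least significant word first,
	// restating binaryLSWBEU64 from the patch.
	cdab := uint64(binary.BigEndian.Uint16(b[6:]))<<48 |
		uint64(binary.BigEndian.Uint16(b[4:]))<<32 |
		uint64(binary.BigEndian.Uint16(b[2:]))<<16 |
		uint64(binary.BigEndian.Uint16(b[0:]))
	fmt.Printf("CDAB: %#x\n", cdab)

	// "DCBA": little endian (Intel).
	fmt.Printf("DCBA: %#x\n", binary.LittleEndian.Uint64(b))
}
```

The same eight bytes decode to 0x0102030405060708, 0x0201040306050807, 0x0708050603040102, and 0x0807060504030201 respectively, which makes the word-swap orders easy to verify against a device manual.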
- uptime_ns (integer) - version (string) - vsize_megabytes (integer) + - wt_connection_files_currently_open (integer) + - wt_data_handles_currently_active (integer) - wtcache_app_threads_page_read_count (integer) - wtcache_app_threads_page_read_time (integer) - wtcache_app_threads_page_write_count (integer) @@ -225,7 +240,7 @@ by running Telegraf with the `--debug` argument. - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)) - updates_per_sec (integer, deprecated in 1.10; use `updates`)) -+ mongodb_db_stats +- mongodb_db_stats - tags: - db_name - hostname @@ -240,6 +255,8 @@ by running Telegraf with the `--debug` argument. - ok (integer) - storage_size (integer) - type (string) + - fs_used_size (integer) + - fs_total_size (integer) - mongodb_col_stats - tags: @@ -264,12 +281,37 @@ by running Telegraf with the `--debug` argument. - created (integer) - refreshing (integer) -### Example Output: -``` -mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmalloc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i
,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000 -mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_free
list_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 +- mongodb_top_stats + - tags: + - collection + - fields: + - total_time (integer) + - total_count (integer) + - read_lock_time (integer) + - read_lock_count (integer) + - write_lock_time (integer) + - write_lock_count (integer) + - queries_time (integer) + - queries_count (integer) + - get_more_time (integer) + - get_more_count (integer) + - insert_time (integer) + - insert_count (integer) + - update_time (integer) + - update_count (integer) + - remove_time (integer) + - remove_count (integer) + - commands_time (integer) + - commands_count (integer) + +## Example Output + +```shell +mongodb,hostname=127.0.0.1:27017 
active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=0i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=65i,commands_per_sec=4i,connections_available=51199i,connections_current=1i,connections_total_created=5i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=1i,deletes=1i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=0i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=0i,find_command_total=1i,flushes=52i,flushes_per_sec=0i,flushes_total_time_ns=364000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=0i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=5740i,latency_commands_count=46i,latency_reads=348i,latency_reads_count=7i,latency_writes=0i,latency_writes_count=0i,net_in_bytes=296i,net_in_bytes_count=4262i,net_out_bytes=29322i,net_out_bytes_count=242103i,open_connections=1i,operation_scan_and_order=0i,operation_write_conflicts=0i,page_faults=1i,percent_cache_dirty=0,percent_cache_used=0,queries=1i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,resident_megabytes=33i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=0i,tcmalloc_current_allocated_bytes=0i,tcmalloc_current_total_thread_cache_bytes=0i,tcmalloc_heap_size=0i,tcmalloc_max_total_thread_cache_bytes=0i,tcmalloc_pageheap_commit_count=0i,tcmalloc_pageheap_committed_bytes=0i,tcmalloc_pageheap_decommit_count=0i,tcmalloc_pageheap_free_bytes=0i,tcmalloc_pageheap_reserve_count=0i,tcmalloc_pageheap_scavenge_count=0i,tcmalloc_pageheap_total_commit_bytes=0i,tcmalloc_pageheap_total_decommit_bytes=0i,tcmalloc_pageheap_total_reserve_bytes=0i,tcmalloc_pageheap_unmapped_bytes=0i,tcmalloc_spinlock_total_delay_ns=0i,tcmalloc_thread_cache_free_bytes=0i,tcmalloc_total_free_bytes=0i,tcmalloc_transfer_cache_free_bytes=0i,total_available=0i,total_created=0i,total_docs_scanned=0i,total_in_use=0i,total_keys_scanned=0i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=51i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=6135152000000i,version="4.0.19",vsize_megabytes=5088i,wt_connection_files_currently_open=13i,wt_data_handles_currently_active=18i,wtcache_app_threads_page_read_count=99i,wtcache_app_threads_page_read_time=44528i,wtcache_app_threads_page_write_count=19i,wtcache_bytes_read_into=3248195i,wtcache_bytes_written_from=170612i,wtcache_current_bytes=3648788i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=8053063680i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=234i,wtcache_pages_requested_from=18235i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1595691605000000000 +mongodb,hostname=127.0.0.1:27017,node_type=PRI,rs_name=rs0 
active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=0i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=25i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=345i,commands_per_sec=4i,connections_available=838853i,connections_current=7i,connections_total_created=13i,count_command_failed=0i,count_command_total=5i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=2i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=4i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=2i,document_returned=56i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=0i,find_command_total=23i,flushes=4i,flushes_per_sec=0i,flushes_total_time_ns=43000000i,get_more_command_failed=0i,get_more_command_total=88i,getmores=88i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=2i,inserts=2i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=82532i,latency_commands_count=337i,latency_reads=30633i,latency_reads_count=111i,latency_writes=0i,latency_writes_count=0i,member_status="PRI",net_in_bytes=636i,net_in_bytes_count=172300i,net_out_bytes=38849i,net_out_bytes_count=335459i,open_connections=7i,operation_scan_and_order=1i,operation_write_conflicts=0i,page_faults=1i,percent_cache_dirty=0,percent_cache_used=0,queries=23i,queries_per_sec=2i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=0i,repl_apply_batches_total_millis=0i,repl_apply_ops=0i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=0i,repl_commands_per_sec=0i,repl_deletes=0i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=3i,repl_executor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=0i,repl_network_getmores_num=0i,repl_network_getmores_total_millis=0i,repl_network_ops=0i,repl_oplog_window_sec=140i,repl_queries=0i,repl_queries_per_sec=0i,repl_state=1i,repl_updates=0i,repl_updates_per_sec=0i,resident_megabytes=81i,state="PRIMARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=322128i,tcmalloc_current_allocated_bytes=143566680i,tcmalloc_current_total_thread_cache_bytes=1098968i,tcmalloc_heap_size=181317632i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=53i,tcmalloc_pageheap_committed_bytes=149106688i,tcmalloc_pageheap_decommit_count=1i,tcmalloc_pageheap_free_bytes=3244032i,tcmalloc_pageheap_reserve_count=51i,tcmalloc_pageheap_scavenge_count=1i,tcmalloc_pageheap_total_commit_bytes=183074816i,tcmalloc_pageheap_total_decommit_bytes=33968128i,tcmalloc_pageheap_total_reserve_bytes=181317632i,tcmalloc_pageheap_unmapped_bytes=32210944i,tcmalloc_spinlock_total_delay_ns=0i,tcmalloc_thread_cache_free_bytes=1098968i,tcmalloc_total_free_bytes=2295976i,tcmalloc_transfer_cache_free_bytes=874880i,total_available=0i,total_created=0i,total_docs_scanned=56i,total_in_use=0i,total_keys_scanned=2i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=2i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=166481000000i,version
="4.0.19",vsize_megabytes=1482i,wt_connection_files_currently_open=26i,wt_data_handles_currently_active=44i,wtcache_app_threads_page_read_count=0i,wtcache_app_threads_page_read_time=0i,wtcache_app_threads_page_write_count=56i,wtcache_bytes_read_into=0i,wtcache_bytes_written_from=130403i,wtcache_current_bytes=100312i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=506462208i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=0i,wtcache_pages_requested_from=2085i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=63929i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1595691605000000000 mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 mongodb_shard_stats,hostname=127.0.0.1:27017,in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000 +mongodb_top_stats,collection=foo,total_time=1471,total_count=158,read_lock_time=49614,read_lock_count=657,write_lock_time=49125456,write_lock_count=9841,queries_time=174,queries_count=495,get_more_time=498,get_more_count=46,insert_time=2651,insert_count=1265,update_time=0,update_count=0,remove_time=0,remove_count=0,commands_time=498611,commands_count=4615 ``` diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 4ba54137383dd..23d1151138043 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -1,198 +1,155 @@ +//go:generate ../../../tools/readme_config_includer/generator package mongodb import ( + "context" "crypto/tls" "crypto/x509" + _ "embed" "fmt" - "net" "net/url" "strings" "sync" "time" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" + "github.com/influxdata/telegraf" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/mgo.v2" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type MongoDB struct { Servers []string Ssl Ssl - mongos map[string]*Server GatherClusterStatus bool GatherPerdbStats bool GatherColStats bool + GatherTopStat bool ColStatsDbs []string tlsint.ClientConfig - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` + + clients []*Server } type Ssl struct { - Enabled bool - CaCerts []string `toml:"cacerts"` + Enabled bool `toml:"ssl_enabled" deprecated:"1.3.0;use 'tls_*' options instead"` + CaCerts []string `toml:"cacerts" deprecated:"1.3.0;use 'tls_ca' instead"` } -var sampleConfig = ` - ## An array of URLs of the form: - ## "mongodb://" [user ":" pass "@"] host [ ":" port] - ## For example: - ## mongodb://user:auth_key@10.10.3.30:27017, - ## mongodb://10.10.3.33:18832, - servers = ["mongodb://127.0.0.1:27017"] - - ## When true, collect cluster status - ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which - ## may have an impact on performance. - # gather_cluster_status = true - - ## When true, collect per database stats - # gather_perdb_stats = false - - ## When true, collect per collection stats - # gather_col_stats = false - - ## List of db where collections stats are collected - ## If empty, all db are concerned - # col_stats_dbs = ["local"] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (m *MongoDB) SampleConfig() string { +func (*MongoDB) SampleConfig() string { return sampleConfig } -func (*MongoDB) Description() string { - return "Read metrics from one or many MongoDB servers" -} +func (m *MongoDB) Init() error { + var tlsConfig *tls.Config + if m.Ssl.Enabled { + // Deprecated TLS config + tlsConfig = &tls.Config{ + InsecureSkipVerify: m.ClientConfig.InsecureSkipVerify, + } + if len(m.Ssl.CaCerts) == 0 { + return fmt.Errorf("you must explicitly set insecure_skip_verify to skip certificate validation") + } -var localhost = &url.URL{Host: "mongodb://127.0.0.1:27017"} + roots := x509.NewCertPool() + for _, caCert := range m.Ssl.CaCerts { + if ok := roots.AppendCertsFromPEM([]byte(caCert)); !ok { + return fmt.Errorf("failed to parse root certificate") + } + } + tlsConfig.RootCAs = roots + } else { + var err error + tlsConfig, err = m.ClientConfig.TLSConfig() + if err != nil { + return err + } + } -// Reads stats from all configured servers accumulates stats. -// Returns one of the errors encountered while gather stats (if any). -func (m *MongoDB) Gather(acc telegraf.Accumulator) error { if len(m.Servers) == 0 { - m.gatherServer(m.getMongoServer(localhost), acc) - return nil + m.Servers = []string{"mongodb://127.0.0.1:27017"} } - var wg sync.WaitGroup - for i, serv := range m.Servers { - if !strings.HasPrefix(serv, "mongodb://") { + for _, connURL := range m.Servers { + if !strings.HasPrefix(connURL, "mongodb://") && !strings.HasPrefix(connURL, "mongodb+srv://") { // Preserve backwards compatibility for hostnames without a // scheme, broken in go 1.8.
Remove in Telegraf 2.0 - serv = "mongodb://" + serv - m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", serv) - m.Servers[i] = serv + connURL = "mongodb://" + connURL + m.Log.Warnf("Using %q as connection URL; please update your configuration to use a URL", connURL) } - u, err := url.Parse(serv) + u, err := url.Parse(connURL) if err != nil { - m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error()) - continue - } - if u.Host == "" { - m.Log.Errorf("Unable to parse address %q", serv) - continue + return fmt.Errorf("unable to parse connection URL: %q", err) } - wg.Add(1) - go func(srv *Server) { - defer wg.Done() - err := m.gatherServer(srv, acc) - if err != nil { - m.Log.Errorf("Error in plugin: %v", err) - } - }(m.getMongoServer(u)) - } - - wg.Wait() - return nil -} + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() //nolint:revive -func (m *MongoDB) getMongoServer(url *url.URL) *Server { - if _, ok := m.mongos[url.Host]; !ok { - m.mongos[url.Host] = &Server{ - Log: m.Log, - Url: url, + opts := options.Client().ApplyURI(connURL) + if tlsConfig != nil { + opts.TLSConfig = tlsConfig } - } - return m.mongos[url.Host] -} - -func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { - if server.Session == nil { - var dialAddrs []string - if server.Url.User != nil { - dialAddrs = []string{server.Url.String()} - } else { - dialAddrs = []string{server.Url.Host} + if opts.ReadPreference == nil { + opts.ReadPreference = readpref.Nearest() } - dialInfo, err := mgo.ParseURL(dialAddrs[0]) + + client, err := mongo.Connect(ctx, opts) if err != nil { - return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error()) - } - dialInfo.Direct = true - dialInfo.Timeout = 5 * time.Second - - var tlsConfig *tls.Config - - if m.Ssl.Enabled { - // Deprecated TLS config - tlsConfig = &tls.Config{} - if len(m.Ssl.CaCerts) > 0 { - roots := x509.NewCertPool() - for _, caCert := range m.Ssl.CaCerts { - ok := roots.AppendCertsFromPEM([]byte(caCert)) - if !ok { - return fmt.Errorf("failed to parse root certificate") - } - } - tlsConfig.RootCAs = roots - } else { - tlsConfig.InsecureSkipVerify = true - } - } else { - tlsConfig, err = m.ClientConfig.TLSConfig() - if err != nil { - return err - } + return fmt.Errorf("unable to connect to MongoDB: %q", err) } - // If configured to use TLS, add a dial function - if tlsConfig != nil { - dialInfo.DialServer = func(addr *mgo.ServerAddr) (net.Conn, error) { - conn, err := tls.Dial("tcp", addr.String(), tlsConfig) - if err != nil { - fmt.Printf("error in Dial, %s\n", err.Error()) - } - return conn, err - } + err = client.Ping(ctx, opts.ReadPreference) + if err != nil { + return fmt.Errorf("unable to connect to MongoDB: %s", err) } - sess, err := mgo.DialWithInfo(dialInfo) - if err != nil { - return fmt.Errorf("unable to connect to MongoDB: %s", err.Error()) + server := &Server{ + client: client, + hostname: u.Host, + Log: m.Log, } - server.Session = sess + m.clients = append(m.clients, server) + } + + return nil +} + +// Reads stats from all configured servers and accumulates stats. +// Returns one of the errors encountered while gathering stats (if any).
+func (m *MongoDB) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + for _, client := range m.clients { + wg.Add(1) + go func(srv *Server) { + defer wg.Done() + err := srv.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.GatherTopStat, m.ColStatsDbs) + if err != nil { + m.Log.Errorf("failed to gather data: %q", err) + } + }(client) } - return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs) + + wg.Wait() + return nil } func init() { inputs.Add("mongodb", func() telegraf.Input { return &MongoDB{ - mongos: make(map[string]*Server), GatherClusterStatus: true, GatherPerdbStats: false, GatherColStats: false, + GatherTopStat: false, ColStatsDbs: []string{"local"}, } }) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 6a2c0a86ebd12..f8d10a08d384b 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -15,6 +15,7 @@ type MongodbData struct { DbData []DbData ColData []ColData ShardHostData []DbData + TopStatsData []DbData } type DbData struct { @@ -37,7 +38,7 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { } } -var DefaultStats = map[string]string{ +var defaultStats = map[string]string{ "uptime_ns": "UptimeNanos", "inserts": "InsertCnt", "inserts_per_sec": "Insert", @@ -94,7 +95,7 @@ var DefaultStats = map[string]string{ "total_docs_scanned": "TotalObjectsScanned", } -var DefaultAssertsStats = map[string]string{ +var defaultAssertsStats = map[string]string{ "assert_regular": "Regular", "assert_warning": "Warning", "assert_msg": "Msg", @@ -102,7 +103,7 @@ var DefaultAssertsStats = map[string]string{ "assert_rollovers": "Rollovers", } -var DefaultCommandsStats = map[string]string{ +var defaultCommandsStats = map[string]string{ "aggregate_command_total": "AggregateCommandTotal", "aggregate_command_failed": "AggregateCommandFailed", "count_command_total": "CountCommandTotal", @@ -123,7 +124,7 @@ var DefaultCommandsStats = map[string]string{ "update_command_failed": "UpdateCommandFailed", } -var DefaultLatencyStats = map[string]string{ +var defaultLatencyStats = map[string]string{ "latency_writes_count": "WriteOpsCnt", "latency_writes": "WriteLatency", "latency_reads_count": "ReadOpsCnt", @@ -132,7 +133,7 @@ var DefaultLatencyStats = map[string]string{ "latency_commands": "CommandLatency", } -var DefaultReplStats = map[string]string{ +var defaultReplStats = map[string]string{ "repl_inserts": "InsertRCnt", "repl_inserts_per_sec": "InsertR", "repl_queries": "QueryRCnt", @@ -164,37 +165,37 @@ var DefaultReplStats = map[string]string{ "repl_executor_unsignaled_events": "ReplExecutorUnsignaledEvents", } -var DefaultClusterStats = map[string]string{ +var defaultClusterStats = map[string]string{ "jumbo_chunks": "JumboChunksCount", } -var DefaultShardStats = map[string]string{ +var defaultShardStats = map[string]string{ "total_in_use": "TotalInUse", "total_available": "TotalAvailable", "total_created": "TotalCreated", "total_refreshing": "TotalRefreshing", } -var ShardHostStats = map[string]string{ +var shardHostStats = map[string]string{ "in_use": "InUse", "available": "Available", "created": "Created", "refreshing": "Refreshing", } -var MmapStats = map[string]string{ +var mmapStats = map[string]string{ "mapped_megabytes": "Mapped", "non-mapped_megabytes": "NonMapped", "page_faults": "FaultsCnt", "page_faults_per_sec": "Faults", } -var WiredTigerStats = map[string]string{ +var 
wiredTigerStats = map[string]string{ "percent_cache_dirty": "CacheDirtyPercent", "percent_cache_used": "CacheUsedPercent", } -var WiredTigerExtStats = map[string]string{ +var wiredTigerExtStats = map[string]string{ "wtcache_tracked_dirty_bytes": "TrackedDirtyBytes", "wtcache_current_bytes": "CurrentCachedBytes", "wtcache_max_bytes_configured": "MaxBytesConfigured", @@ -215,7 +216,15 @@ var WiredTigerExtStats = map[string]string{ "wtcache_unmodified_pages_evicted": "UnmodifiedPagesEvicted", } -var DefaultTCMallocStats = map[string]string{ +var wiredTigerConnectionStats = map[string]string{ + "wt_connection_files_currently_open": "FilesCurrentlyOpen", +} + +var wiredTigerDataHandleStats = map[string]string{ + "wt_data_handles_currently_active": "DataHandlesCurrentlyActive", +} + +var defaultTCMallocStats = map[string]string{ "tcmalloc_current_allocated_bytes": "TCMallocCurrentAllocatedBytes", "tcmalloc_heap_size": "TCMallocHeapSize", "tcmalloc_central_cache_free_bytes": "TCMallocCentralCacheFreeBytes", @@ -237,25 +246,27 @@ var DefaultTCMallocStats = map[string]string{ "tcmalloc_pageheap_total_reserve_bytes": "TCMallocPageheapTotalReserveBytes", } -var DefaultStorageStats = map[string]string{ +var defaultStorageStats = map[string]string{ "storage_freelist_search_bucket_exhausted": "StorageFreelistSearchBucketExhausted", "storage_freelist_search_requests": "StorageFreelistSearchRequests", "storage_freelist_search_scanned": "StorageFreelistSearchScanned", } -var DbDataStats = map[string]string{ - "collections": "Collections", - "objects": "Objects", - "avg_obj_size": "AvgObjSize", - "data_size": "DataSize", - "storage_size": "StorageSize", - "num_extents": "NumExtents", - "indexes": "Indexes", - "index_size": "IndexSize", - "ok": "Ok", +var dbDataStats = map[string]string{ + "collections": "Collections", + "objects": "Objects", + "avg_obj_size": "AvgObjSize", + "data_size": "DataSize", + "storage_size": "StorageSize", + "num_extents": "NumExtents", + "indexes": "Indexes", + "index_size": "IndexSize", + "ok": "Ok", + "fs_used_size": "FsUsedSize", + "fs_total_size": "FsTotalSize", } -var ColDataStats = map[string]string{ +var colDataStats = map[string]string{ "count": "Count", "size": "Size", "avg_obj_size": "AvgObjSize", @@ -264,6 +275,27 @@ var ColDataStats = map[string]string{ "ok": "Ok", } +var topDataStats = map[string]string{ + "total_time": "TotalTime", + "total_count": "TotalCount", + "read_lock_time": "ReadLockTime", + "read_lock_count": "ReadLockCount", + "write_lock_time": "WriteLockTime", + "write_lock_count": "WriteLockCount", + "queries_time": "QueriesTime", + "queries_count": "QueriesCount", + "get_more_time": "GetMoreTime", + "get_more_count": "GetMoreCount", + "insert_time": "InsertTime", + "insert_count": "InsertCount", + "update_time": "UpdateTime", + "update_count": "UpdateCount", + "remove_time": "RemoveTime", + "remove_count": "RemoveCount", + "commands_time": "CommandsTime", + "commands_count": "CommandsCount", +} + func (d *MongodbData) AddDbStats() { for _, dbstat := range d.StatLine.DbStatsLines { dbStatLine := reflect.ValueOf(&dbstat).Elem() @@ -272,7 +304,7 @@ func (d *MongodbData) AddDbStats() { Fields: make(map[string]interface{}), } newDbData.Fields["type"] = "db_stat" - for key, value := range DbDataStats { + for key, value := range dbDataStats { val := dbStatLine.FieldByName(value).Interface() newDbData.Fields[key] = val } @@ -289,7 +321,7 @@ func (d *MongodbData) AddColStats() { Fields: make(map[string]interface{}), } newColData.Fields["type"] = "col_stat" - for 
key, value := range ColDataStats { + for key, value := range colDataStats { val := colStatLine.FieldByName(value).Interface() newColData.Fields[key] = val } @@ -305,7 +337,7 @@ func (d *MongodbData) AddShardHostStats() { Fields: make(map[string]interface{}), } newDbData.Fields["type"] = "shard_host_stat" - for k, v := range ShardHostStats { + for k, v := range shardHostStats { val := hostStatLine.FieldByName(v).Interface() newDbData.Fields[k] = val } @@ -313,16 +345,32 @@ func (d *MongodbData) AddShardHostStats() { } } +func (d *MongodbData) AddTopStats() { + for _, topStat := range d.StatLine.TopStatLines { + topStatLine := reflect.ValueOf(&topStat).Elem() + newTopStatData := &DbData{ + Name: topStat.CollectionName, + Fields: make(map[string]interface{}), + } + newTopStatData.Fields["type"] = "top_stat" + for key, value := range topDataStats { + val := topStatLine.FieldByName(value).Interface() + newTopStatData.Fields[key] = val + } + d.TopStatsData = append(d.TopStatsData, *newTopStatData) + } +} + func (d *MongodbData) AddDefaultStats() { statLine := reflect.ValueOf(d.StatLine).Elem() - d.addStat(statLine, DefaultStats) + d.addStat(statLine, defaultStats) if d.StatLine.NodeType != "" { - d.addStat(statLine, DefaultReplStats) + d.addStat(statLine, defaultReplStats) d.Tags["node_type"] = d.StatLine.NodeType } if d.StatLine.ReadLatency > 0 { - d.addStat(statLine, DefaultLatencyStats) + d.addStat(statLine, defaultLatencyStats) } if d.StatLine.ReplSetName != "" { @@ -337,23 +385,25 @@ func (d *MongodbData) AddDefaultStats() { d.add("version", d.StatLine.Version) } - d.addStat(statLine, DefaultAssertsStats) - d.addStat(statLine, DefaultClusterStats) - d.addStat(statLine, DefaultCommandsStats) - d.addStat(statLine, DefaultShardStats) - d.addStat(statLine, DefaultStorageStats) - d.addStat(statLine, DefaultTCMallocStats) + d.addStat(statLine, defaultAssertsStats) + d.addStat(statLine, defaultClusterStats) + d.addStat(statLine, defaultCommandsStats) + d.addStat(statLine, defaultShardStats) + d.addStat(statLine, defaultStorageStats) + d.addStat(statLine, defaultTCMallocStats) if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" { - d.addStat(statLine, MmapStats) + d.addStat(statLine, mmapStats) } else if d.StatLine.StorageEngine == "wiredTiger" { - for key, value := range WiredTigerStats { + for key, value := range wiredTigerStats { val := statLine.FieldByName(value).Interface() percentVal := fmt.Sprintf("%.1f", val.(float64)*100) floatVal, _ := strconv.ParseFloat(percentVal, 64) d.add(key, floatVal) } - d.addStat(statLine, WiredTigerExtStats) + d.addStat(statLine, wiredTigerExtStats) + d.addStat(statLine, wiredTigerConnectionStats) + d.addStat(statLine, wiredTigerDataHandleStats) d.add("page_faults", d.StatLine.FaultsCnt) } } @@ -409,4 +459,14 @@ func (d *MongodbData) flush(acc telegraf.Accumulator) { ) host.Fields = make(map[string]interface{}) } + for _, col := range d.TopStatsData { + d.Tags["collection"] = col.Name + acc.AddFields( + "mongodb_top_stats", + col.Fields, + d.Tags, + d.StatLine.Time, + ) + col.Fields = make(map[string]interface{}) + } } diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 4a1730211b594..94b8f6f66a3ae 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -5,8 +5,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var tags = 
make(map[string]string) @@ -64,8 +65,8 @@ func TestAddNonReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + for key := range defaultStats { + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -85,31 +86,38 @@ func TestAddReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range MmapStats { - assert.True(t, acc.HasInt64Field("mongodb", key), key) + for key := range mmapStats { + require.True(t, acc.HasInt64Field("mongodb", key), key) } } func TestAddWiredTigerStats(t *testing.T) { d := NewMongodbData( &StatLine{ - StorageEngine: "wiredTiger", - CacheDirtyPercent: 0, - CacheUsedPercent: 0, - TrackedDirtyBytes: 0, - CurrentCachedBytes: 0, - MaxBytesConfigured: 0, - AppThreadsPageReadCount: 0, - AppThreadsPageReadTime: 0, - AppThreadsPageWriteCount: 0, - BytesWrittenFrom: 0, - BytesReadInto: 0, - PagesEvictedByAppThread: 0, - PagesQueuedForEviction: 0, - PagesWrittenFromCache: 1247, - ServerEvictingPages: 0, - WorkerThreadEvictingPages: 0, - FaultsCnt: 204, + StorageEngine: "wiredTiger", + CacheDirtyPercent: 0, + CacheUsedPercent: 0, + TrackedDirtyBytes: 0, + CurrentCachedBytes: 0, + MaxBytesConfigured: 0, + AppThreadsPageReadCount: 0, + AppThreadsPageReadTime: 0, + AppThreadsPageWriteCount: 0, + BytesWrittenFrom: 0, + BytesReadInto: 0, + PagesEvictedByAppThread: 0, + PagesQueuedForEviction: 0, + ServerEvictingPages: 0, + WorkerThreadEvictingPages: 0, + PagesReadIntoCache: 0, + PagesRequestedFromCache: 0, + PagesWrittenFromCache: 1247, + InternalPagesEvicted: 0, + ModifiedPagesEvicted: 0, + UnmodifiedPagesEvicted: 0, + FilesCurrentlyOpen: 0, + DataHandlesCurrentlyActive: 0, + FaultsCnt: 204, }, tags, ) @@ -119,15 +127,15 @@ func TestAddWiredTigerStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range WiredTigerStats { - assert.True(t, acc.HasFloatField("mongodb", key), key) + for key := range wiredTigerStats { + require.True(t, acc.HasFloatField("mongodb", key), key) } - for key := range WiredTigerExtStats { - assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + for key := range wiredTigerExtStats { + require.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } - assert.True(t, acc.HasInt64Field("mongodb", "page_faults")) + require.True(t, acc.HasInt64Field("mongodb", "page_faults")) } func TestAddShardStats(t *testing.T) { @@ -146,8 +154,8 @@ func TestAddShardStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultShardStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultShardStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -169,8 +177,8 @@ func TestAddLatencyStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultLatencyStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultLatencyStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -191,8 +199,8 @@ func TestAddAssertsStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultAssertsStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultAssertsStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -226,8 +234,8 @@ func TestAddCommandsStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultCommandsStats { 
- assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultCommandsStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -262,8 +270,8 @@ func TestAddTCMallocStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultTCMallocStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultTCMallocStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -282,8 +290,8 @@ func TestAddStorageStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key := range DefaultStorageStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultStorageStats { + require.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -312,16 +320,16 @@ func TestAddShardHostStats(t *testing.T) { var hostsFound []string for host := range hostStatLines { - for key := range ShardHostStats { - assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) + for key := range shardHostStats { + require.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) } - assert.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) + require.True(t, acc.HasTag("mongodb_shard_stats", "hostname")) hostsFound = append(hostsFound, host) } sort.Strings(hostsFound) sort.Strings(expectedHosts) - assert.Equal(t, hostsFound, expectedHosts) + require.Equal(t, hostsFound, expectedHosts) } func TestStateTag(t *testing.T) { @@ -485,3 +493,49 @@ func TestStateTag(t *testing.T) { } acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags) } + +func TestAddTopStats(t *testing.T) { + collections := []string{"collectionOne", "collectionTwo"} + var topStatLines []TopStatLine + for _, collection := range collections { + topStatLine := TopStatLine{ + CollectionName: collection, + TotalTime: 0, + TotalCount: 0, + ReadLockTime: 0, + ReadLockCount: 0, + WriteLockTime: 0, + WriteLockCount: 0, + QueriesTime: 0, + QueriesCount: 0, + GetMoreTime: 0, + GetMoreCount: 0, + InsertTime: 0, + InsertCount: 0, + UpdateTime: 0, + UpdateCount: 0, + RemoveTime: 0, + RemoveCount: 0, + CommandsTime: 0, + CommandsCount: 0, + } + topStatLines = append(topStatLines, topStatLine) + } + + d := NewMongodbData( + &StatLine{ + TopStatLines: topStatLines, + }, + tags, + ) + + var acc testutil.Accumulator + d.AddTopStats() + d.flush(&acc) + + for range topStatLines { + for key := range topDataStats { + require.True(t, acc.HasInt64Field("mongodb_top_stats", key)) + } + } +} diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 5af48c10a6f9b..ae0ab31ab37a4 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -1,19 +1,23 @@ package mongodb import ( + "context" "fmt" - "net/url" + "strconv" "strings" "time" "github.com/influxdata/telegraf" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" ) type Server struct { - Url *url.URL - Session *mgo.Session + client *mongo.Client + hostname string lastResult *MongoStatus Log telegraf.Logger @@ -21,12 +25,12 @@ type Server struct { func (s *Server) getDefaultTags() map[string]string { tags := make(map[string]string) - tags["hostname"] = s.Url.Host + tags["hostname"] = s.hostname return tags } type oplogEntry struct { - Timestamp bson.MongoTimestamp `bson:"ts"` + Timestamp primitive.Timestamp `bson:"ts"` } func IsAuthorization(err 
error) bool { @@ -41,15 +45,23 @@ func (s *Server) authLog(err error) { } } +func (s *Server) runCommand(database string, cmd interface{}, result interface{}) error { + r := s.client.Database(database).RunCommand(context.Background(), cmd) + if r.Err() != nil { + return r.Err() + } + return r.Decode(result) +} + func (s *Server) gatherServerStatus() (*ServerStatus, error) { serverStatus := &ServerStatus{} - err := s.Session.DB("admin").Run(bson.D{ + err := s.runCommand("admin", bson.D{ { - Name: "serverStatus", + Key: "serverStatus", Value: 1, }, { - Name: "recordStats", + Key: "recordStats", Value: 0, }, }, serverStatus) @@ -61,9 +73,9 @@ func (s *Server) gatherServerStatus() (*ServerStatus, error) { func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { replSetStatus := &ReplSetStatus{} - err := s.Session.DB("admin").Run(bson.D{ + err := s.runCommand("admin", bson.D{ { - Name: "replSetGetStatus", + Key: "replSetGetStatus", Value: 1, }, }, replSetStatus) @@ -73,22 +85,71 @@ func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { return replSetStatus, nil } +func (s *Server) gatherTopStatData() (*TopStats, error) { + dest := &bsonx.Doc{} + err := s.runCommand("admin", bson.D{ + { + Key: "top", + Value: 1, + }, + }, dest) + if err != nil { + return nil, err + } + + // From: https://github.com/mongodb/mongo-tools/blob/master/mongotop/mongotop.go#L49-L70 + // Remove 'note' field that prevents easy decoding, then round-trip + // again to simplify unpacking into the nested data structure + totals, err := dest.LookupErr("totals") + if err != nil { + return nil, err + } + recoded, err := totals.Document().Delete("note").MarshalBSON() + if err != nil { + return nil, err + } + topInfo := make(map[string]TopStatCollection) + if err := bson.Unmarshal(recoded, &topInfo); err != nil { + return nil, err + } + + return &TopStats{Totals: topInfo}, nil +} + func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { - chunkCount, err := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() + chunkCount, err := s.client.Database("config").Collection("chunks").CountDocuments(context.Background(), bson.M{"jumbo": true}) if err != nil { return nil, err } return &ClusterStatus{ - JumboChunksCount: int64(chunkCount), + JumboChunksCount: chunkCount, }, nil } -func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { +func poolStatsCommand(version string) (string, error) { + majorPart := string(version[0]) + major, err := strconv.ParseInt(majorPart, 10, 64) + if err != nil { + return "", err + } + + if major == 5 { + return "connPoolStats", nil + } + return "shardConnPoolStats", nil +} + +func (s *Server) gatherShardConnPoolStats(version string) (*ShardStats, error) { + command, err := poolStatsCommand(version) + if err != nil { + return nil, err + } + shardStats := &ShardStats{} - err := s.Session.DB("admin").Run(bson.D{ + err = s.runCommand("admin", bson.D{ { - Name: "shardConnPoolStats", + Key: command, Value: 1, }, }, &shardStats) @@ -100,9 +161,9 @@ func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { func (s *Server) gatherDBStats(name string) (*Db, error) { stats := &DbStatsData{} - err := s.Session.DB(name).Run(bson.D{ + err := s.runCommand(name, bson.D{ { - Name: "dbStats", + Key: "dbStats", Value: 1, }, }, stats) @@ -120,19 +181,25 @@ func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) { query := bson.M{"ts": bson.M{"$exists": true}} var first oplogEntry - err := 
s.Session.DB("local").C(collection).Find(query).Sort("$natural").Limit(1).One(&first) - if err != nil { + firstResult := s.client.Database("local").Collection(collection).FindOne(context.Background(), query, options.FindOne().SetSort(bson.M{"$natural": 1})) + if firstResult.Err() != nil { + return nil, firstResult.Err() + } + if err := firstResult.Decode(&first); err != nil { return nil, err } var last oplogEntry - err = s.Session.DB("local").C(collection).Find(query).Sort("-$natural").Limit(1).One(&last) - if err != nil { + lastResult := s.client.Database("local").Collection(collection).FindOne(context.Background(), query, options.FindOne().SetSort(bson.M{"$natural": -1})) + if lastResult.Err() != nil { + return nil, lastResult.Err() + } + if err := lastResult.Decode(&last); err != nil { return nil, err } - firstTime := time.Unix(int64(first.Timestamp>>32), 0) - lastTime := time.Unix(int64(last.Timestamp>>32), 0) + firstTime := time.Unix(int64(first.Timestamp.T), 0) + lastTime := time.Unix(int64(last.Timestamp.T), 0) stats := &OplogStats{ TimeDiff: int64(lastTime.Sub(firstTime).Seconds()), } @@ -154,7 +221,7 @@ func (s *Server) gatherOplogStats() (*OplogStats, error) { } func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) { - names, err := s.Session.DatabaseNames() + names, err := s.client.ListDatabaseNames(context.Background(), bson.D{}) if err != nil { return nil, err } @@ -162,17 +229,20 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) results := &ColStats{} for _, dbName := range names { if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 { + // skip views as they fail on collStats below + filter := bson.M{"type": bson.M{"$in": bson.A{"collection", "timeseries"}}} + var colls []string - colls, err = s.Session.DB(dbName).CollectionNames() + colls, err = s.client.Database(dbName).ListCollectionNames(context.Background(), filter) if err != nil { s.Log.Errorf("Error getting collection names: %s", err.Error()) continue } for _, colName := range colls { colStatLine := &ColStatsData{} - err = s.Session.DB(dbName).Run(bson.D{ + err = s.runCommand(dbName, bson.D{ { - Name: "collStats", + Key: "collStats", Value: colName, }, }, colStatLine) @@ -192,10 +262,7 @@ func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) return results, nil } -func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { - s.Session.SetMode(mgo.Eventual, true) - s.Session.SetSocketTimeout(0) - +func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, gatherTopStat bool, colStatsDbs []string) error { serverStatus, err := s.gatherServerStatus() if err != nil { return err @@ -227,7 +294,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, clusterStatus = status } - shardStats, err := s.gatherShardConnPoolStats() + shardStats, err := s.gatherShardConnPoolStats(serverStatus.Version) if err != nil { s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error())) } @@ -243,7 +310,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, dbStats := &DbStats{} if gatherDbStats { - names, err := s.Session.DatabaseNames() + names, err := s.client.ListDatabaseNames(context.Background(), bson.D{}) if err != nil { return err } @@ -257,6 +324,16 @@ func (s *Server) gatherData(acc telegraf.Accumulator, 
gatherClusterStatus bool, } } + topStatData := &TopStats{} + if gatherTopStat { + topStats, err := s.gatherTopStatData() + if err != nil { + s.Log.Debugf("Unable to gather top stat data: %s", err.Error()) + return err + } + topStatData = topStats + } + result := &MongoStatus{ ServerStatus: serverStatus, ReplSetStatus: replSetStatus, @@ -265,6 +342,7 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, ColStats: collectionStats, ShardStats: shardStats, OplogStats: oplogStats, + TopStats: topStatData, } result.SampleTime = time.Now() @@ -275,13 +353,14 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, durationInSeconds = 1 } data := NewMongodbData( - NewStatLine(*s.lastResult, *result, s.Url.Host, true, durationInSeconds), + NewStatLine(*s.lastResult, *result, s.hostname, true, durationInSeconds), s.getDefaultTags(), ) data.AddDefaultStats() data.AddDbStats() data.AddColStats() data.AddShardHostStats() + data.AddTopStats() data.flush(acc) } diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index 91a3c0709f0d4..d2313e4088f82 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package mongodb @@ -5,9 +6,9 @@ package mongodb import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGetDefaultTags(t *testing.T) { @@ -15,7 +16,7 @@ func TestGetDefaultTags(t *testing.T) { in string out string }{ - {"hostname", server.Url.Host}, + {"hostname", server.hostname}, } defaultTags := server.getDefaultTags() for _, tt := range tagTests { @@ -28,14 +29,56 @@ func TestGetDefaultTags(t *testing.T) { func TestAddDefaultStats(t *testing.T) { var acc testutil.Accumulator - err := server.gatherData(&acc, false) + err := server.gatherData(&acc, false, true, true, true, []string{"local"}) require.NoError(t, err) // need to call this twice so it can perform the diff - err = server.gatherData(&acc, false) + err = server.gatherData(&acc, false, true, true, true, []string{"local"}) require.NoError(t, err) - for key := range DefaultStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range defaultStats { + require.True(t, acc.HasInt64Field("mongodb", key)) + } +} + +func TestPoolStatsVersionCompatibility(t *testing.T) { + tests := []struct { + name string + version string + expectedCommand string + err bool + }{ + { + name: "mongodb v3", + version: "3.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v4", + version: "4.0.0", + expectedCommand: "shardConnPoolStats", + }, + { + name: "mongodb v5", + version: "5.0.0", + expectedCommand: "connPoolStats", + }, + { + name: "invalid version", + version: "v4", + err: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + command, err := poolStatsCommand(test.version) + require.Equal(t, test.expectedCommand, command) + if test.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) } } diff --git a/plugins/inputs/mongodb/mongodb_test.go b/plugins/inputs/mongodb/mongodb_test.go index 73e68ed376784..24aa2fe3e0d04 100644 --- a/plugins/inputs/mongodb/mongodb_test.go +++ b/plugins/inputs/mongodb/mongodb_test.go @@ -1,62 +1,44 @@ +//go:build integration // +build integration package mongodb import ( + "context" 
"log" "math/rand" - "net/url" "os" "testing" "time" - "gopkg.in/mgo.v2" + "github.com/influxdata/telegraf/testutil" ) -var connect_url string var server *Server -func init() { - connect_url = os.Getenv("MONGODB_URL") - if connect_url == "" { - connect_url = "127.0.0.1:27017" - server = &Server{Url: &url.URL{Host: connect_url}} - } else { - full_url, err := url.Parse(connect_url) - if err != nil { - log.Fatalf("Unable to parse URL (%s), %s\n", full_url, err.Error()) - } - server = &Server{Url: full_url} +func testSetup(_ *testing.M) { + connectionString := os.Getenv("MONGODB_URL") + if connectionString == "" { + connectionString = "mongodb://127.0.0.1:27017" } -} -func testSetup(m *testing.M) { - var err error - var dialAddrs []string - if server.Url.User != nil { - dialAddrs = []string{server.Url.String()} - } else { - dialAddrs = []string{server.Url.Host} - } - dialInfo, err := mgo.ParseURL(dialAddrs[0]) - if err != nil { - log.Fatalf("Unable to parse URL (%s), %s\n", dialAddrs[0], err.Error()) - } - dialInfo.Direct = true - dialInfo.Timeout = 5 * time.Second - sess, err := mgo.DialWithInfo(dialInfo) - if err != nil { - log.Fatalf("Unable to connect to MongoDB, %s\n", err.Error()) + m := &MongoDB{ + Log: testutil.Logger{}, + Servers: []string{connectionString}, } - server.Session = sess - server.Session, _ = mgo.Dial(server.Url.Host) + err := m.Init() if err != nil { - log.Fatalln(err.Error()) + log.Fatalf("Failed to connect to MongoDB: %v", err) } + + server = m.clients[0] } -func testTeardown(m *testing.M) { - server.Session.Close() +func testTeardown(_ *testing.M) { + err := server.client.Disconnect(context.Background()) + if err != nil { + log.Fatalf("failed to disconnect: %v", err) + } } func TestMain(m *testing.M) { diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index ee96d5f8b3ad1..c6906dfb2a291 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -37,9 +37,12 @@ type MongoStatus struct { ColStats *ColStats ShardStats *ShardStats OplogStats *OplogStats + TopStats *TopStats } type ServerStatus struct { + SampleTime time.Time `bson:""` + Flattened map[string]interface{} `bson:""` Host string `bson:"host"` Version string `bson:"version"` Process string `bson:"process"` @@ -63,7 +66,7 @@ type ServerStatus struct { Mem *MemStats `bson:"mem"` Repl *ReplStatus `bson:"repl"` ShardCursorType map[string]interface{} `bson:"shardCursorType"` - StorageEngine map[string]string `bson:"storageEngine"` + StorageEngine *StorageEngine `bson:"storageEngine"` WiredTiger *WiredTiger `bson:"wiredTiger"` Metrics *MetricsStats `bson:"metrics"` TCMallocStats *TCMallocStats `bson:"tcmalloc"` @@ -93,6 +96,8 @@ type DbStatsData struct { IndexSize int64 `bson:"indexSize"` Ok int64 `bson:"ok"` GleStats interface{} `bson:"gleStats"` + FsUsedSize int64 `bson:"fsUsedSize"` + FsTotalSize int64 `bson:"fsTotalSize"` } type ColStats struct { @@ -144,6 +149,8 @@ type WiredTiger struct { Transaction TransactionStats `bson:"transaction"` Concurrent ConcurrentTransactions `bson:"concurrentTransactions"` Cache CacheStats `bson:"cache"` + Connection WTConnectionStats `bson:"connection"` + DataHandle DataHandleStats `bson:"data-handle"` } // ShardStats stores information from shardConnPoolStats. 
@@ -169,6 +176,27 @@ type ShardHostStatsData struct { Refreshing int64 `bson:"refreshing"` } +type TopStats struct { + Totals map[string]TopStatCollection `bson:"totals"` +} + +type TopStatCollection struct { + Total TopStatCollectionData `bson:"total"` + ReadLock TopStatCollectionData `bson:"readLock"` + WriteLock TopStatCollectionData `bson:"writeLock"` + Queries TopStatCollectionData `bson:"queries"` + GetMore TopStatCollectionData `bson:"getmore"` + Insert TopStatCollectionData `bson:"insert"` + Update TopStatCollectionData `bson:"update"` + Remove TopStatCollectionData `bson:"remove"` + Commands TopStatCollectionData `bson:"commands"` +} + +type TopStatCollectionData struct { + Time int64 `bson:"time"` + Count int64 `bson:"count"` +} + type ConcurrentTransactions struct { Write ConcurrentTransStats `bson:"write"` Read ConcurrentTransStats `bson:"read"` @@ -212,22 +240,37 @@ type CacheStats struct { UnmodifiedPagesEvicted int64 `bson:"unmodified pages evicted"` } +type StorageEngine struct { + Name string `bson:"name"` +} + // TransactionStats stores transaction checkpoints in WiredTiger. type TransactionStats struct { TransCheckpointsTotalTimeMsecs int64 `bson:"transaction checkpoint total time (msecs)"` TransCheckpoints int64 `bson:"transaction checkpoints"` } +// WTConnectionStats stores statistics on wiredTiger connections +type WTConnectionStats struct { + FilesCurrentlyOpen int64 `bson:"files currently open"` +} + +// DataHandleStats stores statistics for wiredTiger data-handles +type DataHandleStats struct { + DataHandlesCurrentlyActive int64 `bson:"connection data handles currently active"` +} + // ReplStatus stores data related to replica sets. type ReplStatus struct { - SetName interface{} `bson:"setName"` - IsMaster interface{} `bson:"ismaster"` - Secondary interface{} `bson:"secondary"` - IsReplicaSet interface{} `bson:"isreplicaset"` - ArbiterOnly interface{} `bson:"arbiterOnly"` - Hosts []string `bson:"hosts"` - Passives []string `bson:"passives"` - Me string `bson:"me"` + SetName string `bson:"setName"` + IsWritablePrimary interface{} `bson:"isWritablePrimary"` // mongodb 5.x + IsMaster interface{} `bson:"ismaster"` + Secondary interface{} `bson:"secondary"` + IsReplicaSet interface{} `bson:"isreplicaset"` + ArbiterOnly interface{} `bson:"arbiterOnly"` + Hosts []string `bson:"hosts"` + Passives []string `bson:"passives"` + Me string `bson:"me"` } // DBRecordStats stores data related to memory operations across databases.
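The `TopStats`/`TopStatCollection` types above mirror the document returned by MongoDB's `top` admin command, which reports per-namespace operation times and counts under a `totals` key. A hedged sketch of decoding that command with the official driver, using an assumed helper name of `gatherTop` (the patch's actual `gatherTopStatData` may differ in naming and error handling):

```go
package mongodb

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// gatherTop runs the `top` command against the admin database and decodes
// the per-namespace totals into the TopStats structure defined above.
func gatherTop(ctx context.Context, client *mongo.Client) (*TopStats, error) {
	res := client.Database("admin").RunCommand(ctx, bson.D{{Key: "top", Value: 1}})
	if res.Err() != nil {
		return nil, res.Err()
	}
	topStats := &TopStats{}
	if err := res.Decode(topStats); err != nil {
		return nil, err
	}
	return topStats, nil
}
```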
@@ -712,6 +755,12 @@ type StatLine struct { ModifiedPagesEvicted int64 UnmodifiedPagesEvicted int64 + // Connection statistics (wiredtiger only) + FilesCurrentlyOpen int64 + + // Data handles statistics (wiredtiger only) + DataHandlesCurrentlyActive int64 + // Replicated Opcounter fields InsertR, InsertRCnt int64 QueryR, QueryRCnt int64 @@ -768,6 +817,8 @@ type StatLine struct { // Shard Hosts stats field ShardHostStatsLines map[string]ShardHostStatLine + TopStatLines []TopStatLine + // TCMalloc stats field TCMallocCurrentAllocatedBytes int64 TCMallocHeapSize int64 @@ -806,6 +857,8 @@ type DbStatLine struct { Indexes int64 IndexSize int64 Ok int64 + FsUsedSize int64 + FsTotalSize int64 } type ColStatLine struct { Name string @@ -825,6 +878,19 @@ type ShardHostStatLine struct { Refreshing int64 } +type TopStatLine struct { + CollectionName string + TotalTime, TotalCount int64 + ReadLockTime, ReadLockCount int64 + WriteLockTime, WriteLockCount int64 + QueriesTime, QueriesCount int64 + GetMoreTime, GetMoreCount int64 + InsertTime, InsertCount int64 + UpdateTime, UpdateCount int64 + RemoveTime, RemoveCount int64 + CommandsTime, CommandsCount int64 +} + func parseLocks(stat ServerStatus) map[string]LockUsage { returnVal := map[string]LockUsage{} for namespace, lockInfo := range stat.Locks { @@ -859,7 +925,7 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage { return lockUsages } -func diff(newVal, oldVal, sampleTime int64) (int64, int64) { +func diff(newVal, oldVal, sampleTime int64) (avg int64, newValue int64) { d := newVal - oldVal if d < 0 { d = newVal @@ -891,8 +957,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.TotalCreatedC = newStat.Connections.TotalCreated // set the storage engine appropriately - if newStat.StorageEngine != nil && newStat.StorageEngine["name"] != "" { - returnVal.StorageEngine = newStat.StorageEngine["name"] + if newStat.StorageEngine != nil && newStat.StorageEngine.Name != "" { + returnVal.StorageEngine = newStat.StorageEngine.Name } else { returnVal.StorageEngine = "mmapv1" } @@ -1043,8 +1109,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Metrics.Repl.Network != nil { returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes - returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num - returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + if newStat.Metrics.Repl.Network.GetMores != nil { + returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num + returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + } returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops } } @@ -1092,6 +1160,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.UnmodifiedPagesEvicted = newStat.WiredTiger.Cache.UnmodifiedPagesEvicted returnVal.FlushesTotalTime = newStat.WiredTiger.Transaction.TransCheckpointsTotalTimeMsecs * int64(time.Millisecond) + + returnVal.FilesCurrentlyOpen = newStat.WiredTiger.Connection.FilesCurrentlyOpen + + returnVal.DataHandlesCurrentlyActive = newStat.WiredTiger.DataHandle.DataHandlesCurrentlyActive } if newStat.WiredTiger != nil && oldStat.WiredTiger != nil { returnVal.Flushes, returnVal.FlushesCnt = diff(newStat.WiredTiger.Transaction.TransCheckpoints, oldStat.WiredTiger.Transaction.TransCheckpoints, sampleSecs) @@ -1101,7 +1173,7 @@ func NewStatLine(oldMongo, newMongo 
MongoStatus, key string, all bool, sampleSec returnVal.Time = newMongo.SampleTime returnVal.IsMongos = - (newStat.ShardCursorType != nil || strings.HasPrefix(newStat.Process, MongosProcess)) + newStat.ShardCursorType != nil || strings.HasPrefix(newStat.Process, MongosProcess) // BEGIN code modification if oldStat.Mem.Supported.(bool) { @@ -1118,21 +1190,19 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Repl != nil { - setName, isReplSet := newStat.Repl.SetName.(string) - if isReplSet { - returnVal.ReplSetName = setName - } + returnVal.ReplSetName = newStat.Repl.SetName // BEGIN code modification - if newStat.Repl.IsMaster.(bool) { + if val, ok := newStat.Repl.IsMaster.(bool); ok && val { returnVal.NodeType = "PRI" - } else if newStat.Repl.Secondary != nil && newStat.Repl.Secondary.(bool) { + } else if val, ok := newStat.Repl.IsWritablePrimary.(bool); ok && val { + returnVal.NodeType = "PRI" + } else if val, ok := newStat.Repl.Secondary.(bool); ok && val { returnVal.NodeType = "SEC" - } else if newStat.Repl.ArbiterOnly != nil && newStat.Repl.ArbiterOnly.(bool) { + } else if val, ok := newStat.Repl.ArbiterOnly.(bool); ok && val { returnVal.NodeType = "ARB" } else { returnVal.NodeType = "UNK" - } - // END code modification + } // END code modification } else if returnVal.IsMongos { returnVal.NodeType = "RTR" } @@ -1180,9 +1250,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec // Get the entry with the highest lock highestLocked := lockdiffs[len(lockdiffs)-1] - var timeDiffMillis int64 - timeDiffMillis = newStat.UptimeMillis - oldStat.UptimeMillis - + timeDiffMillis := newStat.UptimeMillis - oldStat.UptimeMillis lockToReport := highestLocked.Writes // if the highest locked namespace is not '.' 
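The comma-ok `val, ok := ...(bool)` assertions in the node-type logic above are a correctness fix, not just style: the `Repl` fields are `interface{}` and are nil when the server omits them, and a plain `.(bool)` assertion on a nil interface panics. A small self-contained illustration (the variable name is hypothetical):

```go
package main

import "fmt"

func main() {
	var isMaster interface{} // nil when the server response omits the field

	// Comma-ok form: safe; ok is false for nil or non-bool values.
	if val, ok := isMaster.(bool); ok && val {
		fmt.Println("PRI")
	} else {
		fmt.Println("not primary, or field missing")
	}

	// A plain isMaster.(bool) here would panic, since isMaster is nil.
}
```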
@@ -1210,7 +1278,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.GlobalLock != nil { - hasWT := (newStat.WiredTiger != nil && oldStat.WiredTiger != nil) + hasWT := newStat.WiredTiger != nil && oldStat.WiredTiger != nil //If we have wiredtiger stats, use those instead if newStat.GlobalLock.CurrentQueue != nil { if hasWT { @@ -1269,10 +1337,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec // I'm the master returnVal.ReplLag = 0 break - } else { - // I'm secondary - me = member } + + // I'm secondary + me = member } else if member.State == 1 { // Master found master = member @@ -1319,6 +1387,8 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec Indexes: dbStatsData.Indexes, IndexSize: dbStatsData.IndexSize, Ok: dbStatsData.Ok, + FsTotalSize: dbStatsData.FsTotalSize, + FsUsedSize: dbStatsData.FsUsedSize, } returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine) } @@ -1365,5 +1435,32 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } } + if newMongo.TopStats != nil { + for collection, data := range newMongo.TopStats.Totals { + topStatDataLine := &TopStatLine{ + CollectionName: collection, + TotalTime: data.Total.Time, + TotalCount: data.Total.Count, + ReadLockTime: data.ReadLock.Time, + ReadLockCount: data.ReadLock.Count, + WriteLockTime: data.WriteLock.Time, + WriteLockCount: data.WriteLock.Count, + QueriesTime: data.Queries.Time, + QueriesCount: data.Queries.Count, + GetMoreTime: data.GetMore.Time, + GetMoreCount: data.GetMore.Count, + InsertTime: data.Insert.Time, + InsertCount: data.Insert.Count, + UpdateTime: data.Update.Time, + UpdateCount: data.Update.Count, + RemoveTime: data.Remove.Time, + RemoveCount: data.Remove.Count, + CommandsTime: data.Commands.Time, + CommandsCount: data.Commands.Count, + } + returnVal.TopStatLines = append(returnVal.TopStatLines, *topStatDataLine) + } + } + return returnVal } diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go index 5506602a9e692..908b82de1b911 100644 --- a/plugins/inputs/mongodb/mongostat_test.go +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -2,14 +2,11 @@ package mongodb import ( "testing" - //"time" - //"github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLatencyStats(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ @@ -56,16 +53,15 @@ func TestLatencyStats(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiffZero(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ @@ -126,16 +122,15 @@ func TestLatencyStatsDiffZero(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(0)) - assert.Equal(t, sl.ReadLatency, int64(0)) - assert.Equal(t, sl.WriteLatency, int64(0)) - assert.Equal(t, sl.CommandOpsCnt, int64(0)) - assert.Equal(t, sl.ReadOpsCnt, int64(0)) - 
assert.Equal(t, sl.WriteOpsCnt, int64(0)) + require.Equal(t, sl.CommandLatency, int64(0)) + require.Equal(t, sl.ReadLatency, int64(0)) + require.Equal(t, sl.WriteLatency, int64(0)) + require.Equal(t, sl.CommandOpsCnt, int64(0)) + require.Equal(t, sl.ReadOpsCnt, int64(0)) + require.Equal(t, sl.WriteOpsCnt, int64(0)) } func TestLatencyStatsDiff(t *testing.T) { - sl := NewStatLine( MongoStatus{ ServerStatus: &ServerStatus{ @@ -196,10 +191,10 @@ func TestLatencyStatsDiff(t *testing.T) { 60, ) - assert.Equal(t, sl.CommandLatency, int64(59177981552)) - assert.Equal(t, sl.ReadLatency, int64(2255946760057)) - assert.Equal(t, sl.WriteLatency, int64(494479456987)) - assert.Equal(t, sl.CommandOpsCnt, int64(1019152861)) - assert.Equal(t, sl.ReadOpsCnt, int64(4189049884)) - assert.Equal(t, sl.WriteOpsCnt, int64(1691021287)) + require.Equal(t, sl.CommandLatency, int64(59177981552)) + require.Equal(t, sl.ReadLatency, int64(2255946760057)) + require.Equal(t, sl.WriteLatency, int64(494479456987)) + require.Equal(t, sl.CommandOpsCnt, int64(1019152861)) + require.Equal(t, sl.ReadOpsCnt, int64(4189049884)) + require.Equal(t, sl.WriteOpsCnt, int64(1691021287)) } diff --git a/plugins/inputs/mongodb/sample.conf b/plugins/inputs/mongodb/sample.conf new file mode 100644 index 0000000000000..5e9fbba176da8 --- /dev/null +++ b/plugins/inputs/mongodb/sample.conf @@ -0,0 +1,38 @@ +# Read metrics from one or many MongoDB servers +[[inputs.mongodb]] + ## An array of URLs of the form: + ## "mongodb://" [user ":" pass "@"] host [ ":" port] + ## For example: + ## mongodb://user:auth_key@10.10.3.30:27017, + ## mongodb://10.10.3.33:18832, + ## + ## If connecting to a cluster, users must include the "?connect=direct" in + ## the URL to ensure that the connection goes directly to the specified node + ## and not have all connections passed to the master node. + servers = ["mongodb://127.0.0.1:27017/?connect=direct"] + + ## When true, collect cluster status. + ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which + ## may have an impact on performance. + # gather_cluster_status = true + + ## When true, collect per database stats + # gather_perdb_stats = false + + ## When true, collect per collection stats + # gather_col_stats = false + + ## When true, collect usage statistics for each collection + ## (insert, update, queries, remove, getmore, commands etc...). + # gather_top_stat = false + + ## List of db where collections stats are collected + ## If empty, all db are concerned + # col_stats_dbs = ["local"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md index be116394d6609..ddb7ba4e1488e 100644 --- a/plugins/inputs/monit/README.md +++ b/plugins/inputs/monit/README.md @@ -12,9 +12,10 @@ Minimum Version of Monit tested with is 5.16. [monit]: https://mmonit.com/ [httpd]: https://mmonit.com/monit/documentation/monit.html#TCP-PORT -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read metrics and status information about processes managed by Monit [[inputs.monit]] ## Monit HTTPD address address = "http://127.0.0.1:2812" @@ -34,7 +35,7 @@ Minimum Version of Monit tested with is 5.16. # insecure_skip_verify = false ``` -### Metrics +## Metrics - monit_filesystem - tags: @@ -57,7 +58,7 @@ Minimum Version of Monit tested with is 5.16. 
- inode_usage - inode_total -+ monit_directory +- monit_directory - tags: - address - version @@ -88,7 +89,7 @@ Minimum Version of Monit tested with is 5.16. - size - permissions -+ monit_process +- monit_process - tags: - address - version @@ -128,10 +129,11 @@ Minimum Version of Monit tested with is 5.16. - hostname - port_number - request + - response_time - protocol - type -+ monit_system +- monit_system - tags: - address - version @@ -168,9 +170,9 @@ Minimum Version of Monit tested with is 5.16. - status_code - monitoring_status_code - monitoring_mode_code - - permissions + - permissions -+ monit_program +- monit_program - tags: - address - version @@ -198,7 +200,7 @@ Minimum Version of Monit tested with is 5.16. - monitoring_status_code - monitoring_mode_code -+ monit_program +- monit_program - tags: - address - version @@ -226,10 +228,12 @@ Minimum Version of Monit tested with is 5.16. - monitoring_status_code - monitoring_mode_code -### Example Output -``` +## Example Output + +```shell monit_file,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog_pid,source=xyzzy.local,status=running,version=5.20.0 mode=644i,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,size=3i,status_code=0i 1579735047000000000 monit_process,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog,source=xyzzy.local,status=running,version=5.20.0 children=0i,cpu_percent=0,cpu_percent_total=0,mem_kb=3148i,mem_kb_total=3148i,mem_percent=0.2,mem_percent_total=0.2,monitoring_mode_code=0i,monitoring_status_code=1i,parent_pid=1i,pending_action_code=0i,pid=318i,status_code=0i,threads=4i 1579735047000000000 monit_program,monitoring_mode=active,monitoring_status=initializing,pending_action=none,platform_name=Linux,service=echo,source=xyzzy.local,status=running,version=5.20.0 monitoring_mode_code=0i,monitoring_status_code=2i,pending_action_code=0i,program_started=0i,program_status=0i,status_code=0i 1579735047000000000 monit_system,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=debian-stretch-monit.virt,source=xyzzy.local,status=running,version=5.20.0 cpu_load_avg_15m=0,cpu_load_avg_1m=0,cpu_load_avg_5m=0,cpu_system=0,cpu_user=0,cpu_wait=0,mem_kb=42852i,mem_percent=2.1,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,status_code=0i,swap_kb=0,swap_percent=0 1579735047000000000 +monit_remote_host,dc=new-12,host=palladium,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,rack=rack-0,service=blog.kalvad.com,source=palladium,status=running,version=5.27.0 monitoring_status_code=1i,monitoring_mode_code=0i,response_time=0.664412,type="TCP",pending_action_code=0i,remote_hostname="blog.kalvad.com",port_number=443i,request="/",protocol="HTTP",status_code=0i 1599138990000000000 ``` diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go index a17042bf5e3a9..3c78bf84e57b0 100644 --- a/plugins/inputs/monit/monit.go +++ b/plugins/inputs/monit/monit.go @@ -1,27 +1,35 @@ +//go:generate ../../../tools/readme_config_includer/generator package monit import ( + _ "embed" "encoding/xml" "fmt" "net/http" + "time" + + "golang.org/x/net/html/charset" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - 
"golang.org/x/net/html/charset" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( - fileSystem string = "0" - directory = "1" - file = "2" - process = "3" - remoteHost = "4" - system = "5" - fifo = "6" - program = "7" - network = "8" + fileSystem = "0" + directory = "1" + file = "2" + process = "3" + remoteHost = "4" + system = "5" + fifo = "6" + program = "7" + network = "8" ) var pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"} @@ -114,11 +122,12 @@ type Upload struct { } type Port struct { - Hostname string `xml:"hostname"` - PortNumber int64 `xml:"portnumber"` - Request string `xml:"request"` - Protocol string `xml:"protocol"` - Type string `xml:"type"` + Hostname string `xml:"hostname"` + PortNumber int64 `xml:"portnumber"` + Request string `xml:"request"` + ResponseTime float64 `xml:"responsetime"` + Protocol string `xml:"protocol"` + Type string `xml:"type"` } type Block struct { @@ -177,37 +186,14 @@ type Monit struct { Password string `toml:"password"` client http.Client tls.ClientConfig - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` } type Messagebody struct { Metrics []string `json:"metrics"` } -func (m *Monit) Description() string { - return "Read metrics and status information about processes managed by Monit" -} - -var sampleConfig = ` - ## Monit HTTPD address - address = "http://127.0.0.1:2812" - - ## Username and Password for Monit - # username = "" - # password = "" - - ## Amount of time allowed to complete the HTTP request - # timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (m *Monit) SampleConfig() string { +func (*Monit) SampleConfig() string { return sampleConfig } @@ -222,13 +208,12 @@ func (m *Monit) Init() error { TLSClientConfig: tlsCfg, Proxy: http.ProxyFromEnvironment, }, - Timeout: m.Timeout.Duration, + Timeout: time.Duration(m.Timeout), } return nil } func (m *Monit) Gather(acc telegraf.Accumulator) error { - req, err := http.NewRequest("GET", fmt.Sprintf("%s/_status?format=xml", m.Address), nil) if err != nil { return err @@ -243,111 +228,109 @@ func (m *Monit) Gather(acc telegraf.Accumulator) error { } defer resp.Body.Close() - if resp.StatusCode == 200 { + if resp.StatusCode != 200 { + return fmt.Errorf("received status code %d (%s), expected 200", resp.StatusCode, http.StatusText(resp.StatusCode)) + } - var status Status - decoder := xml.NewDecoder(resp.Body) - decoder.CharsetReader = charset.NewReaderLabel - if err := decoder.Decode(&status); err != nil { - return fmt.Errorf("error parsing input: %v", err) - } + var status Status + decoder := xml.NewDecoder(resp.Body) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&status); err != nil { + return fmt.Errorf("error parsing input: %v", err) + } - tags := map[string]string{ - "version": status.Server.Version, - "source": status.Server.LocalHostname, - "platform_name": status.Platform.Name, - } + tags := map[string]string{ + "version": status.Server.Version, + "source": status.Server.LocalHostname, + "platform_name": status.Platform.Name, + } - for _, service := range status.Services { - fields := make(map[string]interface{}) - tags["status"] = serviceStatus(service) - fields["status_code"] = 
service.Status - tags["pending_action"] = pendingAction(service) - fields["pending_action_code"] = service.PendingAction - tags["monitoring_status"] = monitoringStatus(service) - fields["monitoring_status_code"] = service.MonitoringStatus - tags["monitoring_mode"] = monitoringMode(service) - fields["monitoring_mode_code"] = service.MonitorMode - tags["service"] = service.Name - if service.Type == fileSystem { - fields["mode"] = service.Mode - fields["block_percent"] = service.Block.Percent - fields["block_usage"] = service.Block.Usage - fields["block_total"] = service.Block.Total - fields["inode_percent"] = service.Inode.Percent - fields["inode_usage"] = service.Inode.Usage - fields["inode_total"] = service.Inode.Total - acc.AddFields("monit_filesystem", fields, tags) - } else if service.Type == directory { - fields["mode"] = service.Mode - acc.AddFields("monit_directory", fields, tags) - } else if service.Type == file { - fields["size"] = service.Size - fields["mode"] = service.Mode - acc.AddFields("monit_file", fields, tags) - } else if service.Type == process { - fields["cpu_percent"] = service.CPU.Percent - fields["cpu_percent_total"] = service.CPU.PercentTotal - fields["mem_kb"] = service.Memory.Kilobyte - fields["mem_kb_total"] = service.Memory.KilobyteTotal - fields["mem_percent"] = service.Memory.Percent - fields["mem_percent_total"] = service.Memory.PercentTotal - fields["pid"] = service.Pid - fields["parent_pid"] = service.ParentPid - fields["threads"] = service.Threads - fields["children"] = service.Children - acc.AddFields("monit_process", fields, tags) - } else if service.Type == remoteHost { - fields["remote_hostname"] = service.Port.Hostname - fields["port_number"] = service.Port.PortNumber - fields["request"] = service.Port.Request - fields["protocol"] = service.Port.Protocol - fields["type"] = service.Port.Type - acc.AddFields("monit_remote_host", fields, tags) - } else if service.Type == system { - fields["cpu_system"] = service.System.CPU.System - fields["cpu_user"] = service.System.CPU.User - fields["cpu_wait"] = service.System.CPU.Wait - fields["cpu_load_avg_1m"] = service.System.Load.Avg01 - fields["cpu_load_avg_5m"] = service.System.Load.Avg05 - fields["cpu_load_avg_15m"] = service.System.Load.Avg15 - fields["mem_kb"] = service.System.Memory.Kilobyte - fields["mem_percent"] = service.System.Memory.Percent - fields["swap_kb"] = service.System.Swap.Kilobyte - fields["swap_percent"] = service.System.Swap.Percent - acc.AddFields("monit_system", fields, tags) - } else if service.Type == fifo { - fields["mode"] = service.Mode - acc.AddFields("monit_fifo", fields, tags) - } else if service.Type == program { - fields["program_started"] = service.Program.Started * 10000000 - fields["program_status"] = service.Program.Status - acc.AddFields("monit_program", fields, tags) - } else if service.Type == network { - fields["link_state"] = service.Link.State - fields["link_speed"] = service.Link.Speed - fields["link_mode"] = linkMode(service) - fields["download_packets_now"] = service.Link.Download.Packets.Now - fields["download_packets_total"] = service.Link.Download.Packets.Total - fields["download_bytes_now"] = service.Link.Download.Bytes.Now - fields["download_bytes_total"] = service.Link.Download.Bytes.Total - fields["download_errors_now"] = service.Link.Download.Errors.Now - fields["download_errors_total"] = service.Link.Download.Errors.Total - fields["upload_packets_now"] = service.Link.Upload.Packets.Now - fields["upload_packets_total"] = service.Link.Upload.Packets.Total - 
fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now - fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total - fields["upload_errors_now"] = service.Link.Upload.Errors.Now - fields["upload_errors_total"] = service.Link.Upload.Errors.Total - acc.AddFields("monit_network", fields, tags) - } + for _, service := range status.Services { + fields := make(map[string]interface{}) + tags["status"] = serviceStatus(service) + fields["status_code"] = service.Status + tags["pending_action"] = pendingAction(service) + fields["pending_action_code"] = service.PendingAction + tags["monitoring_status"] = monitoringStatus(service) + fields["monitoring_status_code"] = service.MonitoringStatus + tags["monitoring_mode"] = monitoringMode(service) + fields["monitoring_mode_code"] = service.MonitorMode + tags["service"] = service.Name + if service.Type == fileSystem { + fields["mode"] = service.Mode + fields["block_percent"] = service.Block.Percent + fields["block_usage"] = service.Block.Usage + fields["block_total"] = service.Block.Total + fields["inode_percent"] = service.Inode.Percent + fields["inode_usage"] = service.Inode.Usage + fields["inode_total"] = service.Inode.Total + acc.AddFields("monit_filesystem", fields, tags) + } else if service.Type == directory { + fields["mode"] = service.Mode + acc.AddFields("monit_directory", fields, tags) + } else if service.Type == file { + fields["size"] = service.Size + fields["mode"] = service.Mode + acc.AddFields("monit_file", fields, tags) + } else if service.Type == process { + fields["cpu_percent"] = service.CPU.Percent + fields["cpu_percent_total"] = service.CPU.PercentTotal + fields["mem_kb"] = service.Memory.Kilobyte + fields["mem_kb_total"] = service.Memory.KilobyteTotal + fields["mem_percent"] = service.Memory.Percent + fields["mem_percent_total"] = service.Memory.PercentTotal + fields["pid"] = service.Pid + fields["parent_pid"] = service.ParentPid + fields["threads"] = service.Threads + fields["children"] = service.Children + acc.AddFields("monit_process", fields, tags) + } else if service.Type == remoteHost { + fields["remote_hostname"] = service.Port.Hostname + fields["port_number"] = service.Port.PortNumber + fields["request"] = service.Port.Request + fields["response_time"] = service.Port.ResponseTime + fields["protocol"] = service.Port.Protocol + fields["type"] = service.Port.Type + acc.AddFields("monit_remote_host", fields, tags) + } else if service.Type == system { + fields["cpu_system"] = service.System.CPU.System + fields["cpu_user"] = service.System.CPU.User + fields["cpu_wait"] = service.System.CPU.Wait + fields["cpu_load_avg_1m"] = service.System.Load.Avg01 + fields["cpu_load_avg_5m"] = service.System.Load.Avg05 + fields["cpu_load_avg_15m"] = service.System.Load.Avg15 + fields["mem_kb"] = service.System.Memory.Kilobyte + fields["mem_percent"] = service.System.Memory.Percent + fields["swap_kb"] = service.System.Swap.Kilobyte + fields["swap_percent"] = service.System.Swap.Percent + acc.AddFields("monit_system", fields, tags) + } else if service.Type == fifo { + fields["mode"] = service.Mode + acc.AddFields("monit_fifo", fields, tags) + } else if service.Type == program { + fields["program_started"] = service.Program.Started * 10000000 + fields["program_status"] = service.Program.Status + acc.AddFields("monit_program", fields, tags) + } else if service.Type == network { + fields["link_state"] = service.Link.State + fields["link_speed"] = service.Link.Speed + fields["link_mode"] = linkMode(service) + fields["download_packets_now"] = 
service.Link.Download.Packets.Now + fields["download_packets_total"] = service.Link.Download.Packets.Total + fields["download_bytes_now"] = service.Link.Download.Bytes.Now + fields["download_bytes_total"] = service.Link.Download.Bytes.Total + fields["download_errors_now"] = service.Link.Download.Errors.Now + fields["download_errors_total"] = service.Link.Download.Errors.Total + fields["upload_packets_now"] = service.Link.Upload.Packets.Now + fields["upload_packets_total"] = service.Link.Upload.Packets.Total + fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now + fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total + fields["upload_errors_now"] = service.Link.Upload.Errors.Now + fields["upload_errors_total"] = service.Link.Upload.Errors.Total + acc.AddFields("monit_network", fields, tags) } - } else { - return fmt.Errorf("received status code %d (%s), expected 200", - resp.StatusCode, - http.StatusText(resp.StatusCode)) - } + return nil } @@ -364,9 +347,8 @@ func linkMode(s Service) string { func serviceStatus(s Service) string { if s.Status == 0 { return "running" - } else { - return "failure" } + return "failure" } func pendingAction(s Service) string { @@ -375,9 +357,8 @@ func pendingAction(s Service) string { return "unknown" } return pendingActions[s.PendingAction-1] - } else { - return "none" } + return "none" } func monitoringMode(s Service) string { diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go index 1d95b45a51bc5..ef47575e80b4c 100644 --- a/plugins/inputs/monit/monit_test.go +++ b/plugins/inputs/monit/monit_test.go @@ -4,19 +4,20 @@ import ( "errors" "net/http" "net/http/httptest" + "net/url" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) type transportMock struct { } -func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { +func (t *transportMock) RoundTrip(_ *http.Request) (*http.Response, error) { errorString := "Get http://127.0.0.1:2812/_status?format=xml: " + "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + "read: connection reset by peer" @@ -179,6 +180,7 @@ func TestServiceType(t *testing.T) { "request": "", "protocol": "DEFAULT", "type": "TCP", + "response_time": 0.000145, }, time.Unix(0, 0), ), @@ -333,14 +335,12 @@ func TestServiceType(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.NoError(t, err) + require.NoError(t, plugin.Gather(&acc)) - testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), - testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -532,14 +532,12 @@ func TestMonitFailure(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.NoError(t, err) + require.NoError(t, plugin.Gather(&acc)) - testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), - testutil.IgnoreTime()) + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) } } @@ -553,7 +551,6 @@ func checkAuth(r *http.Request, username, password string) bool { } func TestAllowHosts(t *testing.T) { - r := &Monit{ Address: "http://127.0.0.1:2812", Username: "test", @@ -565,46 +562,36 @@ func TestAllowHosts(t 
*testing.T) { r.client.Transport = &transportMock{} err := r.Gather(&acc) - - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "read: connection reset by peer") - } + require.Error(t, err) + require.Contains(t, err.Error(), "read: connection reset by peer") } func TestConnection(t *testing.T) { - r := &Monit{ Address: "http://127.0.0.1:2812", Username: "test", Password: "test", } - var acc testutil.Accumulator + require.NoError(t, r.Init()) - r.Init() + var acc testutil.Accumulator err := r.Gather(&acc) - - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "connect: connection refused") - } + require.Error(t, err) + _, ok := err.(*url.Error) + require.True(t, ok) } func TestInvalidUsernameOrPassword(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return } - switch r.URL.Path { - case "/_status": - http.ServeFile(w, r, "testdata/response_servicetype_0.xml") - default: - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/_status", "Cannot handle request") + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") })) defer ts.Close() @@ -617,28 +604,21 @@ func TestInvalidUsernameOrPassword(t *testing.T) { var acc testutil.Accumulator - r.Init() + require.NoError(t, r.Init()) err := r.Gather(&acc) - - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } func TestNoUsernameOrPasswordConfiguration(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return } - switch r.URL.Path { - case "/_status": - http.ServeFile(w, r, "testdata/response_servicetype_0.xml") - default: - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/_status", "Cannot handle request") + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") })) defer ts.Close() @@ -649,15 +629,13 @@ func TestNoUsernameOrPasswordConfiguration(t *testing.T) { var acc testutil.Accumulator - r.Init() + require.NoError(t, r.Init()) err := r.Gather(&acc) - - assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") } func TestInvalidXMLAndInvalidTypes(t *testing.T) { - tests := []struct { name string filename string @@ -691,14 +669,13 @@ func TestInvalidXMLAndInvalidTypes(t *testing.T) { Address: ts.URL, } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator - err := plugin.Gather(&acc) - if assert.Error(t, err) { - assert.Contains(t, err.Error(), "error parsing input:") - } + err := plugin.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "error parsing input:") }) } } diff --git a/plugins/inputs/monit/sample.conf b/plugins/inputs/monit/sample.conf new file mode 100644 index 0000000000000..508951dd1ada4 --- /dev/null +++ b/plugins/inputs/monit/sample.conf @@ -0,0 +1,18 @@ +# Read metrics and status information about processes managed by Monit +[[inputs.monit]] + ## Monit HTTPD address + address = "http://127.0.0.1:2812" + + ## Username and Password for Monit + # username = "" + # password = "" + + ## Amount of time allowed to complete the HTTP request + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # 
tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index a9e8236ee0cf5..59ed33854cebc 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -3,12 +3,13 @@ The [MQTT][mqtt] consumer plugin reads from the specified MQTT topics and creates metrics using one of the supported [input data formats][]. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read metrics from MQTT topic(s) [[inputs.mqtt_consumer]] ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. + ## clusters or standalone servers, use a separate plugin instance. ## example: servers = ["tcp://localhost:1883"] ## servers = ["ssl://localhost:1883"] ## servers = ["ws://localhost:1883"] @@ -73,12 +74,71 @@ and creates metrics using one of the supported [input data formats][]. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" + + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + # [[inputs.mqtt_consumer.topic_parsing]] + # topic = "" + # measurement = "" + # tags = "" + # fields = "" + ## Values supported are int, float, uint + # [[inputs.mqtt_consumer.topic.types]] + # key = type ``` -### Metrics + +## About Topic Parsing + +The MQTT topic as a whole is stored as a tag, but this can be far too coarse to +be easily used when processing the data further down the line. Topic parsing allows +tag values to be extracted from the MQTT topic, letting you store the information +provided in the topic in a meaningful way. An `_` denotes an ignored entry in +the topic path. Please see the following example. + +## Example Configuration for topic parsing + +```toml +[[inputs.mqtt_consumer]] + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a separate plugin instance. + ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] + servers = ["tcp://127.0.0.1:1883"] + + ## Topics that will be subscribed to. + topics = [ + "telegraf/+/cpu/23", + ] + + ## Data format to consume.
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "value" + data_type = "float" + + [[inputs.mqtt_consumer.topic_parsing]] + topic = "telegraf/one/cpu/23" + measurement = "_/_/measurement/_" + tags = "tag/_/_/_" + fields = "_/_/_/test" + [inputs.mqtt_consumer.topic_parsing.types] + test = "int" +``` + +## Example Output + +```shell +cpu,host=pop-os,tag=telegraf,topic=telegraf/one/cpu/23 value=45,test=23i 1637014942460689291 +``` + +## Metrics - All measurements are tagged with the incoming topic, i.e. `topic=telegraf/host01/cpu` +- When `[[inputs.mqtt_consumer.topic_parsing]]` is set, additional tags and fields extracted from the topic are added (see the example above) + [mqtt]: https://mqtt.org [input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 26122b8e86b88..eee520db5e5ea 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -1,24 +1,33 @@ +//go:generate ../../../tools/readme_config_includer/generator package mqtt_consumer import ( "context" + _ "embed" "errors" "fmt" + "strconv" "strings" + "sync" "time" - "github.com/eclipse/paho.mqtt.golang" + mqtt "github.com/eclipse/paho.mqtt.golang" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( // 30 Seconds is the default used by paho.mqtt.golang - defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second} - + defaultConnectionTimeout = config.Duration(30 * time.Second) defaultMaxUndeliveredMessages = 1000 ) @@ -38,30 +47,38 @@ type Client interface { AddRoute(topic string, callback mqtt.MessageHandler) Disconnect(quiesce uint) } - type ClientFactory func(o *mqtt.ClientOptions) Client - +type TopicParsingConfig struct { + Topic string `toml:"topic"` + Measurement string `toml:"measurement"` + Tags string `toml:"tags"` + Fields string `toml:"fields"` + FieldTypes map[string]string `toml:"types"` + // cached split of user-given information + MeasurementIndex int + SplitTags []string + SplitFields []string + SplitTopic []string +} type MQTTConsumer struct { - Servers []string `toml:"servers"` - Topics []string `toml:"topics"` - TopicTag *string `toml:"topic_tag"` - Username string `toml:"username"` - Password string `toml:"password"` - QoS int `toml:"qos"` - ConnectionTimeout internal.Duration `toml:"connection_timeout"` - MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - - parser parsers.Parser - - // Legacy metric buffer support; deprecated in v0.10.3 - MetricBuffer int - + Servers []string `toml:"servers"` + Topics []string `toml:"topics"` + TopicTag *string `toml:"topic_tag"` + TopicParsing []TopicParsingConfig `toml:"topic_parsing"` + Username string `toml:"username"` + Password string `toml:"password"` + QoS int `toml:"qos"` + ConnectionTimeout config.Duration `toml:"connection_timeout"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + parser parsers.Parser + + MetricBuffer int `toml:"metric_buffer" deprecated:"0.10.3;2.0.0;option is ignored"` PersistentSession bool ClientID string `toml:"client_id"` -
tls.ClientConfig - Log telegraf.Logger + tls.ClientConfig + Log telegraf.Logger clientFactory ClientFactory client Client opts *mqtt.ClientOptions @@ -69,132 +86,74 @@ type MQTTConsumer struct { state ConnectionState sem semaphore messages map[telegraf.TrackingID]bool - topicTag string - - ctx context.Context - cancel context.CancelFunc + messagesMutex sync.Mutex + topicTagParse string + ctx context.Context + cancel context.CancelFunc } -var sampleConfig = ` - ## Broker URLs for the MQTT server or cluster. To connect to multiple - ## clusters or standalone servers, use a seperate plugin instance. - ## example: servers = ["tcp://localhost:1883"] - ## servers = ["ssl://localhost:1883"] - ## servers = ["ws://localhost:1883"] - servers = ["tcp://127.0.0.1:1883"] - - ## Topics that will be subscribed to. - topics = [ - "telegraf/host01/cpu", - "telegraf/+/mem", - "sensors/#", - ] - - ## The message topic will be stored in a tag specified by this value. If set - ## to the empty string no topic tag will be created. - # topic_tag = "topic" - - ## QoS policy for messages - ## 0 = at most once - ## 1 = at least once - ## 2 = exactly once - ## - ## When using a QoS of 1 or 2, you should enable persistent_session to allow - ## resuming unacknowledged messages. - # qos = 0 - - ## Connection timeout for initial connection in seconds - # connection_timeout = "30s" - - ## Maximum messages to read from the broker that have not been written by an - ## output. For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. - ## - ## For example, if each message from the queue contains 10 metrics and the - ## output metric_batch_size is 1000, setting this to 100 will ensure that a - ## full batch is collected and the write is triggered immediately without - ## waiting until the next flush_interval. - # max_undelivered_messages = 1000 - - ## Persistent session disables clearing of the client session on connection. - ## In order for this option to work you must also set client_id to identify - ## the client. To receive messages that arrived while the client is offline, - ## also set the qos option to 1 or 2 and don't forget to also set the QoS when - ## publishing. - # persistent_session = false - - ## If unset, a random client ID will be generated. - # client_id = "" - - ## Username and password to connect MQTT server. - # username = "telegraf" - # password = "metricsmetricsmetricsmetrics" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` - -func (m *MQTTConsumer) SampleConfig() string { +func (*MQTTConsumer) SampleConfig() string { return sampleConfig } -func (m *MQTTConsumer) Description() string { - return "Read metrics from MQTT topic(s)" -} - func (m *MQTTConsumer) SetParser(parser parsers.Parser) { m.parser = parser } - func (m *MQTTConsumer) Init() error { m.state = Disconnected - if m.PersistentSession && m.ClientID == "" { return errors.New("persistent_session requires client_id") } - if m.QoS > 2 || m.QoS < 0 { return fmt.Errorf("qos value must be 0, 1, or 2: %d", m.QoS) } - - if m.ConnectionTimeout.Duration < 1*time.Second { - return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration) + if time.Duration(m.ConnectionTimeout) < 1*time.Second { + return fmt.Errorf("connection_timeout must be greater than 1s: %s", time.Duration(m.ConnectionTimeout)) } - - m.topicTag = "topic" + m.topicTagParse = "topic" if m.TopicTag != nil { - m.topicTag = *m.TopicTag + m.topicTagParse = *m.TopicTag } - opts, err := m.createOpts() if err != nil { return err } - m.opts = opts + m.messages = map[telegraf.TrackingID]bool{} + + for i, p := range m.TopicParsing { + splitMeasurement := strings.Split(p.Measurement, "/") + for j := range splitMeasurement { + if splitMeasurement[j] != "_" { + m.TopicParsing[i].MeasurementIndex = j + break + } + } + m.TopicParsing[i].SplitTags = strings.Split(p.Tags, "/") + m.TopicParsing[i].SplitFields = strings.Split(p.Fields, "/") + m.TopicParsing[i].SplitTopic = strings.Split(p.Topic, "/") + + if len(splitMeasurement) != len(m.TopicParsing[i].SplitTopic) && len(splitMeasurement) != 1 { + return fmt.Errorf("config error topic parsing: measurement length does not equal topic length") + } + + if len(m.TopicParsing[i].SplitFields) != len(m.TopicParsing[i].SplitTopic) && p.Fields != "" { + return fmt.Errorf("config error topic parsing: fields length does not equal topic length") + } + + if len(m.TopicParsing[i].SplitTags) != len(m.TopicParsing[i].SplitTopic) && p.Tags != "" { + return fmt.Errorf("config error topic parsing: tags length does not equal topic length") + } + } return nil } - func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { m.state = Disconnected - m.acc = acc.WithTracking(m.MaxUndeliveredMessages) m.sem = make(semaphore, m.MaxUndeliveredMessages) m.ctx, m.cancel = context.WithCancel(context.Background()) - m.client = m.clientFactory(m.opts) - // AddRoute sets up the function for handling messages. These need to be // added in case we find a persistent session containing subscriptions so we // know where to dispatch persisted and new messages to. In the alternate @@ -202,13 +161,9 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { for _, topic := range m.Topics { m.client.AddRoute(topic, m.recvMessage) } - m.state = Connecting - m.connect() - - return nil + return m.connect() } - func (m *MQTTConsumer) connect() error { token := m.client.Connect() if token.Wait() && token.Error() != nil { @@ -216,11 +171,8 @@ func (m *MQTTConsumer) connect() error { m.state = Disconnected return err } - m.Log.Infof("Connected %v", m.Servers) m.state = Connected - m.messages = make(map[telegraf.TrackingID]bool) - // Persistent sessions should skip subscription if a session is present, as // the subscriptions are stored by the server. 
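The Init-time checks above enforce that each topic-parsing template (`measurement`, `tags`, `fields`) splits into the same number of `/`-separated segments as the topic, because values are extracted purely by position, with `_` marking ignored segments. A short sketch of that positional extraction, using the values from the README example (`telegraf/one/cpu/23`):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	values := strings.Split("telegraf/one/cpu/23", "/")

	// measurement = "_/_/measurement/_" -> the third segment names the metric
	fmt.Println("measurement:", values[2]) // cpu

	// tags = "tag/_/_/_" -> the first segment becomes the "tag" tag
	fmt.Println("tag:", values[0]) // telegraf

	// fields = "_/_/_/test" with type "int" -> the last segment becomes test=23i
	fmt.Println("field test:", values[3]) // 23
}
```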
type sessionPresent interface { @@ -230,34 +182,29 @@ func (m *MQTTConsumer) connect() error { m.Log.Debugf("Session found %v", m.Servers) return nil } - topics := make(map[string]byte) for _, topic := range m.Topics { topics[topic] = byte(m.QoS) } - subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage) subscribeToken.Wait() if subscribeToken.Error() != nil { m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v", strings.Join(m.Topics[:], ","), subscribeToken.Error())) } - return nil } - -func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { +func (m *MQTTConsumer) onConnectionLost(_ mqtt.Client, err error) { m.acc.AddError(fmt.Errorf("connection lost: %v", err)) m.Log.Debugf("Disconnected %v", m.Servers) m.state = Disconnected - return } - -func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { +func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { for { select { case track := <-m.acc.Delivered(): <-m.sem + m.messagesMutex.Lock() _, ok := m.messages[track.ID()] if !ok { // Added by a previous connection @@ -265,6 +212,7 @@ func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { } // No ack, MQTT does not support durable handling delete(m.messages, track.ID()) + m.messagesMutex.Unlock() case m.sem <- empty{}: err := m.onMessage(m.acc, msg) if err != nil { @@ -276,24 +224,60 @@ func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { } } +// compareTopics is used to support the mqtt wild card `+` which allows for one topic of any value +func compareTopics(expected []string, incoming []string) bool { + if len(expected) != len(incoming) { + return false + } + + for i, expected := range expected { + if incoming[i] != expected && expected != "+" { + return false + } + } + + return true +} + func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Message) error { metrics, err := m.parser.Parse(msg.Payload()) if err != nil { return err } - if m.topicTag != "" { - topic := msg.Topic() - for _, metric := range metrics { - metric.AddTag(m.topicTag, topic) + for _, metric := range metrics { + if m.topicTagParse != "" { + metric.AddTag(m.topicTagParse, msg.Topic()) } - } + for _, p := range m.TopicParsing { + values := strings.Split(msg.Topic(), "/") + if !compareTopics(p.SplitTopic, values) { + continue + } + if p.Measurement != "" { + metric.SetName(values[p.MeasurementIndex]) + } + if p.Tags != "" { + err := parseMetric(p.SplitTags, values, p.FieldTypes, true, metric) + if err != nil { + return err + } + } + if p.Fields != "" { + err := parseMetric(p.SplitFields, values, p.FieldTypes, false, metric) + if err != nil { + return err + } + } + } + } id := acc.AddTrackingMetricGroup(metrics) + m.messagesMutex.Lock() m.messages[id] = true + m.messagesMutex.Unlock() return nil } - func (m *MQTTConsumer) Stop() { if m.state == Connected { m.Log.Debugf("Disconnecting %v", m.Servers) @@ -303,37 +287,29 @@ func (m *MQTTConsumer) Stop() { } m.cancel() } - -func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { +func (m *MQTTConsumer) Gather(_ telegraf.Accumulator) error { if m.state == Disconnected { m.state = Connecting m.Log.Debugf("Connecting %v", m.Servers) - m.connect() + return m.connect() } - return nil } - func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { opts := mqtt.NewClientOptions() - - opts.ConnectTimeout = m.ConnectionTimeout.Duration - + opts.ConnectTimeout = time.Duration(m.ConnectionTimeout) if m.ClientID == "" { opts.SetClientID("Telegraf-Consumer-" + 
internal.RandomString(5)) } else { opts.SetClientID(m.ClientID) } - tlsCfg, err := m.ClientConfig.TLSConfig() if err != nil { return nil, err } - if tlsCfg != nil { opts.SetTLSConfig(tlsCfg) } - user := m.Username if user != "" { opts.SetUsername(user) } @@ -342,11 +318,9 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { if password != "" { opts.SetPassword(password) } - if len(m.Servers) == 0 { return opts, fmt.Errorf("could not get host information") } - for _, server := range m.Servers { // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4 if !strings.Contains(server, "://") { @@ -357,17 +331,66 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { server = "ssl://" + server } } - opts.AddBroker(server) } opts.SetAutoReconnect(false) opts.SetKeepAlive(time.Second * 60) opts.SetCleanSession(!m.PersistentSession) opts.SetConnectionLostHandler(m.onConnectionLost) - return opts, nil } +// parseMetric adds tags or fields extracted from the topic segments to the metric, based on the user configuration (TopicParsingConfig) +func parseMetric(keys []string, values []string, types map[string]string, isTag bool, metric telegraf.Metric) error { + for i, k := range keys { + if k == "_" { + continue + } + + if isTag { + metric.AddTag(k, values[i]) + } else { + newType, err := typeConvert(types, values[i], k) + if err != nil { + return err + } + metric.AddField(k, newType) + } + } + return nil +} + +func typeConvert(types map[string]string, topicValue string, key string) (interface{}, error) { + var newType interface{} + var err error + // If the user configured inputs.mqtt_consumer.topic.types, check for the desired type + if desiredType, ok := types[key]; ok { + switch desiredType { + case "uint": + newType, err = strconv.ParseUint(topicValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type uint: %v", topicValue, err) + } + case "int": + newType, err = strconv.ParseInt(topicValue, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type int: %v", topicValue, err) + } + case "float": + newType, err = strconv.ParseFloat(topicValue, 64) + if err != nil { + return nil, fmt.Errorf("unable to convert field '%s' to type float: %v", topicValue, err) + } + default: + return nil, fmt.Errorf("converting to the type %s is not supported: use int, uint, or float", desiredType) + } + } else { + newType = topicValue + } + + return newType, nil +} + func New(factory ClientFactory) *MQTTConsumer { return &MQTTConsumer{ Servers: []string{"tcp://127.0.0.1:1883"}, @@ -377,7 +400,6 @@ func New(factory ClientFactory) *MQTTConsumer { state: Disconnected, } } - func init() { inputs.Add("mqtt_consumer", func() telegraf.Input { return New(func(o *mqtt.ClientOptions) Client { diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index 4884fc0508107..2eb7d6ffabc26 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -1,10 +1,11 @@ package mqtt_consumer import ( + "fmt" "testing" "time" - "github.com/eclipse/paho.mqtt.golang" + mqtt "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" @@ -49,20 +50,21 @@ type FakeParser struct { // FakeParser satisfies parsers.Parser var _ parsers.Parser = &FakeParser{} -func (p *FakeParser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p
*FakeParser) Parse(_ []byte) ([]telegraf.Metric, error) { panic("not implemented") } -func (p *FakeParser) ParseLine(line string) (telegraf.Metric, error) { +func (p *FakeParser) ParseLine(_ string) (telegraf.Metric, error) { panic("not implemented") } -func (p *FakeParser) SetDefaultTags(tags map[string]string) { +func (p *FakeParser) SetDefaultTags(_ map[string]string) { panic("not implemented") } type FakeToken struct { sessionPresent bool + complete chan struct{} } // FakeToken satisfies mqtt.Token @@ -84,6 +86,10 @@ func (t *FakeToken) SessionPresent() bool { return t.sessionPresent } +func (t *FakeToken) Done() <-chan struct{} { + return t.complete +} + // Test the basic lifecycle transitions of the plugin. func TestLifecycleSanity(t *testing.T) { var acc testutil.Accumulator @@ -148,6 +154,7 @@ func TestPersistentClientIDFail(t *testing.T) { } type Message struct { + topic string } func (m *Message) Duplicate() bool { @@ -163,7 +170,7 @@ func (m *Message) Retained() bool { } func (m *Message) Topic() string { - return "telegraf" + return m.topic } func (m *Message) MessageID() uint16 { @@ -180,12 +187,16 @@ func (m *Message) Ack() { func TestTopicTag(t *testing.T) { tests := []struct { - name string - topicTag func() *string - expected []telegraf.Metric + name string + topic string + topicTag func() *string + expectedError error + topicParsing []TopicParsingConfig + expected []telegraf.Metric }{ { - name: "default topic when topic tag is unset for backwards compatibility", + name: "default topic when topic tag is unset for backwards compatibility", + topic: "telegraf", topicTag: func() *string { return nil }, @@ -203,7 +214,8 @@ func TestTopicTag(t *testing.T) { }, }, { - name: "use topic tag when set", + name: "use topic tag when set", + topic: "telegraf", topicTag: func() *string { tag := "topic_tag" return &tag @@ -222,7 +234,8 @@ func TestTopicTag(t *testing.T) { }, }, { - name: "no topic tag is added when topic tag is set to the empty string", + name: "no topic tag is added when topic tag is set to the empty string", + topic: "telegraf", topicTag: func() *string { tag := "" return &tag @@ -238,6 +251,167 @@ func TestTopicTag(t *testing.T) { ), }, }, + { + name: "topic parsing configured", + topic: "telegraf/123/test", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/123/test", + Measurement: "_/_/measurement", + Tags: "testTag/_/_", + Fields: "_/testNumber/_", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured with a mqtt wild card `+`", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Measurement: "_/_/measurement/_", + Tags: "testTag/_/_/_", + Fields: "_/testNumber/_/testString", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "testString": "hello", + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured incorrectly", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag 
:= "" + return &tag + }, + expectedError: fmt.Errorf("config error topic parsing: fields length does not equal topic length"), + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Measurement: "_/_/measurement/_", + Tags: "testTag/_/_/_", + Fields: "_/_/testNumber:int/_/testString:string", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "testString": "hello", + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured without fields", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Measurement: "_/_/measurement/_", + Tags: "testTag/_/_/_", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "test", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "topic parsing configured without measurement", + topic: "telegraf/123/test/hello", + topicTag: func() *string { + tag := "" + return &tag + }, + topicParsing: []TopicParsingConfig{ + { + Topic: "telegraf/+/test/hello", + Tags: "testTag/_/_/_", + Fields: "_/testNumber/_/testString", + FieldTypes: map[string]string{ + "testNumber": "int", + }, + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "testTag": "telegraf", + }, + map[string]interface{}{ + "testNumber": 123, + "testString": "hello", + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -260,21 +434,28 @@ func TestTopicTag(t *testing.T) { return client }) plugin.Log = testutil.Logger{} - plugin.Topics = []string{"telegraf"} + plugin.Topics = []string{tt.topic} plugin.TopicTag = tt.topicTag() + plugin.TopicParsing = tt.topicParsing parser, err := parsers.NewInfluxParser() require.NoError(t, err) plugin.SetParser(parser) err = plugin.Init() - require.NoError(t, err) + require.Equal(t, tt.expectedError, err) + if tt.expectedError != nil { + return + } var acc testutil.Accumulator err = plugin.Start(&acc) require.NoError(t, err) - handler(nil, &Message{}) + var m Message + m.topic = tt.topic + + handler(nil, &m) plugin.Stop() diff --git a/plugins/inputs/mqtt_consumer/sample.conf b/plugins/inputs/mqtt_consumer/sample.conf new file mode 100644 index 0000000000000..65b78d623ef37 --- /dev/null +++ b/plugins/inputs/mqtt_consumer/sample.conf @@ -0,0 +1,79 @@ +# Read metrics from MQTT topic(s) +[[inputs.mqtt_consumer]] + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a separate plugin instance. + ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] + servers = ["tcp://127.0.0.1:1883"] + + ## Topics that will be subscribed to. + topics = [ + "telegraf/host01/cpu", + "telegraf/+/mem", + "sensors/#", + ] + + ## The message topic will be stored in a tag specified by this value. If set + ## to the empty string no topic tag will be created. 
+ # topic_tag = "topic" + + ## QoS policy for messages + ## 0 = at most once + ## 1 = at least once + ## 2 = exactly once + ## + ## When using a QoS of 1 or 2, you should enable persistent_session to allow + ## resuming unacknowledged messages. + # qos = 0 + + ## Connection timeout for initial connection in seconds + # connection_timeout = "30s" + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Persistent session disables clearing of the client session on connection. + ## In order for this option to work you must also set client_id to identify + ## the client. To receive messages that arrived while the client is offline, + ## also set the qos option to 1 or 2 and don't forget to also set the QoS when + ## publishing. + # persistent_session = false + + ## If unset, a random client ID will be generated. + # client_id = "" + + ## Username and password to connect MQTT server. + # username = "telegraf" + # password = "metricsmetricsmetricsmetrics" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Enable extracting tag values from MQTT topics + ## _ denotes an ignored entry in the topic path + # [[inputs.mqtt_consumer.topic_parsing]] + # topic = "" + # measurement = "" + # tags = "" + # fields = "" + ## Value supported is int, float, unit + # [[inputs.mqtt_consumer.topic.types]] + # key = type diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md index 2d71ac159cdd2..ba6bfecb0fddf 100644 --- a/plugins/inputs/multifile/README.md +++ b/plugins/inputs/multifile/README.md @@ -7,8 +7,10 @@ useful creating custom metrics from the `/sys` or `/proc` filesystems. > Note: If you wish to parse metrics from a single file formatted in one of the supported > [input data formats][], you should use the [file][] input plugin instead. -### Configuration -```toml +## Configuration + +```toml @sample.conf +# Aggregates the contents of multiple files into a single point [[inputs.multifile]] ## Base directory where telegraf will look for files. ## Omit this option to use absolute paths. @@ -34,32 +36,40 @@ useful creating custom metrics from the `/sys` or `/proc` filesystems. ``` Each file table can contain the following options: + * `file`: Path of the file to be parsed, relative to the `base_dir`. * `dest`: Name of the field/tag key, defaults to `$(basename file)`. * `conversion`: Data format used to parse the file contents: - * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. 
- * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. - * `int`: Converts the value into an integer. - * `string`, `""`: No conversion. - * `bool`: Converts the value into a boolean. - * `tag`: File content is used as a tag. - -### Example Output -This example shows a BME280 connected to a Raspberry Pi, using the sample config. -``` + * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. + * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. + * `int`: Converts the value into an integer. + * `string`, `""`: No conversion. + * `bool`: Converts the value into a boolean. + * `tag`: File content is used as a tag. + +## Example Output + +This example shows a BME280 connected to a Raspberry Pi, using the sample +config. + +```sh multifile pressure=101.343285156,temperature=20.4,humidityrelative=48.9 1547202076000000000 ``` -To reproduce this, connect a BMP280 to the board's GPIO pins and register the BME280 device driver -``` +To reproduce this, connect a BMP280 to the board's GPIO pins and register the +BME280 device driver + +```sh cd /sys/bus/i2c/devices/i2c-1 echo bme280 0x76 > new_device ``` -The kernel driver provides the following files in `/sys/bus/i2c/devices/1-0076/iio:device0`: +The kernel driver provides the following files in +`/sys/bus/i2c/devices/1-0076/iio:device0`: + * `in_humidityrelative_input`: `48900` * `in_pressure_input`: `101.343285156` * `in_temp_input`: `20400` diff --git a/plugins/inputs/multifile/multifile.go b/plugins/inputs/multifile/multifile.go index 9c9813d9acf5c..159e103c930ba 100644 --- a/plugins/inputs/multifile/multifile.go +++ b/plugins/inputs/multifile/multifile.go @@ -1,11 +1,12 @@ +//go:generate ../../../tools/readme_config_includer/generator package multifile import ( "bytes" - "errors" + _ "embed" "fmt" - "io/ioutil" "math" + "os" "path" "strconv" "time" @@ -14,12 +15,14 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type MultiFile struct { BaseDir string FailEarly bool Files []File `toml:"file"` - - initialized bool } type File struct { @@ -28,44 +31,11 @@ type File struct { Conversion string } -const sampleConfig = ` - ## Base directory where telegraf will look for files. - ## Omit this option to use absolute paths. - base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" - - ## If true, Telegraf discard all data when a single file can't be read. - ## Else, Telegraf omits the field generated from this file. - # fail_early = true - - ## Files to parse each interval. 
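The `float(X)` conversion documented in the multifile README above parses the file contents as a float and divides by the Xth power of 10. A minimal standalone sketch of that rule (the function name is ours, not the plugin's code):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// convertFloat applies multifile's float(X) rule: parse the raw file
// contents, then move the decimal point x places to the left.
func convertFloat(contents string, x int) (float64, error) {
	v, err := strconv.ParseFloat(contents, 64)
	if err != nil {
		return 0, err
	}
	return v / math.Pow10(x), nil
}

func main() {
	// From the BME280 example: in_temp_input holds "20400" and the sample
	// config uses conversion = "float(3)", yielding temperature=20.4.
	v, err := convertFloat("20400", 3)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 20.4
}
```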
- [[inputs.multifile.file]] - file = "in_pressure_input" - dest = "pressure" - conversion = "float" - [[inputs.multifile.file]] - file = "in_temp_input" - dest = "temperature" - conversion = "float(3)" - [[inputs.multifile.file]] - file = "in_humidityrelative_input" - dest = "humidityrelative" - conversion = "float(3)" -` - -// SampleConfig returns the default configuration of the Input -func (m *MultiFile) SampleConfig() string { +func (*MultiFile) SampleConfig() string { return sampleConfig } -func (m *MultiFile) Description() string { - return "Aggregates the contents of multiple files into a single point" -} - -func (m *MultiFile) init() { - if m.initialized { - return - } - +func (m *MultiFile) Init() error { for i, file := range m.Files { if m.BaseDir != "" { m.Files[i].Name = path.Join(m.BaseDir, file.Name) @@ -74,18 +44,16 @@ func (m *MultiFile) init() { m.Files[i].Dest = path.Base(file.Name) } } - - m.initialized = true + return nil } func (m *MultiFile) Gather(acc telegraf.Accumulator) error { - m.init() now := time.Now() fields := make(map[string]interface{}) tags := make(map[string]string) for _, file := range m.Files { - fileContents, err := ioutil.ReadFile(file.Name) + fileContents, err := os.ReadFile(file.Name) if err != nil { if m.FailEarly { @@ -103,7 +71,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { var value interface{} - var d int = 0 + var d int if _, errfmt := fmt.Sscanf(file.Conversion, "float(%d)", &d); errfmt == nil || file.Conversion == "float" { var v float64 v, err = strconv.ParseFloat(vStr, 64) @@ -130,7 +98,7 @@ func (m *MultiFile) Gather(acc telegraf.Accumulator) error { } if value == nil { - return errors.New(fmt.Sprintf("invalid conversion %v", file.Conversion)) + return fmt.Errorf("invalid conversion %v", file.Conversion) } fields[file.Dest] = value diff --git a/plugins/inputs/multifile/multifile_test.go b/plugins/inputs/multifile/multifile_test.go index b12f29f35c2cd..13c9457b11f03 100644 --- a/plugins/inputs/multifile/multifile_test.go +++ b/plugins/inputs/multifile/multifile_test.go @@ -5,9 +5,9 @@ import ( "path" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestFileTypes(t *testing.T) { @@ -29,11 +29,10 @@ func TestFileTypes(t *testing.T) { var acc testutil.Accumulator - err := m.Gather(&acc) - - require.NoError(t, err) - assert.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, m.Init()) + require.NoError(t, m.Gather(&acc)) + require.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) + require.Equal(t, map[string]interface{}{ "examplebool": true, "examplestring": "hello world", "exampleint": int64(123456), @@ -57,10 +56,11 @@ func FailEarly(failEarly bool, t *testing.T) error { var acc testutil.Accumulator + require.NoError(t, m.Init()) err := m.Gather(&acc) if err == nil { - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]interface{}{ "exampleint": int64(123456), }, acc.Metrics[0].Fields) } diff --git a/plugins/inputs/multifile/sample.conf b/plugins/inputs/multifile/sample.conf new file mode 100644 index 0000000000000..dff2c8bbe4c8b --- /dev/null +++ b/plugins/inputs/multifile/sample.conf @@ -0,0 +1,23 @@ +# Aggregates the contents of multiple files into a single point +[[inputs.multifile]] + ## Base directory where telegraf will look for files. 
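multifile here, and mysql later in this patch, drop their sampleConfig string constants in favor of a sample.conf embedded at compile time. A minimal sketch of the `//go:embed` pattern the diffs rely on, assuming a sample.conf sits next to the source file:

```go
package main

import (
	_ "embed" // blank import enables //go:embed directives
	"fmt"
)

// The directive must sit directly above the variable it populates; the
// file contents are baked into the binary at build time, so SampleConfig
// can return them without any runtime file I/O.
//
//go:embed sample.conf
var sampleConfig string

func main() {
	fmt.Print(sampleConfig)
}
```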
+ ## Omit this option to use absolute paths. + base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" + + ## If true discard all data when a single file can't be read. + ## Else, Telegraf omits the field generated from this file. + # fail_early = true + + ## Files to parse each interval. + [[inputs.multifile.file]] + file = "in_pressure_input" + dest = "pressure" + conversion = "float" + [[inputs.multifile.file]] + file = "in_temp_input" + dest = "temperature" + conversion = "float(3)" + [[inputs.multifile.file]] + file = "in_humidityrelative_input" + dest = "humidityrelative" + conversion = "float(3)" diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 644d4cf8d7887..54199f1706304 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -18,9 +18,10 @@ This plugin gathers the statistic data from MySQL server * File events statistics * Table schema statistics -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read metrics from one or many mysql servers [[inputs.mysql]] ## specify servers via a url matching: ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] @@ -63,9 +64,15 @@ This plugin gathers the statistic data from MySQL server ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS # gather_innodb_metrics = false + ## gather metrics from all channels from SHOW SLAVE STATUS command output + # gather_all_slave_channels = false + ## gather metrics from SHOW SLAVE STATUS command output # gather_slave_status = false + ## use SHOW ALL SLAVES STATUS command output for MariaDB + # mariadb_dialect = false + ## gather metrics from SHOW BINARY LOGS command output # gather_binary_logs = false @@ -88,6 +95,15 @@ This plugin gathers the statistic data from MySQL server # gather_file_events_stats = false ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST + # gather_perf_events_statements = false + # + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME + # gather_perf_sum_per_acc_per_event = false + # + ## list of events to be gathered for gather_perf_sum_per_acc_per_event + ## in case of empty list all events will be gathered + # perf_summary_events = [] + # # gather_perf_events_statements = false ## the limits for metrics form perf_events_statements @@ -107,7 +123,7 @@ This plugin gathers the statistic data from MySQL server # insecure_skip_verify = false ``` -#### Metric Version +### Metric Version When `metric_version = 2`, a variety of field type issues are corrected as well as naming inconsistencies. If you have existing data on the original version @@ -117,6 +133,7 @@ InfluxDB due to the change of types. For this reason, you should keep the If preserving your old data is not required you may wish to drop conflicting measurements: + ```sql DROP SERIES from mysql DROP SERIES from mysql_variables @@ -128,6 +145,7 @@ Otherwise, migration can be performed using the following steps: 1. Duplicate your `mysql` plugin configuration and add a `name_suffix` and `metric_version = 2`, this will result in collection using both the old and new style concurrently: + ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -142,8 +160,8 @@ style concurrently: 2. Upgrade all affected Telegraf clients to version >=1.6. New measurements will be created with the `name_suffix`, for example:: - - `mysql_v2` - - `mysql_variables_v2` + * `mysql_v2` + * `mysql_variables_v2` 3. 
Update charts, alerts, and other supporting code to the new format. 4. You can now remove the old `mysql` plugin configuration and remove old @@ -154,6 +172,7 @@ historical data to the default name. Do this only after retiring the old measurement name. 1. Use the technique described above to write to multiple locations: + ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -165,8 +184,10 @@ measurement name. servers = ["tcp(127.0.0.1:3306)/"] ``` + 2. Create a TICKScript to copy the historical data: - ``` + + ```sql dbrp "telegraf"."autogen" batch @@ -180,158 +201,168 @@ measurement name. .retentionPolicy('autogen') .measurement('mysql') ``` + 3. Define a task for your script: + ```sh kapacitor define copy-measurement -tick copy-measurement.task ``` + 4. Run the task over the data you would like to migrate: + ```sh kapacitor replay-live batch -start 2018-03-30T20:00:00Z -stop 2018-04-01T12:00:00Z -rec-time -task copy-measurement ``` + 5. Verify copied data and repeat for other measurements. -### Metrics: +## Metrics + * Global statuses - all numeric and boolean values of `SHOW GLOBAL STATUSES` * Global variables - all numeric and boolean values of `SHOW GLOBAL VARIABLES` * Slave status - metrics from `SHOW SLAVE STATUS` the metrics are gathered when the single-source replication is on. If the multi-source replication is set, then everything works differently, this metric does not work with multi-source -replication. - * slave_[column name]() +replication, unless you set `gather_all_slave_channels = true`. For MariaDB, +`mariadb_dialect = true` should be set to address the differences in field +names and commands. + * slave_[column name] * Binary logs - all metrics including size and count of all binary files. Requires to be turned on in configuration. - * binary_size_bytes(int, number) - * binary_files_count(int, number) + * binary_size_bytes(int, number) + * binary_files_count(int, number) * Process list - connection metrics from processlist for each user. It has the following tags - * connections(int, number) + * connections(int, number) * User Statistics - connection metrics from user statistics for each user. It has the following fields - * access_denied - * binlog_bytes_written - * busy_time - * bytes_received - * bytes_sent - * commit_transactions - * concurrent_connections - * connected_time - * cpu_time - * denied_connections - * empty_queries - * hostlost_connections - * other_commands - * rollback_transactions - * rows_fetched - * rows_updated - * select_commands - * server - * table_rows_read - * total_connections - * total_ssl_connections - * update_commands - * user + * access_denied + * binlog_bytes_written + * busy_time + * bytes_received + * bytes_sent + * commit_transactions + * concurrent_connections + * connected_time + * cpu_time + * denied_connections + * empty_queries + * hostlost_connections + * other_commands + * rollback_transactions + * rows_fetched + * rows_updated + * select_commands + * server + * table_rows_read + * total_connections + * total_ssl_connections + * update_commands + * user * Perf Table IO waits - total count and time of I/O waits event for each table and process.
It has following fields: - * table_io_waits_total_fetch(float, number) - * table_io_waits_total_insert(float, number) - * table_io_waits_total_update(float, number) - * table_io_waits_total_delete(float, number) - * table_io_waits_seconds_total_fetch(float, milliseconds) - * table_io_waits_seconds_total_insert(float, milliseconds) - * table_io_waits_seconds_total_update(float, milliseconds) - * table_io_waits_seconds_total_delete(float, milliseconds) + * table_io_waits_total_fetch(float, number) + * table_io_waits_total_insert(float, number) + * table_io_waits_total_update(float, number) + * table_io_waits_total_delete(float, number) + * table_io_waits_seconds_total_fetch(float, milliseconds) + * table_io_waits_seconds_total_insert(float, milliseconds) + * table_io_waits_seconds_total_update(float, milliseconds) + * table_io_waits_seconds_total_delete(float, milliseconds) * Perf index IO waits - total count and time of I/O waits event for each index and process. It has following fields: - * index_io_waits_total_fetch(float, number) - * index_io_waits_seconds_total_fetch(float, milliseconds) - * index_io_waits_total_insert(float, number) - * index_io_waits_total_update(float, number) - * index_io_waits_total_delete(float, number) - * index_io_waits_seconds_total_insert(float, milliseconds) - * index_io_waits_seconds_total_update(float, milliseconds) - * index_io_waits_seconds_total_delete(float, milliseconds) + * index_io_waits_total_fetch(float, number) + * index_io_waits_seconds_total_fetch(float, milliseconds) + * index_io_waits_total_insert(float, number) + * index_io_waits_total_update(float, number) + * index_io_waits_total_delete(float, number) + * index_io_waits_seconds_total_insert(float, milliseconds) + * index_io_waits_seconds_total_update(float, milliseconds) + * index_io_waits_seconds_total_delete(float, milliseconds) * Info schema autoincrement statuses - autoincrement fields and max values for them. It has following fields: - * auto_increment_column(int, number) - * auto_increment_column_max(int, number) -* InnoDB metrics - all metrics of information_schema.INNODB_METRICS with a status "enabled" + * auto_increment_column(int, number) + * auto_increment_column_max(int, number) +* InnoDB metrics - all metrics of information_schema.INNODB_METRICS with a status "enabled". For MariaDB, +set `mariadb_dialect = true` to use `ENABLED=1`. * Perf table lock waits - gathers total number and time for SQL and external lock waits events for each table and operation. It has following fields. The unit of fields varies by the tags.
- * read_normal(float, number/milliseconds) - * read_with_shared_locks(float, number/milliseconds) - * read_high_priority(float, number/milliseconds) - * read_no_insert(float, number/milliseconds) - * write_normal(float, number/milliseconds) - * write_allow_write(float, number/milliseconds) - * write_concurrent_insert(float, number/milliseconds) - * write_low_priority(float, number/milliseconds) - * read(float, number/milliseconds) - * write(float, number/milliseconds) + * read_normal(float, number/milliseconds) + * read_with_shared_locks(float, number/milliseconds) + * read_high_priority(float, number/milliseconds) + * read_no_insert(float, number/milliseconds) + * write_normal(float, number/milliseconds) + * write_allow_write(float, number/milliseconds) + * write_concurrent_insert(float, number/milliseconds) + * write_low_priority(float, number/milliseconds) + * read(float, number/milliseconds) + * write(float, number/milliseconds) * Perf events waits - gathers total time and number of event waits - * events_waits_total(float, number) - * events_waits_seconds_total(float, milliseconds) + * events_waits_total(float, number) + * events_waits_seconds_total(float, milliseconds) * Perf file events statuses - gathers file events statuses - * file_events_total(float,number) - * file_events_seconds_total(float, milliseconds) - * file_events_bytes_total(float, bytes) + * file_events_total(float, number) + * file_events_seconds_total(float, milliseconds) + * file_events_bytes_total(float, bytes) * Perf events statements - gathers attributes of each event - * events_statements_total(float, number) - * events_statements_seconds_total(float, millieconds) - * events_statements_errors_total(float, number) - * events_statements_warnings_total(float, number) - * events_statements_rows_affected_total(float, number) - * events_statements_rows_sent_total(float, number) - * events_statements_rows_examined_total(float, number) - * events_statements_tmp_tables_total(float, number) - * events_statements_tmp_disk_tables_total(float, number) - * events_statements_sort_merge_passes_totals(float, number) - * events_statements_sort_rows_total(float, number) - * events_statements_no_index_used_total(float, number) + * events_statements_total(float, number) + * events_statements_seconds_total(float, milliseconds) + * events_statements_errors_total(float, number) + * events_statements_warnings_total(float, number) + * events_statements_rows_affected_total(float, number) + * events_statements_rows_sent_total(float, number) + * events_statements_rows_examined_total(float, number) + * events_statements_tmp_tables_total(float, number) + * events_statements_tmp_disk_tables_total(float, number) + * events_statements_sort_merge_passes_totals(float, number) + * events_statements_sort_rows_total(float, number) + * events_statements_no_index_used_total(float, number) * Table schema - gathers statistics of each schema.
It has following measurements - * info_schema_table_rows(float, number) - * info_schema_table_size_data_length(float, number) - * info_schema_table_size_index_length(float, number) - * info_schema_table_size_data_free(float, number) - * info_schema_table_version(float, number) + * info_schema_table_rows(float, number) + * info_schema_table_size_data_length(float, number) + * info_schema_table_size_index_length(float, number) + * info_schema_table_size_data_free(float, number) + * info_schema_table_version(float, number) ## Tags + * All measurements has following tags - * server (the host name from which the metrics are gathered) + * server (the host name from which the metrics are gathered) * Process list measurement has following tags - * user (username for whom the metrics are gathered) + * user (username for whom the metrics are gathered) * User Statistics measurement has following tags - * user (username for whom the metrics are gathered) + * user (username for whom the metrics are gathered) * Perf table IO waits measurement has following tags - * schema - * name (object name for event or process) + * schema + * name (object name for event or process) * Perf index IO waits has following tags - * schema - * name - * index + * schema + * name + * index * Info schema autoincrement statuses has following tags - * schema - * table - * column + * schema + * table + * column * Perf table lock waits has following tags - * schema - * table - * sql_lock_waits_total(fields including this tag have numeric unit) - * external_lock_waits_total(fields including this tag have numeric unit) - * sql_lock_waits_seconds_total(fields including this tag have millisecond unit) - * external_lock_waits_seconds_total(fields including this tag have millisecond unit) + * schema + * table + * sql_lock_waits_total(fields including this tag have numeric unit) + * external_lock_waits_total(fields including this tag have numeric unit) + * sql_lock_waits_seconds_total(fields including this tag have millisecond unit) + * external_lock_waits_seconds_total(fields including this tag have millisecond unit) * Perf events statements has following tags - * event_name + * event_name * Perf file events statuses has following tags - * event_name - * mode + * event_name + * mode * Perf file events statements has following tags - * schema - * digest - * digest_text + * schema + * digest + * digest_text * Table schema has following tags - * schema - * table - * component - * type - * engine - * row_format - * create_options + * schema + * table + * component + * type + * engine + * row_format + * create_options diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index 7ce9bd1666173..f5dc343ce783f 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -1,8 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package mysql import ( - "bytes" "database/sql" + _ "embed" "fmt" "strconv" "strings" @@ -10,13 +11,18 @@ import ( "time" "github.com/go-sql-driver/mysql" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" - "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" + v1 "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" + v2 "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type Mysql struct { Servers []string `toml:"servers"` PerfEventsStatementsDigestTextLimit int64 `toml:"perf_events_statements_digest_text_limit"` @@ -28,6 +34,8 @@ type Mysql struct { GatherInfoSchemaAutoInc bool `toml:"gather_info_schema_auto_inc"` GatherInnoDBMetrics bool `toml:"gather_innodb_metrics"` GatherSlaveStatus bool `toml:"gather_slave_status"` + GatherAllSlaveChannels bool `toml:"gather_all_slave_channels"` + MariadbDialect bool `toml:"mariadb_dialect"` GatherBinaryLogs bool `toml:"gather_binary_logs"` GatherTableIOWaits bool `toml:"gather_table_io_waits"` GatherTableLockWaits bool `toml:"gather_table_lock_waits"` @@ -37,6 +45,8 @@ type Mysql struct { GatherFileEventsStats bool `toml:"gather_file_events_stats"` GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"` GatherGlobalVars bool `toml:"gather_global_variables"` + GatherPerfSummaryPerAccountPerEvent bool `toml:"gather_perf_sum_per_acc_per_event"` + PerfSummaryEvents []string `toml:"perf_summary_events"` IntervalSlow string `toml:"interval_slow"` MetricVersion int `toml:"metric_version"` @@ -45,112 +55,22 @@ type Mysql struct { lastT time.Time initDone bool scanIntervalSlow uint32 + getStatusQuery string } -const sampleConfig = ` - ## specify servers via a url matching: - ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] - ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name - ## e.g. - ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] - ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] - # - ## If no servers are specified, then localhost is used as the host. - servers = ["tcp(127.0.0.1:3306)/"] - - ## Selects the metric output format. - ## - ## This option exists to maintain backwards compatibility, if you have - ## existing metrics do not set or change this value until you are ready to - ## migrate to the new format. - ## - ## If you do not have existing metrics from this plugin set to the latest - ## version. 
- ## - ## Telegraf >=1.6: metric_version = 2 - ## <1.6: metric_version = 1 (or unset) - metric_version = 2 - - ## if the list is empty, then metrics are gathered from all database tables - # table_schema_databases = [] - - ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list - # gather_table_schema = false - - ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST - # gather_process_list = false - - ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS - # gather_user_statistics = false - - ## gather auto_increment columns and max values from information schema - # gather_info_schema_auto_inc = false - - ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS - # gather_innodb_metrics = false - - ## gather metrics from SHOW SLAVE STATUS command output - # gather_slave_status = false - - ## gather metrics from SHOW BINARY LOGS command output - # gather_binary_logs = false - - ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES - # gather_global_variables = true - - ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE - # gather_table_io_waits = false - - ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS - # gather_table_lock_waits = false - - ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE - # gather_index_io_waits = false - - ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS - # gather_event_waits = false - - ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME - # gather_file_events_stats = false - - ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST - # gather_perf_events_statements = false - - ## the limits for metrics form perf_events_statements - # perf_events_statements_digest_text_limit = 120 - # perf_events_statements_limit = 250 - # perf_events_statements_time_limit = 86400 - - ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) - ## example: interval_slow = "30m" - # interval_slow = "" - - ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - const ( - defaultTimeout = 5 * time.Second defaultPerfEventsStatementsDigestTextLimit = 120 defaultPerfEventsStatementsLimit = 250 defaultPerfEventsStatementsTimeLimit = 86400 defaultGatherGlobalVars = true ) -func (m *Mysql) SampleConfig() string { - return sampleConfig -} +const localhost = "" -func (m *Mysql) Description() string { - return "Read metrics from one or many mysql servers" +func (*Mysql) SampleConfig() string { + return sampleConfig } -const localhost = "" - func (m *Mysql) InitMysql() { if len(m.IntervalSlow) > 0 { interval, err := time.ParseDuration(m.IntervalSlow) @@ -158,6 +78,11 @@ func (m *Mysql) InitMysql() { m.scanIntervalSlow = uint32(interval.Seconds()) } } + if m.MariadbDialect { + m.getStatusQuery = slaveStatusQueryMariadb + } else { + m.getStatusQuery = slaveStatusQuery + } m.initDone = true } @@ -177,7 +102,9 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error { } if tlsConfig != nil { - mysql.RegisterTLSConfig("custom", tlsConfig) + if err := mysql.RegisterTLSConfig("custom", tlsConfig); err != nil { + return err + } } var wg sync.WaitGroup @@ -285,6 +212,7 @@ const ( globalStatusQuery = `SHOW GLOBAL STATUS` globalVariablesQuery = `SHOW GLOBAL VARIABLES` slaveStatusQuery = `SHOW SLAVE 
STATUS` + slaveStatusQueryMariadb = `SHOW ALL SLAVES STATUS` binaryLogsQuery = `SHOW BINARY LOGS` infoSchemaProcessListQuery = ` SELECT COALESCE(command,''),COALESCE(state,''),count(*) @@ -313,6 +241,13 @@ const ( FROM information_schema.INNODB_METRICS WHERE status='enabled' ` + innoDBMetricsQueryMariadb = ` + EXECUTE IMMEDIATE CONCAT(" + SELECT NAME, COUNT + FROM information_schema.INNODB_METRICS + WHERE ", IF(version() REGEXP '10\.[1-4].*',"status='enabled'", "ENABLED=1"), " + "); + ` perfTableIOWaitsQuery = ` SELECT OBJECT_SCHEMA, OBJECT_NAME, COUNT_FETCH, COUNT_INSERT, COUNT_UPDATE, COUNT_DELETE, SUM_TIMER_FETCH, SUM_TIMER_INSERT, SUM_TIMER_UPDATE, SUM_TIMER_DELETE @@ -416,6 +351,38 @@ const ( FROM information_schema.tables WHERE table_schema = 'performance_schema' AND table_name = ? ` + + perfSummaryPerAccountPerEvent = ` + SELECT + coalesce(user, "unknown"), + coalesce(host, "unknown"), + coalesce(event_name, "unknown"), + count_star, + sum_timer_wait, + min_timer_wait, + avg_timer_wait, + max_timer_wait, + sum_lock_time, + sum_errors, + sum_warnings, + sum_rows_affected, + sum_rows_sent, + sum_rows_examined, + sum_created_tmp_disk_tables, + sum_created_tmp_tables, + sum_select_full_join, + sum_select_full_range_join, + sum_select_range, + sum_select_range_check, + sum_select_scan, + sum_sort_merge_passes, + sum_sort_range, + sum_sort_rows, + sum_sort_scan, + sum_no_index_used, + sum_no_good_index_used + FROM performance_schema.events_statements_summary_by_account_by_event_name + ` ) func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { @@ -491,6 +458,13 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { } } + if m.GatherPerfSummaryPerAccountPerEvent { + err = m.gatherPerfSummaryPerAccountPerEvent(db, serv, acc) + if err != nil { + return err + } + } + if m.GatherTableIOWaits { err = m.gatherPerfTableIOWaits(db, serv, acc) if err != nil { @@ -573,7 +547,12 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu value, err := m.parseGlobalVariables(key, val) if err != nil { - m.Log.Debugf("Error parsing global variable %q: %v", key, err) + errString := fmt.Errorf("error parsing mysql global variable %q=%q: %v", key, string(val), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } } else { fields[key] = value } @@ -593,14 +572,9 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { if m.MetricVersion < 2 { - v, ok := v1.ParseValue(value) - if ok { - return v, nil - } - return v, fmt.Errorf("could not parse value: %q", string(value)) - } else { - return v2.ConvertGlobalVariables(key, value) + return v1.ParseValue(value) } + return v2.ConvertGlobalVariables(key, value) } // gatherSlaveStatuses can be used to get replication analytics @@ -609,7 +583,10 @@ func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{ // This code does not work with multi-source replication. 
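The reworked gatherSlaveStatuses below has to scan rows whose column set varies between MySQL and MariaDB versions, so it cannot declare typed scan targets up front; it scans everything into `sql.RawBytes` and inspects the column metadata afterwards. A minimal standalone sketch of that technique (the DSN and credentials are placeholders):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // the driver the plugin uses
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SHOW SLAVE STATUS")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}

	for rows.Next() {
		// One RawBytes cell per column; Scan requires pointers to them.
		vals := make([]sql.RawBytes, len(cols))
		ptrs := make([]interface{}, len(cols))
		for i := range vals {
			ptrs[i] = &vals[i]
		}
		if err := rows.Scan(ptrs...); err != nil {
			log.Fatal(err)
		}
		for i, col := range cols {
			fmt.Printf("%s (%s) = %s\n", col.Name(), col.DatabaseTypeName(), vals[i])
		}
	}
}
```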
func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumulator) error { // run query - rows, err := db.Query(slaveStatusQuery) + var rows *sql.Rows + var err error + + rows, err = db.Query(m.getStatusQuery) if err != nil { return err } @@ -620,32 +597,72 @@ func (m *Mysql) gatherSlaveStatuses(db *sql.DB, serv string, acc telegraf.Accumu tags := map[string]string{"server": servtag} fields := make(map[string]interface{}) - // to save the column names as a field key - // scanning keys and values separately - if rows.Next() { + // for each channel record + for rows.Next() { + // to save the column names as a field key + // scanning keys and values separately + // get columns names, and create an array with its length - cols, err := rows.Columns() + cols, err := rows.ColumnTypes() if err != nil { return err } - vals := make([]interface{}, len(cols)) + vals := make([]sql.RawBytes, len(cols)) + valPtrs := make([]interface{}, len(cols)) // fill the array with sql.Rawbytes for i := range vals { - vals[i] = &sql.RawBytes{} + vals[i] = sql.RawBytes{} + valPtrs[i] = &vals[i] } - if err = rows.Scan(vals...); err != nil { + if err = rows.Scan(valPtrs...); err != nil { return err } + // range over columns, and try to parse values for i, col := range cols { + colName := col.Name() + if m.MetricVersion >= 2 { - col = strings.ToLower(col) + colName = strings.ToLower(colName) } - if value, ok := m.parseValue(*vals[i].(*sql.RawBytes)); ok { - fields["slave_"+col] = value + + colValue := vals[i] + + if m.GatherAllSlaveChannels && + (strings.ToLower(colName) == "channel_name" || strings.ToLower(colName) == "connection_name") { + // Since the default channel name is empty, we need this block + channelName := "default" + if len(colValue) > 0 { + channelName = string(colValue) + } + tags["channel"] = channelName + continue + } + + if len(colValue) == 0 { + continue + } + + value, err := m.parseValueByDatabaseTypeName(colValue, col.DatabaseTypeName()) + if err != nil { + errString := fmt.Errorf("error parsing mysql slave status %q=%q: %v", colName, string(colValue), err) + if m.MetricVersion < 2 { + m.Log.Debug(errString) + } else { + acc.AddError(errString) + } + continue } + + fields["slave_"+colName] = value } acc.AddFields("mysql", fields, tags) + + // Only the first row is relevant if not all slave-channels should be gathered, + // so break here and skip the remaining rows + if !m.GatherAllSlaveChannels { + break + } } return nil @@ -665,17 +682,31 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat servtag := getDSNTag(serv) tags := map[string]string{"server": servtag} var ( - size uint64 = 0 - count uint64 = 0 - fileSize uint64 - fileName string + size uint64 + count uint64 + fileSize uint64 + fileName string + encrypted string ) + columns, err := rows.Columns() + if err != nil { + return err + } + numColumns := len(columns) + // iterate over rows and count the size and count of files for rows.Next() { - if err := rows.Scan(&fileName, &fileSize); err != nil { - return err + if numColumns == 3 { + if err := rows.Scan(&fileName, &fileSize, &encrypted); err != nil { + return err + } + } else { + if err := rows.Scan(&fileName, &fileSize); err != nil { + return err + } } + size += fileSize count++ } @@ -683,6 +714,7 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat "binary_size_bytes": size, "binary_files_count": count, } + acc.AddFields("mysql", fields, tags) return nil } @@ -734,42 +766,42 @@ func (m *Mysql) 
gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum case "Queries": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["queries"] = i } case "Questions": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["questions"] = i } case "Slow_queries": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["slow_queries"] = i } case "Connections": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["connections"] = i } case "Syncs": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["syncs"] = i } case "Uptime": i, err := strconv.ParseInt(string(val), 10, 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error mysql: parsing %s int value (%s)", key, err)) + acc.AddError(fmt.Errorf("error mysql: parsing %s int value (%s)", key, err)) } else { fields["uptime"] = i } @@ -778,7 +810,7 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum key = strings.ToLower(key) value, err := v2.ConvertGlobalStatus(key, val) if err != nil { - m.Log.Debugf("Error parsing global status: %v", err) + acc.AddError(fmt.Errorf("error parsing mysql global status %q=%q: %v", key, string(val), err)) } else { fields[key] = value } @@ -807,6 +839,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. return err } defer rows.Close() + var ( command string state string @@ -846,16 +879,17 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. } // get count of connections from each user - conn_rows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") + connRows, err := db.Query("SELECT user, sum(1) AS connections FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") if err != nil { return err } + defer connRows.Close() - for conn_rows.Next() { + for connRows.Next() { var user string var connections int64 - err = conn_rows.Scan(&user, &connections) + err = connRows.Scan(&user, &connections) if err != nil { return err } @@ -870,7 +904,7 @@ func (m *Mysql) GatherProcessListStatuses(db *sql.DB, serv string, acc telegraf. 
return nil } -// GatherUserStatistics can be used to collect metrics on each running command +// GatherUserStatisticsStatuses can be used to collect metrics on each running command // and its state with its running count func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegraf.Accumulator) error { // run query @@ -917,7 +951,7 @@ func (m *Mysql) GatherUserStatisticsStatuses(db *sql.DB, serv string, acc telegr case *string: fields[cols[i]] = *v default: - return fmt.Errorf("Unknown column type - %T", v) + return fmt.Errorf("unknown column type - %T", v) } } acc.AddFields("mysql_user_stats", fields, tags) @@ -942,146 +976,146 @@ func columnsToLower(s []string, e error) ([]string, error) { func getColSlice(l int) ([]interface{}, error) { // list of all possible column names var ( - user string - total_connections int64 - concurrent_connections int64 - connected_time int64 - busy_time int64 - cpu_time int64 - bytes_received int64 - bytes_sent int64 - binlog_bytes_written int64 - rows_read int64 - rows_sent int64 - rows_deleted int64 - rows_inserted int64 - rows_updated int64 - select_commands int64 - update_commands int64 - other_commands int64 - commit_transactions int64 - rollback_transactions int64 - denied_connections int64 - lost_connections int64 - access_denied int64 - empty_queries int64 - total_ssl_connections int64 - max_statement_time_exceeded int64 + user string + totalConnections int64 + concurrentConnections int64 + connectedTime int64 + busyTime int64 + cpuTime int64 + bytesReceived int64 + bytesSent int64 + binlogBytesWritten int64 + rowsRead int64 + rowsSent int64 + rowsDeleted int64 + rowsInserted int64 + rowsUpdated int64 + selectCommands int64 + updateCommands int64 + otherCommands int64 + commitTransactions int64 + rollbackTransactions int64 + deniedConnections int64 + lostConnections int64 + accessDenied int64 + emptyQueries int64 + totalSslConnections int64 + maxStatementTimeExceeded int64 // maria specific - fbusy_time float64 - fcpu_time float64 + fbusyTime float64 + fcpuTime float64 // percona specific - rows_fetched int64 - table_rows_read int64 + rowsFetched int64 + tableRowsRead int64 ) switch l { case 23: // maria5 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &fbusy_time, - &fcpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_read, - &rows_sent, - &rows_deleted, - &rows_inserted, - &rows_updated, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, + &totalConnections, + &concurrentConnections, + &connectedTime, + &fbusyTime, + &fcpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsRead, + &rowsSent, + &rowsDeleted, + &rowsInserted, + &rowsUpdated, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, }, nil case 25: // maria10 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &fbusy_time, - &fcpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_read, - &rows_sent, - &rows_deleted, - &rows_inserted, - &rows_updated, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, - 
&total_ssl_connections, - &max_statement_time_exceeded, + &totalConnections, + &concurrentConnections, + &connectedTime, + &fbusyTime, + &fcpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsRead, + &rowsSent, + &rowsDeleted, + &rowsInserted, + &rowsUpdated, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, + &totalSslConnections, + &maxStatementTimeExceeded, }, nil case 21: // mysql 5.5 return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &busy_time, - &cpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_fetched, - &rows_updated, - &table_rows_read, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, + &totalConnections, + &concurrentConnections, + &connectedTime, + &busyTime, + &cpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsFetched, + &rowsUpdated, + &tableRowsRead, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, }, nil case 22: // percona return []interface{}{ &user, - &total_connections, - &concurrent_connections, - &connected_time, - &busy_time, - &cpu_time, - &bytes_received, - &bytes_sent, - &binlog_bytes_written, - &rows_fetched, - &rows_updated, - &table_rows_read, - &select_commands, - &update_commands, - &other_commands, - &commit_transactions, - &rollback_transactions, - &denied_connections, - &lost_connections, - &access_denied, - &empty_queries, - &total_ssl_connections, + &totalConnections, + &concurrentConnections, + &connectedTime, + &busyTime, + &cpuTime, + &bytesReceived, + &bytesSent, + &binlogBytesWritten, + &rowsFetched, + &rowsUpdated, + &tableRowsRead, + &selectCommands, + &updateCommands, + &otherCommands, + &commitTransactions, + &rollbackTransactions, + &deniedConnections, + &lostConnections, + &accessDenied, + &emptyQueries, + &totalSslConnections, }, nil } - return nil, fmt.Errorf("Not Supported - %d columns", l) + return nil, fmt.Errorf("not Supported - %d columns", l) } // gatherPerfTableIOWaits can be used to get total count and time @@ -1228,8 +1262,18 @@ func (m *Mysql) gatherInfoSchemaAutoIncStatuses(db *sql.DB, serv string, acc tel // gatherInnoDBMetrics can be used to fetch enabled metrics from // information_schema.INNODB_METRICS func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumulator) error { + var ( + query string + ) + + if m.MariadbDialect { + query = innoDBMetricsQueryMariadb + } else { + query = innoDBMetricsQuery + } + // run query - rows, err := db.Query(innoDBMetricsQuery) + rows, err := db.Query(query) if err != nil { return err } @@ -1245,10 +1289,16 @@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu if err := rows.Scan(&key, &val); err != nil { return err } + key = strings.ToLower(key) - if value, ok := m.parseValue(val); ok { - fields[key] = value + value, err := m.parseValueByDatabaseTypeName(val, "BIGINT") + if err != nil { + acc.AddError(fmt.Errorf("error parsing mysql InnoDB metric %q=%q: %v", key, string(val), err)) + continue } + + fields[key] = value + // Send 20 fields at a time if len(fields) >= 20 { acc.AddFields("mysql_innodb", fields, tags) @@ -1262,6 +1312,142 
@@ func (m *Mysql) gatherInnoDBMetrics(db *sql.DB, serv string, acc telegraf.Accumu return nil } +// gatherPerfSummaryPerAccountPerEvent can be used to fetch enabled metrics from +// performance_schema.events_statements_summary_by_account_by_event_name +func (m *Mysql) gatherPerfSummaryPerAccountPerEvent(db *sql.DB, serv string, acc telegraf.Accumulator) error { + sqlQuery := perfSummaryPerAccountPerEvent + + var rows *sql.Rows + var err error + + var ( + srcUser string + srcHost string + eventName string + countStar float64 + sumTimerWait float64 + minTimerWait float64 + avgTimerWait float64 + maxTimerWait float64 + sumLockTime float64 + sumErrors float64 + sumWarnings float64 + sumRowsAffected float64 + sumRowsSent float64 + sumRowsExamined float64 + sumCreatedTmpDiskTables float64 + sumCreatedTmpTables float64 + sumSelectFullJoin float64 + sumSelectFullRangeJoin float64 + sumSelectRange float64 + sumSelectRangeCheck float64 + sumSelectScan float64 + sumSortMergePasses float64 + sumSortRange float64 + sumSortRows float64 + sumSortScan float64 + sumNoIndexUsed float64 + sumNoGoodIndexUsed float64 + ) + + var events []interface{} + // if we have perf_summary_events set - select only listed events (adding filter criteria for rows) + if len(m.PerfSummaryEvents) > 0 { + sqlQuery += " WHERE EVENT_NAME IN (" + for i, eventName := range m.PerfSummaryEvents { + if i > 0 { + sqlQuery += ", " + } + sqlQuery += "?" + events = append(events, eventName) + } + sqlQuery += ")" + + rows, err = db.Query(sqlQuery, events...) + } else { + // otherwise no filter, hence, select all rows + rows, err = db.Query(perfSummaryPerAccountPerEvent) + } + + if err != nil { + return err + } + defer rows.Close() + + // parse DSN and save server tag + servtag := getDSNTag(serv) + tags := map[string]string{"server": servtag} + for rows.Next() { + if err := rows.Scan( + &srcUser, + &srcHost, + &eventName, + &countStar, + &sumTimerWait, + &minTimerWait, + &avgTimerWait, + &maxTimerWait, + &sumLockTime, + &sumErrors, + &sumWarnings, + &sumRowsAffected, + &sumRowsSent, + &sumRowsExamined, + &sumCreatedTmpDiskTables, + &sumCreatedTmpTables, + &sumSelectFullJoin, + &sumSelectFullRangeJoin, + &sumSelectRange, + &sumSelectRangeCheck, + &sumSelectScan, + &sumSortMergePasses, + &sumSortRange, + &sumSortRows, + &sumSortScan, + &sumNoIndexUsed, + &sumNoGoodIndexUsed, + ); err != nil { + return err + } + srcUser = strings.ToLower(srcUser) + srcHost = strings.ToLower(srcHost) + + sqlLWTags := copyTags(tags) + sqlLWTags["src_user"] = srcUser + sqlLWTags["src_host"] = srcHost + sqlLWTags["event"] = eventName + sqlLWFields := map[string]interface{}{ + "count_star": countStar, + "sum_timer_wait": sumTimerWait, + "min_timer_wait": minTimerWait, + "avg_timer_wait": avgTimerWait, + "max_timer_wait": maxTimerWait, + "sum_lock_time": sumLockTime, + "sum_errors": sumErrors, + "sum_warnings": sumWarnings, + "sum_rows_affected": sumRowsAffected, + "sum_rows_sent": sumRowsSent, + "sum_rows_examined": sumRowsExamined, + "sum_created_tmp_disk_tables": sumCreatedTmpDiskTables, + "sum_created_tmp_tables": sumCreatedTmpTables, + "sum_select_full_join": sumSelectFullJoin, + "sum_select_full_range_join": sumSelectFullRangeJoin, + "sum_select_range": sumSelectRange, + "sum_select_range_check": sumSelectRangeCheck, + "sum_select_scan": sumSelectScan, + "sum_sort_merge_passes": sumSortMergePasses, + "sum_sort_range": sumSortRange, + "sum_sort_rows": sumSortRows, + "sum_sort_scan": sumSortScan, + "sum_no_index_used": sumNoIndexUsed, + "sum_no_good_index_used": 
sumNoGoodIndexUsed, + } + acc.AddFields("mysql_perf_acc_event", sqlLWFields, sqlLWTags) + } + + return nil +} + // gatherPerfTableLockWaits can be used to get // the total number and time for SQL and external lock wait events // for each table and operation @@ -1479,8 +1665,8 @@ func (m *Mysql) gatherPerfFileEventsStatuses(db *sql.DB, serv string, acc telegr fields["file_events_seconds_total"] = sumTimerWrite / picoSeconds fields["file_events_bytes_totals"] = sumNumBytesWrite acc.AddFields("mysql_perf_schema", fields, writeTags) - } + return nil } @@ -1501,7 +1687,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf defer rows.Close() var ( - schemaName, digest, digest_text string + schemaName, digest, digestText string count, queryTime, errors, warnings float64 rowsAffected, rowsSent, rowsExamined float64 tmpTables, tmpDiskTables float64 @@ -1516,7 +1702,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf for rows.Next() { err = rows.Scan( - &schemaName, &digest, &digest_text, + &schemaName, &digest, &digestText, &count, &queryTime, &errors, &warnings, &rowsAffected, &rowsSent, &rowsExamined, &tmpTables, &tmpDiskTables, @@ -1529,7 +1715,7 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf } tags["schema"] = schemaName tags["digest"] = digest - tags["digest_text"] = digest_text + tags["digest_text"] = digestText fields := map[string]interface{}{ "events_statements_total": count, @@ -1578,132 +1764,129 @@ func (m *Mysql) gatherTableSchema(db *sql.DB, serv string, acc telegraf.Accumula } for _, database := range dbList { - rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + err := m.gatherSchemaForDB(db, database, servtag, acc) if err != nil { return err } - defer rows.Close() - var ( - tableSchema string - tableName string - tableType string - engine string - version float64 - rowFormat string - tableRows float64 - dataLength float64 - indexLength float64 - dataFree float64 - createOptions string + } + return nil +} + +func (m *Mysql) gatherSchemaForDB(db *sql.DB, database string, servtag string, acc telegraf.Accumulator) error { + rows, err := db.Query(fmt.Sprintf(tableSchemaQuery, database)) + if err != nil { + return err + } + defer rows.Close() + + var ( + tableSchema string + tableName string + tableType string + engine string + version float64 + rowFormat string + tableRows float64 + dataLength float64 + indexLength float64 + dataFree float64 + createOptions string + ) + + for rows.Next() { + err = rows.Scan( + &tableSchema, + &tableName, + &tableType, + &engine, + &version, + &rowFormat, + &tableRows, + &dataLength, + &indexLength, + &dataFree, + &createOptions, ) - for rows.Next() { - err = rows.Scan( - &tableSchema, - &tableName, - &tableType, - &engine, - &version, - &rowFormat, - &tableRows, - &dataLength, - &indexLength, - &dataFree, - &createOptions, - ) - if err != nil { - return err - } - tags := map[string]string{"server": servtag} - tags["schema"] = tableSchema - tags["table"] = tableName + if err != nil { + return err + } + tags := map[string]string{"server": servtag} + tags["schema"] = tableSchema + tags["table"] = tableName - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_rows"), - map[string]interface{}{"value": tableRows}, tags) - - dlTags := copyTags(tags) - dlTags["component"] = "data_length" - acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), - map[string]interface{}{"value": dataLength}, dlTags) - - 
ilTags := copyTags(tags) - ilTags["component"] = "index_length" - acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), - map[string]interface{}{"value": indexLength}, ilTags) - - dfTags := copyTags(tags) - dfTags["component"] = "data_free" - acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), - map[string]interface{}{"value": dataFree}, dfTags) - } else { - acc.AddFields("mysql_table_schema", - map[string]interface{}{"rows": tableRows}, tags) + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_rows"), + map[string]interface{}{"value": tableRows}, tags) + + dlTags := copyTags(tags) + dlTags["component"] = "data_length" + acc.AddFields(newNamespace("info_schema", "table_size", "data_length"), + map[string]interface{}{"value": dataLength}, dlTags) + + ilTags := copyTags(tags) + ilTags["component"] = "index_length" + acc.AddFields(newNamespace("info_schema", "table_size", "index_length"), + map[string]interface{}{"value": indexLength}, ilTags) + + dfTags := copyTags(tags) + dfTags["component"] = "data_free" + acc.AddFields(newNamespace("info_schema", "table_size", "data_free"), + map[string]interface{}{"value": dataFree}, dfTags) + } else { + acc.AddFields("mysql_table_schema", + map[string]interface{}{"rows": tableRows}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_length": dataLength}, tags) + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_length": dataLength}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"index_length": indexLength}, tags) + acc.AddFields("mysql_table_schema", + map[string]interface{}{"index_length": indexLength}, tags) - acc.AddFields("mysql_table_schema", - map[string]interface{}{"data_free": dataFree}, tags) - } + acc.AddFields("mysql_table_schema", + map[string]interface{}{"data_free": dataFree}, tags) + } - versionTags := copyTags(tags) - versionTags["type"] = tableType - versionTags["engine"] = engine - versionTags["row_format"] = rowFormat - versionTags["create_options"] = createOptions + versionTags := copyTags(tags) + versionTags["type"] = tableType + versionTags["engine"] = engine + versionTags["row_format"] = rowFormat + versionTags["create_options"] = createOptions - if m.MetricVersion < 2 { - acc.AddFields(newNamespace("info_schema", "table_version"), - map[string]interface{}{"value": version}, versionTags) - } else { - acc.AddFields("mysql_table_schema_version", - map[string]interface{}{"table_version": version}, versionTags) - } + if m.MetricVersion < 2 { + acc.AddFields(newNamespace("info_schema", "table_version"), + map[string]interface{}{"value": version}, versionTags) + } else { + acc.AddFields("mysql_table_schema_version", + map[string]interface{}{"table_version": version}, versionTags) } } return nil } -func (m *Mysql) parseValue(value sql.RawBytes) (interface{}, bool) { +func (m *Mysql) parseValueByDatabaseTypeName(value sql.RawBytes, databaseTypeName string) (interface{}, error) { if m.MetricVersion < 2 { return v1.ParseValue(value) - } else { - return parseValue(value) - } -} - -// parseValue can be used to convert values such as "ON","OFF","Yes","No" to 0,1 -func parseValue(value sql.RawBytes) (interface{}, bool) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 { - return 1, true - } - - if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 { - return 0, true - } - - if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { - return 
val, true - } - if val, err := strconv.ParseFloat(string(value), 64); err == nil { - return val, true } - if len(string(value)) > 0 { - return string(value), true + switch databaseTypeName { + case "INT": + return v2.ParseInt(value) + case "BIGINT": + return v2.ParseUint(value) + case "VARCHAR": + return v2.ParseString(value) + default: + m.Log.Debugf("unknown database type name %q in parseValueByDatabaseTypeName", databaseTypeName) + return v2.ParseValue(value) } - return nil, false } // findThreadState can be used to find thread state by command and plain state func findThreadState(rawCommand, rawState string) string { var ( // replace '_' symbol with space - command = strings.Replace(strings.ToLower(rawCommand), "_", " ", -1) - state = strings.Replace(strings.ToLower(rawState), "_", " ", -1) + command = strings.ReplaceAll(strings.ToLower(rawCommand), "_", " ") + state = strings.ReplaceAll(strings.ToLower(rawState), "_", " ") ) // if the state is already valid, then return it if _, ok := generalThreadStates[state]; ok { @@ -1736,7 +1919,7 @@ func findThreadState(rawCommand, rawState string) string { // newNamespace can be used to make a namespace func newNamespace(words ...string) string { - return strings.Replace(strings.Join(words, "_"), " ", "_", -1) + return strings.ReplaceAll(strings.Join(words, "_"), " ", "_") } func copyTags(in map[string]string) map[string]string { diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index be9c338bf7b0e..c79c4b672d374 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -1,58 +1,105 @@ package mysql import ( - "database/sql" "fmt" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/DATA-DOG/go-sqlmock" + "github.com/docker/go-connections/nat" "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf/testutil" ) -func TestMysqlDefaultsToLocal(t *testing.T) { +const servicePort = "3306" + +func TestMysqlDefaultsToLocalIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := testutil.Container{ + Image: "mysql", + Env: map[string]string{ + "MYSQL_ALLOW_EMPTY_PASSWORD": "yes", + }, + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForAll( + wait.ForLog("/usr/sbin/mysqld: ready for connections"), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + m := &Mysql{ - Servers: []string{fmt.Sprintf("root@tcp(%s:3306)/", testutil.GetLocalHost())}, + Servers: []string{fmt.Sprintf("root@tcp(%s:%s)/", container.Address, container.Ports[servicePort])}, } var acc testutil.Accumulator - err := m.Gather(&acc) + err = m.Gather(&acc) require.NoError(t, err) + require.Empty(t, acc.Errors) - assert.True(t, acc.HasMeasurement("mysql")) + require.True(t, acc.HasMeasurement("mysql")) } -func TestMysqlMultipleInstances(t *testing.T) { +func TestMysqlMultipleInstancesIntegration(t *testing.T) { // Invoke Gather() from two separate configurations and // confirm they don't interfere with each other if testing.Short() { t.Skip("Skipping integration test in short mode") } - testServer := "root@tcp(127.0.0.1:3306)/?tls=false" + + container := testutil.Container{ + Image: "mysql", + Env: map[string]string{ + "MYSQL_ALLOW_EMPTY_PASSWORD": 
"yes", + }, + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForAll( + wait.ForLog("/usr/sbin/mysqld: ready for connections"), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + testServer := fmt.Sprintf("root@tcp(%s:%s)/?tls=false", container.Address, container.Ports[servicePort]) m := &Mysql{ - Servers: []string{testServer}, - IntervalSlow: "30s", + Servers: []string{testServer}, + IntervalSlow: "30s", + GatherGlobalVars: true, + MetricVersion: 2, } var acc, acc2 testutil.Accumulator - err := m.Gather(&acc) + err = m.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasMeasurement("mysql")) + require.Empty(t, acc.Errors) + require.True(t, acc.HasMeasurement("mysql")) // acc should have global variables - assert.True(t, acc.HasMeasurement("mysql_variables")) + require.True(t, acc.HasMeasurement("mysql_variables")) m2 := &Mysql{ - Servers: []string{testServer}, + Servers: []string{testServer}, + MetricVersion: 2, } err = m2.Gather(&acc2) require.NoError(t, err) - assert.True(t, acc2.HasMeasurement("mysql")) + require.Empty(t, acc.Errors) + require.True(t, acc2.HasMeasurement("mysql")) // acc2 should not have global variables - assert.False(t, acc2.HasMeasurement("mysql_variables")) + require.False(t, acc2.HasMeasurement("mysql_variables")) } func TestMysqlMultipleInits(t *testing.T) { @@ -62,16 +109,16 @@ func TestMysqlMultipleInits(t *testing.T) { m2 := &Mysql{} m.InitMysql() - assert.True(t, m.initDone) - assert.False(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.False(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) m2.InitMysql() - assert.True(t, m.initDone) - assert.True(t, m2.initDone) - assert.Equal(t, m.scanIntervalSlow, uint32(30)) - assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + require.True(t, m.initDone) + require.True(t, m2.initDone) + require.Equal(t, m.scanIntervalSlow, uint32(30)) + require.Equal(t, m2.scanIntervalSlow, uint32(0)) } func TestMysqlGetDSNTag(t *testing.T) { @@ -175,29 +222,143 @@ func TestMysqlDNSAddTimeout(t *testing.T) { } } } -func TestParseValue(t *testing.T) { + +func TestGatherGlobalVariables(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + m := Mysql{ + Log: testutil.Logger{}, + MetricVersion: 2, + } + m.InitMysql() + + columns := []string{"Variable_name", "Value"} + measurement := "mysql_variables" + + type fields []struct { + key string + rawValue string + parsedValue interface{} + } + type tags map[string]string testCases := []struct { - rawByte sql.RawBytes - output interface{} - boolValue bool + name string + fields fields + tags tags }{ - {sql.RawBytes("123"), int64(123), true}, - {sql.RawBytes("abc"), "abc", true}, - {sql.RawBytes("10.1"), 10.1, true}, - {sql.RawBytes("ON"), 1, true}, - {sql.RawBytes("OFF"), 0, true}, - {sql.RawBytes("NO"), 0, true}, - {sql.RawBytes("YES"), 1, true}, - {sql.RawBytes("No"), 0, true}, - {sql.RawBytes("Yes"), 1, true}, - {sql.RawBytes(""), nil, false}, + { + "basic variables", + fields{ + {"__test__string_variable", "text", "text"}, + {"__test__int_variable", "5", int64(5)}, + {"__test__off_variable", "OFF", int64(0)}, + {"__test__on_variable", "ON", int64(1)}, + 
{"__test__empty_variable", "", nil}, + }, + tags{"server": "127.0.0.1:3306"}, + }, + { + "version tag is present", + fields{ + {"__test__string_variable", "text", "text"}, + {"version", "8.0.27-0ubuntu0.20.04.1", "8.0.27-0ubuntu0.20.04.1"}, + }, + tags{"server": "127.0.0.1:3306", "version": "8.0.27-0ubuntu0.20.04.1"}, + }, + + {"", fields{{"delay_key_write", "OFF", "OFF"}}, nil}, + {"", fields{{"delay_key_write", "ON", "ON"}}, nil}, + {"", fields{{"delay_key_write", "ALL", "ALL"}}, nil}, + {"", fields{{"enforce_gtid_consistency", "OFF", "OFF"}}, nil}, + {"", fields{{"enforce_gtid_consistency", "ON", "ON"}}, nil}, + {"", fields{{"enforce_gtid_consistency", "WARN", "WARN"}}, nil}, + {"", fields{{"event_scheduler", "NO", "NO"}}, nil}, + {"", fields{{"event_scheduler", "YES", "YES"}}, nil}, + {"", fields{{"event_scheduler", "DISABLED", "DISABLED"}}, nil}, + {"", fields{{"have_ssl", "DISABLED", int64(0)}}, nil}, + {"", fields{{"have_ssl", "YES", int64(1)}}, nil}, + {"", fields{{"have_symlink", "NO", int64(0)}}, nil}, + {"", fields{{"have_symlink", "DISABLED", int64(0)}}, nil}, + {"", fields{{"have_symlink", "YES", int64(1)}}, nil}, + {"", fields{{"session_track_gtids", "OFF", "OFF"}}, nil}, + {"", fields{{"session_track_gtids", "OWN_GTID", "OWN_GTID"}}, nil}, + {"", fields{{"session_track_gtids", "ALL_GTIDS", "ALL_GTIDS"}}, nil}, + {"", fields{{"session_track_transaction_info", "OFF", "OFF"}}, nil}, + {"", fields{{"session_track_transaction_info", "STATE", "STATE"}}, nil}, + {"", fields{{"session_track_transaction_info", "CHARACTERISTICS", "CHARACTERISTICS"}}, nil}, + {"", fields{{"ssl_fips_mode", "0", "0"}}, nil}, // TODO: map this to OFF or vice versa using integers + {"", fields{{"ssl_fips_mode", "1", "1"}}, nil}, // TODO: map this to ON or vice versa using integers + {"", fields{{"ssl_fips_mode", "2", "2"}}, nil}, // TODO: map this to STRICT or vice versa using integers + {"", fields{{"ssl_fips_mode", "OFF", "OFF"}}, nil}, + {"", fields{{"ssl_fips_mode", "ON", "ON"}}, nil}, + {"", fields{{"ssl_fips_mode", "STRICT", "STRICT"}}, nil}, + {"", fields{{"use_secondary_engine", "OFF", "OFF"}}, nil}, + {"", fields{{"use_secondary_engine", "ON", "ON"}}, nil}, + {"", fields{{"use_secondary_engine", "FORCED", "FORCED"}}, nil}, + {"", fields{{"transaction_write_set_extraction", "OFF", "OFF"}}, nil}, + {"", fields{{"transaction_write_set_extraction", "MURMUR32", "MURMUR32"}}, nil}, + {"", fields{{"transaction_write_set_extraction", "XXHASH64", "XXHASH64"}}, nil}, + {"", fields{{"slave_skip_errors", "OFF", "OFF"}}, nil}, + {"", fields{{"slave_skip_errors", "0", "0"}}, nil}, + {"", fields{{"slave_skip_errors", "1007,1008,1050", "1007,1008,1050"}}, nil}, + {"", fields{{"slave_skip_errors", "all", "all"}}, nil}, + {"", fields{{"slave_skip_errors", "ddl_exist_errors", "ddl_exist_errors"}}, nil}, + {"", fields{{"gtid_mode", "OFF", int64(0)}}, nil}, + {"", fields{{"gtid_mode", "OFF_PERMISSIVE", int64(0)}}, nil}, + {"", fields{{"gtid_mode", "ON", int64(1)}}, nil}, + {"", fields{{"gtid_mode", "ON_PERMISSIVE", int64(1)}}, nil}, } - for _, cases := range testCases { - if got, ok := parseValue(cases.rawByte); got != cases.output && ok != cases.boolValue { - t.Errorf("for %s wanted %t, got %t", string(cases.rawByte), cases.output, got) + + for i, testCase := range testCases { + if testCase.name == "" { + testCase.name = fmt.Sprintf("#%d", i) } + + t.Run(testCase.name, func(t *testing.T) { + rows := sqlmock.NewRows(columns) + for _, field := range testCase.fields { + rows.AddRow(field.key, field.rawValue) + } + + 
mock.ExpectQuery(globalVariablesQuery).WillReturnRows(rows).RowsWillBeClosed() + + acc := &testutil.Accumulator{} + + err = m.gatherGlobalVariables(db, "test", acc) + require.NoErrorf(t, err, "err on gatherGlobalVariables (test case %q)", testCase.name) + + foundFields := map[string]bool{} + + for _, metric := range acc.Metrics { + require.Equalf(t, measurement, metric.Measurement, "wrong measurement (test case %q)", testCase.name) + + if testCase.tags != nil { + require.Equalf(t, testCase.tags, tags(metric.Tags), "wrong tags (test case %q)", testCase.name) + } + + for key, value := range metric.Fields { + for _, field := range testCase.fields { + if field.key == key { + require.Falsef(t, foundFields[key], "field %s observed multiple times (test case %q)", key, testCase.name) + require.Equalf(t, field.parsedValue, value, "wrong value for field %s (test case %q)", key, testCase.name) + foundFields[key] = true + break + } + } + + require.Truef(t, foundFields[key], "unexpected field %s=%v (test case %q)", key, value, testCase.name) + } + } + + for _, field := range testCase.fields { + require.Truef(t, foundFields[field.key], "missing field %s=%v (test case %q)", field.key, field.parsedValue, testCase.name) + } + }) } } + func TestNewNamespace(t *testing.T) { testCases := []struct { words []string diff --git a/plugins/inputs/mysql/sample.conf b/plugins/inputs/mysql/sample.conf new file mode 100644 index 0000000000000..c429ad932a50f --- /dev/null +++ b/plugins/inputs/mysql/sample.conf @@ -0,0 +1,100 @@ +# Read metrics from one or many mysql servers +[[inputs.mysql]] + ## specify servers via a url matching: + ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] + ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name + ## e.g. + ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] + ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] + # + ## If no servers are specified, then localhost is used as the host. + servers = ["tcp(127.0.0.1:3306)/"] + + ## Selects the metric output format. + ## + ## This option exists to maintain backwards compatibility, if you have + ## existing metrics do not set or change this value until you are ready to + ## migrate to the new format. + ## + ## If you do not have existing metrics from this plugin set to the latest + ## version. 
+ ## + ## Telegraf >=1.6: metric_version = 2 + ## <1.6: metric_version = 1 (or unset) + metric_version = 2 + + ## if the list is empty, then metrics are gathered from all database tables + # table_schema_databases = [] + + ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases provided in the list above + # gather_table_schema = false + + ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST + # gather_process_list = false + + ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS + # gather_user_statistics = false + + ## gather auto_increment columns and max values from information schema + # gather_info_schema_auto_inc = false + + ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS + # gather_innodb_metrics = false + + ## gather metrics from all channels from SHOW SLAVE STATUS command output + # gather_all_slave_channels = false + + ## gather metrics from SHOW SLAVE STATUS command output + # gather_slave_status = false + + ## use SHOW ALL SLAVES STATUS command output for MariaDB + # mariadb_dialect = false + + ## gather metrics from SHOW BINARY LOGS command output + # gather_binary_logs = false + + ## gather metrics from SHOW GLOBAL VARIABLES command output + # gather_global_variables = true + + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE + # gather_table_io_waits = false + + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS + # gather_table_lock_waits = false + + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE + # gather_index_io_waits = false + + ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS + # gather_event_waits = false + + ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME + # gather_file_events_stats = false + + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST + # gather_perf_events_statements = false + # + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME + # gather_perf_sum_per_acc_per_event = false + # + ## list of events to be gathered for gather_perf_sum_per_acc_per_event + ## in case of an empty list, all events will be gathered + # perf_summary_events = [] + + ## the limits for metrics from perf_events_statements + # perf_events_statements_digest_text_limit = 120 + # perf_events_statements_limit = 250 + # perf_events_statements_time_limit = 86400 + + ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) + ## example: interval_slow = "30m" + # interval_slow = "" + + ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/mysql/v1/mysql.go b/plugins/inputs/mysql/v1/mysql.go index 6f6062d14f4db..7f4e1a7dcacae 100644 --- a/plugins/inputs/mysql/v1/mysql.go +++ b/plugins/inputs/mysql/v1/mysql.go @@ -182,14 +182,14 @@ var Mappings = []*Mapping{ }, } -func ParseValue(value sql.RawBytes) (float64, bool) { - if bytes.Compare(value, []byte("Yes")) == 0 || bytes.Compare(value, []byte("ON")) == 0 { - return 1, true +func ParseValue(value sql.RawBytes) (float64, error) { + if bytes.Equal(value, []byte("Yes")) || bytes.Equal(value, []byte("ON")) { + return 1, nil } - if bytes.Compare(value, []byte("No")) == 0 || bytes.Compare(value, []byte("OFF")) == 0 { - return 0, true + if
bytes.Equal(value, []byte("No")) || bytes.Equal(value, []byte("OFF")) { + return 0, nil } n, err := strconv.ParseFloat(string(value), 64) - return n, err == nil + return n, err } diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go index a3ac3e976d6a3..28e99b79e4355 100644 --- a/plugins/inputs/mysql/v2/convert.go +++ b/plugins/inputs/mysql/v2/convert.go @@ -21,6 +21,14 @@ func ParseInt(value sql.RawBytes) (interface{}, error) { return v, err } +func ParseUint(value sql.RawBytes) (interface{}, error) { + return strconv.ParseUint(string(value), 10, 64) +} + +func ParseFloat(value sql.RawBytes) (interface{}, error) { + return strconv.ParseFloat(string(value), 64) +} + func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { return int64(1), nil @@ -29,6 +37,10 @@ func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { return int64(0), nil } +func ParseString(value sql.RawBytes) (interface{}, error) { + return string(value), nil +} + func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { // https://dev.mysql.com/doc/refman/8.0/en/replication-mode-change-online-concepts.html v := string(value) @@ -47,17 +59,20 @@ func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { } func ParseValue(value sql.RawBytes) (interface{}, error) { - if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 { - return 1, nil + if bytes.EqualFold(value, []byte("YES")) || bytes.Equal(value, []byte("ON")) { + return int64(1), nil } - if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 { - return 0, nil + if bytes.EqualFold(value, []byte("NO")) || bytes.Equal(value, []byte("OFF")) { + return int64(0), nil } if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { return val, nil } + if val, err := strconv.ParseUint(string(value), 10, 64); err == nil { + return val, nil + } if val, err := strconv.ParseFloat(string(value), 64); err == nil { return val, nil } @@ -70,11 +85,41 @@ func ParseValue(value sql.RawBytes) (interface{}, error) { } var GlobalStatusConversions = map[string]ConversionFunc{ - "ssl_ctx_verify_depth": ParseInt, - "ssl_verify_depth": ParseInt, + "innodb_available_undo_logs": ParseUint, + "innodb_buffer_pool_pages_misc": ParseUint, + "innodb_data_pending_fsyncs": ParseUint, + "ssl_ctx_verify_depth": ParseUint, + "ssl_verify_depth": ParseUint, + + // see https://galeracluster.com/library/documentation/galera-status-variables.html + "wsrep_local_index": ParseUint, + "wsrep_local_send_queue_avg": ParseFloat, } var GlobalVariableConversions = map[string]ConversionFunc{ + // see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html + // see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html + "delay_key_write": ParseString, // ON, OFF, ALL + "enforce_gtid_consistency": ParseString, // ON, OFF, WARN + "event_scheduler": ParseString, // YES, NO, DISABLED + "have_openssl": ParseBoolAsInteger, // alias for have_ssl + "have_ssl": ParseBoolAsInteger, // YES, DISABLED + "have_symlink": ParseBoolAsInteger, // YES, NO, DISABLED + "session_track_gtids": ParseString, + "session_track_transaction_info": ParseString, + "ssl_fips_mode": ParseString, + "use_secondary_engine": ParseString, + + // https://dev.mysql.com/doc/refman/5.7/en/replication-options-binary-log.html + // https://dev.mysql.com/doc/refman/8.0/en/replication-options-binary-log.html + 
"transaction_write_set_extraction": ParseString, + + // https://dev.mysql.com/doc/refman/5.7/en/replication-options-replica.html + // https://dev.mysql.com/doc/refman/8.0/en/replication-options-replica.html + "slave_skip_errors": ParseString, + + // https://dev.mysql.com/doc/refman/5.7/en/replication-options-gtids.html + // https://dev.mysql.com/doc/refman/8.0/en/replication-options-gtids.html "gtid_mode": ParseGTIDMode, } diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go index 47189c18d1576..8276c9a61c28f 100644 --- a/plugins/inputs/mysql/v2/convert_test.go +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -2,6 +2,7 @@ package v2 import ( "database/sql" + "strings" "testing" "github.com/stretchr/testify/require" @@ -19,14 +20,14 @@ func TestConvertGlobalStatus(t *testing.T) { name: "default", key: "ssl_ctx_verify_depth", value: []byte("0"), - expected: int64(0), + expected: uint64(0), expectedErr: nil, }, { name: "overflow int64", key: "ssl_ctx_verify_depth", value: []byte("18446744073709551615"), - expected: int64(9223372036854775807), + expected: uint64(18446744073709551615), expectedErr: nil, }, { @@ -84,3 +85,43 @@ func TestCovertGlobalVariables(t *testing.T) { }) } } + +func TestParseValue(t *testing.T) { + testCases := []struct { + rawByte sql.RawBytes + output interface{} + err string + }{ + {sql.RawBytes("123"), int64(123), ""}, + {sql.RawBytes("abc"), "abc", ""}, + {sql.RawBytes("10.1"), 10.1, ""}, + {sql.RawBytes("ON"), int64(1), ""}, + {sql.RawBytes("OFF"), int64(0), ""}, + {sql.RawBytes("NO"), int64(0), ""}, + {sql.RawBytes("YES"), int64(1), ""}, + {sql.RawBytes("No"), int64(0), ""}, + {sql.RawBytes("Yes"), int64(1), ""}, + {sql.RawBytes("-794"), int64(-794), ""}, + {sql.RawBytes("2147483647"), int64(2147483647), ""}, // max int32 + {sql.RawBytes("2147483648"), int64(2147483648), ""}, // too big for int32 + {sql.RawBytes("9223372036854775807"), int64(9223372036854775807), ""}, // max int64 + {sql.RawBytes("9223372036854775808"), uint64(9223372036854775808), ""}, // too big for int64 + {sql.RawBytes("18446744073709551615"), uint64(18446744073709551615), ""}, // max uint64 + {sql.RawBytes("18446744073709551616"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes("18446744073709552333"), float64(18446744073709552000), ""}, // too big for uint64 + {sql.RawBytes(""), nil, "unconvertible value"}, + } + for _, cases := range testCases { + got, err := ParseValue(cases.rawByte) + + if err != nil && cases.err == "" { + t.Errorf("for %q got unexpected error: %q", string(cases.rawByte), err.Error()) + } else if err != nil && !strings.HasPrefix(err.Error(), cases.err) { + t.Errorf("for %q wanted error %q, got %q", string(cases.rawByte), cases.err, err.Error()) + } else if err == nil && cases.err != "" { + t.Errorf("for %q did not get expected error: %s", string(cases.rawByte), cases.err) + } else if got != cases.output { + t.Errorf("for %q wanted %#v (%T), got %#v (%T)", string(cases.rawByte), cases.output, cases.output, got, got) + } + } +} diff --git a/plugins/inputs/nats/README.md b/plugins/inputs/nats/README.md index 362ee17b2aa65..0642788b7e5a6 100644 --- a/plugins/inputs/nats/README.md +++ b/plugins/inputs/nats/README.md @@ -1,11 +1,14 @@ # NATS Input Plugin -The [NATS](http://www.nats.io/about/) monitoring plugin gathers metrics from -the NATS [monitoring http server](https://www.nats.io/documentation/server/gnatsd-monitoring/). 
+The [NATS](http://www.nats.io/about/) monitoring plugin gathers metrics from the +NATS [monitoring http server][1]. -### Configuration +[1]: https://www.nats.io/documentation/server/gnatsd-monitoring/ -```toml +## Configuration + +```toml @sample.conf +# Provides metrics about the state of a NATS server [[inputs.nats]] ## The address of the monitoring endpoint of the NATS server server = "http://localhost:8222" @@ -14,7 +17,7 @@ the NATS [monitoring http server](https://www.nats.io/documentation/server/gnats # response_timeout = "5s" ``` -### Metrics: +## Metrics - nats - tags @@ -35,8 +38,8 @@ the NATS [monitoring http server](https://www.nats.io/documentation/server/gnats - out_msgs (integer, count) - in_bytes (integer, bytes) -### Example Output: +## Example Output -``` +```shell nats,server=http://localhost:8222 uptime=117158348682i,mem=6647808i,subscriptions=0i,out_bytes=0i,connections=0i,in_msgs=0i,total_connections=0i,cores=2i,cpu=0,slow_consumers=0i,routes=0i,remotes=0i,out_msgs=0i,in_bytes=0i 1517015107000000000 ``` diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index 1afb0046dc3a5..1a92f2ea0bc39 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -1,67 +1,63 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats import ( + _ "embed" "encoding/json" - "io/ioutil" + "io" "net/http" "net/url" "path" "time" + gnatsd "github.com/nats-io/nats-server/v2/server" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" - gnatsd "github.com/nats-io/nats-server/v2/server" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type Nats struct { Server string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration client *http.Client } -var sampleConfig = ` - ## The address of the monitoring endpoint of the NATS server - server = "http://localhost:8222" - - ## Maximum time to receive response - # response_timeout = "5s" -` - -func (n *Nats) SampleConfig() string { +func (*Nats) SampleConfig() string { return sampleConfig } -func (n *Nats) Description() string { - return "Provides metrics about the state of a NATS server" -} - func (n *Nats) Gather(acc telegraf.Accumulator) error { - url, err := url.Parse(n.Server) + address, err := url.Parse(n.Server) if err != nil { return err } - url.Path = path.Join(url.Path, "varz") + address.Path = path.Join(address.Path, "varz") if n.client == nil { n.client = n.createHTTPClient() } - resp, err := n.client.Get(url.String()) + resp, err := n.client.Get(address.String()) if err != nil { return err } defer resp.Body.Close() - bytes, err := ioutil.ReadAll(resp.Body) + bytes, err := io.ReadAll(resp.Body) if err != nil { return err } stats := new(gnatsd.Varz) - err = json.Unmarshal([]byte(bytes), &stats) + err = json.Unmarshal(bytes, &stats) if err != nil { return err } @@ -93,7 +89,7 @@ func (n *Nats) createHTTPClient() *http.Client { transport := &http.Transport{ Proxy: http.ProxyFromEnvironment, } - timeout := n.ResponseTimeout.Duration + timeout := time.Duration(n.ResponseTimeout) if timeout == time.Duration(0) { timeout = 5 * time.Second } diff --git a/plugins/inputs/nats/nats_freebsd.go b/plugins/inputs/nats/nats_freebsd.go index 08d08ba760df0..f50ba2cfcf678 100644 --- a/plugins/inputs/nats/nats_freebsd.go +++ b/plugins/inputs/nats/nats_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd && !cgo // +build freebsd,!cgo package nats diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index ece22288ff9af..135951405feda 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -1,3 +1,4 @@ +//go:build !freebsd || (freebsd && cgo) // +build !freebsd freebsd,cgo package nats @@ -69,12 +70,17 @@ var sampleVarz = ` func TestMetricsCorrect(t *testing.T) { var acc testutil.Accumulator - srv := newTestNatsServer() + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.URL.Path, "/varz", "Cannot handle request") + + rsp := sampleVarz + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) + })) defer srv.Close() n := &Nats{Server: srv.URL} - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) fields := map[string]interface{}{ "in_msgs": int64(74148556), @@ -97,18 +103,3 @@ func TestMetricsCorrect(t *testing.T) { } acc.AssertContainsTaggedFields(t, "nats", fields, tags) } - -func newTestNatsServer() *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - - switch r.URL.Path { - case "/varz": - rsp = sampleVarz - default: - panic("Cannot handle request") - } - - fmt.Fprintln(w, rsp) - })) -} diff --git a/plugins/inputs/nats/sample.conf b/plugins/inputs/nats/sample.conf new file mode 100644 index 0000000000000..c5aaecd06ddbf --- /dev/null +++ b/plugins/inputs/nats/sample.conf @@ -0,0 +1,7 @@ +# Provides metrics about the state of a NATS server +[[inputs.nats]] + ## The address of the monitoring endpoint of the NATS server + server = "http://localhost:8222" + + ## Maximum time to receive response + # 
response_timeout = "5s" diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index ae40d9185100a..b3865dca74850 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -6,9 +6,10 @@ creates metrics using one of the supported [input data formats][]. A [Queue Group][queue group] is used when subscribing to subjects so multiple instances of telegraf can read from a NATS cluster in parallel. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Read metrics from NATS subject(s) [[inputs.nats_consumer]] ## urls of NATS servers servers = ["nats://localhost:4222"] diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index 057c77ee795c4..9adc62b60bd69 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -1,18 +1,25 @@ -package natsconsumer +//go:generate ../../../tools/readme_config_includer/generator +package nats_consumer import ( "context" + _ "embed" "fmt" "strings" "sync" + "github.com/nats-io/nats.go" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/nats-io/nats.go" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( defaultMaxUndeliveredMessages = 1000 ) @@ -49,9 +56,7 @@ type natsConsumer struct { PendingBytesLimit int `toml:"pending_bytes_limit"` MaxUndeliveredMessages int `toml:"max_undelivered_messages"` - - // Legacy metric buffer support; deprecated in v0.10.3 - MetricBuffer int + MetricBuffer int `toml:"metric_buffer" deprecated:"0.10.3;2.0.0;option is ignored"` conn *nats.Conn subs []*nats.Subscription @@ -66,63 +71,10 @@ type natsConsumer struct { cancel context.CancelFunc } -var sampleConfig = ` - ## urls of NATS servers - servers = ["nats://localhost:4222"] - - ## subject(s) to consume - subjects = ["telegraf"] - - ## name a queue group - queue_group = "telegraf_consumers" - - ## Optional credentials - # username = "" - # password = "" - - ## Optional NATS 2.0 and NATS NGS compatible user credentials - # credentials = "/etc/telegraf/nats.creds" - - ## Use Transport Layer Security - # secure = false - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Sets the limits for pending msgs and bytes for each subscription - ## These shouldn't need to be adjusted except in very high throughput scenarios - # pending_message_limit = 65536 - # pending_bytes_limit = 67108864 - - ## Maximum messages to read from the broker that have not been written by an - ## output. For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. - ## - ## For example, if each message from the queue contains 10 metrics and the - ## output metric_batch_size is 1000, setting this to 100 will ensure that a - ## full batch is collected and the write is triggered immediately without - ## waiting until the next flush_interval. - # max_undelivered_messages = 1000 - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` - -func (n *natsConsumer) SampleConfig() string { +func (*natsConsumer) SampleConfig() string { return sampleConfig } -func (n *natsConsumer) Description() string { - return "Read metrics from NATS subject(s)" -} - func (n *natsConsumer) SetParser(parser parsers.Parser) { n.parser = parser } @@ -264,7 +216,7 @@ func (n *natsConsumer) Stop() { n.clean() } -func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { +func (n *natsConsumer) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/nats_consumer/sample.conf b/plugins/inputs/nats_consumer/sample.conf new file mode 100644 index 0000000000000..a498dadbc0832 --- /dev/null +++ b/plugins/inputs/nats_consumer/sample.conf @@ -0,0 +1,48 @@ +# Read metrics from NATS subject(s) +[[inputs.nats_consumer]] + ## urls of NATS servers + servers = ["nats://localhost:4222"] + + ## subject(s) to consume + subjects = ["telegraf"] + + ## name a queue group + queue_group = "telegraf_consumers" + + ## Optional credentials + # username = "" + # password = "" + + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + + ## Use Transport Layer Security + # secure = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Sets the limits for pending msgs and bytes for each subscription + ## These shouldn't need to be adjusted except in very high throughput scenarios + # pending_message_limit = 65536 + # pending_bytes_limit = 67108864 + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" diff --git a/plugins/inputs/neptune_apex/README.md b/plugins/inputs/neptune_apex/README.md index 6fd28a16a6d21..70267496d184c 100644 --- a/plugins/inputs/neptune_apex/README.md +++ b/plugins/inputs/neptune_apex/README.md @@ -1,15 +1,17 @@ # Neptune Apex Input Plugin -The Neptune Apex controller family allows an aquarium hobbyist to monitor and control -their tanks based on various probes. The data is taken directly from the `/cgi-bin/status.xml` at the interval specified -in the telegraf.conf configuration file. +The Neptune Apex controller family allows an aquarium hobbyist to monitor and +control their tanks based on various probes. The data is taken directly from the +`/cgi-bin/status.xml` at the interval specified in the telegraf.conf +configuration file. -The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects real-time data from the Apex's status.xml page. 
+The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects +real-time data from the Apex's status.xml page. +## Configuration -### Configuration - -```toml +```toml @sample.conf +# Neptune Apex data collector [[inputs.neptune_apex]] ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. ## Measurements will be logged under "apex". @@ -25,15 +27,18 @@ The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects real-t ``` -### Metrics +## Metrics -The Neptune Apex controller family allows an aquarium hobbyist to monitor and control -their tanks based on various probes. The data is taken directly from the /cgi-bin/status.xml at the interval specified -in the telegraf.conf configuration file. +The Neptune Apex controller family allows an aquarium hobbyist to monitor and +control their tanks based on various probes. The data is taken directly from the +/cgi-bin/status.xml at the interval specified in the telegraf.conf configuration +file. -No manipulation is done on any of the fields to ensure future changes to the status.xml do not introduce conversion bugs -to this plugin. When reasonable and predictable, some tags are derived to make graphing easier and without front-end -programming. These tags are clearly marked in the list below and should be considered a convenience rather than authoritative. +No manipulation is done on any of the fields to ensure future changes to the +status.xml do not introduce conversion bugs to this plugin. When reasonable and +predictable, some tags are derived to make graphing easier and without front-end +programming. These tags are clearly marked in the list below and should be +considered a convenience rather than authoritative. - neptune_apex (All metrics have this measurement name) - tags: @@ -62,38 +67,45 @@ programming. These tags are clearly marked in the list below and should be consi - power_failed (int64, Unix epoch in ns) when the controller last lost power. Omitted if the apex reports it as "none" - power_restored (int64, Unix epoch in ns) when the controller last powered on. Omitted if the apex reports it as "none" - serial (string, serial number) - - time: - - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with + - time: + - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with the local system of Apex Fusion. Since the Apex uses NTP, this should not matter in most scenarios. - -### Sample Queries - +## Sample Queries Get the max, mean, and min for the temperature in the last hour: + ```sql SELECT mean("value") FROM "neptune_apex" WHERE ("probe_type" = 'Temp') AND time >= now() - 6h GROUP BY time(20s) ``` -### Troubleshooting +## Troubleshooting + +### sendRequest failure + +This indicates a problem communicating with the local Apex controller. If on +Mac/Linux, try curl: -#### sendRequest failure -This indicates a problem communicating with the local Apex controller. If on Mac/Linux, try curl: ```sh -$ curl apex.local/cgi-bin/status.xml +curl apex.local/cgi-bin/status.xml ``` + to isolate the problem. -#### parseXML errors -Ensure the XML being returned is valid. If you get valid XML back, open a bug request. +### parseXML errors -#### Missing fields/data -The neptune_apex plugin is strict on its input to prevent any conversion errors. 
If you have fields in the status.xml -output that are not converted to a metric, open a feature request and paste your whole status.xml +Ensure the XML being returned is valid. If you get valid XML back, open a bug +request. -### Example Output +### Missing fields/data -``` +The neptune_apex plugin is strict on its input to prevent any conversion +errors. If you have fields in the status.xml output that are not converted to a +metric, open a feature request and paste your whole status.xml + +## Example Output + +```text neptune_apex,hardware=1.0,host=ubuntu,software=5.04_7A18,source=apex,type=controller power_failed=1544814000000000000i,power_restored=1544833875000000000i,serial="AC5:12345" 1545978278000000000 neptune_apex,device_id=base_Var1,hardware=1.0,host=ubuntu,name=VarSpd1_I1,output_id=0,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF1" 1545978278000000000 neptune_apex,device_id=base_Var2,hardware=1.0,host=ubuntu,name=VarSpd2_I2,output_id=1,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF2" 1545978278000000000 @@ -138,12 +150,13 @@ neptune_apex,hardware=1.0,host=ubuntu,name=Volt_4,software=5.04_7A18,source=apex ``` -### Contributing +## Contributing -This plugin is used for mission-critical aquatic life support. A bug could very well result in the death of animals. -Neptune does not publish a schema file and as such, we have made this plugin very strict on input with no provisions for -automatically adding fields. We are also careful to not add default values when none are presented to prevent automation -errors. +This plugin is used for mission-critical aquatic life support. A bug could very +well result in the death of animals. Neptune does not publish a schema file and +as such, we have made this plugin very strict on input with no provisions for +automatically adding fields. We are also careful to not add default values when +none are presented to prevent automation errors. -When writing unit tests, use actual Apex output to run tests. It's acceptable to abridge the number of repeated fields -but never inner fields or parameters. +When writing unit tests, use actual Apex output to run tests. It's acceptable to +abridge the number of repeated fields but never inner fields or parameters. diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go index 8161ac7b4880a..1dde25741b657 100644 --- a/plugins/inputs/neptune_apex/neptune_apex.go +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -1,11 +1,13 @@ -// Package neptuneapex implements an input plugin for the Neptune Apex +//go:generate ../../../tools/readme_config_includer/generator +// Package neptune_apex implements an input plugin for the Neptune Apex // aquarium controller. -package neptuneapex +package neptune_apex import ( + _ "embed" "encoding/xml" "fmt" - "io/ioutil" + "io" "math" "net/http" "strconv" @@ -14,10 +16,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Measurement is constant across all metrics. const Measurement = "neptune_apex" @@ -51,30 +57,12 @@ type outlet struct { // NeptuneApex implements telegraf.Input. 
type NeptuneApex struct { Servers []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration httpClient *http.Client } -// Description implements telegraf.Input.Description -func (*NeptuneApex) Description() string { - return "Neptune Apex data collector" -} - -// SampleConfig implements telegraf.Input.SampleConfig func (*NeptuneApex) SampleConfig() string { - return ` - ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. - ## Measurements will be logged under "apex". - - ## The base URL of the local Apex(es). If you specify more than one server, they will - ## be differentiated by the "source" tag. - servers = [ - "http://apex.local", - ] - - ## The response_timeout specifies how long to wait for a reply from the Apex. - #response_timeout = "5s" -` + return sampleConfig } // Gather implements telegraf.Input.Gather @@ -245,7 +233,7 @@ func findProbe(probe string, probes []probe) int { // returns a time.Time struct. func parseTime(val string, tz float64) (time.Time, error) { // Magic time constant from https://golang.org/pkg/time/#Parse - const TimeLayout = "01/02/2006 15:04:05 -0700" + const timeLayout = "01/02/2006 15:04:05 -0700" // Timezone offset needs to be explicit sign := '+' @@ -256,7 +244,7 @@ func parseTime(val string, tz float64) (time.Time, error) { // Build a time string with the timezone in a format Go can parse. tzs := fmt.Sprintf("%c%04d", sign, int(math.Abs(tz))*100) ts := fmt.Sprintf("%s %s", val, tzs) - t, err := time.Parse(TimeLayout, ts) + t, err := time.Parse(timeLayout, ts) if err != nil { return time.Now(), fmt.Errorf("unable to parse %q (%v)", ts, err) } @@ -276,7 +264,7 @@ func (n *NeptuneApex) sendRequest(server string) ([]byte, error) { url, resp.StatusCode, http.StatusText(resp.StatusCode), http.StatusOK, http.StatusText(http.StatusOK)) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("unable to read output from %q: %v", url, err) } diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go index cefa5fad14662..0c2971efe5e65 100644 --- a/plugins/inputs/neptune_apex/neptune_apex_test.go +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -1,22 +1,23 @@ -package neptuneapex +package neptune_apex import ( - "bytes" "context" "net" "net/http" "net/http/httptest" - "reflect" "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) - w.Write([]byte("data")) + _, err := w.Write([]byte("data")) + require.NoError(t, err) }) c, destroy := fakeHTTPClient(h) defer destroy() @@ -46,12 +47,9 @@ func TestGather(t *testing.T) { t.Run(test.name, func(t *testing.T) { var acc testutil.Accumulator n.Servers = test.servers - n.Gather(&acc) - if len(acc.Errors) != len(test.servers) { - t.Errorf("Number of servers mismatch. got=%d, want=%d", - len(acc.Errors), len(test.servers)) - } - + require.NoError(t, n.Gather(&acc)) + require.Lenf(t, acc.Errors, len(test.servers), + "Number of servers mismatch. 
got=%d, want=%d", len(acc.Errors), len(test.servers)) }) } } @@ -63,33 +61,32 @@ func TestParseXML(t *testing.T) { tests := []struct { name string xmlResponse []byte - wantMetrics []*testutil.Metric + wantMetrics []telegraf.Metric wantAccErr bool wantErr bool }{ { name: "Good test", xmlResponse: []byte(APEX2016), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "type": "controller", "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "AC5:12345", "power_failed": int64(1544814000000000000), "power_restored": int64(1544833875000000000), }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "0", "device_id": "base_Var1", @@ -99,12 +96,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "PF1"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "PF1"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "6", "device_id": "base_email", @@ -114,12 +111,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "8", "device_id": "2_1", @@ -129,16 +126,16 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "state": "AON", "watt": 35.0, "amp": 0.3, }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "18", "device_id": "3_1", @@ -148,15 +145,15 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "state": "TBL", "xstatus": "OK", }, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "28", "device_id": "4_9", @@ -166,12 +163,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "output_id": "32", "device_id": "Cntl_A2", @@ -181,12 +178,12 @@ func TestParseXML(t *testing.T) { "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"state": "AOF"}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"state": "AOF"}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "name": "Salt", "type": "probe", @@ -194,20 +191,21 @@ func TestParseXML(t *testing.T) { "software": 
"5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"value": 30.1}, - }, - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + map[string]interface{}{"value": 30.1}, + goodTime, + ), + testutil.MustMetric( + Measurement, + map[string]string{ "source": "apex", "name": "Volt_2", "type": "probe", "software": "5.04_7A18", "hardware": "1.0", }, - Fields: map[string]interface{}{"value": 115.0}, - }, + map[string]interface{}{"value": 115.0}, + goodTime, + ), }, }, { @@ -226,21 +224,21 @@ func TestParseXML(t *testing.T) { `12/22/2018 21:55:37 -8.0a 12/22/2018 22:55:37`), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_restored": int64(1545548137000000000), }, - }, + goodTime, + ), }, }, { @@ -249,21 +247,21 @@ func TestParseXML(t *testing.T) { `12/22/2018 21:55:37 -8.0a 12/22/2018 22:55:37`), - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545548137000000000), }, - }, + goodTime, + ), }, }, { @@ -283,22 +281,22 @@ func TestParseXML(t *testing.T) { o1Wabc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, { @@ -312,22 +310,22 @@ func TestParseXML(t *testing.T) { o1Aabc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, { @@ -340,22 +338,22 @@ func TestParseXML(t *testing.T) { p1abc `), wantAccErr: true, - wantMetrics: []*testutil.Metric{ - { - Measurement: Measurement, - Time: goodTime, - Tags: map[string]string{ + wantMetrics: []telegraf.Metric{ + testutil.MustMetric( + Measurement, + map[string]string{ "source": "", "type": "controller", "hardware": "", "software": "", }, - Fields: map[string]interface{}{ + map[string]interface{}{ "serial": "", "power_failed": int64(1545544537000000000), "power_restored": int64(1545544537000000000), }, - }, + goodTime, + ), }, }, } @@ -364,33 +362,17 @@ func TestParseXML(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { var acc testutil.Accumulator - err := n.parseXML(&acc, []byte(test.xmlResponse)) - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. 
got=%v, want=%t", err, test.wantErr) - } + err := n.parseXML(&acc, test.xmlResponse) if test.wantErr { + require.Error(t, err, "expected error but got ") return } - if len(acc.Errors) > 0 != test.wantAccErr { - t.Errorf("Accumulator errors. got=%v, want=none", acc.Errors) - } - if len(acc.Metrics) != len(test.wantMetrics) { - t.Fatalf("Invalid number of metrics received. got=%d, want=%d", len(acc.Metrics), len(test.wantMetrics)) - } - for i, m := range acc.Metrics { - if m.Measurement != test.wantMetrics[i].Measurement { - t.Errorf("Metric measurement mismatch at position %d:\ngot=\n%s\nWant=\n%s", i, m.Measurement, test.wantMetrics[i].Measurement) - } - if !reflect.DeepEqual(m.Tags, test.wantMetrics[i].Tags) { - t.Errorf("Metric tags mismatch at position %d:\ngot=\n%v\nwant=\n%v", i, m.Tags, test.wantMetrics[i].Tags) - } - if !reflect.DeepEqual(m.Fields, test.wantMetrics[i].Fields) { - t.Errorf("Metric fields mismatch at position %d:\ngot=\n%#v\nwant=:\n%#v", i, m.Fields, test.wantMetrics[i].Fields) - } - if !m.Time.Equal(test.wantMetrics[i].Time) { - t.Errorf("Metric time mismatch at position %d:\ngot=\n%s\nwant=\n%s", i, m.Time, test.wantMetrics[i].Time) - } - } + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Equalf(t, len(acc.Errors) > 0, test.wantAccErr, + "Accumulator errors. got=%v, want=%t", acc.Errors, test.wantAccErr) + + testutil.RequireMetricsEqual(t, acc.GetTelegrafMetrics(), test.wantMetrics) }) } } @@ -424,7 +406,8 @@ func TestSendRequest(t *testing.T) { h := http.HandlerFunc(func( w http.ResponseWriter, r *http.Request) { w.WriteHeader(test.statusCode) - w.Write([]byte("data")) + _, err := w.Write([]byte("data")) + require.NoError(t, err) }) c, destroy := fakeHTTPClient(h) defer destroy() @@ -432,16 +415,14 @@ func TestSendRequest(t *testing.T) { httpClient: c, } resp, err := n.sendRequest("http://abc") - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) - } if test.wantErr { + require.Error(t, err, "expected error but got ") return } - if bytes.Compare(resp, []byte("data")) != 0 { - t.Errorf( - "Response data mismatch. got=%q, want=%q", resp, "data") - } + + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Equalf(t, resp, []byte("data"), "Response data mismatch. got=%q, want=%q", resp, "data") }) } } @@ -480,15 +461,14 @@ func TestParseTime(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() res, err := parseTime(test.input, test.timeZone) - if (err != nil) != test.wantErr { - t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) - } if test.wantErr { + require.Error(t, err, "expected error but got ") return } - if !test.wantTime.Equal(res) { - t.Errorf("err mismatch. got=%s, want=%s", res, test.wantTime) - } + + // No error case + require.NoErrorf(t, err, "expected no error but got: %v", err) + require.Truef(t, test.wantTime.Equal(res), "time mismatch. 
got=%q, want=%q", res, test.wantTime) }) } } @@ -524,27 +504,11 @@ func TestFindProbe(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() index := findProbe(test.probeName, fakeProbes) - if index != test.wantIndex { - t.Errorf("probe index mismatch; got=%d, want %d", index, test.wantIndex) - } + require.Equalf(t, index, test.wantIndex, "probe index mismatch; got=%d, want %d", index, test.wantIndex) }) } } -func TestDescription(t *testing.T) { - n := &NeptuneApex{} - if n.Description() == "" { - t.Errorf("Empty description") - } -} - -func TestSampleConfig(t *testing.T) { - n := &NeptuneApex{} - if n.SampleConfig() == "" { - t.Errorf("Empty sample config") - } -} - // This fakeHttpClient creates a server and binds a client to it. // That way, it is possible to control the http // output from within the test without changes to the main code. diff --git a/plugins/inputs/neptune_apex/sample.conf b/plugins/inputs/neptune_apex/sample.conf new file mode 100644 index 0000000000000..6fb1d1f04f6fb --- /dev/null +++ b/plugins/inputs/neptune_apex/sample.conf @@ -0,0 +1,14 @@ +# Neptune Apex data collector +[[inputs.neptune_apex]] + ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. + ## Measurements will be logged under "apex". + + ## The base URL of the local Apex(es). If you specify more than one server, they will + ## be differentiated by the "source" tag. + servers = [ + "http://apex.local", + ] + + ## The response_timeout specifies how long to wait for a reply from the Apex. + #response_timeout = "5s" + diff --git a/plugins/inputs/net/NET_README.md b/plugins/inputs/net/README.md similarity index 78% rename from plugins/inputs/net/NET_README.md rename to plugins/inputs/net/README.md index d2571d29e9ede..1ef30089b8b21 100644 --- a/plugins/inputs/net/NET_README.md +++ b/plugins/inputs/net/README.md @@ -1,10 +1,11 @@ # Net Input Plugin -This plugin gathers metrics about network interface and protocol usage (Linux only). +This plugin gathers metrics about network interface and protocol usage (Linux +only). -### Configuration: +## Configuration -```toml +```toml @sample.conf # Gather metrics about network interfaces [[inputs.net]] ## By default, telegraf gathers stats from any up interface (excluding loopback) @@ -21,7 +22,7 @@ This plugin gathers metrics about network interface and protocol usage (Linux on ## ``` -### Measurements & Fields: +## Metrics The fields from this plugin are gathered in the _net_ measurement. @@ -36,36 +37,50 @@ Fields (all platforms): * drop_in - The total number of received packets dropped by the interface * drop_out - The total number of transmitted packets dropped by the interface -Different platforms gather the data above with different mechanisms. Telegraf uses the ([gopsutil](https://github.com/shirou/gopsutil)) package, which under Linux reads the /proc/net/dev file. -Under freebsd/openbsd and darwin the plugin uses netstat. +Different platforms gather the data above with different mechanisms. Telegraf +uses the ([gopsutil](https://github.com/shirou/gopsutil)) package, which under +Linux reads the /proc/net/dev file. Under freebsd/openbsd and darwin the plugin +uses netstat. -Additionally, for the time being _only under Linux_, the plugin gathers system wide stats for different network protocols using /proc/net/snmp (tcp, udp, icmp, etc.). -Explanation of the different metrics exposed by snmp is out of the scope of this document. 
The best way to find information would be tracing the constants in the Linux kernel source [here](https://elixir.bootlin.com/linux/latest/source/net/ipv4/proc.c) and their usage. If /proc/net/snmp cannot be read for some reason, telegraf ignores the error silently. +Additionally, for the time being _only under Linux_, the plugin gathers system +wide stats for different network protocols using /proc/net/snmp (tcp, udp, icmp, +etc.). Explanation of the different metrics exposed by snmp is out of the scope +of this document. The best way to find information would be tracing the +constants in the [Linux kernel source][source] and their usage. If +/proc/net/snmp cannot be read for some reason, telegraf ignores the error +silently. -### Tags: +[source]: https://elixir.bootlin.com/linux/latest/source/net/ipv4/proc.c + +## Tags * Net measurements have the following tags: - - interface (the interface from which metrics are gathered) + * interface (the interface from which metrics are gathered) Under Linux the system wide protocol metrics have the interface=all tag. -### Sample Queries: +## Sample Queries + +You can use the following query to get the upload/download traffic rate per +second for all interfaces in the last hour. The query uses the [derivative +function][deriv] which calculates the rate of change between subsequent field +values. -You can use the following query to get the upload/download traffic rate per second for all interfaces in the last hour. The query uses the [derivative function](https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative) which calculates the rate of change between subsequent field values. +[deriv]: https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative ```sql SELECT derivative(first(bytes_recv), 1s) as "download bytes/sec", derivative(first(bytes_sent), 1s) as "upload bytes/sec" FROM net WHERE time > now() - 1h AND interface != 'all' GROUP BY time(10s), interface fill(0); ``` -### Example Output: +## Example Output -``` +```shell # All platforms $ ./telegraf --config telegraf.conf --input-filter net --test net,interface=eth0,host=HOST bytes_sent=451838509i,bytes_recv=3284081640i,packets_sent=2663590i,packets_recv=3585442i,err_in=0i,err_out=0i,drop_in=4i,drop_out=0i 1492834180000000000 ``` -``` +```shell # Linux $ ./telegraf --config telegraf.conf --input-filter net --test net,interface=eth0,host=HOST bytes_sent=451838509i,bytes_recv=3284081640i,packets_sent=2663590i,packets_recv=3585442i,err_in=0i,err_out=0i,drop_in=4i,drop_out=0i 1492834180000000000 diff --git a/plugins/inputs/net/net.go b/plugins/inputs/net/net.go index f91501860e749..d3e1cae22bd63 100644 --- a/plugins/inputs/net/net.go +++ b/plugins/inputs/net/net.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package net import ( + _ "embed" "fmt" "net" "strings" @@ -11,6 +13,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type NetIOStats struct { filter filter.Filter ps system.PS @@ -20,36 +26,18 @@ type NetIOStats struct { Interfaces []string } -func (_ *NetIOStats) Description() string { - return "Read metrics about network interface usage" -} - -var netSampleConfig = ` - ## By default, telegraf gathers stats from any up interface (excluding loopback) - ## Setting interfaces will tell it to gather these explicit interfaces, - ## regardless of status. 
- ## - # interfaces = ["eth0"] - ## - ## On linux systems telegraf also collects protocol stats. - ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. - ## - # ignore_protocol_stats = false - ## -` - -func (_ *NetIOStats) SampleConfig() string { - return netSampleConfig +func (*NetIOStats) SampleConfig() string { + return sampleConfig } -func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { - netio, err := s.ps.NetIO() +func (n *NetIOStats) Gather(acc telegraf.Accumulator) error { + netio, err := n.ps.NetIO() if err != nil { return fmt.Errorf("error getting net io info: %s", err) } - if s.filter == nil { - if s.filter, err = filter.Compile(s.Interfaces); err != nil { + if n.filter == nil { + if n.filter, err = filter.Compile(n.Interfaces); err != nil { return fmt.Errorf("error compiling filter: %s", err) } } @@ -64,17 +52,17 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { } for _, io := range netio { - if len(s.Interfaces) != 0 { + if len(n.Interfaces) != 0 { var found bool - if s.filter.Match(io.Name) { + if n.filter.Match(io.Name) { found = true } if !found { continue } - } else if !s.skipChecks { + } else if !n.skipChecks { iface, ok := interfacesByName[io.Name] if !ok { continue @@ -108,8 +96,8 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { // Get system wide stats for different network protocols // (ignore these stats if the call fails) - if !s.IgnoreProtocolStats { - netprotos, _ := s.ps.NetProto() + if !n.IgnoreProtocolStats { + netprotos, _ := n.ps.NetProto() fields := make(map[string]interface{}) for _, proto := range netprotos { for stat, value := range proto.Stats { diff --git a/plugins/inputs/net/net_test.go b/plugins/inputs/net/net_test.go index 3c4c3c7ef8d84..68babcf977fc4 100644 --- a/plugins/inputs/net/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -4,9 +4,10 @@ import ( "syscall" "testing" + "github.com/influxdata/telegraf/plugins/inputs/netstat" "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/net" + "github.com/shirou/gopsutil/v3/net" "github.com/stretchr/testify/require" ) @@ -88,7 +89,9 @@ func TestNetStats(t *testing.T) { acc.Metrics = nil - err = (&NetStats{&mps}).Gather(&acc) + err = (&netstat.NetStats{ + PS: &mps, + }).Gather(&acc) require.NoError(t, err) fields3 := map[string]interface{}{ diff --git a/plugins/inputs/net/sample.conf b/plugins/inputs/net/sample.conf new file mode 100644 index 0000000000000..e7e3c8db73914 --- /dev/null +++ b/plugins/inputs/net/sample.conf @@ -0,0 +1,14 @@ +# Gather metrics about network interfaces +[[inputs.net]] + ## By default, telegraf gathers stats from any up interface (excluding loopback) + ## Setting interfaces will tell it to gather these explicit interfaces, + ## regardless of status. When specifying an interface, glob-style + ## patterns are also supported. + ## + # interfaces = ["eth*", "enp0s[0-1]", "lo"] + ## + ## On linux systems telegraf also collects protocol stats. + ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. + ## + # ignore_protocol_stats = false + ## diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md index 2c492408beef2..ffae42a3d54af 100644 --- a/plugins/inputs/net_response/README.md +++ b/plugins/inputs/net_response/README.md @@ -3,9 +3,9 @@ The input plugin test UDP/TCP connections response time and can optional verify text in the response. 
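The check net_response performs boils down to "dial with a timeout, optionally send a payload, optionally wait for an expected reply, and record the elapsed time". A minimal, self-contained Go sketch of that probe follows; `probeTCP` and its error messages are illustrative only, not the plugin's API, and the real TCPGather/UDPGather additionally encode the outcome as result tags and fields rather than returning errors:

```go
package main

import (
	"bufio"
	"fmt"
	"net"
	"strings"
	"time"
)

// probeTCP measures how long a TCP endpoint takes to accept a
// connection and (optionally) answer a request: dial with a timeout,
// send an optional payload, then wait for one response line and look
// for an expected substring in it.
func probeTCP(addr, send, expect string, timeout time.Duration) (time.Duration, error) {
	start := time.Now()
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return 0, fmt.Errorf("connection failed: %w", err)
	}
	defer conn.Close()

	if send != "" {
		if _, err := conn.Write([]byte(send)); err != nil {
			return time.Since(start), err
		}
	}
	if expect != "" {
		if err := conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
			return time.Since(start), err
		}
		line, err := bufio.NewReader(conn).ReadString('\n')
		if err != nil {
			return time.Since(start), fmt.Errorf("read failed: %w", err)
		}
		if !strings.Contains(line, expect) {
			return time.Since(start), fmt.Errorf("string mismatch in %q", line)
		}
	}
	return time.Since(start), nil
}

func main() {
	rt, err := probeTCP("localhost:80", "", "", time.Second)
	fmt.Printf("response_time=%v err=%v\n", rt, err)
}
```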
-### Configuration: +## Configuration -```toml +```toml @sample.conf # Collect response time of a TCP or UDP connection [[inputs.net_response]] ## Protocol, must be "tcp" or "udp" @@ -33,7 +33,7 @@ verify text in the response. # fielddrop = ["result_type", "string_found"] ``` -### Metrics: +## Metrics - net_response - tags: @@ -47,9 +47,9 @@ verify text in the response. - result_type (string) **DEPRECATED in 1.7; use result tag** - string_found (boolean) **DEPRECATED in 1.4; use result tag** -### Example Output: +## Example Output -``` +```shell net_response,port=8086,protocol=tcp,result=success,server=localhost response_time=0.000092948,result_code=0i,result_type="success" 1525820185000000000 net_response,port=8080,protocol=tcp,result=connection_failed,server=localhost result_code=2i,result_type="connection_failed" 1525820088000000000 net_response,port=8080,protocol=udp,result=read_failed,server=localhost result_code=3i,result_type="read_failed",string_found=false 1525820088000000000 diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 023b4405e3609..a3cb1467b67ae 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package net_response import ( "bufio" + _ "embed" "errors" "net" "net/textproto" @@ -9,78 +11,48 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type ResultType uint64 const ( Success ResultType = 0 - Timeout = 1 - ConnectionFailed = 2 - ReadFailed = 3 - StringMismatch = 4 + Timeout ResultType = 1 + ConnectionFailed ResultType = 2 + ReadFailed ResultType = 3 + StringMismatch ResultType = 4 ) // NetResponse struct type NetResponse struct { Address string - Timeout internal.Duration - ReadTimeout internal.Duration + Timeout config.Duration + ReadTimeout config.Duration Send string Expect string Protocol string } -var description = "Collect response time of a TCP or UDP connection" - -// Description will return a short string to explain what the plugin does. -func (*NetResponse) Description() string { - return description -} - -var sampleConfig = ` - ## Protocol, must be "tcp" or "udp" - ## NOTE: because the "udp" protocol does not respond to requests, it requires - ## a send/expect string pair (see below). - protocol = "tcp" - ## Server address (default localhost) - address = "localhost:80" - - ## Set timeout - # timeout = "1s" - - ## Set read timeout (only used if expecting a response) - # read_timeout = "1s" - - ## The following options are required for UDP checks. For TCP, they are - ## optional. The plugin will send the given string to the server and then - ## expect to receive the given 'expect' string back. - ## string sent to the server - # send = "ssh" - ## expected string in answer - # expect = "ssh" - - ## Uncomment to remove deprecated fields - # fielddrop = ["result_type", "string_found"] -` - -// SampleConfig will return a complete configuration example with details about each field. func (*NetResponse) SampleConfig() string { return sampleConfig } // TCPGather will execute if there are TCP tests defined in the configuration. 
// It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]interface{}) { +func (n *NetResponse) TCPGather() (map[string]string, map[string]interface{}, error) { // Prepare returns - tags = make(map[string]string) - fields = make(map[string]interface{}) + tags := make(map[string]string) + fields := make(map[string]interface{}) // Start Timer start := time.Now() // Connecting - conn, err := net.DialTimeout("tcp", n.Address, n.Timeout.Duration) + conn, err := net.DialTimeout("tcp", n.Address, time.Duration(n.Timeout)) // Stop timer responseTime := time.Since(start).Seconds() // Handle error @@ -90,20 +62,24 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int } else { setResult(ConnectionFailed, fields, tags, n.Expect) } - return tags, fields + return tags, fields, nil } defer conn.Close() // Send string if needed if n.Send != "" { msg := []byte(n.Send) - conn.Write(msg) + if _, gerr := conn.Write(msg); gerr != nil { + return nil, nil, gerr + } // Stop timer responseTime = time.Since(start).Seconds() } // Read string if needed if n.Expect != "" { // Set read timeout - conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)) + if gerr := conn.SetReadDeadline(time.Now().Add(time.Duration(n.ReadTimeout))); gerr != nil { + return nil, nil, gerr + } // Prepare reader reader := bufio.NewReader(conn) tp := textproto.NewReader(reader) @@ -116,8 +92,8 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int setResult(ReadFailed, fields, tags, n.Expect) } else { // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(string(data)) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(data) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -128,15 +104,15 @@ func (n *NetResponse) TCPGather() (tags map[string]string, fields map[string]int setResult(Success, fields, tags, n.Expect) } fields["response_time"] = responseTime - return tags, fields + return tags, fields, nil } // UDPGather will execute if there are UDP tests defined in the configuration. 
// It will return a map[string]interface{} for fields and a map[string]string for tags -func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]interface{}) { +func (n *NetResponse) UDPGather() (map[string]string, map[string]interface{}, error) { // Prepare returns - tags = make(map[string]string) - fields = make(map[string]interface{}) + tags := make(map[string]string) + fields := make(map[string]interface{}) // Start Timer start := time.Now() // Resolving @@ -144,22 +120,30 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int // Handle error if err != nil { setResult(ConnectionFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } // Connecting conn, err := net.DialUDP("udp", nil, udpAddr) // Handle error if err != nil { setResult(ConnectionFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } defer conn.Close() // Send string msg := []byte(n.Send) - conn.Write(msg) + if _, gerr := conn.Write(msg); gerr != nil { + return nil, nil, gerr + } // Read string // Set read timeout - conn.SetReadDeadline(time.Now().Add(n.ReadTimeout.Duration)) + if gerr := conn.SetReadDeadline(time.Now().Add(time.Duration(n.ReadTimeout))); gerr != nil { + return nil, nil, gerr + } // Read buf := make([]byte, 1024) _, _, err = conn.ReadFromUDP(buf) @@ -168,12 +152,14 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int // Handle error if err != nil { setResult(ReadFailed, fields, tags, n.Expect) - return tags, fields + // Error encoded in result + //nolint:nilerr + return tags, fields, nil } // Looking for string in answer - RegEx := regexp.MustCompile(`.*` + n.Expect + `.*`) - find := RegEx.FindString(string(buf)) + regEx := regexp.MustCompile(`.*` + n.Expect + `.*`) + find := regEx.FindString(string(buf)) if find != "" { setResult(Success, fields, tags, n.Expect) } else { @@ -182,7 +168,7 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int fields["response_time"] = responseTime - return tags, fields + return tags, fields, nil } // Gather is called by telegraf when the plugin is executed on its interval. @@ -190,18 +176,18 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int // also fill an Accumulator that is supplied. 
func (n *NetResponse) Gather(acc telegraf.Accumulator) error { // Set default values - if n.Timeout.Duration == 0 { - n.Timeout.Duration = time.Second + if n.Timeout == 0 { + n.Timeout = config.Duration(time.Second) } - if n.ReadTimeout.Duration == 0 { - n.ReadTimeout.Duration = time.Second + if n.ReadTimeout == 0 { + n.ReadTimeout = config.Duration(time.Second) } // Check send and expected string if n.Protocol == "udp" && n.Send == "" { - return errors.New("Send string cannot be empty") + return errors.New("send string cannot be empty") } if n.Protocol == "udp" && n.Expect == "" { - return errors.New("Expected string cannot be empty") + return errors.New("expected string cannot be empty") } // Prepare host and port host, port, err := net.SplitHostPort(n.Address) @@ -212,22 +198,31 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { n.Address = "localhost:" + port } if port == "" { - return errors.New("Bad port") + return errors.New("bad port") } // Prepare data tags := map[string]string{"server": host, "port": port} var fields map[string]interface{} var returnTags map[string]string + // Gather data - if n.Protocol == "tcp" { - returnTags, fields = n.TCPGather() + switch n.Protocol { + case "tcp": + returnTags, fields, err = n.TCPGather() + if err != nil { + return err + } tags["protocol"] = "tcp" - } else if n.Protocol == "udp" { - returnTags, fields = n.UDPGather() + case "udp": + returnTags, fields, err = n.UDPGather() + if err != nil { + return err + } tags["protocol"] = "udp" - } else { - return errors.New("Bad protocol") + default: + return errors.New("bad protocol") } + // Merge the tags for k, v := range returnTags { tags[k] = v diff --git a/plugins/inputs/net_response/net_response_test.go b/plugins/inputs/net_response/net_response_test.go index ef4d0714a7a74..6a021d14ad2eb 100644 --- a/plugins/inputs/net_response/net_response_test.go +++ b/plugins/inputs/net_response/net_response_test.go @@ -6,28 +6,12 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestSample(t *testing.T) { - c := &NetResponse{} - output := c.SampleConfig() - if output != sampleConfig { - t.Error("Sample config doesn't match") - } -} - -func TestDescription(t *testing.T) { - c := &NetResponse{} - output := c.Description() - if output != description { - t.Error("Description output is not correct") - } -} func TestBadProtocol(t *testing.T) { var acc testutil.Accumulator // Init plugin @@ -36,9 +20,9 @@ func TestBadProtocol(t *testing.T) { Address: ":9999", } // Error - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "Bad protocol", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "bad protocol", err.Error()) } func TestNoPort(t *testing.T) { @@ -47,9 +31,9 @@ func TestNoPort(t *testing.T) { Protocol: "tcp", Address: ":", } - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "Bad port", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "bad port", err.Error()) } func TestAddressOnly(t *testing.T) { @@ -58,9 +42,9 @@ func TestAddressOnly(t *testing.T) { Protocol: "tcp", Address: "127.0.0.1", } - err1 := c.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "address 127.0.0.1: missing port in address", err1.Error()) + err := c.Gather(&acc) + require.Error(t, err) + require.Equal(t, "address 127.0.0.1: missing port 
in address", err.Error()) } func TestSendExpectStrings(t *testing.T) { @@ -77,12 +61,12 @@ func TestSendExpectStrings(t *testing.T) { Send: "toast", Expect: "", } - err1 := tc.Gather(&acc) - require.Error(t, err1) - assert.Equal(t, "Send string cannot be empty", err1.Error()) - err2 := uc.Gather(&acc) - require.Error(t, err2) - assert.Equal(t, "Expected string cannot be empty", err2.Error()) + err := tc.Gather(&acc) + require.Error(t, err) + require.Equal(t, "send string cannot be empty", err.Error()) + err = uc.Gather(&acc) + require.Error(t, err) + require.Equal(t, "expected string cannot be empty", err.Error()) } func TestTCPError(t *testing.T) { @@ -91,10 +75,10 @@ func TestTCPError(t *testing.T) { c := NetResponse{ Protocol: "tcp", Address: ":9999", + Timeout: config.Duration(time.Second * 30), } - // Error - err1 := c.Gather(&acc) - require.NoError(t, err1) + // Gather + require.NoError(t, c.Gather(&acc)) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -118,23 +102,23 @@ func TestTCPOK1(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "tcp", } // Start TCP server wg.Add(1) go TCPServer(t, &wg) - wg.Wait() - // Connect + wg.Wait() // Wait for the server to spin up wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -162,23 +146,24 @@ func TestTCPOK2(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test2", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "tcp", } // Start TCP server wg.Add(1) go TCPServer(t, &wg) wg.Wait() - // Connect wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -208,13 +193,14 @@ func TestUDPError(t *testing.T) { Protocol: "udp", } // Gather - err1 := c.Gather(&acc) + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } // Error - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", map[string]interface{}{ @@ -240,23 +226,24 @@ func TestUDPOK1(t *testing.T) { Address: "127.0.0.1:2004", Send: "test", Expect: "test", - ReadTimeout: internal.Duration{Duration: time.Second * 3}, - Timeout: internal.Duration{Duration: time.Second}, + ReadTimeout: config.Duration(time.Second * 3), + Timeout: config.Duration(time.Second), Protocol: "udp", } // Start UDP server wg.Add(1) go UDPServer(t, &wg) wg.Wait() - // Connect wg.Add(1) - err1 := c.Gather(&acc) - wg.Wait() + + // Connect + require.NoError(t, c.Gather(&acc)) + acc.Wait(1) + // Override response time for _, p := range acc.Metrics { p.Fields["response_time"] = 1.0 } - require.NoError(t, err1) acc.AssertContainsTaggedFields(t, "net_response", 
map[string]interface{}{ @@ -277,25 +264,33 @@ func TestUDPOK1(t *testing.T) { } func UDPServer(t *testing.T, wg *sync.WaitGroup) { - udpAddr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:2004") - conn, _ := net.ListenUDP("udp", udpAddr) + defer wg.Done() + udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:2004") + require.NoError(t, err) + conn, err := net.ListenUDP("udp", udpAddr) + require.NoError(t, err) wg.Done() buf := make([]byte, 1024) _, remoteaddr, _ := conn.ReadFromUDP(buf) - conn.WriteToUDP(buf, remoteaddr) - conn.Close() - wg.Done() + _, err = conn.WriteToUDP(buf, remoteaddr) + require.NoError(t, err) + require.NoError(t, conn.Close()) } func TCPServer(t *testing.T, wg *sync.WaitGroup) { - tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2004") - tcpServer, _ := net.ListenTCP("tcp", tcpAddr) + defer wg.Done() + tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:2004") + require.NoError(t, err) + tcpServer, err := net.ListenTCP("tcp", tcpAddr) + require.NoError(t, err) wg.Done() - conn, _ := tcpServer.AcceptTCP() + conn, err := tcpServer.AcceptTCP() + require.NoError(t, err) buf := make([]byte, 1024) - conn.Read(buf) - conn.Write(buf) - conn.CloseWrite() - tcpServer.Close() - wg.Done() + _, err = conn.Read(buf) + require.NoError(t, err) + _, err = conn.Write(buf) + require.NoError(t, err) + require.NoError(t, conn.CloseWrite()) + require.NoError(t, tcpServer.Close()) } diff --git a/plugins/inputs/net_response/sample.conf b/plugins/inputs/net_response/sample.conf new file mode 100644 index 0000000000000..deb0939c85597 --- /dev/null +++ b/plugins/inputs/net_response/sample.conf @@ -0,0 +1,25 @@ +# Collect response time of a TCP or UDP connection +[[inputs.net_response]] + ## Protocol, must be "tcp" or "udp" + ## NOTE: because the "udp" protocol does not respond to requests, it requires + ## a send/expect string pair (see below). + protocol = "tcp" + ## Server address (default localhost) + address = "localhost:80" + + ## Set timeout + # timeout = "1s" + + ## Set read timeout (only used if expecting a response) + # read_timeout = "1s" + + ## The following options are required for UDP checks. For TCP, they are + ## optional. The plugin will send the given string to the server and then + ## expect to receive the given 'expect' string back. + ## string sent to the server + # send = "ssh" + ## expected string in answer + # expect = "ssh" + + ## Uncomment to remove deprecated fields; recommended for new deploys + # fielddrop = ["result_type", "string_found"] diff --git a/plugins/inputs/net/NETSTAT_README.md b/plugins/inputs/netstat/README.md similarity index 77% rename from plugins/inputs/net/NETSTAT_README.md rename to plugins/inputs/netstat/README.md index d0f39f5e400e6..11276aa71d97d 100644 --- a/plugins/inputs/net/NETSTAT_README.md +++ b/plugins/inputs/netstat/README.md @@ -1,16 +1,17 @@ # Netstat Input Plugin -This plugin collects TCP connections state and UDP socket counts by using `lsof`. +This plugin collects TCP connections state and UDP socket counts by using +`lsof`. -### Configuration: +## Configuration ``` toml -# Collect TCP connections state and UDP socket counts +# Read TCP metrics such as established, time wait and sockets counts. [[inputs.netstat]] # no configuration ``` -# Measurements: +## Metrics Supported TCP Connection states are follows. @@ -27,12 +28,14 @@ Supported TCP Connection states are follows. 
- closing - none -### TCP Connection State measurements: +## TCP Connection State measurements Meta: + - units: counts Measurement names: + - tcp_established - tcp_syn_sent - tcp_syn_recv @@ -48,10 +51,12 @@ Measurement names: If there are no connection on the state, the metric is not counted. -### UDP socket counts measurements: +## UDP socket counts measurements Meta: + - units: counts Measurement names: + - udp_socket diff --git a/plugins/inputs/net/netstat.go b/plugins/inputs/netstat/netstat.go similarity index 73% rename from plugins/inputs/net/netstat.go rename to plugins/inputs/netstat/netstat.go index 555b396afd357..8727b1333efb0 100644 --- a/plugins/inputs/net/netstat.go +++ b/plugins/inputs/netstat/netstat.go @@ -1,6 +1,8 @@ -package net +//go:generate ../../../tools/readme_config_includer/generator +package netstat import ( + _ "embed" "fmt" "syscall" @@ -9,22 +11,20 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" ) -type NetStats struct { - ps system.PS -} +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string -func (_ *NetStats) Description() string { - return "Read TCP metrics such as established, time wait and sockets counts." +type NetStats struct { + PS system.PS } -var tcpstatSampleConfig = "" - -func (_ *NetStats) SampleConfig() string { - return tcpstatSampleConfig +func (*NetStats) SampleConfig() string { + return sampleConfig } -func (s *NetStats) Gather(acc telegraf.Accumulator) error { - netconns, err := s.ps.NetConnections() +func (ns *NetStats) Gather(acc telegraf.Accumulator) error { + netconns, err := ns.PS.NetConnections() if err != nil { return fmt.Errorf("error getting net connections info: %s", err) } @@ -35,7 +35,7 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error { tags := map[string]string{} for _, netcon := range netconns { if netcon.Type == syscall.SOCK_DGRAM { - counts["UDP"] += 1 + counts["UDP"]++ continue // UDP has no status } c, ok := counts[netcon.Status] @@ -67,6 +67,6 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("netstat", func() telegraf.Input { - return &NetStats{ps: system.NewSystemPS()} + return &NetStats{PS: system.NewSystemPS()} }) } diff --git a/plugins/inputs/netstat/sample.conf b/plugins/inputs/netstat/sample.conf new file mode 100644 index 0000000000000..57884af4907c3 --- /dev/null +++ b/plugins/inputs/netstat/sample.conf @@ -0,0 +1,3 @@ +# Read TCP metrics such as established, time wait and sockets counts. +[[inputs.netstat]] + # no configuration diff --git a/plugins/inputs/nfsclient/README.md b/plugins/inputs/nfsclient/README.md new file mode 100644 index 0000000000000..a02f0eb0d0275 --- /dev/null +++ b/plugins/inputs/nfsclient/README.md @@ -0,0 +1,196 @@ +# NFS Client Input Plugin + +The NFS Client input plugin collects data from /proc/self/mountstats. By +default, only a limited number of general system-level metrics are collected, +including basic read/write counts. If `fullstat` is set, a great deal of +additional metrics are collected, detailed below. + +__NOTE__ Many of the metrics, even if tagged with a mount point, are really +_per-server_. Thus, if you mount these two shares: `nfs01:/vol/foo/bar` and +`nfs01:/vol/foo/baz`, there will be two near identical entries in +/proc/self/mountstats. This is a limitation of the metrics exposed by the +kernel, not the telegraf plugin. 
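Each stanza in /proc/self/mountstats begins with a header of the form `device <export> mounted on <mountpoint> with fstype nfs ...`, and the plugin keys its `serverexport` and `mountpoint` tags off those header fields. A minimal sketch of reading just the headers, which makes the per-server duplication above easy to see (a hypothetical standalone program, assuming the field positions shown in the comment; this is not the plugin's parser):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("/proc/self/mountstats")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Header shape: device nfs01:/vol/home mounted on /home with fstype nfs statvers=1.1
		fields := strings.Fields(scanner.Text())
		if len(fields) >= 8 && fields[0] == "device" && fields[6] == "fstype" &&
			strings.HasPrefix(fields[7], "nfs") {
			fmt.Printf("serverexport=%s mountpoint=%s\n", fields[1], fields[4])
		}
	}
	if err := scanner.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```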
+ +## Configuration + +```toml @sample.conf +# Read per-mount NFS client metrics from /proc/self/mountstats +[[inputs.nfsclient]] + ## Read more low-level metrics (optional, defaults to false) + # fullstat = false + + ## List of mounts to explicitly include or exclude (optional) + ## The pattern (Go regexp) is matched against the mount point (not the + ## device being mounted). If include_mounts is set, all mounts are ignored + ## unless present in the list. If a mount is listed in both include_mounts + ## and exclude_mounts, it is excluded. Go regexp patterns can be used. + # include_mounts = [] + # exclude_mounts = [] + + ## List of operations to include or exclude from collecting. This applies + ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts: + ## the default is to collect everything; when include_operations is set, only + ## those OPs are collected; when exclude_operations is set, all are collected + ## except those listed. If include and exclude are set, the OP is excluded. + ## See /proc/self/mountstats for a list of valid operations; note that + ## NFSv3 and NFSv4 have different lists. While it is not possible to + ## have different include/exclude lists for NFSv3/4, unused elements + ## in the list should be okay. It is possible to have different lists + ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas, + ## with their own lists. See "include_mounts" above, and be careful of + ## duplicate metrics. + # include_operations = [] + # exclude_operations = [] +``` + +### Configuration Options + +- __fullstat__ bool: Collect per-operation type metrics. Defaults to false. +- __include_mounts__ list(string): gather metrics for only these mounts. Default is to watch all mounts. +- __exclude_mounts__ list(string): gather metrics for all mounts, except those listed in this option. Excludes take precedence over includes. +- __include_operations__ list(string): List of specific NFS operations to track. See /proc/self/mountstats (the "per-op statistics" section) for complete lists of valid options for NFSv3 and NFSv4. The default is to gather all metrics, but this is almost certainly _not_ what you want (there are 22 operations for NFSv3, and well over 50 for NFSv4). A suggested 'minimal' list of operations to collect for basic usage: `['READ','WRITE','ACCESS','GETATTR','READDIR','LOOKUP']` +- __exclude_operations__ list(string): Gather all metrics, except those listed. Excludes take precedence over includes. + +_N.B._ the `include_mounts` and `exclude_mounts` arguments are both applied to +the local mount location (e.g. /mnt/NFS), not the server export +(e.g. nfsserver:/vol/NFS). Go regexp patterns can be used in either. + +### References + +1. [nfsiostat](http://git.linux-nfs.org/?p=steved/nfs-utils.git;a=summary) +2. [net/sunrpc/stats.c - Linux source code](https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/net/sunrpc/stats.c) +3. [What is in /proc/self/mountstats for NFS mounts: an introduction](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex) +4. [The xprt: data for NFS mounts in /proc/self/mountstats](https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsXprt) + +## Metrics + +### Fields + +- nfsstat + - bytes (integer, bytes) - The total number of bytes exchanged doing this operation. This is bytes sent _and_ received, including overhead _and_ payload. (bytes = OP_bytes_sent + OP_bytes_recv. See nfs_ops below) + - ops (integer, count) - The number of operations of this type executed. 
+ - retrans (integer, count) - The number of times an operation had to be retried (retrans = OP_trans - OP_ops. See nfs_ops below) + - exe (integer, milliseconds) - The number of milliseconds it took to process the operations. + - rtt (integer, milliseconds) - The total round-trip time for all operations. + - rtt_per_op (float, milliseconds) - The average round-trip time per operation. + +In addition, enabling `fullstat` will make many more metrics available. + +### Tags + +- All measurements have the following tags: + - mountpoint - The local mountpoint, for instance: "/var/www" + - serverexport - The full server export, for instance: "nfsserver.example.org:/export" + +- Measurements nfsstat and nfs_ops will also include: + - operation - the NFS operation in question. `READ` or `WRITE` for nfsstat, but potentially one of ~20 or ~50, depending on NFS version. A complete list of operations supported is visible in `/proc/self/mountstats`. + +## Additional metrics + +When `fullstat` is true, additional measurements are collected. Tags are the +same as above. + +### NFS Operations + +Most descriptions come from [Reference][ref] and `nfs_iostat.h`. Field order +and names are the same as in `/proc/self/mountstats` and the Kernel source. + +Please refer to `/proc/self/mountstats` for a list of supported NFS operations, +as it changes occasionally. + +- nfs_bytes + - fields: + - normalreadbytes (int, bytes): Bytes read from the server via `read()` + - normalwritebytes (int, bytes): Bytes written to the server via `write()` + - directreadbytes (int, bytes): Bytes read with O_DIRECT set + - directwritebytes (int, bytes): Bytes written with O_DIRECT set + - serverreadbytes (int, bytes): Bytes read via NFS READ (via `mmap()`) + - serverwritebytes (int, bytes): Bytes written via NFS WRITE (via `mmap()`) + - readpages (int, count): Number of pages read + - writepages (int, count): Number of pages written + +- nfs_events (Per-event metrics) + - fields: + - inoderevalidates (int, count): How many times cached inode attributes have to be re-validated from the server. + - dentryrevalidates (int, count): How many times cached dentry nodes have to be re-validated. + - datainvalidates (int, count): How many times an inode had its cached data thrown out. + - attrinvalidates (int, count): How many times an inode has had cached inode attributes invalidated. + - vfsopen (int, count): How many times files or directories have been `open()`'d. + - vfslookup (int, count): How many name lookups in directories there have been. + - vfsaccess (int, count): Number of calls to `access()`. (formerly called "vfspermission") + - vfsupdatepage (int, count): Count of updates (and potential writes) to pages. + - vfsreadpage (int, count): Number of pages read. + - vfsreadpages (int, count): Count of how many times a _group_ of pages was read (possibly via `mmap()`?). + - vfswritepage (int, count): Number of pages written. + - vfswritepages (int, count): Count of how many times a _group_ of pages was written (possibly via `mmap()`?) + - vfsgetdents (int, count): Count of directory entry reads with getdents(). These reads can be served from cache and don't necessarily imply actual NFS requests. (formerly called "vfsreaddir") + - vfssetattr (int, count): How many times we've set attributes on inodes. + - vfsflush (int, count): Count of times pending writes have been forcibly flushed to the server. + - vfsfsync (int, count): Count of calls to `fsync()` on directories and files. 
+ - vfslock (int, count): Number of times a lock was attempted on a file (regardless of success or not). + - vfsrelease (int, count): Number of calls to `close()`. + - congestionwait (int, count): Believed to be unused by the Linux kernel, but it is part of the NFS spec. + - setattrtrunc (int, count): How many times files have had their size truncated. + - extendwrite (int, count): How many times a file has been grown because you're writing beyond the existing end of the file. + - sillyrenames (int, count): Number of times an in-use file was removed (thus creating a temporary ".nfsXXXXXX" file) + - shortreads (int, count): Number of times the NFS server returned less data than requested. + - shortwrites (int, count): Number of times NFS server reports it wrote less data than requested. + - delay (int, count): Occurrences of EJUKEBOX ("Jukebox Delay", probably unused) + - pnfsreads (int, count): Count of NFS v4.1+ pNFS reads. + - pnfswrites (int, count): Count of NFS v4.1+ pNFS writes. + +- nfs_xprt_tcp + - fields: + - bind_count (int, count): Number of _completely new_ mounts to this server (sometimes 0?) + - connect_count (int, count): How many times the client has connected to the server in question + - connect_time (int, jiffies): How long the NFS client has spent waiting for its connection(s) to the server to be established. + - idle_time (int, seconds): How long (in seconds) since the NFS mount saw any RPC traffic. + - rpcsends (int, count): How many RPC requests this mount has sent to the server. + - rpcreceives (int, count): How many RPC replies this mount has received from the server. + - badxids (int, count): Count of XIDs sent by the server that the client doesn't know about. + - inflightsends (int, count): Number of outstanding requests; always >1. (See reference #4 for comment on this field) + - backlogutil (int, count): Cumulative backlog count + +- nfs_xprt_udp + - fields: + - [same as nfs_xprt_tcp, except for connect_count, connect_time, and idle_time] + +- nfs_ops + - fields (In all cases, the `operation` tag is set to the uppercase name of the NFS operation, _e.g._ "READ", "FSINFO", _etc_. See /proc/self/mountstats for a full list): + - ops (int, count): Total operations of this type. + - trans (int, count): Total transmissions of this type, including retransmissions: `OP_trans - OP_ops = total_retransmissions` (lower is better). + - timeouts (int, count): Number of major timeouts. + - bytes_sent (int, count): Bytes sent, including headers (should also be close to on-wire size). + - bytes_recv (int, count): Bytes received, including headers (should be close to on-wire size). + - queue_time (int, milliseconds): Cumulative time requests of this OP type waited in the queue before sending. + - response_time (int, milliseconds): Cumulative time waiting for a response for this OP type. + - total_time (int, milliseconds): Cumulative total request time for this OP type (queue_time plus response_time). + - errors (int, count): Total number of operations that complete with tk_status < 0 (usually errors). This is a new field, present in kernel >=5.3, mountstats version 1.1 + +[ref]: https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex + +## Example Output + +The example below shows the basic per-server read and write metrics. 
+```shell +nfsstat,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS ops=600i,retrans=1i,bytes=1207i,rtt=606i,exe=607i 1612651512000000000 +nfsstat,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS bytes=1407i,rtt=706i,exe=707i,ops=700i,retrans=1i 1612651512000000000 + +``` + +With `fullstat=true`, additional measurements are included for `nfs_bytes`, +`nfs_events`, and `nfs_xprt_tcp` (and `nfs_xprt_udp` if present). Additionally, +per-OP metrics are collected, with examples for NULL, READ, and WRITE shown. +Please refer to `/proc/self/mountstats` for a list of supported NFS operations, +as it changes periodically. + +```shell +nfs_bytes,mountpoint=/home,serverexport=nfs01:/vol/home directreadbytes=0i,directwritebytes=0i,normalreadbytes=42648757667i,normalwritebytes=0i,readpages=10404603i,serverreadbytes=42617098139i,serverwritebytes=0i,writepages=0i 1608787697000000000 +nfs_events,mountpoint=/home,serverexport=nfs01:/vol/home attrinvalidates=116i,congestionwait=0i,datainvalidates=65i,delay=0i,dentryrevalidates=5911243i,extendwrite=0i,inoderevalidates=200378i,pnfsreads=0i,pnfswrites=0i,setattrtrunc=0i,shortreads=0i,shortwrites=0i,sillyrenames=0i,vfsaccess=7203852i,vfsflush=117405i,vfsfsync=0i,vfsgetdents=3368i,vfslock=0i,vfslookup=740i,vfsopen=157281i,vfsreadpage=16i,vfsreadpages=86874i,vfsrelease=155526i,vfssetattr=0i,vfsupdatepage=0i,vfswritepage=0i,vfswritepages=215514i 1608787697000000000 +nfs_xprt_tcp,mountpoint=/home,serverexport=nfs01:/vol/home backlogutil=0i,badxids=0i,bind_count=1i,connect_count=1i,connect_time=0i,idle_time=0i,inflightsends=15659826i,rpcreceives=2173896i,rpcsends=2173896i 1608787697000000000 + +nfs_ops,mountpoint=/NFS,operation=NULL,serverexport=1.2.3.4:/storage/NFS trans=0i,timeouts=0i,bytes_sent=0i,bytes_recv=0i,queue_time=0i,response_time=0i,total_time=0i,ops=0i 1612651512000000000 +nfs_ops,mountpoint=/NFS,operation=READ,serverexport=1.2.3.4:/storage/NFS bytes=1207i,timeouts=602i,total_time=607i,exe=607i,trans=601i,bytes_sent=603i,bytes_recv=604i,queue_time=605i,ops=600i,retrans=1i,rtt=606i,response_time=606i 1612651512000000000 +nfs_ops,mountpoint=/NFS,operation=WRITE,serverexport=1.2.3.4:/storage/NFS ops=700i,bytes=1407i,exe=707i,trans=701i,timeouts=702i,response_time=706i,total_time=707i,retrans=1i,rtt=706i,bytes_sent=703i,bytes_recv=704i,queue_time=705i 1612651512000000000 +``` diff --git a/plugins/inputs/nfsclient/nfsclient.go b/plugins/inputs/nfsclient/nfsclient.go new file mode 100644 index 0000000000000..07060a603d595 --- /dev/null +++ b/plugins/inputs/nfsclient/nfsclient.go @@ -0,0 +1,482 @@ +//go:generate ../../../tools/readme_config_includer/generator +package nfsclient + +import ( + "bufio" + _ "embed" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +type NFSClient struct { + Fullstat bool `toml:"fullstat"` + IncludeMounts []string `toml:"include_mounts"` + ExcludeMounts []string `toml:"exclude_mounts"` + IncludeOperations []string `toml:"include_operations"` + ExcludeOperations []string `toml:"exclude_operations"` + Log telegraf.Logger `toml:"-"` + nfs3Ops map[string]bool + nfs4Ops map[string]bool + mountstatsPath string +} + +func convertToUint64(line []string) ([]uint64, error) { + /* A "line" of input data (a pre-split array of strings) is + processed one field at a time. Each field is converted to + a uint64 value and appended to an array of return values. + On error, we check for ErrRange and return an error + if found. This situation indicates a pretty major issue in + the /proc/self/mountstats file, and returning faulty data + is worse than no data. Other errors are ignored, and we append + whatever we got in the first place (probably 0). + Yes, this is ugly. */ + + var nline []uint64 + + if len(line) < 2 { + return nline, nil + } + + // Skip the first field; it's handled specially as the "first" variable + for _, l := range line[1:] { + val, err := strconv.ParseUint(l, 10, 64) + if err != nil { + if numError, ok := err.(*strconv.NumError); ok { + if numError.Err == strconv.ErrRange { + return nil, fmt.Errorf("errrange: line:[%v] raw:[%v] -> parsed:[%v]", line, l, val) + } + } + } + nline = append(nline, val) + } + return nline, nil +} + +func (n *NFSClient) parseStat(mountpoint string, export string, version string, line []string, acc telegraf.Accumulator) error { + tags := map[string]string{"mountpoint": mountpoint, "serverexport": export} + nline, err := convertToUint64(line) + if err != nil { + return err + } + + if len(nline) == 0 { + n.Log.Warnf("Parsing Stat line with one field: %s\n", line) + return nil + } + + first := strings.Replace(line[0], ":", "", 1) + + var eventsFields = []string{ + "inoderevalidates", + "dentryrevalidates", + "datainvalidates", + "attrinvalidates", + "vfsopen", + "vfslookup", + "vfsaccess", + "vfsupdatepage", + "vfsreadpage", + "vfsreadpages", + "vfswritepage", + "vfswritepages", + "vfsgetdents", + "vfssetattr", + "vfsflush", + "vfsfsync", + "vfslock", + "vfsrelease", + "congestionwait", + "setattrtrunc", + "extendwrite", + "sillyrenames", + "shortreads", + "shortwrites", + "delay", + "pnfsreads", + "pnfswrites", + } + + var bytesFields = []string{ + "normalreadbytes", + "normalwritebytes", + "directreadbytes", + "directwritebytes", + "serverreadbytes", + "serverwritebytes", + "readpages", + "writepages", + } + + var xprtudpFields = []string{ + "bind_count", + "rpcsends", + "rpcreceives", + "badxids", + "inflightsends", + "backlogutil", + } + + var xprttcpFields = []string{ + "bind_count", + "connect_count", + "connect_time", + "idle_time", + "rpcsends", + "rpcreceives", + "badxids", + "inflightsends", + "backlogutil", + } + + var nfsopFields = []string{ + "ops", + "trans", + "timeouts", + "bytes_sent", + "bytes_recv", + "queue_time", + "response_time", + "total_time", + "errors", + } + + var fields = make(map[string]interface{}) + + switch first { + case "READ", "WRITE": + fields["ops"] = nline[0] + fields["retrans"] = nline[1] - nline[0] + fields["bytes"] = nline[3] + nline[4] + fields["rtt"] = nline[6] + fields["exe"] = nline[7] + fields["rtt_per_op"] = 0.0 + if nline[0] > 0 { + fields["rtt_per_op"] = float64(nline[6]) / float64(nline[0]) + } + tags["operation"] = first + acc.AddFields("nfsstat", fields, tags) + } + + if n.Fullstat { 
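+		// In fullstat mode the remaining line shapes of a stanza are
+		// decoded as well: "events" (VFS event counters), "bytes"
+		// (byte counters), and "xprt" (RPC transport stats, read at an
+		// offset of two to skip the transport name and source port),
+		// plus one per-operation block for every NFSv3/v4 operation
+		// still enabled in nfs3Ops/nfs4Ops.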
switch first { + case "events": + if len(nline) >= len(eventsFields) { + for i, t := range eventsFields { + fields[t] = nline[i] + } + acc.AddFields("nfs_events", fields, tags) + } + + case "bytes": + if len(nline) >= len(bytesFields) { + for i, t := range bytesFields { + fields[t] = nline[i] + } + acc.AddFields("nfs_bytes", fields, tags) + } + + case "xprt": + if len(line) > 1 { + switch line[1] { + case "tcp": + if len(nline)+2 >= len(xprttcpFields) { + for i, t := range xprttcpFields { + fields[t] = nline[i+2] + } + acc.AddFields("nfs_xprt_tcp", fields, tags) + } + case "udp": + if len(nline)+2 >= len(xprtudpFields) { + for i, t := range xprtudpFields { + fields[t] = nline[i+2] + } + acc.AddFields("nfs_xprt_udp", fields, tags) + } + } + } + } + + if (version == "3" && n.nfs3Ops[first]) || (version == "4" && n.nfs4Ops[first]) { + tags["operation"] = first + if len(nline) <= len(nfsopFields) { + for i, t := range nline { + fields[nfsopFields[i]] = t + } + acc.AddFields("nfs_ops", fields, tags) + } + } + } + + return nil +} + +func (n *NFSClient) processText(scanner *bufio.Scanner, acc telegraf.Accumulator) error { + var mount string + var version string + var export string + var skip bool + + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + lineLength := len(line) + + if lineLength == 0 { + continue + } + + skip = false + + // This denotes a new mount has been found, so set + // mount and export, and stop skipping (for now) + if lineLength > 4 && choice.Contains("fstype", line) && (choice.Contains("nfs", line) || choice.Contains("nfs4", line)) { + mount = line[4] + export = line[1] + } else if lineLength > 5 && (choice.Contains("(nfs)", line) || choice.Contains("(nfs4)", line)) { + version = strings.Split(line[5], "/")[1] + } + + if mount == "" { + continue + } + + if len(n.IncludeMounts) > 0 { + skip = true + for _, RE := range n.IncludeMounts { + matched, _ := regexp.MatchString(RE, mount) + if matched { + skip = false + break + } + } + } + + if !skip && len(n.ExcludeMounts) > 0 { + for _, RE := range n.ExcludeMounts { + matched, _ := regexp.MatchString(RE, mount) + if matched { + skip = true + break + } + } + } + + if !skip { + err := n.parseStat(mount, export, version, line, acc) + if err != nil { + return fmt.Errorf("could not parseStat: %w", err) + } + } + } + + return nil +} + +func (n *NFSClient) getMountStatsPath() string { + path := "/proc/self/mountstats" + if os.Getenv("MOUNT_PROC") != "" { + path = os.Getenv("MOUNT_PROC") + } + n.Log.Debugf("using [%s] for mountstats", path) + return path +} + +func (*NFSClient) SampleConfig() string { + return sampleConfig +} + +func (n *NFSClient) Gather(acc telegraf.Accumulator) error { + file, err := os.Open(n.mountstatsPath) + if err != nil { + n.Log.Errorf("Failed opening the [%s] file: %s ", file, err) + return err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if err := n.processText(scanner, acc); err != nil { + return err + } + + if err := scanner.Err(); err != nil { + n.Log.Errorf("%s", err) + return err + } + + return nil +} + +func (n *NFSClient) Init() error { + var nfs3Fields = []string{ + "NULL", + "GETATTR", + "SETATTR", + "LOOKUP", + "ACCESS", + "READLINK", + "READ", + "WRITE", + "CREATE", + "MKDIR", + "SYMLINK", + "MKNOD", + "REMOVE", + "RMDIR", + "RENAME", + "LINK", + "READDIR", + "READDIRPLUS", + "FSSTAT", + "FSINFO", + "PATHCONF", + "COMMIT", + } + + var nfs4Fields = []string{ + "NULL", + "READ", + "WRITE", + "COMMIT", + "OPEN", + "OPEN_CONFIRM", + "OPEN_NOATTR", + "OPEN_DOWNGRADE", + 
"CLOSE", + "SETATTR", + "FSINFO", + "RENEW", + "SETCLIENTID", + "SETCLIENTID_CONFIRM", + "LOCK", + "LOCKT", + "LOCKU", + "ACCESS", + "GETATTR", + "LOOKUP", + "LOOKUP_ROOT", + "REMOVE", + "RENAME", + "LINK", + "SYMLINK", + "CREATE", + "PATHCONF", + "STATFS", + "READLINK", + "READDIR", + "SERVER_CAPS", + "DELEGRETURN", + "GETACL", + "SETACL", + "FS_LOCATIONS", + "RELEASE_LOCKOWNER", + "SECINFO", + "FSID_PRESENT", + "EXCHANGE_ID", + "CREATE_SESSION", + "DESTROY_SESSION", + "SEQUENCE", + "GET_LEASE_TIME", + "RECLAIM_COMPLETE", + "LAYOUTGET", + "GETDEVICEINFO", + "LAYOUTCOMMIT", + "LAYOUTRETURN", + "SECINFO_NO_NAME", + "TEST_STATEID", + "FREE_STATEID", + "GETDEVICELIST", + "BIND_CONN_TO_SESSION", + "DESTROY_CLIENTID", + "SEEK", + "ALLOCATE", + "DEALLOCATE", + "LAYOUTSTATS", + "CLONE", + "COPY", + "OFFLOAD_CANCEL", + "LOOKUPP", + "LAYOUTERROR", + "COPY_NOTIFY", + "GETXATTR", + "SETXATTR", + "LISTXATTRS", + "REMOVEXATTR", + } + + nfs3Ops := make(map[string]bool) + nfs4Ops := make(map[string]bool) + + n.mountstatsPath = n.getMountStatsPath() + + if len(n.IncludeOperations) == 0 { + for _, Op := range nfs3Fields { + nfs3Ops[Op] = true + } + for _, Op := range nfs4Fields { + nfs4Ops[Op] = true + } + } else { + for _, Op := range n.IncludeOperations { + nfs3Ops[Op] = true + } + for _, Op := range n.IncludeOperations { + nfs4Ops[Op] = true + } + } + + if len(n.ExcludeOperations) > 0 { + for _, Op := range n.ExcludeOperations { + if nfs3Ops[Op] { + delete(nfs3Ops, Op) + } + if nfs4Ops[Op] { + delete(nfs4Ops, Op) + } + } + } + + n.nfs3Ops = nfs3Ops + n.nfs4Ops = nfs4Ops + + if len(n.IncludeMounts) > 0 { + n.Log.Debugf("Including these mount patterns: %v", n.IncludeMounts) + } else { + n.Log.Debugf("Including all mounts.") + } + + if len(n.ExcludeMounts) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeMounts) + } else { + n.Log.Debugf("Not excluding any mounts.") + } + + if len(n.IncludeOperations) > 0 { + n.Log.Debugf("Including these operations: %v", n.IncludeOperations) + } else { + n.Log.Debugf("Including all operations.") + } + + if len(n.ExcludeOperations) > 0 { + n.Log.Debugf("Excluding these mount patterns: %v", n.ExcludeOperations) + } else { + n.Log.Debugf("Not excluding any operations.") + } + + return nil +} + +func init() { + inputs.Add("nfsclient", func() telegraf.Input { + return &NFSClient{} + }) +} diff --git a/plugins/inputs/nfsclient/nfsclient_test.go b/plugins/inputs/nfsclient/nfsclient_test.go new file mode 100644 index 0000000000000..909c61c6f6a55 --- /dev/null +++ b/plugins/inputs/nfsclient/nfsclient_test.go @@ -0,0 +1,207 @@ +package nfsclient + +import ( + "bufio" + "os" + "strings" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func getMountStatsPath() string { + path := "./testdata/mountstats" + if os.Getenv("MOUNT_PROC") != "" { + path = os.Getenv("MOUNT_PROC") + } + + return path +} + +func TestNFSClientParsev3(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{Fullstat: true} + nfsclient.nfs3Ops = map[string]bool{"READLINK": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"READLINK": true, "GETATTR": false} + data := strings.Fields(" READLINK: 500 501 502 503 504 505 506 507") + err := nfsclient.parseStat("1.2.3.4:/storage/NFS", "/A", "3", data, &acc) + require.NoError(t, err) + + fieldsOps := map[string]interface{}{ + "ops": uint64(500), + "trans": uint64(501), + "timeouts": uint64(502), + "bytes_sent": uint64(503), + "bytes_recv": uint64(504), + "queue_time": 
uint64(505), + "response_time": uint64(506), + "total_time": uint64(507), + } + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) +} + +func TestNFSClientParsev4(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{Fullstat: true} + nfsclient.nfs3Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"DESTROY_SESSION": true, "GETATTR": false} + data := strings.Fields(" DESTROY_SESSION: 500 501 502 503 504 505 506 507") + err := nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) + require.NoError(t, err) + + fieldsOps := map[string]interface{}{ + "ops": uint64(500), + "trans": uint64(501), + "timeouts": uint64(502), + "bytes_sent": uint64(503), + "bytes_recv": uint64(504), + "queue_time": uint64(505), + "response_time": uint64(506), + "total_time": uint64(507), + } + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) +} + +func TestNFSClientParseLargeValue(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{Fullstat: true} + nfsclient.nfs3Ops = map[string]bool{"SETCLIENTID": true, "GETATTR": false} + nfsclient.nfs4Ops = map[string]bool{"SETCLIENTID": true, "GETATTR": false} + data := strings.Fields(" SETCLIENTID: 218 216 0 53568 12960 18446744073709531008 134 197") + err := nfsclient.parseStat("2.2.2.2:/nfsdata/", "/B", "4", data, &acc) + require.NoError(t, err) + + fieldsOps := map[string]interface{}{ + "ops": uint64(218), + "trans": uint64(216), + "timeouts": uint64(0), + "bytes_sent": uint64(53568), + "bytes_recv": uint64(12960), + "queue_time": uint64(18446744073709531008), + "response_time": uint64(134), + "total_time": uint64(197), + } + acc.AssertContainsFields(t, "nfs_ops", fieldsOps) +} + +func TestNFSClientProcessStat(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{} + nfsclient.Fullstat = false + + file, err := os.Open(getMountStatsPath()) + require.NoError(t, err) + defer file.Close() + + scanner := bufio.NewScanner(file) + + err = nfsclient.processText(scanner, &acc) + require.NoError(t, err) + + fieldsReadstat := map[string]interface{}{ + "ops": uint64(600), + "retrans": uint64(1), + "bytes": uint64(1207), + "rtt": uint64(606), + "exe": uint64(607), + "rtt_per_op": float64(1.01), + } + + readTags := map[string]string{ + "serverexport": "1.2.3.4:/storage/NFS", + "mountpoint": "/A", + "operation": "READ", + } + + acc.AssertContainsTaggedFields(t, "nfsstat", fieldsReadstat, readTags) + + fieldsWritestat := map[string]interface{}{ + "ops": uint64(700), + "retrans": uint64(1), + "bytes": uint64(1407), + "rtt": uint64(706), + "exe": uint64(707), + "rtt_per_op": float64(1.0085714285714287), + } + + writeTags := map[string]string{ + "serverexport": "1.2.3.4:/storage/NFS", + "mountpoint": "/A", + "operation": "WRITE", + } + acc.AssertContainsTaggedFields(t, "nfsstat", fieldsWritestat, writeTags) +} + +func TestNFSClientProcessFull(t *testing.T) { + var acc testutil.Accumulator + + nfsclient := NFSClient{} + nfsclient.Fullstat = true + + file, err := os.Open(getMountStatsPath()) + require.NoError(t, err) + defer file.Close() + + scanner := bufio.NewScanner(file) + + err = nfsclient.processText(scanner, &acc) + require.NoError(t, err) + + fieldsEvents := map[string]interface{}{ + "inoderevalidates": uint64(301736), + "dentryrevalidates": uint64(22838), + "datainvalidates": uint64(410979), + "attrinvalidates": uint64(26188427), + "vfsopen": uint64(27525), + "vfslookup": uint64(9140), + "vfsaccess": uint64(114420), + "vfsupdatepage": uint64(30785253), + "vfsreadpage": uint64(5308856), + "vfsreadpages":
uint64(5364858), + "vfswritepage": uint64(30784819), + "vfswritepages": uint64(79832668), + "vfsgetdents": uint64(170), + "vfssetattr": uint64(64), + "vfsflush": uint64(18194), + "vfsfsync": uint64(29294718), + "vfslock": uint64(0), + "vfsrelease": uint64(18279), + "congestionwait": uint64(0), + "setattrtrunc": uint64(2), + "extendwrite": uint64(785551), + "sillyrenames": uint64(0), + "shortreads": uint64(0), + "shortwrites": uint64(0), + "delay": uint64(0), + "pnfsreads": uint64(0), + "pnfswrites": uint64(0), + } + fieldsBytes := map[string]interface{}{ + "normalreadbytes": uint64(204440464584), + "normalwritebytes": uint64(110857586443), + "directreadbytes": uint64(783170354688), + "directwritebytes": uint64(296174954496), + "serverreadbytes": uint64(1134399088816), + "serverwritebytes": uint64(407107155723), + "readpages": uint64(85749323), + "writepages": uint64(30784819), + } + fieldsXprtTCP := map[string]interface{}{ + "bind_count": uint64(1), + "connect_count": uint64(1), + "connect_time": uint64(0), + "idle_time": uint64(0), + "rpcsends": uint64(96172963), + "rpcreceives": uint64(96172963), + "badxids": uint64(0), + "inflightsends": uint64(620878754), + "backlogutil": uint64(0), + } + + acc.AssertContainsFields(t, "nfs_events", fieldsEvents) + acc.AssertContainsFields(t, "nfs_bytes", fieldsBytes) + acc.AssertContainsFields(t, "nfs_xprt_tcp", fieldsXprtTCP) +} diff --git a/plugins/inputs/nfsclient/sample.conf b/plugins/inputs/nfsclient/sample.conf new file mode 100644 index 0000000000000..fbd1371c5f35c --- /dev/null +++ b/plugins/inputs/nfsclient/sample.conf @@ -0,0 +1,27 @@ +# Read per-mount NFS client metrics from /proc/self/mountstats +[[inputs.nfsclient]] + ## Read more low-level metrics (optional, defaults to false) + # fullstat = false + + ## List of mounts to explicitly include or exclude (optional) + ## The pattern (Go regexp) is matched against the mount point (not the + ## device being mounted). If include_mounts is set, all mounts are ignored + ## unless present in the list. If a mount is listed in both include_mounts + ## and exclude_mounts, it is excluded. Go regexp patterns can be used. + # include_mounts = [] + # exclude_mounts = [] + + ## List of operations to include or exclude from collecting. This applies + ## only when fullstat=true. Semantics are similar to {include,exclude}_mounts: + ## the default is to collect everything; when include_operations is set, only + ## those OPs are collected; when exclude_operations is set, all are collected + ## except those listed. If include and exclude are set, the OP is excluded. + ## See /proc/self/mountstats for a list of valid operations; note that + ## NFSv3 and NFSv4 have different lists. While it is not possible to + ## have different include/exclude lists for NFSv3/4, unused elements + ## in the list should be okay. It is possible to have different lists + ## for different mountpoints: use multiple [[inputs.nfsclient]] stanzas, + ## with their own lists. See "include_mounts" above, and be careful of + ## duplicate metrics. An illustrative example follows.
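+  ## Example (illustrative only; the mount pattern and operation list below
+  ## are hypothetical, not defaults): to gather only READ and WRITE
+  ## statistics for NFS mounts under /home, one could set
+  ##   include_mounts = ["^/home/"]
+  ##   include_operations = ["READ", "WRITE"]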
+ # include_operations = [] + # exclude_operations = [] diff --git a/plugins/inputs/nfsclient/testdata/mountstats b/plugins/inputs/nfsclient/testdata/mountstats new file mode 100644 index 0000000000000..86651d20d26fa --- /dev/null +++ b/plugins/inputs/nfsclient/testdata/mountstats @@ -0,0 +1,231 @@ +device rootfs mounted on / with fstype rootfs +device proc mounted on /proc with fstype proc +device sysfs mounted on /sys with fstype sysfs +device devtmpfs mounted on /dev with fstype devtmpfs +device devpts mounted on /dev/pts with fstype devpts +device tmpfs mounted on /dev/shm with fstype tmpfs +device /dev/loop0 mounted on /dev/.initramfs/live with fstype iso9660 +device /dev/loop6 mounted on / with fstype ext4 +device /proc/bus/usb mounted on /proc/bus/usb with fstype usbfs +device none mounted on /proc/sys/fs/binfmt_misc with fstype binfmt_misc +device /tmp mounted on /tmp with fstype tmpfs +device /home mounted on /home with fstype tmpfs +device /var mounted on /var with fstype tmpfs +device /etc mounted on /etc with fstype tmpfs +device /dev/ram1 mounted on /root with fstype ext2 +device cgroup mounted on /cgroup/cpuset with fstype cgroup +device cgroup mounted on /cgroup/cpu with fstype cgroup +device cgroup mounted on /cgroup/cpuacct with fstype cgroup +device cgroup mounted on /cgroup/memory with fstype cgroup +device cgroup mounted on /cgroup/devices with fstype cgroup +device cgroup mounted on /cgroup/freezer with fstype cgroup +device cgroup mounted on /cgroup/net_cls with fstype cgroup +device cgroup mounted on /cgroup/blkio with fstype cgroup +device sunrpc mounted on /var/lib/nfs/rpc_pipefs with fstype rpc_pipefs +device /etc/auto.misc mounted on /misc with fstype autofs +device -hosts mounted on /net with fstype autofs +device 1.2.3.4:/storage/NFS mounted on /A with fstype nfs statvers=1.1 + opts: rw,vers=3,rsize=32768,wsize=32768,namlen=255,acregmin=60,acregmax=60,acdirmin=60,acdirmax=60,hard,nolock,noacl,nordirplus,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=1.2.3.4,mountvers=3,mountport=49193,mountproto=tcp,local_lock=all + age: 1136770 + caps: caps=0x3fe6,wtmult=512,dtsize=8192,bsize=0,namlen=255 + sec: flavor=1,pseudoflavor=1 + events: 301736 22838 410979 26188427 27525 9140 114420 30785253 5308856 5364858 30784819 79832668 170 64 18194 29294718 0 18279 0 2 785551 0 0 0 0 0 0 + bytes: 204440464584 110857586443 783170354688 296174954496 1134399088816 407107155723 85749323 30784819 + RPC iostats version: 1.0 p/v: 100003/3 (nfs) + xprt: tcp 733 1 1 0 0 96172963 96172963 0 620878754 0 690 196347132 524706275 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + GETATTR: 100 101 102 103 104 105 106 107 + SETATTR: 200 201 202 203 204 205 206 207 + LOOKUP: 300 301 302 303 304 305 306 307 + ACCESS: 400 401 402 403 404 405 406 407 + READLINK: 500 501 502 503 504 505 506 507 + READ: 600 601 602 603 604 605 606 607 + WRITE: 700 701 702 703 704 705 706 707 + CREATE: 800 801 802 803 804 805 806 807 + MKDIR: 900 901 902 903 904 905 906 907 + SYMLINK: 1000 1001 1002 1003 1004 1005 1006 1007 + MKNOD: 1100 1101 1102 1103 1104 1105 1106 1107 + REMOVE: 1200 1201 1202 1203 1204 1205 1206 1207 + RMDIR: 1300 1301 1302 1303 1304 1305 1306 1307 + RENAME: 1400 1401 1402 1403 1404 1405 1406 1407 + LINK: 1500 1501 1502 1503 1504 1505 1506 1507 + READDIR: 1600 1601 1602 1603 1604 1605 1606 1607 + READDIRPLUS: 1700 1701 1702 1703 1704 1705 1706 1707 + FSSTAT: 1800 1801 1802 1803 1804 1805 1806 1807 + FSINFO: 1900 1901 1902 1903 1904 1905 1906 1907 + PATHCONF: 2000 2001 2002 2003 2004 2005 2006 2007 + 
COMMIT: 2100 2101 2102 2103 2104 2105 2106 2107 + +device 2.2.2.2:/nfsdata/ mounted on /B with fstype nfs4 statvers=1.1 + opts: rw,vers=4,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60, acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys, clientaddr=3.3.3.3,minorversion=0,local_lock=none + age: 19 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,acl=0x0 + sec: flavor=1,pseudoflavor=1 + events: 0 168232 0 0 0 10095 217808 0 2 9797 0 9739 0 0 19739 19739 0 19739 0 0 0 0 0 0 0 0 0 + bytes: 1612840960 0 0 0 627536112 0 158076 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 737 0 1 0 0 69698 69697 0 81817 0 2 1082 12119 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 9797 9797 0 1000 2000 71 7953 8200 + WRITE: 0 0 0 0 0 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 + OPEN: 19740 19740 0 4737600 7343280 505 3449 4172 + OPEN_CONFIRM: 10211 10211 0 1552072 694348 74 836 1008 + OPEN_NOATTR: 0 0 0 0 0 0 0 0 + OPEN_DOWNGRADE: 0 0 0 0 0 0 0 0 + CLOSE: 19739 19739 0 3316152 2605548 334 3045 3620 + SETATTR: 0 0 0 0 0 0 0 0 + FSINFO: 1 1 0 132 108 0 0 0 + RENEW: 0 0 0 0 0 0 0 0 + SETCLIENTID: 0 0 0 0 0 0 0 0 + SETCLIENTID_CONFIRM: 0 0 0 0 0 0 0 0 + LOCK: 0 0 0 0 0 0 0 0 + LOCKT: 0 0 0 0 0 0 0 0 + LOCKU: 0 0 0 0 0 0 0 0 + ACCESS: 96 96 0 14584 19584 0 8 10 + GETATTR: 1 1 0 132 188 0 0 0 + LOOKUP: 10095 10095 0 1655576 2382420 36 898 1072 + LOOKUP_ROOT: 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 + PATHCONF: 1 1 0 128 72 0 0 0 + STATFS: 0 0 0 0 0 0 0 0 + READLINK: 0 0 0 0 0 0 0 0 + READDIR: 0 0 0 0 0 0 0 0 + SERVER_CAPS: 2 2 0 256 176 0 0 0 + DELEGRETURN: 0 0 0 0 0 0 0 0 + GETACL: 0 0 0 0 0 0 0 0 + SETACL: 0 0 0 0 0 0 0 0 + FS_LOCATIONS: 0 0 0 0 0 0 0 0 + RELEASE_LOCKOWNER: 0 0 0 0 0 0 0 0 + SECINFO: 0 0 0 0 0 0 0 0 + EXCHANGE_ID: 0 0 0 0 0 0 0 0 + CREATE_SESSION: 0 0 0 0 0 0 0 0 + DESTROY_SESSION: 500 501 502 503 504 505 506 507 + SEQUENCE: 0 0 0 0 0 0 0 0 + GET_LEASE_TIME: 0 0 0 0 0 0 0 0 + RECLAIM_COMPLETE: 0 0 0 0 0 0 0 0 + LAYOUTGET: 0 0 0 0 0 0 0 0 + GETDEVICEINFO: 0 0 0 0 0 0 0 0 + LAYOUTCOMMIT: 0 0 0 0 0 0 0 0 + +device nfsserver1:/vol/export1/bread_recipes mounted on /C with fstype nfs statvers=1.1 + opts: rw,vers=3,rsize=65536,wsize=65536,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=5.4.3.2,mountvers=3,mountport=635,mountproto=udp,local_lock=none + age: 1084700 + caps: caps=0x3fc7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + sec: flavor=1,pseudoflavor=1 + events: 145712 48345501 0 2476 804 1337 49359047 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + bytes: 0 0 0 0 0 0 0 0 + RPC iostats version: 1.0 p/v: 100003/3 (nfs) + xprt: tcp 871 1 1 0 0 181124336 181124308 28 1971647851 0 1100 807885669 90279840 + per-op statistics + NULL: 1 2 0 44 24 0 0 0 + GETATTR: 145712 145712 0 22994472 16319744 532 107480 109969 + SETATTR: 0 0 0 0 0 0 0 0 + LOOKUP: 2553 2553 0 385932 476148 9 1695 1739 + ACCESS: 596338 596338 0 79281020 71560560 2375 228286 237993 + READLINK: 0 0 0 0 0 0 0 0 + READ: 0 0 0 0 0 0 0 0 + WRITE: 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 + MKDIR: 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 + MKNOD: 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 + RMDIR: 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 + READDIR: 0 0 0 0 0 0 0 0 + READDIRPLUS: 0 0 0 0 0 0 0 0 + FSSTAT: 1698 1698 0 250080 285264 6 929 951 + FSINFO: 34 34 0 4352 5576 
0 5 5 + PATHCONF: 1 1 0 128 140 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 + +device nfsserver2:/tank/os2warp mounted on /D with fstype nfs4 statvers=1.1 + opts: rw,vers=4.2,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=10.66.88.239,local_lock=none + age: 2 + impl_id: name='',domain='',date='0,0' + caps: caps=0xffbfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0x40f9be3e,bm2=0x28803,acl=0x0,sessions,pnfs=not configured,lease_time=90,lease_expired=0 + sec: flavor=1,pseudoflavor=1 + events: 1 112 0 0 1 3 117 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + bytes: 0 0 0 0 0 0 0 0 + RPC iostats version: 1.1 p/v: 100003/4 (nfs) + xprt: tcp 763 0 2 0 2 39 39 0 42 0 2 0 3 + per-op statistics + NULL: 1 1 0 44 24 0 0 1 0 + READ: 0 0 0 0 0 0 0 0 0 + WRITE: 0 0 0 0 0 0 0 0 0 + COMMIT: 0 0 0 0 0 0 0 0 0 + OPEN: 0 0 0 0 0 0 0 0 0 + OPEN_CONFIRM: 0 0 0 0 0 0 0 0 0 + OPEN_NOATTR: 0 0 0 0 0 0 0 0 0 + OPEN_DOWNGRADE: 0 0 0 0 0 0 0 0 0 + CLOSE: 0 0 0 0 0 0 0 0 0 + SETATTR: 0 0 0 0 0 0 0 0 0 + FSINFO: 1 1 0 168 164 0 0 0 0 + RENEW: 0 0 0 0 0 0 0 0 0 + SETCLIENTID: 0 0 0 0 0 0 0 0 0 + SETCLIENTID_CONFIRM: 0 0 0 0 0 0 0 0 0 + LOCK: 0 0 0 0 0 0 0 0 0 + LOCKT: 0 0 0 0 0 0 0 0 0 + LOCKU: 0 0 0 0 0 0 0 0 0 + ACCESS: 3 3 0 600 504 0 1 1 0 + GETATTR: 2 2 0 364 480 0 1 1 0 + LOOKUP: 3 3 0 628 484 0 1 1 2 + LOOKUP_ROOT: 0 0 0 0 0 0 0 0 0 + REMOVE: 0 0 0 0 0 0 0 0 0 + RENAME: 0 0 0 0 0 0 0 0 0 + LINK: 0 0 0 0 0 0 0 0 0 + SYMLINK: 0 0 0 0 0 0 0 0 0 + CREATE: 0 0 0 0 0 0 0 0 0 + PATHCONF: 1 1 0 160 116 0 0 0 0 + STATFS: 1 1 0 164 160 0 0 0 0 + READLINK: 0 0 0 0 0 0 0 0 0 + READDIR: 1 1 0 224 11968 0 1 1 0 + SERVER_CAPS: 2 2 0 336 328 0 1 1 0 + DELEGRETURN: 0 0 0 0 0 0 0 0 0 + GETACL: 0 0 0 0 0 0 0 0 0 + SETACL: 0 0 0 0 0 0 0 0 0 + FS_LOCATIONS: 0 0 0 0 0 0 0 0 0 + RELEASE_LOCKOWNER: 0 0 0 0 0 0 0 0 0 + SECINFO: 0 0 0 0 0 0 0 0 0 + FSID_PRESENT: 0 0 0 0 0 0 0 0 0 + EXCHANGE_ID: 2 2 0 480 200 0 2 2 0 + CREATE_SESSION: 1 1 0 200 124 0 0 0 0 + DESTROY_SESSION: 0 0 0 0 0 0 0 0 0 + SEQUENCE: 0 0 0 0 0 0 0 0 0 + GET_LEASE_TIME: 0 0 0 0 0 0 0 0 0 + RECLAIM_COMPLETE: 1 1 0 128 88 0 107 107 0 + LAYOUTGET: 0 0 0 0 0 0 0 0 0 + GETDEVICEINFO: 0 0 0 0 0 0 0 0 0 + LAYOUTCOMMIT: 0 0 0 0 0 0 0 0 0 + LAYOUTRETURN: 0 0 0 0 0 0 0 0 0 + SECINFO_NO_NAME: 0 0 0 0 0 0 0 0 0 + TEST_STATEID: 0 0 0 0 0 0 0 0 0 + FREE_STATEID: 0 0 0 0 0 0 0 0 0 + GETDEVICELIST: 0 0 0 0 0 0 0 0 0 + BIND_CONN_TO_SESSION: 0 0 0 0 0 0 0 0 0 + DESTROY_CLIENTID: 0 0 0 0 0 0 0 0 0 + SEEK: 0 0 0 0 0 0 0 0 0 + ALLOCATE: 0 0 0 0 0 0 0 0 0 + DEALLOCATE: 0 0 0 0 0 0 0 0 0 + LAYOUTSTATS: 0 0 0 0 0 0 0 0 0 + CLONE: 0 0 0 0 0 0 0 0 0 + COPY: 0 0 0 0 0 0 0 0 0 + OFFLOAD_CANCEL: 0 0 0 0 0 0 0 0 0 + LOOKUPP: 0 0 0 0 0 0 0 0 0 + LAYOUTERROR: 0 0 0 0 0 0 0 0 0 + COPY_NOTIFY: 0 0 0 0 0 0 0 0 0 + GETXATTR: 0 0 0 0 0 0 0 0 0 + SETXATTR: 0 0 0 0 0 0 0 0 0 + LISTXATTRS: 0 0 0 0 0 0 0 0 0 + REMOVEXATTR: 0 0 0 0 0 0 0 0 0 + LAYOUTRETURN: 0 0 0 0 0 0 0 0 diff --git a/plugins/inputs/nginx/README.md b/plugins/inputs/nginx/README.md index bc4916507ef25..bfdbd9d42f5a4 100644 --- a/plugins/inputs/nginx/README.md +++ b/plugins/inputs/nginx/README.md @@ -1,8 +1,14 @@ # Nginx Input Plugin -### Configuration: +This plugin gathers basic status from the open source web server Nginx. Nginx +Plus is a commercial version. For more information about the differences between +Nginx (F/OSS) and Nginx Plus, see the Nginx [documentation][diff-doc]. 
-```toml +[diff-doc]: https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/ + +## Configuration + +```toml @sample.conf # Read Nginx's basic status information (ngx_http_stub_status_module) [[inputs.nginx]] ## An array of Nginx stub_status URI to gather stats. @@ -19,26 +25,27 @@ response_timeout = "5s" ``` -### Measurements & Fields: +## Measurements & Fields - Measurement - - accepts - - active - - handled - - reading - - requests - - waiting - - writing + - accepts + - active + - handled + - reading + - requests + - waiting + - writing -### Tags: +## Tags - All measurements have the following tags: - - port - - server + - port + - server -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx]] ## An array of Nginx stub_status URI to gather stats. @@ -46,12 +53,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf --config telegraf.conf --input-filter nginx --test ``` It produces: -``` + +```shell * Plugin: nginx, Collection 1 > nginx,port=80,server=localhost accepts=605i,active=2i,handled=605i,reading=0i,requests=12132i,waiting=1i,writing=1i 1456690994701784331 ``` diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index 4834137542039..cfa378f0f727c 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package nginx import ( "bufio" + _ "embed" "fmt" "net" "net/http" @@ -12,50 +14,35 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Nginx struct { Urls []string - ResponseTimeout internal.Duration + ResponseTimeout config.Duration tls.ClientConfig // HTTP client client *http.Client } -var sampleConfig = ` - # An array of Nginx stub_status URI to gather stats. 
- urls = ["http://localhost/server_status"] - - ## Optional TLS Config - tls_ca = "/etc/telegraf/ca.pem" - tls_cert = "/etc/telegraf/cert.cer" - tls_key = "/etc/telegraf/key.key" - ## Use TLS but skip chain & host verification - insecure_skip_verify = false - - # HTTP response timeout (default: 5s) - response_timeout = "5s" -` - -func (n *Nginx) SampleConfig() string { +func (*Nginx) SampleConfig() string { return sampleConfig } -func (n *Nginx) Description() string { - return "Read Nginx's basic status information (ngx_http_stub_status_module)" -} - func (n *Nginx) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup // Create an HTTP client that is re-used for each // collection interval if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -72,7 +59,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -80,27 +67,27 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { return nil } -func (n *Nginx) createHttpClient() (*http.Client, error) { +func (n *Nginx) createHTTPClient() (*http.Client, error) { tlsCfg, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err } - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } client := &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil } -func (n *Nginx) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *Nginx) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) diff --git a/plugins/inputs/nginx/nginx_test.go b/plugins/inputs/nginx/nginx_test.go index 7eb9e90b653ef..5a947e7e202e0 100644 --- a/plugins/inputs/nginx/nginx_test.go +++ b/plugins/inputs/nginx/nginx_test.go @@ -8,9 +8,9 @@ import ( "net/url" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) const nginxSampleResponse = ` @@ -33,7 +33,7 @@ func TestNginxTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := getTags(addr) - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } @@ -46,10 +46,11 @@ func TestNginxGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/tengine_status" { rsp = tengineSampleResponse } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -61,16 +62,13 @@ func TestNginxGeneratesMetrics(t *testing.T) { Urls: []string{fmt.Sprintf("%s/tengine_status", ts.URL)}, } - var acc_nginx testutil.Accumulator - var acc_tengine testutil.Accumulator + var accNginx testutil.Accumulator + var accTengine testutil.Accumulator - err_nginx := acc_nginx.GatherError(n.Gather) - err_tengine := acc_tengine.GatherError(nt.Gather) + require.NoError(t, accNginx.GatherError(n.Gather)) + require.NoError(t, accTengine.GatherError(nt.Gather)) - require.NoError(t, err_nginx) 
- require.NoError(t, err_tengine) - - fields_nginx := map[string]interface{}{ + fieldsNginx := map[string]interface{}{ "active": uint64(585), "accepts": uint64(85340), "handled": uint64(85340), @@ -80,7 +78,7 @@ func TestNginxGeneratesMetrics(t *testing.T) { "waiting": uint64(446), } - fields_tengine := map[string]interface{}{ + fieldsTengine := map[string]interface{}{ "active": uint64(403), "accepts": uint64(853), "handled": uint64(8533), @@ -91,9 +89,7 @@ func TestNginxGeneratesMetrics(t *testing.T) { } addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { @@ -108,6 +104,6 @@ func TestNginxGeneratesMetrics(t *testing.T) { } tags := map[string]string{"server": host, "port": port} - acc_nginx.AssertContainsTaggedFields(t, "nginx", fields_nginx, tags) - acc_tengine.AssertContainsTaggedFields(t, "nginx", fields_tengine, tags) + accNginx.AssertContainsTaggedFields(t, "nginx", fieldsNginx, tags) + accTengine.AssertContainsTaggedFields(t, "nginx", fieldsTengine, tags) } diff --git a/plugins/inputs/nginx/sample.conf b/plugins/inputs/nginx/sample.conf new file mode 100644 index 0000000000000..eb395ddfb8c9b --- /dev/null +++ b/plugins/inputs/nginx/sample.conf @@ -0,0 +1,14 @@ +# Read Nginx's basic status information (ngx_http_stub_status_module) +[[inputs.nginx]] + ## An array of Nginx stub_status URI to gather stats. + urls = ["http://localhost/server_status"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP response timeout (default: 5s) + response_timeout = "5s" diff --git a/plugins/inputs/nginx_plus/README.md b/plugins/inputs/nginx_plus/README.md index cb0713ed848ff..d0584fddc6b6e 100644 --- a/plugins/inputs/nginx_plus/README.md +++ b/plugins/inputs/nginx_plus/README.md @@ -1,20 +1,36 @@ # Nginx Plus Input Plugin -Nginx Plus is a commercial version of the open source web server Nginx. The use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). +Nginx Plus is a commercial version of the open source web server Nginx. To use +this plugin you will need a license. For more information about the differences +between Nginx (F/OSS) and Nginx Plus, see the Nginx [documentation][diff-doc]. -Structures for Nginx Plus have been built based on history of -[status module documentation](http://nginx.org/en/docs/http/ngx_http_status_module.html) +Structures for Nginx Plus have been built based on the history of the [status +module documentation][status-mod]. -### Configuration: +[diff-doc]: https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/ -```toml +[status-mod]: http://nginx.org/en/docs/http/ngx_http_status_module.html + +## Configuration + +```toml @sample.conf # Read Nginx Plus' advanced status information [[inputs.nginx_plus]] ## An array of Nginx status URIs to gather stats.
urls = ["http://localhost/status"] + + # HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` -### Measurements & Fields: +## Metrics - nginx_plus_processes - respawned @@ -59,8 +75,7 @@ Structures for Nginx Plus have been built based on history of - fails - downtime - -### Tags: +### Tags - nginx_plus_processes, nginx_plus_connections, nginx_plus_ssl, nginx_plus_requests - server @@ -78,9 +93,10 @@ Structures for Nginx Plus have been built based on history of - port - upstream_address -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_plus]] ## An array of Nginx Plus status URIs to gather stats. @@ -88,12 +104,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_plus -test ``` It produces: -``` + +```text * Plugin: inputs.nginx_plus, Collection 1 > nginx_plus_processes,server=localhost,port=12021,host=word.local respawned=0i 1505782513000000000 > nginx_plus_connections,server=localhost,port=12021,host=word.local accepted=5535735212i,dropped=10140186i,active=9541i,idle=67540i 1505782513000000000 diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index 5b0fb2596ebf8..dd073590d21d9 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package nginx_plus import ( "bufio" + _ "embed" "encoding/json" "fmt" "net" @@ -13,42 +15,27 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type NginxPlus struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client } -var sampleConfig = ` - ## An array of ngx_http_status_module or status URI to gather stats. 
- urls = ["http://localhost/status"] - - # HTTP response timeout (default: 5s) - response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (n *NginxPlus) SampleConfig() string { +func (*NginxPlus) SampleConfig() string { return sampleConfig } -func (n *NginxPlus) Description() string { - return "Read Nginx Plus' full status information (ngx_http_status_module)" -} - func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup @@ -56,7 +43,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { // collection interval if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -73,7 +60,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(n.gatherUrl(addr, acc)) + acc.AddError(n.gatherURL(addr, acc)) }(addr) } @@ -81,9 +68,9 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NginxPlus) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 +func (n *NginxPlus) createHTTPClient() (*http.Client, error) { + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -95,13 +82,13 @@ func (n *NginxPlus) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil } -func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { +func (n *NginxPlus) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { resp, err := n.client.Get(addr.String()) if err != nil { @@ -114,7 +101,7 @@ func (n *NginxPlus) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - return gatherStatusUrl(bufio.NewReader(resp.Body), getTags(addr), acc) + return gatherStatusURL(bufio.NewReader(resp.Body), getTags(addr), acc) default: return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType) } @@ -283,7 +270,7 @@ type Status struct { } `json:"stream"` } -func gatherStatusUrl(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { +func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { dec := json.NewDecoder(r) status := &Status{} if err := dec.Decode(status); err != nil { @@ -318,7 +305,6 @@ func (s *Status) gatherProcessesMetrics(tags map[string]string, acc telegraf.Acc }, tags, ) - } func (s *Status) gatherConnectionsMetrics(tags map[string]string, acc telegraf.Accumulator) { diff --git a/plugins/inputs/nginx_plus/nginx_plus_test.go b/plugins/inputs/nginx_plus/nginx_plus_test.go index 6e9a8c4d97c3e..36fe5a2dce8f6 100644 --- a/plugins/inputs/nginx_plus/nginx_plus_test.go +++ b/plugins/inputs/nginx_plus/nginx_plus_test.go @@ -253,14 +253,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - 
w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(w, rsp) + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -270,14 +269,11 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator - err_nginx := n.Gather(&acc) - - require.NoError(t, err_nginx) + errNginx := n.Gather(&acc) + require.NoError(t, errNginx) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { @@ -409,5 +405,4 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { "upstream_address": "1.2.3.123:80", "id": "0", }) - } diff --git a/plugins/inputs/nginx_plus/sample.conf b/plugins/inputs/nginx_plus/sample.conf new file mode 100644 index 0000000000000..94415cb777e72 --- /dev/null +++ b/plugins/inputs/nginx_plus/sample.conf @@ -0,0 +1,14 @@ +# Read Nginx Plus' advanced status information +[[inputs.nginx_plus]] + ## An array of Nginx status URIs to gather stats. + urls = ["http://localhost/status"] + + # HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/nginx_plus_api/README.md b/plugins/inputs/nginx_plus_api/README.md index 57cb127b5dd12..1669f21e2f212 100644 --- a/plugins/inputs/nginx_plus_api/README.md +++ b/plugins/inputs/nginx_plus_api/README.md @@ -1,19 +1,33 @@ # Nginx Plus API Input Plugin -Nginx Plus is a commercial version of the open source web server Nginx. The use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). +Nginx Plus is a commercial version of the open source web server Nginx. The use +this plugin you will need a license. For more information about the differences +between Nginx (F/OSS) and Nginx Plus, see the Nginx [documentation][diff-doc]. -### Configuration: +[diff-doc]: https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/ -```toml +## Configuration + +```toml @sample.conf # Read Nginx Plus API advanced status information [[inputs.nginx_plus_api]] ## An array of Nginx API URIs to gather stats. urls = ["http://localhost/api"] # Nginx API version, default: 3 # api_version = 3 + + # HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` -### Migration from Nginx Plus (Status) input plugin +## Migration from Nginx Plus (Status) input plugin | Nginx Plus | Nginx Plus API | |---------------------------------|--------------------------------------| @@ -29,13 +43,15 @@ Nginx Plus is a commercial version of the open source web server Nginx. 
The use | nginx_plus_stream_upstream_peer | nginx_plus_api_stream_upstream_peers | | nginx.stream.zone | nginx_plus_api_stream_server_zones | -### Measurements by API version +## Measurements by API version | Measurement | API version (api_version) | |--------------------------------------|---------------------------| | nginx_plus_api_processes | >= 3 | | nginx_plus_api_connections | >= 3 | | nginx_plus_api_ssl | >= 3 | +| nginx_plus_api_slabs_pages | >= 3 | +| nginx_plus_api_slabs_slots | >= 3 | | nginx_plus_api_http_requests | >= 3 | | nginx_plus_api_http_server_zones | >= 3 | | nginx_plus_api_http_upstreams | >= 3 | @@ -47,7 +63,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use | nginx_plus_api_http_location_zones | >= 5 | | nginx_plus_api_resolver_zones | >= 5 | -### Measurements & Fields: +## Measurements & Fields - nginx_plus_api_processes - respawned @@ -56,6 +72,14 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - dropped - active - idle +- nginx_plus_api_slabs_pages + - used + - free +- nginx_plus_api_slabs_slots + - used + - free + - reqs + - fails - nginx_plus_api_ssl - handshakes - handshakes_failed @@ -171,7 +195,7 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - timedout - unknown -### Tags: +## Tags - nginx_plus_api_processes, nginx_plus_api_connections, nginx_plus_api_ssl, nginx_plus_api_http_requests - source @@ -182,11 +206,17 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - source - port -- nginx_plus_api_http_server_zones, nginx_plus_api_upstream_server_zones, nginx_plus_api_http_location_zones, nginx_plus_api_resolver_zones +- nginx_plus_api_http_server_zones, nginx_plus_api_upstream_server_zones, nginx_plus_api_http_location_zones, nginx_plus_api_resolver_zones, nginx_plus_api_slabs_pages - source - port - zone +- nginx_plus_api_slabs_slots + - source + - port + - zone + - slot + - nginx_plus_api_upstream_peers, nginx_plus_api_stream_upstream_peers - id - upstream @@ -198,9 +228,10 @@ Nginx Plus is a commercial version of the open source web server Nginx. The use - source - port -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_plus_api]] ## An array of Nginx Plus API URIs to gather stats. 
@@ -208,14 +239,22 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_plus_api -test ``` It produces: -``` + +```text > nginx_plus_api_processes,port=80,source=demo.nginx.com respawned=0i 1570696321000000000 > nginx_plus_api_connections,port=80,source=demo.nginx.com accepted=68998606i,active=7i,dropped=0i,idle=57i 1570696322000000000 +> nginx_plus_api_slabs_pages,port=80,source=demo.nginx.com,zone=hg.nginx.org used=1i,free=503i 1570696322000000000 +> nginx_plus_api_slabs_pages,port=80,source=demo.nginx.com,zone=trac.nginx.org used=3i,free=500i 1570696322000000000 +> nginx_plus_api_slabs_slots,port=80,source=demo.nginx.com,zone=hg.nginx.org,slot=8 used=1i,free=503i,reqs=10i,fails=0i 1570696322000000000 +> nginx_plus_api_slabs_slots,port=80,source=demo.nginx.com,zone=hg.nginx.org,slot=16 used=3i,free=500i,reqs=1024i,fails=0i 1570696322000000000 +> nginx_plus_api_slabs_slots,port=80,source=demo.nginx.com,zone=trac.nginx.org,slot=8 used=1i,free=503i,reqs=10i,fails=0i 1570696322000000000 +> nginx_plus_api_slabs_slots,port=80,source=demo.nginx.com,zone=trac.nginx.org,slot=16 used=0i,free=1520i,reqs=0i,fails=1i 1570696322000000000 > nginx_plus_api_ssl,port=80,source=demo.nginx.com handshakes=9398978i,handshakes_failed=289353i,session_reuses=1004389i 1570696322000000000 > nginx_plus_api_http_requests,port=80,source=demo.nginx.com current=51i,total=264649353i 1570696322000000000 > nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=hg.nginx.org discarded=5i,processing=0i,received=24123604i,requests=60138i,responses_1xx=0i,responses_2xx=59353i,responses_3xx=531i,responses_4xx=249i,responses_5xx=0i,responses_total=60133i,sent=830165221i 1570696322000000000 diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go index 8ec1ea0f7725f..069f906e314f7 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package nginx_plus_api import ( + _ "embed" "fmt" "net/http" "net/url" @@ -8,15 +10,19 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) -type NginxPlusApi struct { - Urls []string `toml:"urls"` - ApiVersion int64 `toml:"api_version"` - ResponseTimeout internal.Duration `toml:"response_timeout"` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +type NginxPlusAPI struct { + Urls []string `toml:"urls"` + APIVersion int64 `toml:"api_version"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client @@ -24,11 +30,12 @@ type NginxPlusApi struct { const ( // Default settings - defaultApiVersion = 3 + defaultAPIVersion = 3 // Paths processesPath = "processes" connectionsPath = "connections" + slabsPath = "slabs" sslPath = "ssl" httpRequestsPath = "http/requests" @@ -43,44 +50,22 @@ const ( streamUpstreamsPath = "stream/upstreams" ) -var sampleConfig = ` - ## An array of API URI to gather stats. 
- urls = ["http://localhost/api"] - - # Nginx API version, default: 3 - # api_version = 3 - - # HTTP response timeout (default: 5s) - response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (n *NginxPlusApi) SampleConfig() string { +func (*NginxPlusAPI) SampleConfig() string { return sampleConfig } -func (n *NginxPlusApi) Description() string { - return "Read Nginx Plus Api documentation" -} - -func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error { +func (n *NginxPlusAPI) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup // Create an HTTP client that is re-used for each // collection interval - if n.ApiVersion == 0 { - n.ApiVersion = defaultApiVersion + if n.APIVersion == 0 { + n.APIVersion = defaultAPIVersion } if n.client == nil { - client, err := n.createHttpClient() + client, err := n.createHTTPClient() if err != nil { return err } @@ -105,9 +90,9 @@ func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 +func (n *NginxPlusAPI) createHTTPClient() (*http.Client, error) { + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -119,7 +104,7 @@ func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil @@ -127,6 +112,6 @@ func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { func init() { inputs.Add("nginx_plus_api", func() telegraf.Input { - return &NginxPlusApi{} + return &NginxPlusAPI{} }) } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go index 6aaaff2d344c7..ab2699595e8d0 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -4,7 +4,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -19,19 +19,20 @@ var ( errNotFound = errors.New("not found") ) -func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { +func (n *NginxPlusAPI) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { addError(acc, n.gatherProcessesMetrics(addr, acc)) addError(acc, n.gatherConnectionsMetrics(addr, acc)) + addError(acc, n.gatherSlabsMetrics(addr, acc)) addError(acc, n.gatherSslMetrics(addr, acc)) - addError(acc, n.gatherHttpRequestsMetrics(addr, acc)) - addError(acc, n.gatherHttpServerZonesMetrics(addr, acc)) - addError(acc, n.gatherHttpUpstreamsMetrics(addr, acc)) - addError(acc, n.gatherHttpCachesMetrics(addr, acc)) + addError(acc, n.gatherHTTPRequestsMetrics(addr, acc)) + addError(acc, n.gatherHTTPServerZonesMetrics(addr, acc)) + addError(acc, n.gatherHTTPUpstreamsMetrics(addr, acc)) + addError(acc, n.gatherHTTPCachesMetrics(addr, acc)) addError(acc, n.gatherStreamServerZonesMetrics(addr, acc)) addError(acc, n.gatherStreamUpstreamsMetrics(addr, acc)) - if n.ApiVersion >= 5 { - addError(acc, n.gatherHttpLocationZonesMetrics(addr, acc)) + if n.APIVersion >= 5 { + addError(acc, 
n.gatherHTTPLocationZonesMetrics(addr, acc)) addError(acc, n.gatherResolverZonesMetrics(addr, acc)) } } @@ -48,12 +49,12 @@ func addError(acc telegraf.Accumulator, err error) { } } -func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { - url := fmt.Sprintf("%s/%d/%s", addr.String(), n.ApiVersion, path) - resp, err := n.client.Get(url) +func (n *NginxPlusAPI) gatherURL(addr *url.URL, path string) ([]byte, error) { + address := fmt.Sprintf("%s/%d/%s", addr.String(), n.APIVersion, path) + resp, err := n.client.Get(address) if err != nil { - return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) + return nil, fmt.Errorf("error making HTTP request to %s: %s", address, err) } defer resp.Body.Close() @@ -64,25 +65,25 @@ func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { // features are either optional, or only available in some versions return nil, errNotFound default: - return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + return nil, fmt.Errorf("%s returned HTTP status %s", address, resp.Status) } contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] switch contentType { case "application/json": - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return nil, err } return body, nil default: - return nil, fmt.Errorf("%s returned unexpected content type %s", url, contentType) + return nil, fmt.Errorf("%s returned unexpected content type %s", address, contentType) } } -func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, processesPath) +func (n *NginxPlusAPI) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, processesPath) if err != nil { return err } @@ -104,8 +105,8 @@ func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumu return nil } -func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, connectionsPath) +func (n *NginxPlusAPI) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, connectionsPath) if err != nil { return err } @@ -130,8 +131,61 @@ func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accu return nil } -func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, sslPath) +func (n *NginxPlusAPI) gatherSlabsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, slabsPath) + if err != nil { + return err + } + + var slabs Slabs + + if err := json.Unmarshal(body, &slabs); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, slab := range slabs { + slabTags := map[string]string{} + for k, v := range tags { + slabTags[k] = v + } + slabTags["zone"] = zoneName + + acc.AddFields( + "nginx_plus_api_slabs_pages", + map[string]interface{}{ + "used": slab.Pages.Used, + "free": slab.Pages.Free, + }, + slabTags, + ) + + for slotID, slot := range slab.Slots { + slotTags := map[string]string{} + for k, v := range slabTags { + slotTags[k] = v + } + slotTags["slot"] = slotID + + acc.AddFields( + "nginx_plus_api_slabs_slots", + map[string]interface{}{ + "used": slot.Used, + "free": slot.Free, + "reqs": slot.Reqs, + "fails": slot.Fails, + }, + slotTags, + ) + } + } + + return nil +} + +func (n *NginxPlusAPI) gatherSslMetrics(addr *url.URL, 
acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, sslPath) if err != nil { return err } @@ -155,13 +209,13 @@ func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) return nil } -func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpRequestsPath) +func (n *NginxPlusAPI) gatherHTTPRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpRequestsPath) if err != nil { return err } - var httpRequests = &HttpRequests{} + var httpRequests = &HTTPRequests{} if err := json.Unmarshal(body, httpRequests); err != nil { return err @@ -179,13 +233,13 @@ func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Acc return nil } -func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpServerZonesPath) +func (n *NginxPlusAPI) gatherHTTPServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpServerZonesPath) if err != nil { return err } - var httpServerZones HttpServerZones + var httpServerZones HTTPServerZones if err := json.Unmarshal(body, &httpServerZones); err != nil { return err @@ -227,13 +281,13 @@ func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf. } // Added in 5 API version -func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpLocationZonesPath) +func (n *NginxPlusAPI) gatherHTTPLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpLocationZonesPath) if err != nil { return err } - var httpLocationZones HttpLocationZones + var httpLocationZones HTTPLocationZones if err := json.Unmarshal(body, &httpLocationZones); err != nil { return err @@ -273,13 +327,13 @@ func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegra return nil } -func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpUpstreamsPath) +func (n *NginxPlusAPI) gatherHTTPUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpUpstreamsPath) if err != nil { return err } - var httpUpstreams HttpUpstreams + var httpUpstreams HTTPUpstreams if err := json.Unmarshal(body, &httpUpstreams); err != nil { return err @@ -357,13 +411,13 @@ func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Ac return nil } -func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, httpCachesPath) +func (n *NginxPlusAPI) gatherHTTPCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, httpCachesPath) if err != nil { return err } - var httpCaches HttpCaches + var httpCaches HTTPCaches if err := json.Unmarshal(body, &httpCaches); err != nil { return err @@ -411,8 +465,8 @@ func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accum return nil } -func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, streamServerZonesPath) +func (n *NginxPlusAPI) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, streamServerZonesPath) if err != nil { return err } 
@@ -447,8 +501,8 @@ func (n *NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegra } // Added in 5 API version -func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, resolverZonesPath) +func (n *NginxPlusAPI) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, resolverZonesPath) if err != nil { return err } @@ -490,8 +544,8 @@ func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Ac return nil } -func (n *NginxPlusApi) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { - body, err := n.gatherUrl(addr, streamUpstreamsPath) +func (n *NginxPlusAPI) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherURL(addr, streamUpstreamsPath) if err != nil { return err } diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go index f309886cff58e..a1c43c645a349 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -27,6 +27,135 @@ const connectionsPayload = ` } ` +const slabsPayload = ` +{ + "zone1":{ + "pages":{ + "used":7, + "free":56 + }, + "slots":{ + "8":{ + "used":1, + "free":503, + "reqs":1, + "fails":0 + }, + "16":{ + "used":1, + "free":253, + "reqs":1, + "fails":0 + }, + "32":{ + "used":3, + "free":124, + "reqs":3, + "fails":0 + }, + "64":{ + "used":3, + "free":61, + "reqs":3, + "fails":0 + }, + "128":{ + "used":6, + "free":26, + "reqs":6, + "fails":0 + }, + "256":{ + "used":0, + "free":0, + "reqs":0, + "fails":0 + }, + "512":{ + "used":2, + "free":6, + "reqs":2, + "fails":0 + }, + "1024":{ + "used":2, + "free":2, + "reqs":2, + "fails":0 + }, + "2048":{ + "used":0, + "free":0, + "reqs":0, + "fails":0 + } + } + }, + "zone2":{ + "pages":{ + "used":2218, + "free":252290 + }, + "slots":{ + "8":{ + "used":1, + "free":503, + "reqs":4, + "fails":0 + }, + "16":{ + "used":0, + "free":0, + "reqs":0, + "fails":0 + }, + "32":{ + "used":8, + "free":119, + "reqs":98, + "fails":0 + }, + "64":{ + "used":10899, + "free":45, + "reqs":124255, + "fails":0 + }, + "128":{ + "used":1, + "free":31, + "reqs":1, + "fails":0 + }, + "256":{ + "used":10901, + "free":11, + "reqs":124270, + "fails":0 + }, + "512":{ + "used":10893, + "free":3, + "reqs":124245, + "fails":0 + }, + "1024":{ + "used":0, + "free":0, + "reqs":0, + "fails":0 + }, + "2048":{ + "used":0, + "free":0, + "reqs":10, + "fails":0 + } + } + } +} +` + const sslPayload = ` { "handshakes": 79572, @@ -520,7 +649,7 @@ const streamServerZonesPayload = ` ` func TestGatherProcessesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, processesPath, defaultApiVersion, processesPayload) + ts, n := prepareEndpoint(t, processesPath, processesPayload) defer ts.Close() var acc testutil.Accumulator @@ -541,7 +670,7 @@ func TestGatherProcessesMetrics(t *testing.T) { } func TestGatherConnectionsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, connectionsPath, defaultApiVersion, connectionsPayload) + ts, n := prepareEndpoint(t, connectionsPath, connectionsPayload) defer ts.Close() var acc testutil.Accumulator @@ -564,8 +693,73 @@ func TestGatherConnectionsMetrics(t *testing.T) { }) } +func TestGatherSlabsMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, slabsPath, slabsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := 
prepareAddr(t, ts) + + require.NoError(t, n.gatherSlabsMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_slabs_pages", + map[string]interface{}{ + "used": int64(7), + "free": int64(56), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "zone1", + }) + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_slabs_pages", + map[string]interface{}{ + "used": int64(2218), + "free": int64(252290), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "zone2", + }) + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_slabs_slots", + map[string]interface{}{ + "used": int64(1), + "free": int64(503), + "reqs": int64(1), + "fails": int64(0), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "zone1", + "slot": "8", + }) + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_slabs_slots", + map[string]interface{}{ + "used": int64(10893), + "free": int64(3), + "reqs": int64(124245), + "fails": int64(0), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "zone2", + "slot": "512", + }) +} + func TestGatherSslMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, sslPath, defaultApiVersion, sslPayload) + ts, n := prepareEndpoint(t, sslPath, sslPayload) defer ts.Close() var acc testutil.Accumulator @@ -588,13 +782,13 @@ func TestGatherSslMetrics(t *testing.T) { } func TestGatherHttpRequestsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpRequestsPath, defaultApiVersion, httpRequestsPayload) + ts, n := prepareEndpoint(t, httpRequestsPath, httpRequestsPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpRequestsMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPRequestsMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -610,13 +804,13 @@ func TestGatherHttpRequestsMetrics(t *testing.T) { } func TestGatherHttpServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpServerZonesPath, defaultApiVersion, httpServerZonesPayload) + ts, n := prepareEndpoint(t, httpServerZonesPath, httpServerZonesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpServerZonesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPServerZonesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -664,13 +858,13 @@ func TestGatherHttpServerZonesMetrics(t *testing.T) { } func TestGatherHttpLocationZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultApiVersion, httpLocationZonesPayload) + ts, n := prepareEndpoint(t, httpLocationZonesPath, httpLocationZonesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpLocationZonesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPLocationZonesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -716,13 +910,13 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) { } func TestGatherHttpUpstreamsMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) + ts, n := prepareEndpoint(t, httpUpstreamsPath, httpUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpUpstreamsMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPUpstreamsMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -888,13 +1082,13 @@ func TestGatherHttpUpstreamsMetrics(t *testing.T) { 
} func TestGatherHttpCachesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, httpCachesPath, defaultApiVersion, httpCachesPayload) + ts, n := prepareEndpoint(t, httpCachesPath, httpCachesPayload) defer ts.Close() var acc testutil.Accumulator addr, host, port := prepareAddr(t, ts) - require.NoError(t, n.gatherHttpCachesMetrics(addr, &acc)) + require.NoError(t, n.gatherHTTPCachesMetrics(addr, &acc)) acc.AssertContainsTaggedFields( t, @@ -966,7 +1160,7 @@ func TestGatherHttpCachesMetrics(t *testing.T) { } func TestGatherResolverZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, resolverZonesPath, defaultApiVersion, resolverZonesPayload) + ts, n := prepareEndpoint(t, resolverZonesPath, resolverZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -1020,7 +1214,7 @@ func TestGatherResolverZonesMetrics(t *testing.T) { } func TestGatherStreamUpstreams(t *testing.T) { - ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload) + ts, n := prepareEndpoint(t, streamUpstreamsPath, streamUpstreamsPayload) defer ts.Close() var acc testutil.Accumulator @@ -1159,11 +1353,10 @@ func TestGatherStreamUpstreams(t *testing.T) { "upstream_address": "10.0.0.1:12348", "id": "1", }) - } func TestGatherStreamServerZonesMetrics(t *testing.T) { - ts, n := prepareEndpoint(t, streamServerZonesPath, defaultApiVersion, streamServerZonesPayload) + ts, n := prepareEndpoint(t, streamServerZonesPath, streamServerZonesPayload) defer ts.Close() var acc testutil.Accumulator @@ -1208,14 +1401,12 @@ func TestUnavailableEndpoints(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1228,14 +1419,12 @@ func TestServerError(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1245,18 +1434,17 @@ func TestServerError(t *testing.T) { func TestMalformedJSON(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintln(w, "this is not JSON") + _, err := fmt.Fprintln(w, "this is not JSON") + require.NoError(t, err) })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1269,14 +1457,12 @@ func TestUnknownContentType(t *testing.T) { })) defer ts.Close() - n := &NginxPlusApi{ + n := &NginxPlusAPI{ client: ts.Client(), } addr, err := url.Parse(ts.URL) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) var acc testutil.Accumulator n.gatherMetrics(addr, &acc) @@ -1286,9 +1472,7 @@ func TestUnknownContentType(t *testing.T) { func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { t.Helper() addr, err := url.Parse(fmt.Sprintf("%s/api", ts.URL)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) @@ -1306,29 +1490,23 @@ func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { return addr, host, port } -func prepareEndpoint(t *testing.T, path string, apiVersion 
int64, payload string) (*httptest.Server, *NginxPlusApi) { +func prepareEndpoint(t *testing.T, path string, payload string) (*httptest.Server, *NginxPlusAPI) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string - - if r.URL.Path == fmt.Sprintf("/api/%d/%s", apiVersion, path) { - rsp = payload - w.Header()["Content-Type"] = []string{"application/json"} - } else { - t.Errorf("unknown request path") - } + require.Equal(t, r.URL.Path, fmt.Sprintf("/api/%d/%s", defaultAPIVersion, path), "unknown request path") - fmt.Fprintln(w, rsp) + w.Header()["Content-Type"] = []string{"application/json"} + _, err := fmt.Fprintln(w, payload) + require.NoError(t, err) })) - n := &NginxPlusApi{ + n := &NginxPlusAPI{ Urls: []string{fmt.Sprintf("%s/api", ts.URL)}, - ApiVersion: apiVersion, + APIVersion: defaultAPIVersion, } - client, err := n.createHttpClient() - if err != nil { - t.Fatal(err) - } + client, err := n.createHTTPClient() + require.NoError(t, err) + n.client = client return ts, n diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go index 868bc04e445eb..654c3cb91ecad 100644 --- a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go @@ -11,6 +11,19 @@ type Connections struct { Idle int64 `json:"idle"` } +type Slabs map[string]struct { + Pages struct { + Used int64 `json:"used"` + Free int64 `json:"free"` + } `json:"pages"` + Slots map[string]struct { + Used int64 `json:"used"` + Free int64 `json:"free"` + Reqs int64 `json:"reqs"` + Fails int64 `json:"fails"` + } `json:"slots"` +} + type Ssl struct { // added in version 6 Handshakes int64 `json:"handshakes"` HandshakesFailed int64 `json:"handshakes_failed"` @@ -35,7 +48,7 @@ type ResolverZones map[string]struct { } `json:"responses"` } -type HttpRequests struct { +type HTTPRequests struct { Total int64 `json:"total"` Current int64 `json:"current"` } @@ -49,7 +62,7 @@ type ResponseStats struct { Total int64 `json:"total"` } -type HttpServerZones map[string]struct { +type HTTPServerZones map[string]struct { Processing int `json:"processing"` Requests int64 `json:"requests"` Responses ResponseStats `json:"responses"` @@ -58,7 +71,7 @@ type HttpServerZones map[string]struct { Sent int64 `json:"sent"` } -type HttpLocationZones map[string]struct { +type HTTPLocationZones map[string]struct { Requests int64 `json:"requests"` Responses ResponseStats `json:"responses"` Discarded *int64 `json:"discarded"` // added in version 6 @@ -73,7 +86,7 @@ type HealthCheckStats struct { LastPassed *bool `json:"last_passed"` } -type HttpUpstreams map[string]struct { +type HTTPUpstreams map[string]struct { Peers []struct { ID *int `json:"id"` // added in version 3 Server string `json:"server"` @@ -145,7 +158,7 @@ type ExtendedHitStats struct { BytesWritten int64 `json:"bytes_written"` } -type HttpCaches map[string]struct { // added in version 2 +type HTTPCaches map[string]struct { // added in version 2 Size int64 `json:"size"` MaxSize int64 `json:"max_size"` Cold bool `json:"cold"` diff --git a/plugins/inputs/nginx_plus_api/sample.conf b/plugins/inputs/nginx_plus_api/sample.conf new file mode 100644 index 0000000000000..12c5635dbe1e8 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/sample.conf @@ -0,0 +1,16 @@ +# Read Nginx Plus API advanced status information +[[inputs.nginx_plus_api]] + ## An array of Nginx API URIs to gather stats. 
+ urls = ["http://localhost/api"] + # Nginx API version, default: 3 + # api_version = 3 + + # HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/nginx_sts/README.md b/plugins/inputs/nginx_sts/README.md index 935bc9af83c62..a935da4637a77 100644 --- a/plugins/inputs/nginx_sts/README.md +++ b/plugins/inputs/nginx_sts/README.md @@ -1,7 +1,7 @@ # Nginx Stream STS Input Plugin This plugin gathers Nginx status using external virtual host traffic status -module - https://github.com/vozlt/nginx-module-sts. This is an Nginx module +module - . This is an Nginx module that provides access to stream host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. For module configuration details please see its @@ -9,9 +9,10 @@ monitoring of Nginx plus. For module configuration details please see its Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read Nginx virtual host traffic status module information (nginx-module-sts) [[inputs.nginx_sts]] ## An array of ngx_http_status_module or status URI to gather stats. urls = ["http://localhost/status"] @@ -27,7 +28,7 @@ Telegraf minimum version: Telegraf 1.15.0 # insecure_skip_verify = false ``` -### Metrics +## Metrics - nginx_sts_connections - tags: @@ -42,7 +43,7 @@ Telegraf minimum version: Telegraf 1.15.0 - handled - requests -+ nginx_sts_server +- nginx_sts_server - tags: - source - port @@ -77,7 +78,7 @@ Telegraf minimum version: Telegraf 1.15.0 - session_msec_counter - session_msec -+ nginx_sts_upstream +- nginx_sts_upstream - tags: - source - port @@ -106,9 +107,9 @@ Telegraf minimum version: Telegraf 1.15.0 - backup - down -### Example Output: +## Example Output -``` +```shell nginx_sts_upstream,host=localhost,port=80,source=127.0.0.1,upstream=backend_cluster,upstream_address=1.2.3.4:8080 upstream_connect_msec_counter=0i,out_bytes=0i,down=false,connects=0i,session_msec=0i,upstream_session_msec=0i,upstream_session_msec_counter=0i,upstream_connect_msec=0i,upstream_firstbyte_msec_counter=0i,response_3xx_count=0i,session_msec_counter=0i,weight=1i,max_fails=1i,backup=false,upstream_firstbyte_msec=0i,in_bytes=0i,response_1xx_count=0i,response_2xx_count=0i,response_4xx_count=0i,response_5xx_count=0i,fail_timeout=10i 1584699180000000000 nginx_sts_upstream,host=localhost,port=80,source=127.0.0.1,upstream=backend_cluster,upstream_address=9.8.7.6:8080 upstream_firstbyte_msec_counter=0i,response_2xx_count=0i,down=false,upstream_session_msec_counter=0i,out_bytes=0i,response_5xx_count=0i,weight=1i,max_fails=1i,fail_timeout=10i,connects=0i,session_msec_counter=0i,upstream_session_msec=0i,in_bytes=0i,response_1xx_count=0i,response_3xx_count=0i,response_4xx_count=0i,session_msec=0i,upstream_connect_msec=0i,upstream_connect_msec_counter=0i,upstream_firstbyte_msec=0i,backup=false 1584699180000000000 nginx_sts_server,host=localhost,port=80,source=127.0.0.1,zone=* response_2xx_count=0i,response_4xx_count=0i,response_5xx_count=0i,session_msec_counter=0i,in_bytes=0i,out_bytes=0i,session_msec=0i,response_1xx_count=0i,response_3xx_count=0i,connects=0i 1584699180000000000 diff --git a/plugins/inputs/nginx_sts/nginx_sts.go b/plugins/inputs/nginx_sts/nginx_sts.go index 
046460069c65d..7a6ff25bf2379 100644 --- a/plugins/inputs/nginx_sts/nginx_sts.go +++ b/plugins/inputs/nginx_sts/nginx_sts.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package nginx_sts import ( "bufio" + _ "embed" "encoding/json" "fmt" "net" @@ -12,42 +14,27 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type NginxSTS struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client } -var sampleConfig = ` - ## An array of ngx_http_status_module or status URI to gather stats. - urls = ["http://localhost/status"] - - ## HTTP response timeout (default: 5s) - response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (n *NginxSTS) SampleConfig() string { +func (*NginxSTS) SampleConfig() string { return sampleConfig } -func (n *NginxSTS) Description() string { - return "Read Nginx virtual host traffic status module information (nginx-module-sts)" -} - func (n *NginxSTS) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup @@ -81,8 +68,8 @@ func (n *NginxSTS) Gather(acc telegraf.Accumulator) error { } func (n *NginxSTS) createHTTPClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -94,7 +81,7 @@ func (n *NginxSTS) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nginx_sts/nginx_sts_test.go b/plugins/inputs/nginx_sts/nginx_sts_test.go index 18081eadf7f43..9ebb5f91ad9d8 100644 --- a/plugins/inputs/nginx_sts/nginx_sts_test.go +++ b/plugins/inputs/nginx_sts/nginx_sts_test.go @@ -166,14 +166,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(w, rsp) + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -184,13 +183,10 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator err := n.Gather(&acc) - require.NoError(t, err) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { diff --git a/plugins/inputs/nginx_sts/sample.conf b/plugins/inputs/nginx_sts/sample.conf new 
file mode 100644 index 0000000000000..269c09419dd31 --- /dev/null +++ b/plugins/inputs/nginx_sts/sample.conf @@ -0,0 +1,14 @@ +# Read Nginx virtual host traffic status module information (nginx-module-sts) +[[inputs.nginx_sts]] + ## An array of ngx_http_status_module or status URI to gather stats. + urls = ["http://localhost/status"] + + ## HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/nginx_upstream_check/README.md b/plugins/inputs/nginx_upstream_check/README.md index 58bee07be931d..d2af0425825f1 100644 --- a/plugins/inputs/nginx_upstream_check/README.md +++ b/plugins/inputs/nginx_upstream_check/README.md @@ -1,16 +1,22 @@ # Nginx Upstream Check Input Plugin -Read the status output of the nginx_upstream_check (https://github.com/yaoweibin/nginx_upstream_check_module). -This module can periodically check the servers in the Nginx's upstream with configured request and interval to determine -if the server is still available. If checks are failed the server is marked as "down" and will not receive any requests -until the check will pass and a server will be marked as "up" again. +Read the status output of the [nginx_upstream_check][1]. This module can +periodically check the servers in Nginx's upstream with a configured request +and interval to determine if the server is still available. If checks fail, +the server is marked as "down" and will not receive any requests until the check +passes and the server is marked as "up" again. -The status page displays the current status of all upstreams and servers as well as number of the failed and successful -checks. This information can be exported in JSON format and parsed by this input. +The status page displays the current status of all upstreams and servers as well +as the number of failed and successful checks. This information can be exported +in JSON format and parsed by this input. -### Configuration: +[1]: https://github.com/yaoweibin/nginx_upstream_check_module -```toml +## Configuration + +```toml @sample.conf +# Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +[[inputs.nginx_upstream_check]] ## An URL where Nginx Upstream check module is enabled ## It should be set to return a JSON formatted response url = "http://127.0.0.1/status?format=json" @@ -39,36 +45,40 @@ checks. This information can be exported in JSON format and parsed by this input # insecure_skip_verify = false ``` -### Measurements & Fields: +## Metrics - Measurement - - fall (The number of failed server check attempts, counter) - - rise (The number of successful server check attempts, counter) - - status (The reporter server status as a string) - - status_code (The server status code. 1 - up, 2 - down, 0 - other) + - fall (The number of failed server check attempts, counter) + - rise (The number of successful server check attempts, counter) + - status (The reported server status as a string) + - status_code (The server status code. 1 - up, 2 - down, 0 - other) -The "status_code" field most likely will be the most useful one because it allows you to determine the current -state of every server and, possible, add some monitoring to watch over it.
InfluxDB can use string values and the -"status" field can be used instead, but for most other monitoring solutions the integer code will be appropriate. +The "status_code" field will most likely be the most useful one because it +allows you to determine the current state of every server and, possibly, add +some monitoring to watch over it. InfluxDB can use string values and the +"status" field can be used instead, but for most other monitoring solutions the +integer code will be appropriate. -### Tags: +### Tags - All measurements have the following tags: - - name (The hostname or IP of the upstream server) - - port (The alternative check port, 0 if the default one is used) - - type (The check type, http/tcp) - - upstream (The name of the upstream block in the Nginx configuration) - - url (The status url used by telegraf) + - name (The hostname or IP of the upstream server) + - port (The alternative check port, 0 if the default one is used) + - type (The check type, http/tcp) + - upstream (The name of the upstream block in the Nginx configuration) + - url (The status url used by telegraf) -### Example Output: +## Example Output When run with: + ```sh ./telegraf --config telegraf.conf --input-filter nginx_upstream_check --test ``` It produces: -``` + +```text * Plugin: nginx_upstream_check, Collection 1 > nginx_upstream_check,host=node1,name=192.168.0.1:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=0i,rise=100i,status="up",status_code=1i 1529088524000000000 > nginx_upstream_check,host=node2,name=192.168.0.2:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=100i,rise=0i,status="down",status_code=2i 1529088524000000000 diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go index 0fe2907c9a08a..457487cd0b6b8 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -1,51 +1,25 @@ +//go:generate ../../../tools/readme_config_includer/generator package nginx_upstream_check import ( + _ "embed" "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strconv" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) -const sampleConfig = ` - ## An URL where Nginx Upstream check module is enabled - ## It should be set to return a JSON formatted response - url = "http://127.0.0.1/status?format=json" - - ## HTTP method - # method = "GET" - - ## Optional HTTP headers - # headers = {"X-Special-Header" = "Special-Value"} - - ## Override HTTP "Host" header - # host_header = "check.example.com" - - ## Timeout for HTTP requests - timeout = "5s" - - ## Optional HTTP Basic Auth credentials - # username = "username" - # password = "pa$$word" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -const description = "Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)" +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string type NginxUpstreamCheck struct { URL string `toml:"url"` @@ -55,7 +29,7 @@ type NginxUpstreamCheck struct { Method string `toml:"method"` Headers map[string]string `toml:"headers"` HostHeader string `toml:"host_header"` - Timeout internal.Duration `toml:"timeout"` + Timeout config.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -67,7 +41,7 @@ func NewNginxUpstreamCheck() *NginxUpstreamCheck { Method: "GET", Headers: make(map[string]string), HostHeader: "", - Timeout: internal.Duration{Duration: time.Second * 5}, + Timeout: config.Duration(time.Second * 5), } } @@ -77,14 +51,6 @@ func init() { }) } -func (check *NginxUpstreamCheck) SampleConfig() string { - return sampleConfig -} - -func (check *NginxUpstreamCheck) Description() string { - return description -} - type NginxUpstreamCheckData struct { Servers struct { Total uint64 `json:"total"` @@ -104,8 +70,8 @@ type NginxUpstreamCheckServer struct { Port uint16 `json:"port"` } -// createHttpClient create a clients to access API -func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) { +// createHTTPClient creates a client to access the API +func (check *NginxUpstreamCheck) createHTTPClient() (*http.Client, error) { tlsConfig, err := check.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -115,15 +81,14 @@ func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: check.Timeout.Duration, + Timeout: time.Duration(check.Timeout), } return client, nil } -// gatherJsonData query the data source and parse the response JSON -func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) error { - +// gatherJSONData queries the data source and parses the response JSON +func (check *NginxUpstreamCheck) gatherJSONData(address string, value interface{}) error { var method string if check.Method != "" { method = check.Method @@ -131,7 +96,7 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e method = "GET" } - request, err := http.NewRequest(method, url, nil) + request, err := http.NewRequest(method, address, nil) if err != nil { return err } @@ -154,8 +119,8 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e defer response.Body.Close() if response.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors.
- body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) - return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + body, _ := io.ReadAll(io.LimitReader(response.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", address, response.Status, body) } err = json.NewDecoder(response.Body).Decode(value) @@ -166,9 +131,13 @@ func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) e return nil } +func (*NginxUpstreamCheck) SampleConfig() string { + return sampleConfig +} + func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error { if check.client == nil { - client, err := check.createHttpClient() + client, err := check.createHTTPClient() if err != nil { return err @@ -187,25 +156,23 @@ func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error } return nil - } -func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error { +func (check *NginxUpstreamCheck) gatherStatusData(address string, accumulator telegraf.Accumulator) error { checkData := &NginxUpstreamCheckData{} - err := check.gatherJsonData(url, checkData) + err := check.gatherJSONData(address, checkData) if err != nil { return err } for _, server := range checkData.Servers.Server { - tags := map[string]string{ "upstream": server.Upstream, "type": server.Type, "name": server.Name, "port": strconv.Itoa(int(server.Port)), - "url": url, + "url": address, } fields := map[string]interface{}{ diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go index 1b70770d01075..353619b362228 100644 --- a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -45,14 +45,13 @@ func TestNginxUpstreamCheckData(test *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { var response string - if request.URL.Path == "/status" { - response = sampleStatusResponse - responseWriter.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } - - fmt.Fprintln(responseWriter, response) + require.Equal(test, request.URL.Path, "/status", "Cannot handle request") + + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(responseWriter, response) + require.NoError(test, err) })) defer testServer.Close() @@ -103,20 +102,18 @@ func TestNginxUpstreamCheckRequest(test *testing.T) { testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { var response string - if request.URL.Path == "/status" { - response = sampleStatusResponse - responseWriter.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(test, request.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(responseWriter, response) + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(responseWriter, response) + require.NoError(test, err) require.Equal(test, request.Method, "POST") require.Equal(test, request.Header.Get("X-Test"), "test-value") require.Equal(test, request.Header.Get("Authorization"), "Basic dXNlcjpwYXNzd29yZA==") require.Equal(test, request.Host, "status.local") - })) defer 
testServer.Close() diff --git a/plugins/inputs/nginx_upstream_check/sample.conf b/plugins/inputs/nginx_upstream_check/sample.conf new file mode 100644 index 0000000000000..b42ff14ebdb6c --- /dev/null +++ b/plugins/inputs/nginx_upstream_check/sample.conf @@ -0,0 +1,28 @@ +# Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +[[inputs.nginx_upstream_check]] + ## An URL where Nginx Upstream check module is enabled + ## It should be set to return a JSON formatted response + url = "http://127.0.0.1/status?format=json" + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "check.example.com" + + ## Timeout for HTTP requests + timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/nginx_vts/README.md b/plugins/inputs/nginx_vts/README.md index fe9e7fd6ea62f..c7e2697213519 100644 --- a/plugins/inputs/nginx_vts/README.md +++ b/plugins/inputs/nginx_vts/README.md @@ -1,18 +1,32 @@ # Nginx Virtual Host Traffic (VTS) Input Plugin -This plugin gathers Nginx status using external virtual host traffic status module - https://github.com/vozlt/nginx-module-vts. This is an Nginx module that provides access to virtual host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. -For module configuration details please see its [documentation](https://github.com/vozlt/nginx-module-vts#synopsis). +This plugin gathers Nginx status using external virtual host traffic status +module - <https://github.com/vozlt/nginx-module-vts>. This is an Nginx module +that provides access to virtual host status information. It contains the current +status such as servers, upstreams, caches. This is similar to the live activity +monitoring of Nginx plus. For module configuration details please see its +[documentation](https://github.com/vozlt/nginx-module-vts#synopsis). -### Configuration: +## Configuration -```toml -# Read nginx status information using nginx-module-vts module +```toml @sample.conf +# Read Nginx virtual host traffic status module information (nginx-module-vts) [[inputs.nginx_vts]] - ## An array of Nginx status URIs to gather stats. + ## An array of ngx_http_status_module or status URI to gather stats. urls = ["http://localhost/status"] + + ## HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` -### Measurements & Fields: +## Metrics - nginx_vts_connections - active @@ -70,8 +84,7 @@ For module configuration details please see its [documentation](https://github.c - hit - scarce - -### Tags: +### Tags - nginx_vts_connections - source @@ -95,10 +108,10 @@ For module configuration details please see its [documentation](https://github.c - port - zone - -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.nginx_vts]] ## An array of Nginx status URIs to gather stats.
@@ -106,12 +119,14 @@ Using this configuration: ``` When run with: + ```sh ./telegraf -config telegraf.conf -input-filter nginx_vts -test ``` It produces: -``` + +```shell nginx_vts_connections,source=localhost,port=80,host=localhost waiting=30i,accepted=295333i,handled=295333i,requests=6833487i,active=33i,reading=0i,writing=3i 1518341521000000000 nginx_vts_server,zone=example.com,port=80,host=localhost,source=localhost cache_hit=158915i,in_bytes=1935528964i,out_bytes=6531366419i,response_2xx_count=809994i,response_4xx_count=16664i,cache_bypass=0i,cache_stale=0i,cache_revalidated=0i,requests=2187977i,response_1xx_count=0i,response_3xx_count=1360390i,cache_miss=2249i,cache_updating=0i,cache_scarce=0i,request_time=13i,response_5xx_count=929i,cache_expired=0i 1518341521000000000 nginx_vts_server,host=localhost,source=localhost,port=80,zone=* requests=6775284i,in_bytes=5003242389i,out_bytes=36858233827i,cache_expired=318881i,cache_updating=0i,request_time=51i,response_1xx_count=0i,response_2xx_count=4385916i,response_4xx_count=83680i,response_5xx_count=1186i,cache_bypass=0i,cache_revalidated=0i,cache_hit=1972222i,cache_scarce=0i,response_3xx_count=2304502i,cache_miss=408251i,cache_stale=0i 1518341521000000000 diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go index 57453c0b4e3b0..62fe23b2bef98 100644 --- a/plugins/inputs/nginx_vts/nginx_vts.go +++ b/plugins/inputs/nginx_vts/nginx_vts.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package nginx_vts import ( "bufio" + _ "embed" "encoding/json" "fmt" "net" @@ -12,42 +14,27 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type NginxVTS struct { - Urls []string `toml:"urls"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + Urls []string `toml:"urls"` + ResponseTimeout config.Duration `toml:"response_timeout"` tls.ClientConfig client *http.Client } -var sampleConfig = ` - ## An array of ngx_http_status_module or status URI to gather stats. 
- urls = ["http://localhost/status"] - - ## HTTP response timeout (default: 5s) - response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (n *NginxVTS) SampleConfig() string { +func (*NginxVTS) SampleConfig() string { return sampleConfig } -func (n *NginxVTS) Description() string { - return "Read Nginx virtual host traffic status module information (nginx-module-vts)" -} - func (n *NginxVTS) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup @@ -81,8 +68,8 @@ func (n *NginxVTS) Gather(acc telegraf.Accumulator) error { } func (n *NginxVTS) createHTTPClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = time.Second * 5 + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(time.Second * 5) } tlsConfig, err := n.ClientConfig.TLSConfig() @@ -94,7 +81,7 @@ func (n *NginxVTS) createHTTPClient() (*http.Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } return client, nil diff --git a/plugins/inputs/nginx_vts/nginx_vts_test.go b/plugins/inputs/nginx_vts/nginx_vts_test.go index 085fc38433dff..589bc634f9358 100644 --- a/plugins/inputs/nginx_vts/nginx_vts_test.go +++ b/plugins/inputs/nginx_vts/nginx_vts_test.go @@ -203,14 +203,13 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/status" { - rsp = sampleStatusResponse - w.Header()["Content-Type"] = []string{"application/json"} - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/status", "Cannot handle request") - fmt.Fprintln(w, rsp) + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() @@ -221,13 +220,10 @@ func TestNginxPlusGeneratesMetrics(t *testing.T) { var acc testutil.Accumulator err := n.Gather(&acc) - require.NoError(t, err) addr, err := url.Parse(ts.URL) - if err != nil { - panic(err) - } + require.NoError(t, err) host, port, err := net.SplitHostPort(addr.Host) if err != nil { diff --git a/plugins/inputs/nginx_vts/sample.conf b/plugins/inputs/nginx_vts/sample.conf new file mode 100644 index 0000000000000..903df8f4e0697 --- /dev/null +++ b/plugins/inputs/nginx_vts/sample.conf @@ -0,0 +1,14 @@ +# Read Nginx virtual host traffic status module information (nginx-module-vts) +[[inputs.nginx_vts]] + ## An array of ngx_http_status_module or status URI to gather stats. + urls = ["http://localhost/status"] + + ## HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/nomad/README.md b/plugins/inputs/nomad/README.md new file mode 100644 index 0000000000000..1a3f47b69ba4d --- /dev/null +++ b/plugins/inputs/nomad/README.md @@ -0,0 +1,32 @@ +# Hashicorp Nomad Input Plugin + +The Nomad plugin must grab metrics from every Nomad agent of the +cluster. Telegraf may be present in every node and connect to the agent +locally. 
In this case the URL should be something like `http://127.0.0.1:4646`. + +> Tested on Nomad 1.1.6 + +## Configuration + +```toml @sample.conf +# Read metrics from the Nomad API +[[inputs.nomad]] + ## URL for the Nomad agent + # url = "http://127.0.0.1:4646" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile +``` + +## Metrics + +Both Nomad servers and agents collect various metrics. For full details, please +have a look at the following Nomad documentation: + +- [https://www.nomadproject.io/docs/operations/metrics](https://www.nomadproject.io/docs/operations/metrics) +- [https://www.nomadproject.io/docs/operations/telemetry](https://www.nomadproject.io/docs/operations/telemetry) diff --git a/plugins/inputs/nomad/nomad.go b/plugins/inputs/nomad/nomad.go new file mode 100644 index 0000000000000..8e8b446c0584b --- /dev/null +++ b/plugins/inputs/nomad/nomad.go @@ -0,0 +1,164 @@ +//go:generate ../../../tools/readme_config_includer/generator +package nomad + +import ( + _ "embed" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +// Nomad configuration object +type Nomad struct { + URL string `toml:"url"` + + ResponseTimeout config.Duration `toml:"response_timeout"` + + tls.ClientConfig + + roundTripper http.RoundTripper +} + +const timeLayout = "2006-01-02 15:04:05 -0700 MST" + +func init() { + inputs.Add("nomad", func() telegraf.Input { + return &Nomad{ + ResponseTimeout: config.Duration(5 * time.Second), + } + }) +} + +func (*Nomad) SampleConfig() string { + return sampleConfig +} + +func (n *Nomad) Init() error { + if n.URL == "" { + n.URL = "http://127.0.0.1:4646" + } + + tlsCfg, err := n.ClientConfig.TLSConfig() + if err != nil { + return fmt.Errorf("setting up TLS configuration failed: %v", err) + } + + n.roundTripper = &http.Transport{ + TLSHandshakeTimeout: 5 * time.Second, + TLSClientConfig: tlsCfg, + ResponseHeaderTimeout: time.Duration(n.ResponseTimeout), + } + + return nil +} + +// Gather collects metrics from the Nomad endpoint +func (n *Nomad) Gather(acc telegraf.Accumulator) error { + summaryMetrics := &MetricsSummary{} + err := n.loadJSON(n.URL+"/v1/metrics", summaryMetrics) + if err != nil { + return err + } + + err = buildNomadMetrics(acc, summaryMetrics) + if err != nil { + return err + } + + return nil +} + +func (n *Nomad) loadJSON(url string, v interface{}) error { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + resp, err := n.roundTripper.RoundTrip(req) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + } + + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return fmt.Errorf("error parsing json response: %s", err) + } + + return nil +} + +// buildNomadMetrics builds all the metrics and adds them to the accumulator +func buildNomadMetrics(acc telegraf.Accumulator, summaryMetrics *MetricsSummary) error { + t, err := time.Parse(timeLayout, summaryMetrics.Timestamp) + if err != nil { + return
fmt.Errorf("error parsing time: %s", err) + } + + for _, counters := range summaryMetrics.Counters { + tags := counters.DisplayLabels + + fields := map[string]interface{}{ + "count": counters.Count, + "rate": counters.Rate, + "sum": counters.Sum, + "sumsq": counters.SumSq, + "min": counters.Min, + "max": counters.Max, + "mean": counters.Mean, + } + acc.AddCounter(counters.Name, fields, tags, t) + } + + for _, gauges := range summaryMetrics.Gauges { + tags := gauges.DisplayLabels + + fields := map[string]interface{}{ + "value": gauges.Value, + } + + acc.AddGauge(gauges.Name, fields, tags, t) + } + + for _, points := range summaryMetrics.Points { + tags := make(map[string]string) + + fields := map[string]interface{}{ + "value": points.Points, + } + + acc.AddFields(points.Name, fields, tags, t) + } + + for _, samples := range summaryMetrics.Samples { + tags := samples.DisplayLabels + + fields := map[string]interface{}{ + "count": samples.Count, + "rate": samples.Rate, + "sum": samples.Sum, + "stddev": samples.Stddev, + "sumsq": samples.SumSq, + "min": samples.Min, + "max": samples.Max, + "mean": samples.Mean, + } + acc.AddCounter(samples.Name, fields, tags, t) + } + + return nil +} diff --git a/plugins/inputs/nomad/nomad_metrics.go b/plugins/inputs/nomad/nomad_metrics.go new file mode 100644 index 0000000000000..72445df0b83ae --- /dev/null +++ b/plugins/inputs/nomad/nomad_metrics.go @@ -0,0 +1,53 @@ +package nomad + +import ( + "time" +) + +type MetricsSummary struct { + Timestamp string `json:"timestamp"` + Gauges []GaugeValue `json:"gauges"` + Points []PointValue `json:"points"` + Counters []SampledValue `json:"counters"` + Samples []SampledValue `json:"samples"` +} + +type GaugeValue struct { + Name string `json:"name"` + Hash string `json:"-"` + Value float32 `json:"value"` + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string `json:"name"` + Points []float32 `json:"points"` +} + +type SampledValue struct { + Name string `json:"name"` + Hash string `json:"-"` + *AggregateSample + Mean float64 `json:"mean"` + Stddev float64 `json:"stddev"` + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type AggregateSample struct { + Count int `json:"count"` + Rate float64 `json:"rate"` + Sum float64 `json:"sum"` + SumSq float64 `json:"-"` + Min float64 `json:"min"` + Max float64 `json:"max"` + LastUpdated time.Time `json:"-"` +} + +type Label struct { + Name string `json:"name"` + Value string `json:"value"` +} diff --git a/plugins/inputs/nomad/nomad_test.go b/plugins/inputs/nomad/nomad_test.go new file mode 100644 index 0000000000000..49e39e3b7f531 --- /dev/null +++ b/plugins/inputs/nomad/nomad_test.go @@ -0,0 +1,104 @@ +package nomad + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestNomadStats(t *testing.T) { + var applyTests = []struct { + name string + expected []telegraf.Metric + }{ + { + name: "Metrics", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nomad.nomad.rpc.query", + map[string]string{ + "host": "node1", + }, + map[string]interface{}{ + "count": int(7), + "max": float64(1), + "min": float64(1), + "mean": float64(1), + "rate": float64(0.7), + "sum": float64(7), + "sumsq": float64(0), + }, + time.Unix(1636843140, 0), + 1, + ), + testutil.MustMetric( + "nomad.client.allocated.cpu", + map[string]string{ 
+ "node_scheduling_eligibility": "eligible", + "host": "node1", + "node_id": "2bbff078-8473-a9de-6c5e-42b4e053e12f", + "datacenter": "dc1", + "node_class": "none", + "node_status": "ready", + }, + map[string]interface{}{ + "value": float32(500), + }, + time.Unix(1636843140, 0), + 2, + ), + testutil.MustMetric( + "nomad.memberlist.gossip", + map[string]string{ + "host": "node1", + }, + map[string]interface{}{ + "count": int(20), + "max": float64(0.03747599944472313), + "mean": float64(0.013159099989570678), + "min": float64(0.003459000028669834), + "rate": float64(0.026318199979141355), + "stddev": float64(0.009523742715522742), + "sum": float64(0.26318199979141355), + "sumsq": float64(0), + }, + time.Unix(1636843140, 0), + 1, + ), + }, + }, + } + + for _, tt := range applyTests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.RequestURI == "/v1/metrics" { + w.WriteHeader(http.StatusOK) + responseKeyMetrics, _ := ioutil.ReadFile("testdata/response_key_metrics.json") + _, err := fmt.Fprintln(w, string(responseKeyMetrics)) + require.NoError(t, err) + } + })) + defer ts.Close() + + plugin := &Nomad{ + URL: ts.URL, + } + err := plugin.Init() + require.NoError(t, err) + + acc := testutil.Accumulator{} + err = plugin.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/nomad/sample.conf b/plugins/inputs/nomad/sample.conf new file mode 100644 index 0000000000000..2cb90a8d02608 --- /dev/null +++ b/plugins/inputs/nomad/sample.conf @@ -0,0 +1,12 @@ +# Read metrics from the Nomad API +[[inputs.nomad]] + ## URL for the Nomad agent + # url = "http://127.0.0.1:4646" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile diff --git a/plugins/inputs/nomad/testdata/response_key_metrics.json b/plugins/inputs/nomad/testdata/response_key_metrics.json new file mode 100644 index 0000000000000..4e9879bdd7afa --- /dev/null +++ b/plugins/inputs/nomad/testdata/response_key_metrics.json @@ -0,0 +1,48 @@ +{ + "Counters": [ + { + "Count": 7, + "Labels": { + "host": "node1" + }, + "Max": 1, + "Mean": 1, + "Min": 1, + "Name": "nomad.nomad.rpc.query", + "Rate": 0.7, + "Stddev": 0, + "Sum": 7 + } + ], + "Gauges": [ + { + "Labels": { + "node_scheduling_eligibility": "eligible", + "host": "node1", + "node_id": "2bbff078-8473-a9de-6c5e-42b4e053e12f", + "datacenter": "dc1", + "node_class": "none", + "node_status": "ready" + }, + "Name": "nomad.client.allocated.cpu", + "Value": 500 + } + ], + "Points": [], + "Samples": [ + { + "Count": 20, + "Labels": { + "host": "node1" + }, + "Max": 0.03747599944472313, + "Mean": 0.013159099989570678, + "Min": 0.003459000028669834, + "Name": "nomad.memberlist.gossip", + "Rate": 0.026318199979141355, + "Stddev": 0.009523742715522742, + "Sum": 0.26318199979141355 + } + ], + "Timestamp": "2021-11-13 22:39:00 +0000 UTC" +} diff --git a/plugins/inputs/nsd/README.md b/plugins/inputs/nsd/README.md index 2d7f8833c2db8..f2364e3217661 100644 --- a/plugins/inputs/nsd/README.md +++ b/plugins/inputs/nsd/README.md @@ -4,9 +4,9 @@ This plugin gathers stats from [NSD](https://www.nlnetlabs.nl/projects/nsd/about) - an authoritative DNS name server. 
-### Configuration: +## Configuration -```toml +```toml @sample.conf # A plugin to collect stats from the NSD DNS resolver [[inputs.nsd]] ## Address of server to connect to, optionally ':port'. Defaults to the @@ -26,7 +26,7 @@ server. # timeout = "1s" ``` -#### Permissions: +### Permissions It's important to note that this plugin references nsd-control, which may require additional permissions to execute successfully. Depending on the @@ -34,6 +34,7 @@ user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -46,12 +47,14 @@ telegraf : telegraf nsd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.nsd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -62,11 +65,11 @@ Defaults!NSDCONTROLCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -### Metrics: +## Metrics This is the full list of stats provided by nsd-control. In the output, the dots in the nsd-control stat name are replaced by underscores (see -https://www.nlnetlabs.nl/documentation/nsd/nsd-control/ for details). +<https://www.nlnetlabs.nl/documentation/nsd/nsd-control/> for details). - nsd - fields: diff --git a/plugins/inputs/nsd/nsd.go b/plugins/inputs/nsd/nsd.go index 3c5d2695dcb33..3f8832ae3a83c 100644 --- a/plugins/inputs/nsd/nsd.go +++ b/plugins/inputs/nsd/nsd.go @@ -1,8 +1,10 @@ +//go:generate ../../../tools/readme_config_includer/generator package nsd import ( "bufio" "bytes" + _ "embed" "fmt" "net" "os/exec" @@ -11,83 +13,58 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +type runner func(cmdName string, timeout config.Duration, useSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) // NSD is used to store configuration values type NSD struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool Server string ConfigFile string - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/nsd-control" -var defaultTimeout = internal.Duration{Duration: time.Second} - -var sampleConfig = ` - ## Address of server to connect to, optionally ':port'. Defaults to the - ## address in the nsd config file.
- server = "127.0.0.1:8953" - - ## If running as a restricted user you can prepend sudo for additional access: - # use_sudo = false - - ## The default location of the nsd-control binary can be overridden with: - # binary = "/usr/sbin/nsd-control" - - ## The default location of the nsd config file can be overridden with: - # config_file = "/etc/nsd/nsd.conf" - - ## The default timeout of 1s can be overridden with: - # timeout = "1s" -` - -// Description displays what this plugin is about -func (s *NSD) Description() string { - return "A plugin to collect stats from the NSD authoritative DNS name server" -} - -// SampleConfig displays configuration instructions -func (s *NSD) SampleConfig() string { - return sampleConfig -} +var defaultTimeout = config.Duration(time.Second) // Shell out to nsd_stat and return the output -func nsdRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ConfigFile string) (*bytes.Buffer, error) { +func nsdRunner(cmdName string, timeout config.Duration, useSudo bool, server string, configFile string) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} - if Server != "" { - host, port, err := net.SplitHostPort(Server) + if server != "" { + host, port, err := net.SplitHostPort(server) if err == nil { - Server = host + "@" + port + server = host + "@" + port } - cmdArgs = append([]string{"-s", Server}, cmdArgs...) + cmdArgs = append([]string{"-s", server}, cmdArgs...) } - if ConfigFile != "" { - cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + if configFile != "" { + cmdArgs = append([]string{"-c", configFile}, cmdArgs...) } cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running nsd-control: %s (%s %v)", err, cmdName, cmdArgs) } @@ -95,6 +72,10 @@ func nsdRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server s return &out, nil } +func (*NSD) SampleConfig() string { + return sampleConfig +} + // Gather collects stats from nsd-control and adds them to the Accumulator func (s *NSD) Gather(acc telegraf.Accumulator) error { out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ConfigFile) @@ -120,7 +101,7 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { fieldValue, err := strconv.ParseFloat(value, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v", + acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value)) continue } @@ -128,25 +109,25 @@ func (s *NSD) Gather(acc telegraf.Accumulator) error { if strings.HasPrefix(stat, "server") { statTokens := strings.Split(stat, ".") if len(statTokens) > 1 { - serverId := strings.TrimPrefix(statTokens[0], "server") - if _, err := strconv.Atoi(serverId); err == nil { + serverID := strings.TrimPrefix(statTokens[0], "server") + if _, err := strconv.Atoi(serverID); err == nil { serverTokens := statTokens[1:] field := strings.Join(serverTokens[:], "_") - if fieldsServers[serverId] == nil { - fieldsServers[serverId] = make(map[string]interface{}) + if fieldsServers[serverID] == nil { + fieldsServers[serverID] = make(map[string]interface{}) } - fieldsServers[serverId][field] = fieldValue + fieldsServers[serverID][field] = fieldValue } } } else { - field := strings.Replace(stat, ".", "_", -1) + field := 
strings.ReplaceAll(stat, ".", "_") fields[field] = fieldValue } } acc.AddFields("nsd", fields, nil) - for thisServerId, thisServerFields := range fieldsServers { - thisServerTag := map[string]string{"server": thisServerId} + for thisServerID, thisServerFields := range fieldsServers { + thisServerTag := map[string]string{"server": thisServerID} acc.AddFields("nsd_servers", thisServerFields, thisServerTag) } diff --git a/plugins/inputs/nsd/nsd_test.go b/plugins/inputs/nsd/nsd_test.go index ee527f7b7f0b2..74f4a14cf96fa 100644 --- a/plugins/inputs/nsd/nsd_test.go +++ b/plugins/inputs/nsd/nsd_test.go @@ -3,17 +3,15 @@ package nsd import ( "bytes" "testing" - "time" - "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server string, ConfigFile string) func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool, string, string) (*bytes.Buffer, error) { +func NSDControl(output string) func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { + return func(string, config.Duration, bool, string, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,21 +19,20 @@ func NSDControl(output string, Timeout internal.Duration, useSudo bool, Server s func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &NSD{ - run: NSDControl(fullOutput, TestTimeout, true, "", ""), + run: NSDControl(fullOutput), } err := v.Gather(acc) - assert.NoError(t, err) + require.NoError(t, err) - assert.True(t, acc.HasMeasurement("nsd")) - assert.True(t, acc.HasMeasurement("nsd_servers")) + require.True(t, acc.HasMeasurement("nsd")) + require.True(t, acc.HasMeasurement("nsd_servers")) - assert.Len(t, acc.Metrics, 2) - assert.Equal(t, 99, acc.NFields()) + require.Len(t, acc.Metrics, 2) + require.Equal(t, 99, acc.NFields()) acc.AssertContainsFields(t, "nsd", parsedFullOutput) acc.AssertContainsFields(t, "nsd_servers", parsedFullOutputServerAsTag) - } var parsedFullOutputServerAsTag = map[string]interface{}{ diff --git a/plugins/inputs/nsd/sample.conf b/plugins/inputs/nsd/sample.conf new file mode 100644 index 0000000000000..7ba94e893657c --- /dev/null +++ b/plugins/inputs/nsd/sample.conf @@ -0,0 +1,17 @@ +# A plugin to collect stats from the NSD DNS resolver +[[inputs.nsd]] + ## Address of server to connect to, optionally ':port'. Defaults to the + ## address in the nsd config file. + server = "127.0.0.1:8953" + + ## If running as a restricted user you can prepend sudo for additional access: + # use_sudo = false + + ## The default location of the nsd-control binary can be overridden with: + # binary = "/usr/sbin/nsd-control" + + ## The default location of the nsd config file can be overridden with: + # config_file = "/etc/nsd/nsd.conf" + + ## The default timeout of 1s can be overridden with: + # timeout = "1s" diff --git a/plugins/inputs/nsq/README.md b/plugins/inputs/nsq/README.md index 00c1089afe309..631e23c7659d2 100644 --- a/plugins/inputs/nsq/README.md +++ b/plugins/inputs/nsq/README.md @@ -1,9 +1,14 @@ # NSQ Input Plugin -### Configuration: +This plugin gathers metrics from [NSQ](https://nsq.io/). 
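For orientation, the NSQ plugin's whole gather path reduces to fetching `/stats?format=json` from each nsqd endpoint (the `requestPattern` constant in nsq.go) and decoding the JSON. A rough, self-contained sketch of that request/decode cycle; `minimalStats` is a made-up subset of the plugin's real `NSQStatsData` type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// minimalStats decodes just two top-level fields of the nsqd stats payload;
// the real plugin also walks topics, channels and client connections.
type minimalStats struct {
	Version string `json:"version"`
	Health  string `json:"health"`
}

func main() {
	endpoint := "http://localhost:4151"
	// Same URL shape the plugin builds via its requestPattern constant.
	resp, err := http.Get(fmt.Sprintf("%s/stats?format=json", endpoint))
	if err != nil {
		fmt.Println("error while polling:", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("error reading body:", err)
		return
	}

	var stats minimalStats
	if err := json.Unmarshal(body, &stats); err != nil {
		fmt.Println("error parsing response:", err)
		return
	}
	fmt.Printf("nsqd version %s, health %q\n", stats.Version, stats.Health)
}
```

As the diff below notes, pre-1.0 nsqd wrapped this payload in a `data` object, which is why the plugin retries with the old format when `Version` comes back empty.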
-```toml -# Description +See the [NSQD API docs](https://nsq.io/components/nsqd.html) for endpoints that +the plugin can read. + +## Configuration + +```toml @sample.conf +# Read NSQ topic and channel statistics. [[inputs.nsq]] ## An array of NSQD HTTP API endpoints endpoints = ["http://localhost:4151"] diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index fe941982646b1..f14cb4bd2cccc 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -1,3 +1,4 @@ +//go:generate ../../../tools/readme_config_includer/generator // The MIT License (MIT) // // Copyright (c) 2015 Jeff Nickoloff (jeff@allingeek.com) @@ -23,9 +24,10 @@ package nsq import ( + _ "embed" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strconv" @@ -37,6 +39,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Might add Lookupd endpoints for cluster discovery type NSQ struct { Endpoints []string @@ -44,18 +50,6 @@ type NSQ struct { httpClient *http.Client } -var sampleConfig = ` - ## An array of NSQD HTTP API endpoints - endpoints = ["http://localhost:4151"] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - const ( requestPattern = `%s/stats?format=json` ) @@ -70,19 +64,15 @@ func New() *NSQ { return &NSQ{} } -func (n *NSQ) SampleConfig() string { +func (*NSQ) SampleConfig() string { return sampleConfig } -func (n *NSQ) Description() string { - return "Read NSQ topic and channel statistics." -} - func (n *NSQ) Gather(acc telegraf.Accumulator) error { var err error if n.httpClient == nil { - n.httpClient, err = n.getHttpClient() + n.httpClient, err = n.getHTTPClient() if err != nil { return err } @@ -101,7 +91,7 @@ func (n *NSQ) Gather(acc telegraf.Accumulator) error { return nil } -func (n *NSQ) getHttpClient() (*http.Client, error) { +func (n *NSQ) getHTTPClient() (*http.Client, error) { tlsConfig, err := n.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -111,7 +101,7 @@ func (n *NSQ) getHttpClient() (*http.Client, error) { } httpClient := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return httpClient, nil } @@ -123,7 +113,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { } r, err := n.httpClient.Get(u.String()) if err != nil { - return fmt.Errorf("Error while polling %s: %s", u.String(), err) + return fmt.Errorf("error while polling %s: %s", u.String(), err) } defer r.Body.Close() @@ -131,22 +121,22 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { return fmt.Errorf("%s returned HTTP status %s", u.String(), r.Status) } - body, err := ioutil.ReadAll(r.Body) + body, err := io.ReadAll(r.Body) if err != nil { - return fmt.Errorf(`Error reading body: %s`, err) + return fmt.Errorf(`error reading body: %s`, err) } data := &NSQStatsData{} err = json.Unmarshal(body, data) if err != nil { - return fmt.Errorf(`Error parsing response: %s`, err) + return fmt.Errorf(`error parsing response: %s`, err) } // Data was not parsed correctly attempt to use old format. 
if len(data.Version) < 1 { wrapper := &NSQStats{} err = json.Unmarshal(body, wrapper) if err != nil { - return fmt.Errorf(`Error parsing response: %s`, err) + return fmt.Errorf(`error parsing response: %s`, err) } data = &wrapper.Data } @@ -176,7 +166,7 @@ func buildURL(e string) (*url.URL, error) { u := fmt.Sprintf(requestPattern, e) addr, err := url.Parse(u) if err != nil { - return nil, fmt.Errorf("Unable to parse address '%s': %s", u, err) + return nil, fmt.Errorf("unable to parse address '%s': %s", u, err) } return addr, nil } diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index 23af13a4c82bc..03ebeaed65382 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -15,7 +15,8 @@ import ( func TestNSQStatsV1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responseV1) + _, err := fmt.Fprintln(w, responseV1) + require.NoError(t, err) })) defer ts.Close() @@ -271,7 +272,8 @@ var responseV1 = ` func TestNSQStatsPreV1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, responsePreV1) + _, err := fmt.Fprintln(w, responsePreV1) + require.NoError(t, err) })) defer ts.Close() diff --git a/plugins/inputs/nsq/sample.conf b/plugins/inputs/nsq/sample.conf new file mode 100644 index 0000000000000..f06662a6b4032 --- /dev/null +++ b/plugins/inputs/nsq/sample.conf @@ -0,0 +1,11 @@ +# Read NSQ topic and channel statistics. +[[inputs.nsq]] + ## An array of NSQD HTTP API endpoints + endpoints = ["http://localhost:4151"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/nsq_consumer/README.md b/plugins/inputs/nsq_consumer/README.md index d1e7194bbd7e0..d2fc086ed26c1 100644 --- a/plugins/inputs/nsq_consumer/README.md +++ b/plugins/inputs/nsq_consumer/README.md @@ -3,9 +3,9 @@ The [NSQ][nsq] consumer plugin reads from NSQD and creates metrics using one of the supported [input data formats][]. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Read metrics from NSQD topic(s) [[inputs.nsq_consumer]] ## Server option still works but is deprecated, we just prepend it to the nsqd array. diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 2c25cce7d8114..5967e7a219661 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -1,15 +1,23 @@ +//go:generate ../../../tools/readme_config_includer/generator package nsq_consumer import ( "context" + _ "embed" + "fmt" "sync" + nsq "github.com/nsqio/go-nsq" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - nsq "github.com/nsqio/go-nsq" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + const ( defaultMaxUndeliveredMessages = 1000 ) @@ -21,14 +29,14 @@ type logger struct { log telegraf.Logger } -func (l *logger) Output(calldepth int, s string) error { +func (l *logger) Output(_ int, s string) error { l.log.Debug(s) return nil } //NSQConsumer represents the configuration of the plugin type NSQConsumer struct { - Server string `toml:"server"` + Server string `toml:"server" deprecated:"1.5.0;use 'nsqd' instead"` Nsqd []string `toml:"nsqd"` Nsqlookupd []string `toml:"nsqlookupd"` Topic string `toml:"topic"` @@ -48,51 +56,15 @@ type NSQConsumer struct { cancel context.CancelFunc } -var sampleConfig = ` - ## Server option still works but is deprecated, we just prepend it to the nsqd array. - # server = "localhost:4150" - - ## An array representing the NSQD TCP HTTP Endpoints - nsqd = ["localhost:4150"] - - ## An array representing the NSQLookupd HTTP Endpoints - nsqlookupd = ["localhost:4161"] - topic = "telegraf" - channel = "consumer" - max_in_flight = 100 - - ## Maximum messages to read from the broker that have not been written by an - ## output. For best throughput set based on the number of metrics within - ## each message and the size of the output's metric_batch_size. - ## - ## For example, if each message from the queue contains 10 metrics and the - ## output metric_batch_size is 1000, setting this to 100 will ensure that a - ## full batch is collected and the write is triggered immediately without - ## waiting until the next flush_interval. - # max_undelivered_messages = 1000 - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -` +func (*NSQConsumer) SampleConfig() string { + return sampleConfig +} // SetParser takes the data_format from the config and finds the right parser for that format func (n *NSQConsumer) SetParser(parser parsers.Parser) { n.parser = parser } -// SampleConfig returns config values for generating a sample configuration file -func (n *NSQConsumer) SampleConfig() string { - return sampleConfig -} - -// Description prints description string -func (n *NSQConsumer) Description() string { - return "Read NSQ topic for metrics." 
-} - // Start pulls data from nsq func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { acc := ac.WithTracking(n.MaxUndeliveredMessages) @@ -102,7 +74,9 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { ctx, cancel := context.WithCancel(context.Background()) n.cancel = cancel - n.connect() + if err := n.connect(); err != nil { + return err + } n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo) n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { metrics, err := n.parser.Parse(message.Body) @@ -132,10 +106,29 @@ func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { return nil })) + // For backward compatibility + if n.Server != "" { + n.Nsqd = append(n.Nsqd, n.Server) + } + + // Check if we have anything to connect to + if len(n.Nsqlookupd) == 0 && len(n.Nsqd) == 0 { + return fmt.Errorf("either 'nsqd' or 'nsqlookupd' needs to be specified") + } + if len(n.Nsqlookupd) > 0 { - n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) + err := n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) + if err != nil && err != nsq.ErrAlreadyConnected { + return err + } + } + + if len(n.Nsqd) > 0 { + err := n.consumer.ConnectToNSQDs(n.Nsqd) + if err != nil && err != nsq.ErrAlreadyConnected { + return err + } } - n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server)) n.wg.Add(1) go func() { @@ -179,7 +172,7 @@ func (n *NSQConsumer) Stop() { } // Gather is a noop -func (n *NSQConsumer) Gather(acc telegraf.Accumulator) error { +func (n *NSQConsumer) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index e07b125ccdb8f..4c6d944746440 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -11,10 +11,11 @@ import ( "testing" "time" + "github.com/nsqio/go-nsq" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/nsqio/go-nsq" - "github.com/stretchr/testify/assert" ) // This test is modeled after the kafka consumer integration test @@ -22,18 +23,21 @@ func TestReadsMetricsFromNSQ(t *testing.T) { msgID := nsq.MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msg := nsq.NewMessage(msgID, []byte("cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n")) + frameMsg, err := frameMessage(msg) + require.NoError(t, err) + script := []instruction{ // SUB {0, nsq.FrameTypeResponse, []byte("OK")}, // IDENTIFY {0, nsq.FrameTypeResponse, []byte("OK")}, - {20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)}, + {20 * time.Millisecond, nsq.FrameTypeMessage, frameMsg}, // needed to exit test {100 * time.Millisecond, -1, []byte("exit")}, } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155") - newMockNSQD(script, addr.String()) + newMockNSQD(t, script, addr.String()) consumer := &NSQConsumer{ Log: testutil.Logger{}, @@ -48,27 +52,22 @@ func TestReadsMetricsFromNSQ(t *testing.T) { p, _ := parsers.NewInfluxParser() consumer.SetParser(p) var acc testutil.Accumulator - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") - if err := consumer.Start(&acc); err != nil { - t.Fatal(err.Error()) - } + require.Len(t, acc.Metrics, 0, "There should not be any points") + require.NoError(t, consumer.Start(&acc)) waitForPoint(&acc, t) - if len(acc.Metrics) == 1 { - point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", 
point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ - "host": "server01", - "direction": "in", - "region": "us-west", - }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) - } else { - t.Errorf("No points found in accumulator, expected 1") - } - + require.Len(t, acc.Metrics, 1, "No points found in accumulator, expected 1") + + point := acc.Metrics[0] + require.Equal(t, "cpu_load_short", point.Measurement) + require.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) + require.Equal(t, map[string]string{ + "host": "server01", + "direction": "in", + "region": "us-west", + }, point.Tags) + require.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) } // Waits for the metric that was sent to the kafka broker to arrive at the kafka @@ -78,6 +77,8 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { ticker := time.NewTicker(5 * time.Millisecond) defer ticker.Stop() counter := 0 + + //nolint:gosimple // for-select used on purpose for { select { case <-ticker.C: @@ -91,16 +92,15 @@ func waitForPoint(acc *testutil.Accumulator, t *testing.T) { } } -func newMockNSQD(script []instruction, addr string) *mockNSQD { +func newMockNSQD(t *testing.T, script []instruction, addr string) *mockNSQD { n := &mockNSQD{ script: script, exitChan: make(chan int), } tcpListener, err := net.Listen("tcp", addr) - if err != nil { - log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr.String(), err) - } + require.NoError(t, err, "listen (%s) failed", n.tcpAddr.String()) + n.tcpListener = tcpListener n.tcpAddr = tcpListener.Addr().(*net.TCPAddr) @@ -141,6 +141,7 @@ func (n *mockNSQD) handle(conn net.Conn) { buf := make([]byte, 4) _, err := io.ReadFull(conn, buf) if err != nil { + //nolint:revive // log.Fatalf called intentionally log.Fatalf("ERROR: failed to read protocol version - %s", err) } @@ -173,14 +174,14 @@ func (n *mockNSQD) handle(conn net.Conn) { l := make([]byte, 4) _, err := io.ReadFull(rdr, l) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } size := int32(binary.BigEndian.Uint32(l)) b := make([]byte, size) _, err = io.ReadFull(rdr, b) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } case bytes.Equal(params[0], []byte("RDY")): @@ -202,9 +203,14 @@ func (n *mockNSQD) handle(conn net.Conn) { } rdyCount-- } - _, err := conn.Write(framedResponse(inst.frameType, inst.body)) + buf, err := framedResponse(inst.frameType, inst.body) + if err != nil { + log.Print(err.Error()) + goto exit + } + _, err = conn.Write(buf) if err != nil { - log.Printf(err.Error()) + log.Print(err.Error()) goto exit } scriptTime = time.After(n.script[idx+1].delay) @@ -213,11 +219,14 @@ func (n *mockNSQD) handle(conn net.Conn) { } exit: + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive n.tcpListener.Close() + //nolint:errcheck,revive conn.Close() } -func framedResponse(frameType int32, data []byte) []byte { +func framedResponse(frameType int32, data []byte) ([]byte, error) { var w bytes.Buffer beBuf := make([]byte, 4) @@ -226,21 +235,21 @@ func framedResponse(frameType int32, data []byte) []byte { binary.BigEndian.PutUint32(beBuf, size) _, err := w.Write(beBuf) if err != nil { - return nil + return nil, err } binary.BigEndian.PutUint32(beBuf, uint32(frameType)) _, err = w.Write(beBuf) if err != nil { - return nil + return nil, err } - w.Write(data) - return w.Bytes() + _, err 
= w.Write(data) + return w.Bytes(), err } -func frameMessage(m *nsq.Message) []byte { +func frameMessage(m *nsq.Message) ([]byte, error) { var b bytes.Buffer - m.WriteTo(&b) - return b.Bytes() + _, err := m.WriteTo(&b) + return b.Bytes(), err } diff --git a/plugins/inputs/nsq_consumer/sample.conf b/plugins/inputs/nsq_consumer/sample.conf new file mode 100644 index 0000000000000..99003633ebe37 --- /dev/null +++ b/plugins/inputs/nsq_consumer/sample.conf @@ -0,0 +1,29 @@ +# Read metrics from NSQD topic(s) +[[inputs.nsq_consumer]] + ## Server option still works but is deprecated, we just prepend it to the nsqd array. + # server = "localhost:4150" + + ## An array representing the NSQD TCP HTTP Endpoints + nsqd = ["localhost:4150"] + + ## An array representing the NSQLookupd HTTP Endpoints + nsqlookupd = ["localhost:4161"] + topic = "telegraf" + channel = "consumer" + max_in_flight = 100 + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" diff --git a/plugins/inputs/nstat/README.md b/plugins/inputs/nstat/README.md index c0ebc2654f5b8..1a8cd229745a9 100644 --- a/plugins/inputs/nstat/README.md +++ b/plugins/inputs/nstat/README.md @@ -1,11 +1,26 @@ # Nstat Input Plugin -Plugin collects network metrics from `/proc/net/netstat`, `/proc/net/snmp` and `/proc/net/snmp6` files +Plugin collects network metrics from `/proc/net/netstat`, `/proc/net/snmp` and +`/proc/net/snmp6` files -### Configuration +## Configuration + +```toml @sample.conf +# Collect kernel snmp counters and network interface statistics +[[inputs.nstat]] + ## file paths for proc files. If empty default paths will be used: + ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 + ## These can also be overridden with env variables, see README. + proc_net_netstat = "/proc/net/netstat" + proc_net_snmp = "/proc/net/snmp" + proc_net_snmp6 = "/proc/net/snmp6" + ## dump metrics with 0 values too + dump_zeros = true +``` + +The plugin first tries to read the file paths from the config; if they are +empty, it reads them from env variables. -The plugin firstly tries to read file paths from config values -if it is empty, then it reads from env variables. * `PROC_NET_NETSTAT` * `PROC_NET_SNMP` * `PROC_NET_SNMP6` @@ -15,331 +30,324 @@ then it tries to read the proc root from env - `PROC_ROOT`, and sets `/proc` as a root path if `PROC_ROOT` is also empty. Then appends default file paths: + * `/net/netstat` * `/net/snmp` * `/net/snmp6` -So if nothing is given, no paths in config and in env vars, the plugin takes the default paths. +So if no paths are given, neither in the config nor in env vars, the plugin +takes the default paths.
+ * `/proc/net/netstat` * `/proc/net/snmp` * `/proc/net/snmp6` -The sample config file -```toml -[[inputs.nstat]] - ## file paths - ## e.g: /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 - # proc_net_netstat = "" - # proc_net_snmp = "" - # proc_net_snmp6 = "" - ## dump metrics with 0 values too - # dump_zeros = true -``` - -In case that `proc_net_snmp6` path doesn't exist (e.g. IPv6 is not enabled) no error would be raised. +In case that `proc_net_snmp6` path doesn't exist (e.g. IPv6 is not enabled) no +error would be raised. -### Measurements & Fields +## Metrics -- nstat - - Icmp6InCsumErrors - - Icmp6InDestUnreachs - - Icmp6InEchoReplies - - Icmp6InEchos - - Icmp6InErrors - - Icmp6InGroupMembQueries - - Icmp6InGroupMembReductions - - Icmp6InGroupMembResponses - - Icmp6InMLDv2Reports - - Icmp6InMsgs - - Icmp6InNeighborAdvertisements - - Icmp6InNeighborSolicits - - Icmp6InParmProblems - - Icmp6InPktTooBigs - - Icmp6InRedirects - - Icmp6InRouterAdvertisements - - Icmp6InRouterSolicits - - Icmp6InTimeExcds - - Icmp6OutDestUnreachs - - Icmp6OutEchoReplies - - Icmp6OutEchos - - Icmp6OutErrors - - Icmp6OutGroupMembQueries - - Icmp6OutGroupMembReductions - - Icmp6OutGroupMembResponses - - Icmp6OutMLDv2Reports - - Icmp6OutMsgs - - Icmp6OutNeighborAdvertisements - - Icmp6OutNeighborSolicits - - Icmp6OutParmProblems - - Icmp6OutPktTooBigs - - Icmp6OutRedirects - - Icmp6OutRouterAdvertisements - - Icmp6OutRouterSolicits - - Icmp6OutTimeExcds - - Icmp6OutType133 - - Icmp6OutType135 - - Icmp6OutType143 - - IcmpInAddrMaskReps - - IcmpInAddrMasks - - IcmpInCsumErrors - - IcmpInDestUnreachs - - IcmpInEchoReps - - IcmpInEchos - - IcmpInErrors - - IcmpInMsgs - - IcmpInParmProbs - - IcmpInRedirects - - IcmpInSrcQuenchs - - IcmpInTimeExcds - - IcmpInTimestampReps - - IcmpInTimestamps - - IcmpMsgInType3 - - IcmpMsgOutType3 - - IcmpOutAddrMaskReps - - IcmpOutAddrMasks - - IcmpOutDestUnreachs - - IcmpOutEchoReps - - IcmpOutEchos - - IcmpOutErrors - - IcmpOutMsgs - - IcmpOutParmProbs - - IcmpOutRedirects - - IcmpOutSrcQuenchs - - IcmpOutTimeExcds - - IcmpOutTimestampReps - - IcmpOutTimestamps - - Ip6FragCreates - - Ip6FragFails - - Ip6FragOKs - - Ip6InAddrErrors - - Ip6InBcastOctets - - Ip6InCEPkts - - Ip6InDelivers - - Ip6InDiscards - - Ip6InECT0Pkts - - Ip6InECT1Pkts - - Ip6InHdrErrors - - Ip6InMcastOctets - - Ip6InMcastPkts - - Ip6InNoECTPkts - - Ip6InNoRoutes - - Ip6InOctets - - Ip6InReceives - - Ip6InTooBigErrors - - Ip6InTruncatedPkts - - Ip6InUnknownProtos - - Ip6OutBcastOctets - - Ip6OutDiscards - - Ip6OutForwDatagrams - - Ip6OutMcastOctets - - Ip6OutMcastPkts - - Ip6OutNoRoutes - - Ip6OutOctets - - Ip6OutRequests - - Ip6ReasmFails - - Ip6ReasmOKs - - Ip6ReasmReqds - - Ip6ReasmTimeout - - IpDefaultTTL - - IpExtInBcastOctets - - IpExtInBcastPkts - - IpExtInCEPkts - - IpExtInCsumErrors - - IpExtInECT0Pkts - - IpExtInECT1Pkts - - IpExtInMcastOctets - - IpExtInMcastPkts - - IpExtInNoECTPkts - - IpExtInNoRoutes - - IpExtInOctets - - IpExtInTruncatedPkts - - IpExtOutBcastOctets - - IpExtOutBcastPkts - - IpExtOutMcastOctets - - IpExtOutMcastPkts - - IpExtOutOctets - - IpForwDatagrams - - IpForwarding - - IpFragCreates - - IpFragFails - - IpFragOKs - - IpInAddrErrors - - IpInDelivers - - IpInDiscards - - IpInHdrErrors - - IpInReceives - - IpInUnknownProtos - - IpOutDiscards - - IpOutNoRoutes - - IpOutRequests - - IpReasmFails - - IpReasmOKs - - IpReasmReqds - - IpReasmTimeout - - TcpActiveOpens - - TcpAttemptFails - - TcpCurrEstab - - TcpEstabResets - - TcpExtArpFilter - - TcpExtBusyPollRxPackets - - 
TcpExtDelayedACKLocked - - TcpExtDelayedACKLost - - TcpExtDelayedACKs - - TcpExtEmbryonicRsts - - TcpExtIPReversePathFilter - - TcpExtListenDrops - - TcpExtListenOverflows - - TcpExtLockDroppedIcmps - - TcpExtOfoPruned - - TcpExtOutOfWindowIcmps - - TcpExtPAWSActive - - TcpExtPAWSEstab - - TcpExtPAWSPassive - - TcpExtPruneCalled - - TcpExtRcvPruned - - TcpExtSyncookiesFailed - - TcpExtSyncookiesRecv - - TcpExtSyncookiesSent - - TcpExtTCPACKSkippedChallenge - - TcpExtTCPACKSkippedFinWait2 - - TcpExtTCPACKSkippedPAWS - - TcpExtTCPACKSkippedSeq - - TcpExtTCPACKSkippedSynRecv - - TcpExtTCPACKSkippedTimeWait - - TcpExtTCPAbortFailed - - TcpExtTCPAbortOnClose - - TcpExtTCPAbortOnData - - TcpExtTCPAbortOnLinger - - TcpExtTCPAbortOnMemory - - TcpExtTCPAbortOnTimeout - - TcpExtTCPAutoCorking - - TcpExtTCPBacklogDrop - - TcpExtTCPChallengeACK - - TcpExtTCPDSACKIgnoredNoUndo - - TcpExtTCPDSACKIgnoredOld - - TcpExtTCPDSACKOfoRecv - - TcpExtTCPDSACKOfoSent - - TcpExtTCPDSACKOldSent - - TcpExtTCPDSACKRecv - - TcpExtTCPDSACKUndo - - TcpExtTCPDeferAcceptDrop - - TcpExtTCPDirectCopyFromBacklog - - TcpExtTCPDirectCopyFromPrequeue - - TcpExtTCPFACKReorder - - TcpExtTCPFastOpenActive - - TcpExtTCPFastOpenActiveFail - - TcpExtTCPFastOpenCookieReqd - - TcpExtTCPFastOpenListenOverflow - - TcpExtTCPFastOpenPassive - - TcpExtTCPFastOpenPassiveFail - - TcpExtTCPFastRetrans - - TcpExtTCPForwardRetrans - - TcpExtTCPFromZeroWindowAdv - - TcpExtTCPFullUndo - - TcpExtTCPHPAcks - - TcpExtTCPHPHits - - TcpExtTCPHPHitsToUser - - TcpExtTCPHystartDelayCwnd - - TcpExtTCPHystartDelayDetect - - TcpExtTCPHystartTrainCwnd - - TcpExtTCPHystartTrainDetect - - TcpExtTCPKeepAlive - - TcpExtTCPLossFailures - - TcpExtTCPLossProbeRecovery - - TcpExtTCPLossProbes - - TcpExtTCPLossUndo - - TcpExtTCPLostRetransmit - - TcpExtTCPMD5NotFound - - TcpExtTCPMD5Unexpected - - TcpExtTCPMTUPFail - - TcpExtTCPMTUPSuccess - - TcpExtTCPMemoryPressures - - TcpExtTCPMinTTLDrop - - TcpExtTCPOFODrop - - TcpExtTCPOFOMerge - - TcpExtTCPOFOQueue - - TcpExtTCPOrigDataSent - - TcpExtTCPPartialUndo - - TcpExtTCPPrequeueDropped - - TcpExtTCPPrequeued - - TcpExtTCPPureAcks - - TcpExtTCPRcvCoalesce - - TcpExtTCPRcvCollapsed - - TcpExtTCPRenoFailures - - TcpExtTCPRenoRecovery - - TcpExtTCPRenoRecoveryFail - - TcpExtTCPRenoReorder - - TcpExtTCPReqQFullDoCookies - - TcpExtTCPReqQFullDrop - - TcpExtTCPRetransFail - - TcpExtTCPSACKDiscard - - TcpExtTCPSACKReneging - - TcpExtTCPSACKReorder - - TcpExtTCPSYNChallenge - - TcpExtTCPSackFailures - - TcpExtTCPSackMerged - - TcpExtTCPSackRecovery - - TcpExtTCPSackRecoveryFail - - TcpExtTCPSackShiftFallback - - TcpExtTCPSackShifted - - TcpExtTCPSchedulerFailed - - TcpExtTCPSlowStartRetrans - - TcpExtTCPSpuriousRTOs - - TcpExtTCPSpuriousRtxHostQueues - - TcpExtTCPSynRetrans - - TcpExtTCPTSReorder - - TcpExtTCPTimeWaitOverflow - - TcpExtTCPTimeouts - - TcpExtTCPToZeroWindowAdv - - TcpExtTCPWantZeroWindowAdv - - TcpExtTCPWinProbe - - TcpExtTW - - TcpExtTWKilled - - TcpExtTWRecycled - - TcpInCsumErrors - - TcpInErrs - - TcpInSegs - - TcpMaxConn - - TcpOutRsts - - TcpOutSegs - - TcpPassiveOpens - - TcpRetransSegs - - TcpRtoAlgorithm - - TcpRtoMax - - TcpRtoMin - - Udp6IgnoredMulti - - Udp6InCsumErrors - - Udp6InDatagrams - - Udp6InErrors - - Udp6NoPorts - - Udp6OutDatagrams - - Udp6RcvbufErrors - - Udp6SndbufErrors - - UdpIgnoredMulti - - UdpInCsumErrors - - UdpInDatagrams - - UdpInErrors - - UdpLite6InCsumErrors - - UdpLite6InDatagrams - - UdpLite6InErrors - - UdpLite6NoPorts - - UdpLite6OutDatagrams - - UdpLite6RcvbufErrors - - 
UdpLite6SndbufErrors - - UdpLiteIgnoredMulti - - UdpLiteInCsumErrors - - UdpLiteInDatagrams - - UdpLiteInErrors - - UdpLiteNoPorts - - UdpLiteOutDatagrams - - UdpLiteRcvbufErrors - - UdpLiteSndbufErrors - - UdpNoPorts - - UdpOutDatagrams - - UdpRcvbufErrors - - UdpSndbufErrors +* nstat + * Icmp6InCsumErrors + * Icmp6InDestUnreachs + * Icmp6InEchoReplies + * Icmp6InEchos + * Icmp6InErrors + * Icmp6InGroupMembQueries + * Icmp6InGroupMembReductions + * Icmp6InGroupMembResponses + * Icmp6InMLDv2Reports + * Icmp6InMsgs + * Icmp6InNeighborAdvertisements + * Icmp6InNeighborSolicits + * Icmp6InParmProblems + * Icmp6InPktTooBigs + * Icmp6InRedirects + * Icmp6InRouterAdvertisements + * Icmp6InRouterSolicits + * Icmp6InTimeExcds + * Icmp6OutDestUnreachs + * Icmp6OutEchoReplies + * Icmp6OutEchos + * Icmp6OutErrors + * Icmp6OutGroupMembQueries + * Icmp6OutGroupMembReductions + * Icmp6OutGroupMembResponses + * Icmp6OutMLDv2Reports + * Icmp6OutMsgs + * Icmp6OutNeighborAdvertisements + * Icmp6OutNeighborSolicits + * Icmp6OutParmProblems + * Icmp6OutPktTooBigs + * Icmp6OutRedirects + * Icmp6OutRouterAdvertisements + * Icmp6OutRouterSolicits + * Icmp6OutTimeExcds + * Icmp6OutType133 + * Icmp6OutType135 + * Icmp6OutType143 + * IcmpInAddrMaskReps + * IcmpInAddrMasks + * IcmpInCsumErrors + * IcmpInDestUnreachs + * IcmpInEchoReps + * IcmpInEchos + * IcmpInErrors + * IcmpInMsgs + * IcmpInParmProbs + * IcmpInRedirects + * IcmpInSrcQuenchs + * IcmpInTimeExcds + * IcmpInTimestampReps + * IcmpInTimestamps + * IcmpMsgInType3 + * IcmpMsgOutType3 + * IcmpOutAddrMaskReps + * IcmpOutAddrMasks + * IcmpOutDestUnreachs + * IcmpOutEchoReps + * IcmpOutEchos + * IcmpOutErrors + * IcmpOutMsgs + * IcmpOutParmProbs + * IcmpOutRedirects + * IcmpOutSrcQuenchs + * IcmpOutTimeExcds + * IcmpOutTimestampReps + * IcmpOutTimestamps + * Ip6FragCreates + * Ip6FragFails + * Ip6FragOKs + * Ip6InAddrErrors + * Ip6InBcastOctets + * Ip6InCEPkts + * Ip6InDelivers + * Ip6InDiscards + * Ip6InECT0Pkts + * Ip6InECT1Pkts + * Ip6InHdrErrors + * Ip6InMcastOctets + * Ip6InMcastPkts + * Ip6InNoECTPkts + * Ip6InNoRoutes + * Ip6InOctets + * Ip6InReceives + * Ip6InTooBigErrors + * Ip6InTruncatedPkts + * Ip6InUnknownProtos + * Ip6OutBcastOctets + * Ip6OutDiscards + * Ip6OutForwDatagrams + * Ip6OutMcastOctets + * Ip6OutMcastPkts + * Ip6OutNoRoutes + * Ip6OutOctets + * Ip6OutRequests + * Ip6ReasmFails + * Ip6ReasmOKs + * Ip6ReasmReqds + * Ip6ReasmTimeout + * IpDefaultTTL + * IpExtInBcastOctets + * IpExtInBcastPkts + * IpExtInCEPkts + * IpExtInCsumErrors + * IpExtInECT0Pkts + * IpExtInECT1Pkts + * IpExtInMcastOctets + * IpExtInMcastPkts + * IpExtInNoECTPkts + * IpExtInNoRoutes + * IpExtInOctets + * IpExtInTruncatedPkts + * IpExtOutBcastOctets + * IpExtOutBcastPkts + * IpExtOutMcastOctets + * IpExtOutMcastPkts + * IpExtOutOctets + * IpForwDatagrams + * IpForwarding + * IpFragCreates + * IpFragFails + * IpFragOKs + * IpInAddrErrors + * IpInDelivers + * IpInDiscards + * IpInHdrErrors + * IpInReceives + * IpInUnknownProtos + * IpOutDiscards + * IpOutNoRoutes + * IpOutRequests + * IpReasmFails + * IpReasmOKs + * IpReasmReqds + * IpReasmTimeout + * TcpActiveOpens + * TcpAttemptFails + * TcpCurrEstab + * TcpEstabResets + * TcpExtArpFilter + * TcpExtBusyPollRxPackets + * TcpExtDelayedACKLocked + * TcpExtDelayedACKLost + * TcpExtDelayedACKs + * TcpExtEmbryonicRsts + * TcpExtIPReversePathFilter + * TcpExtListenDrops + * TcpExtListenOverflows + * TcpExtLockDroppedIcmps + * TcpExtOfoPruned + * TcpExtOutOfWindowIcmps + * TcpExtPAWSActive + * TcpExtPAWSEstab + * 
TcpExtPAWSPassive + * TcpExtPruneCalled + * TcpExtRcvPruned + * TcpExtSyncookiesFailed + * TcpExtSyncookiesRecv + * TcpExtSyncookiesSent + * TcpExtTCPACKSkippedChallenge + * TcpExtTCPACKSkippedFinWait2 + * TcpExtTCPACKSkippedPAWS + * TcpExtTCPACKSkippedSeq + * TcpExtTCPACKSkippedSynRecv + * TcpExtTCPACKSkippedTimeWait + * TcpExtTCPAbortFailed + * TcpExtTCPAbortOnClose + * TcpExtTCPAbortOnData + * TcpExtTCPAbortOnLinger + * TcpExtTCPAbortOnMemory + * TcpExtTCPAbortOnTimeout + * TcpExtTCPAutoCorking + * TcpExtTCPBacklogDrop + * TcpExtTCPChallengeACK + * TcpExtTCPDSACKIgnoredNoUndo + * TcpExtTCPDSACKIgnoredOld + * TcpExtTCPDSACKOfoRecv + * TcpExtTCPDSACKOfoSent + * TcpExtTCPDSACKOldSent + * TcpExtTCPDSACKRecv + * TcpExtTCPDSACKUndo + * TcpExtTCPDeferAcceptDrop + * TcpExtTCPDirectCopyFromBacklog + * TcpExtTCPDirectCopyFromPrequeue + * TcpExtTCPFACKReorder + * TcpExtTCPFastOpenActive + * TcpExtTCPFastOpenActiveFail + * TcpExtTCPFastOpenCookieReqd + * TcpExtTCPFastOpenListenOverflow + * TcpExtTCPFastOpenPassive + * TcpExtTCPFastOpenPassiveFail + * TcpExtTCPFastRetrans + * TcpExtTCPForwardRetrans + * TcpExtTCPFromZeroWindowAdv + * TcpExtTCPFullUndo + * TcpExtTCPHPAcks + * TcpExtTCPHPHits + * TcpExtTCPHPHitsToUser + * TcpExtTCPHystartDelayCwnd + * TcpExtTCPHystartDelayDetect + * TcpExtTCPHystartTrainCwnd + * TcpExtTCPHystartTrainDetect + * TcpExtTCPKeepAlive + * TcpExtTCPLossFailures + * TcpExtTCPLossProbeRecovery + * TcpExtTCPLossProbes + * TcpExtTCPLossUndo + * TcpExtTCPLostRetransmit + * TcpExtTCPMD5NotFound + * TcpExtTCPMD5Unexpected + * TcpExtTCPMTUPFail + * TcpExtTCPMTUPSuccess + * TcpExtTCPMemoryPressures + * TcpExtTCPMinTTLDrop + * TcpExtTCPOFODrop + * TcpExtTCPOFOMerge + * TcpExtTCPOFOQueue + * TcpExtTCPOrigDataSent + * TcpExtTCPPartialUndo + * TcpExtTCPPrequeueDropped + * TcpExtTCPPrequeued + * TcpExtTCPPureAcks + * TcpExtTCPRcvCoalesce + * TcpExtTCPRcvCollapsed + * TcpExtTCPRenoFailures + * TcpExtTCPRenoRecovery + * TcpExtTCPRenoRecoveryFail + * TcpExtTCPRenoReorder + * TcpExtTCPReqQFullDoCookies + * TcpExtTCPReqQFullDrop + * TcpExtTCPRetransFail + * TcpExtTCPSACKDiscard + * TcpExtTCPSACKReneging + * TcpExtTCPSACKReorder + * TcpExtTCPSYNChallenge + * TcpExtTCPSackFailures + * TcpExtTCPSackMerged + * TcpExtTCPSackRecovery + * TcpExtTCPSackRecoveryFail + * TcpExtTCPSackShiftFallback + * TcpExtTCPSackShifted + * TcpExtTCPSchedulerFailed + * TcpExtTCPSlowStartRetrans + * TcpExtTCPSpuriousRTOs + * TcpExtTCPSpuriousRtxHostQueues + * TcpExtTCPSynRetrans + * TcpExtTCPTSReorder + * TcpExtTCPTimeWaitOverflow + * TcpExtTCPTimeouts + * TcpExtTCPToZeroWindowAdv + * TcpExtTCPWantZeroWindowAdv + * TcpExtTCPWinProbe + * TcpExtTW + * TcpExtTWKilled + * TcpExtTWRecycled + * TcpInCsumErrors + * TcpInErrs + * TcpInSegs + * TcpMaxConn + * TcpOutRsts + * TcpOutSegs + * TcpPassiveOpens + * TcpRetransSegs + * TcpRtoAlgorithm + * TcpRtoMax + * TcpRtoMin + * Udp6IgnoredMulti + * Udp6InCsumErrors + * Udp6InDatagrams + * Udp6InErrors + * Udp6NoPorts + * Udp6OutDatagrams + * Udp6RcvbufErrors + * Udp6SndbufErrors + * UdpIgnoredMulti + * UdpInCsumErrors + * UdpInDatagrams + * UdpInErrors + * UdpLite6InCsumErrors + * UdpLite6InDatagrams + * UdpLite6InErrors + * UdpLite6NoPorts + * UdpLite6OutDatagrams + * UdpLite6RcvbufErrors + * UdpLite6SndbufErrors + * UdpLiteIgnoredMulti + * UdpLiteInCsumErrors + * UdpLiteInDatagrams + * UdpLiteInErrors + * UdpLiteNoPorts + * UdpLiteOutDatagrams + * UdpLiteRcvbufErrors + * UdpLiteSndbufErrors + * UdpNoPorts + * UdpOutDatagrams + * UdpRcvbufErrors + * UdpSndbufErrors ### Tags -- All 
measurements have the following tags - - host (host of the system) - - name (the type of the metric: snmp, snmp6 or netstat) + +* All measurements have the following tags + * host (host of the system) + * name (the type of the metric: snmp, snmp6 or netstat) diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index e6dcb420f30ce..867fa2ae60ec7 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -1,8 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package nstat import ( "bytes" - "io/ioutil" + _ "embed" "os" "strconv" @@ -10,6 +11,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( zeroByte = []byte("0") newLineByte = []byte("\n") @@ -18,18 +23,18 @@ var ( // default file paths const ( - NET_NETSTAT = "/net/netstat" - NET_SNMP = "/net/snmp" - NET_SNMP6 = "/net/snmp6" - NET_PROC = "/proc" + NetNetstat = "/net/netstat" + NetSnmp = "/net/snmp" + NetSnmp6 = "/net/snmp6" + NetProc = "/proc" ) // env variable names const ( - ENV_NETSTAT = "PROC_NET_NETSTAT" - ENV_SNMP = "PROC_NET_SNMP" - ENV_SNMP6 = "PROC_NET_SNMP6" - ENV_ROOT = "PROC_ROOT" + EnvNetstat = "PROC_NET_NETSTAT" + EnvSnmp = "PROC_NET_SNMP" + EnvSnmp6 = "PROC_NET_SNMP6" + EnvRoot = "PROC_ROOT" ) type Nstat struct { @@ -39,22 +44,7 @@ type Nstat struct { DumpZeros bool `toml:"dump_zeros"` } -var sampleConfig = ` - ## file paths for proc files. If empty default paths will be used: - ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 - ## These can also be overridden with env variables, see README. - proc_net_netstat = "/proc/net/netstat" - proc_net_snmp = "/proc/net/snmp" - proc_net_snmp6 = "/proc/net/snmp6" - ## dump metrics with 0 values too - dump_zeros = true -` - -func (ns *Nstat) Description() string { - return "Collect kernel snmp counters and network interface statistics" -} - -func (ns *Nstat) SampleConfig() string { +func (*Nstat) SampleConfig() string { return sampleConfig } @@ -62,93 +52,72 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { // load paths, get from env if config values are empty ns.loadPaths() - netstat, err := ioutil.ReadFile(ns.ProcNetNetstat) + netstat, err := os.ReadFile(ns.ProcNetNetstat) if err != nil { return err } // collect netstat data - err = ns.gatherNetstat(netstat, acc) - if err != nil { - return err - } + ns.gatherNetstat(netstat, acc) // collect SNMP data - snmp, err := ioutil.ReadFile(ns.ProcNetSNMP) - if err != nil { - return err - } - err = ns.gatherSNMP(snmp, acc) + snmp, err := os.ReadFile(ns.ProcNetSNMP) if err != nil { return err } + ns.gatherSNMP(snmp, acc) // collect SNMP6 data, if SNMP6 directory exists (IPv6 enabled) - snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) + snmp6, err := os.ReadFile(ns.ProcNetSNMP6) if err == nil { - err = ns.gatherSNMP6(snmp6, acc) - if err != nil { - return err - } + ns.gatherSNMP6(snmp6, acc) } else if !os.IsNotExist(err) { return err } return nil } -func (ns *Nstat) gatherNetstat(data []byte, acc telegraf.Accumulator) error { - metrics, err := loadUglyTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherNetstat(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadUglyTable(data) tags := map[string]string{ "name": "netstat", } acc.AddFields("nstat", metrics, tags) - return nil } -func (ns *Nstat) gatherSNMP(data []byte, acc telegraf.Accumulator) error { - metrics, err := 
loadUglyTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherSNMP(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadUglyTable(data) tags := map[string]string{ "name": "snmp", } acc.AddFields("nstat", metrics, tags) - return nil } -func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) error { - metrics, err := loadGoodTable(data, ns.DumpZeros) - if err != nil { - return err - } +func (ns *Nstat) gatherSNMP6(data []byte, acc telegraf.Accumulator) { + metrics := ns.loadGoodTable(data) tags := map[string]string{ "name": "snmp6", } acc.AddFields("nstat", metrics, tags) - return nil } // loadPaths reads the paths from the config first; // any path that is empty falls back to the env variables func (ns *Nstat) loadPaths() { if ns.ProcNetNetstat == "" { - ns.ProcNetNetstat = proc(ENV_NETSTAT, NET_NETSTAT) + ns.ProcNetNetstat = proc(EnvNetstat, NetNetstat) } if ns.ProcNetSNMP == "" { - ns.ProcNetSNMP = proc(ENV_SNMP, NET_SNMP) + ns.ProcNetSNMP = proc(EnvSnmp, NetSnmp) } if ns.ProcNetSNMP6 == "" { - ns.ProcNetSNMP6 = proc(ENV_SNMP6, NET_SNMP6) + ns.ProcNetSNMP6 = proc(EnvSnmp6, NetSnmp6) } } // loadGoodTable parses tables in which each header is directly // followed by its value (as in /proc/net/snmp6) -func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) { +func (ns *Nstat) loadGoodTable(table []byte) map[string]interface{} { entries := map[string]interface{}{} fields := bytes.Fields(table) var value int64 @@ -158,12 +127,12 @@ func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) for i := 0; i < len(fields); i = i + 2 { // counter is zero if bytes.Equal(fields[i+1], zeroByte) { - if !dumpZeros { - continue - } else { - entries[string(fields[i])] = int64(0) + if !ns.DumpZeros { continue } + + entries[string(fields[i])] = int64(0) + continue } // the counter is not zero, so parse it. value, err = strconv.ParseInt(string(fields[i+1]), 10, 64) @@ -171,12 +140,12 @@ func loadGoodTable(table []byte, dumpZeros bool) (map[string]interface{}, error) entries[string(fields[i])] = value } } - return entries, nil + return entries } // loadUglyTable parses tables in which a header line and its // line of values are split by a newline (as in /proc/net/snmp) -func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) { +func (ns *Nstat) loadUglyTable(table []byte) map[string]interface{} { entries := map[string]interface{}{} // split the lines by newline lines := bytes.Split(table, newLineByte) @@ -196,12 +165,12 @@ func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) for j := 1; j < len(headers); j++ { // counter is zero if bytes.Equal(metrics[j], zeroByte) { - if !dumpZeros { - continue - } else { - entries[string(append(prefix, headers[j]...))] = int64(0) + if !ns.DumpZeros { continue } + + entries[string(append(prefix, headers[j]...))] = int64(0) + continue } // the counter is not zero, so parse it.
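	// Kernel counters in these proc tables are plain base-10 integers that can exceed 32 bits, hence strconv.ParseInt with base 10 and bitSize 64.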
value, err = strconv.ParseInt(string(metrics[j]), 10, 64) @@ -210,7 +179,7 @@ func loadUglyTable(table []byte, dumpZeros bool) (map[string]interface{}, error) } } } - return entries, nil + return entries } // proc can be used to read file paths from env @@ -220,9 +189,9 @@ func proc(env, path string) string { return p } // try to read root path, or use default root path - root := os.Getenv(ENV_ROOT) + root := os.Getenv(EnvRoot) if root == "" { - root = NET_PROC + root = NetProc } return root + path } diff --git a/plugins/inputs/nstat/nstat_test.go b/plugins/inputs/nstat/nstat_test.go index 7f4c09ce4d4be..95b64777b08af 100644 --- a/plugins/inputs/nstat/nstat_test.go +++ b/plugins/inputs/nstat/nstat_test.go @@ -12,11 +12,8 @@ func TestLoadUglyTable(t *testing.T) { "IpExtInCEPkts": int64(2660494435), } - got, err := loadUglyTable([]byte(uglyStr), true) - if err != nil { - t.Fatal(err) - } - + n := Nstat{DumpZeros: true} + got := n.loadUglyTable([]byte(uglyStr)) if len(got) == 0 { t.Fatalf("want %+v, got %+v", parsed, got) } @@ -40,10 +37,8 @@ func TestLoadGoodTable(t *testing.T) { "Ip6InDelivers": int64(62), "Ip6InMcastOctets": int64(1242966), } - got, err := loadGoodTable([]byte(goodStr), true) - if err != nil { - t.Fatal(err) - } + n := Nstat{DumpZeros: true} + got := n.loadGoodTable([]byte(goodStr)) if len(got) == 0 { t.Fatalf("want %+v, got %+v", parsed, got) } diff --git a/plugins/inputs/nstat/sample.conf b/plugins/inputs/nstat/sample.conf new file mode 100644 index 0000000000000..21f8d19e7f8fe --- /dev/null +++ b/plugins/inputs/nstat/sample.conf @@ -0,0 +1,10 @@ +# Collect kernel snmp counters and network interface statistics +[[inputs.nstat]] + ## file paths for proc files. If empty default paths will be used: + ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 + ## These can also be overridden with env variables, see README. + proc_net_netstat = "/proc/net/netstat" + proc_net_snmp = "/proc/net/snmp" + proc_net_snmp6 = "/proc/net/snmp6" + ## dump metrics with 0 values too + dump_zeros = true diff --git a/plugins/inputs/ntpq/README.md b/plugins/inputs/ntpq/README.md index e691200ddd682..1463ad21ac409 100644 --- a/plugins/inputs/ntpq/README.md +++ b/plugins/inputs/ntpq/README.md @@ -24,36 +24,36 @@ the remote peer or server (RMS, milliseconds); - jitter – Mean deviation (jitter) in the time reported for that remote peer or server (RMS of difference of multiple time samples, milliseconds); -### Configuration: +## Configuration -```toml -# Get standard NTP query metrics, requires ntpq executable +```toml @sample.conf +# Get standard NTP query metrics, requires ntpq executable. [[inputs.ntpq]] - ## If false, add -n for ntpq command. Can reduce metric gather times. + ## If false, set the -n ntpq flag. Can reduce metric gather time. 
dns_lookup = true ``` -### Measurements & Fields: +## Metrics - ntpq - - delay (float, milliseconds) - - jitter (float, milliseconds) - - offset (float, milliseconds) - - poll (int, seconds) - - reach (int) - - when (int, seconds) + - delay (float, milliseconds) + - jitter (float, milliseconds) + - offset (float, milliseconds) + - poll (int, seconds) + - reach (int) + - when (int, seconds) -### Tags: +### Tags - All measurements have the following tags: - - refid - - remote - - type - - stratum + - refid + - remote + - type + - stratum -### Example Output: +## Example Output -``` +```shell $ telegraf --config ~/ws/telegraf.conf --input-filter ntpq --test * Plugin: ntpq, Collection 1 > ntpq,refid=.GPSs.,remote=*time.apple.com,stratum=1,type=u delay=91.797,jitter=3.735,offset=12.841,poll=64i,reach=377i,when=35i 1457960478909556134 diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index 80b5dcd0f16be..fb09b08b13f1a 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -1,8 +1,10 @@ +//go:generate ../../../tools/readme_config_includer/generator package ntpq import ( "bufio" "bytes" + _ "embed" "fmt" "os/exec" "regexp" @@ -13,8 +15,12 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Mapping of ntpq header names to tag keys -var tagHeaders map[string]string = map[string]string{ +var tagHeaders = map[string]string{ "remote": "remote", "refid": "refid", "st": "stratum", @@ -30,15 +36,8 @@ type NTPQ struct { DNSLookup bool `toml:"dns_lookup"` } -func (n *NTPQ) Description() string { - return "Get standard NTP query metrics, requires ntpq executable." -} - -func (n *NTPQ) SampleConfig() string { - return ` - ## If false, set the -n ntpq flag. Can reduce metric gather time. - dns_lookup = true -` +func (*NTPQ) SampleConfig() string { + return sampleConfig } func (n *NTPQ) Gather(acc telegraf.Accumulator) error { @@ -50,7 +49,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // Due to problems with a parsing, we have to use regexp expression in order // to remove string that starts from '(' and ends with space // see: https://github.com/influxdata/telegraf/issues/2386 - reg, err := regexp.Compile("\\s+\\([\\S]*") + reg, err := regexp.Compile(`\s+\([\S]*`) if err != nil { return err } @@ -122,13 +121,13 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { continue } - if key == "when" { + if key == "when" || key == "poll" { when := fields[index] switch { case strings.HasSuffix(when, "h"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "h")) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing %s as int: %s", key, fields[index])) continue } // seconds in an hour @@ -137,7 +136,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "d"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "d")) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing %s as int: %s", key, fields[index])) continue } // seconds in a day @@ -146,7 +145,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { case strings.HasSuffix(when, "m"): m, err := strconv.Atoi(strings.TrimSuffix(fields[index], "m")) if err != nil { - acc.AddError(fmt.Errorf("E! 
Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing %s as int: %s", key, fields[index])) continue } // seconds in a minute @@ -157,7 +156,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.Atoi(fields[index]) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing int: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing %s as int: %s", key, fields[index])) continue } mFields[key] = int64(m) @@ -174,7 +173,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { m, err := strconv.ParseFloat(fields[index], 64) if err != nil { - acc.AddError(fmt.Errorf("E! Error ntpq: parsing float: %s", fields[index])) + acc.AddError(fmt.Errorf("error ntpq: parsing float: %s", fields[index])) continue } mFields[key] = m diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index b0db77e45784f..a70b43d4e2aaa 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestSingleNTPQ(t *testing.T) { @@ -20,7 +20,7 @@ func TestSingleNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -49,7 +49,7 @@ func TestBadIntNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -77,7 +77,7 @@ func TestBadFloatNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(2), @@ -105,7 +105,7 @@ func TestDaysNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(172800), @@ -134,7 +134,7 @@ func TestHoursNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(7200), @@ -163,7 +163,7 @@ func TestMinutesNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(120), @@ -192,7 +192,7 @@ func TestBadWhenNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "poll": int64(256), @@ -222,7 +222,7 @@ func TestParserNTPQ(t *testing.T) { n := newNTPQ() n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "poll": int64(64), @@ -285,7 +285,7 @@ func TestMultiNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields :=
map[string]interface{}{ "delay": float64(54.033), @@ -329,7 +329,7 @@ func TestBadHeaderNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -357,7 +357,7 @@ func TestMissingDelayColumnNTPQ(t *testing.T) { n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) + require.NoError(t, acc.GatherError(n.Gather)) fields := map[string]interface{}{ "when": int64(101), @@ -375,16 +375,46 @@ func TestMissingDelayColumnNTPQ(t *testing.T) { acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) } +func TestLongPoll(t *testing.T) { + tt := tester{ + ret: []byte(longPollTime), + err: nil, + } + n := newNTPQ() + n.runQ = tt.runqTest + + acc := testutil.Accumulator{} + require.NoError(t, acc.GatherError(n.Gather)) + + fields := map[string]interface{}{ + "when": int64(617), + "poll": int64(4080), + "reach": int64(377), + "offset": float64(2.849), + "jitter": float64(1.192), + "delay": float64(9.145), + } + tags := map[string]string{ + "remote": "uschi5-ntp-002.", + "state_prefix": "-", + "refid": "10.177.80.46", + "type": "u", + "stratum": "3", + } + + acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) +} + func TestFailedNTPQ(t *testing.T) { tt := tester{ ret: []byte(singleNTPQ), - err: fmt.Errorf("Test failure"), + err: fmt.Errorf("test failure"), } n := newNTPQ() n.runQ = tt.runqTest acc := testutil.Accumulator{} - assert.Error(t, acc.GatherError(n.Gather)) + require.Error(t, acc.GatherError(n.Gather)) } // It is possible for the output of ntqp to be missing the refid column. This @@ -520,3 +550,8 @@ var noRefID = ` remote refid st t when poll reach delay o 91.189.94.4 2 u 673 1024 377 143.047 274.726 449445. 131.188.3.221 10.177.80.37 2 u 783 1024 377 111.820 261.921 449528. ` + +var longPollTime = ` remote refid st t when poll reach delay offset jitter +============================================================================== +-uschi5-ntp-002. 10.177.80.46 3 u 617 68m 377 9.145 +2.849 1.192 +` diff --git a/plugins/inputs/ntpq/sample.conf b/plugins/inputs/ntpq/sample.conf new file mode 100644 index 0000000000000..b94004c256073 --- /dev/null +++ b/plugins/inputs/ntpq/sample.conf @@ -0,0 +1,4 @@ +# Get standard NTP query metrics, requires ntpq executable. +[[inputs.ntpq]] + ## If false, set the -n ntpq flag. Can reduce metric gather time. + dns_lookup = true diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index bbe90e005c6d6..331e4685d7a7c 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -1,27 +1,38 @@ # Nvidia System Management Interface (SMI) Input Plugin -This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) binary to pull GPU stats including memory and GPU usage, temp and other. +This plugin uses a query on the +[`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) +binary to pull GPU stats including memory and GPU usage, temp and other. 
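The intro above describes querying the `nvidia-smi` binary; here is a minimal, self-contained sketch of that pattern (assuming `nvidia-smi` at its default path, and mirroring only a small fragment of the real `nvidia_smi_log` XML schema):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"os/exec"
)

// smiLog mirrors a fragment of the nvidia_smi_log document.
type smiLog struct {
	DriverVersion string `xml:"driver_version"`
	CUDAVersion   string `xml:"cuda_version"`
	GPUs          []struct {
		ProductName string `xml:"product_name"`
		FanSpeed    string `xml:"fan_speed"` // values carry units, e.g. "100 %"
	} `xml:"gpu"`
}

func main() {
	// Same invocation the plugin uses: full metrics query, XML output.
	out, err := exec.Command("/usr/bin/nvidia-smi", "-q", "-x").Output()
	if err != nil {
		log.Fatal(err)
	}
	var data smiLog
	if err := xml.Unmarshal(out, &data); err != nil {
		log.Fatal(err)
	}
	fmt.Println("driver:", data.DriverVersion, "cuda:", data.CUDAVersion)
	for _, gpu := range data.GPUs {
		fmt.Println(gpu.ProductName, gpu.FanSpeed)
	}
}
```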
-### Configuration +## Configuration -```toml +```toml @sample.conf # Pulls statistics from nvidia GPUs attached to the host [[inputs.nvidia_smi]] - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + ## Optional: path to nvidia-smi binary, defaults to "/usr/bin/nvidia-smi" + ## The binary is first looked up at the explicitly specified value (or default value); + ## if it is not found there, PATH is searched (exec.LookPath); if it is still not found, an error is returned # bin_path = "/usr/bin/nvidia-smi" ## Optional: timeout for GPU polling # timeout = "5s" ``` -#### Windows +### Linux -On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe` -On Windows 10, you may also find this located here `C:\Windows\System32\nvidia-smi.exe` +On Linux, `nvidia-smi` is generally located at `/usr/bin/nvidia-smi` -You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe` +### Windows + +On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA +Corporation\NVSMI\nvidia-smi.exe`. On Windows 10, you may also find this located +here: `C:\Windows\System32\nvidia-smi.exe` + +You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program +Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe` + +## Metrics -### Metrics - measurement: `nvidia_smi` - tags - `name` (type of GPU e.g. `GeForce GTX 1070 Ti`) @@ -52,38 +63,51 @@ You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program - `clocks_current_sm` (integer, MHz) - `clocks_current_memory` (integer, MHz) - `clocks_current_video` (integer, MHz) + - `driver_version` (string) + - `cuda_version` (string) -### Sample Query +## Sample Query -The below query could be used to alert on the average temperature of the your GPUs over the last minute +The query below could be used to alert on the average temperature of your +GPUs over the last minute ```sql SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP BY time(1m), "index", "name", "host" ``` -### Troubleshooting +## Troubleshooting Check the full output by running `nvidia-smi` binary manually. Linux: + ```sh sudo -u telegraf -- /usr/bin/nvidia-smi -q -x ``` Windows: -``` + +```sh "C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" -q -x ``` Please include the output of this command if opening a GitHub issue. -### Example Output -``` +## Example Output + +```text nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=1,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=50i,utilization_gpu=100i,utilization_memory=85i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-d4cfc28d-0481-8d07-b81a-ddfc63d74adf fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=58i,utilization_gpu=100i,utilization_memory=86i 1523991122000000000 ``` -### Limitations -Note that there seems to be an issue with getting current memory clock values when the memory is overclocked.
-This may or may not apply to everyone but it's confirmed to be an issue on an EVGA 2080 Ti. +## Limitations + +Note that there seems to be an issue with getting current memory clock values +when the memory is overclocked. This may or may not apply to everyone but it's +confirmed to be an issue on an EVGA 2080 Ti. + +**NOTE:** For use with docker either generate your own custom docker image based +on nvidia/cuda which also installs a telegraf package or use [volume mount +binding](https://docs.docker.com/storage/bind-mounts/) to inject the required +binary into the docker container. diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 688c3d4bb7680..d44d1ad82c39e 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package nvidia_smi import ( + _ "embed" "encoding/xml" "fmt" "os" @@ -10,40 +12,42 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const measurement = "nvidia_smi" // NvidiaSMI holds the methods for this plugin type NvidiaSMI struct { BinPath string - Timeout internal.Duration + Timeout config.Duration } -// Description returns the description of the NvidiaSMI plugin -func (smi *NvidiaSMI) Description() string { - return "Pulls statistics from nvidia GPUs attached to the host" +func (*NvidiaSMI) SampleConfig() string { + return sampleConfig } -// SampleConfig returns the sample configuration for the NvidiaSMI plugin -func (smi *NvidiaSMI) SampleConfig() string { - return ` - ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath - # bin_path = "/usr/bin/nvidia-smi" +func (smi *NvidiaSMI) Init() error { + if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { + binPath, err := exec.LookPath("nvidia-smi") + // fail-fast + if err != nil { + return fmt.Errorf("nvidia-smi not found in %q and not in PATH; please make sure nvidia-smi is installed and/or is in PATH", smi.BinPath) + } + smi.BinPath = binPath + } - ## Optional: timeout for GPU polling - # timeout = "5s" -` + return nil } // Gather implements the telegraf interface func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { - if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { - return fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath) - } - data, err := smi.pollSMI() if err != nil { return err @@ -61,14 +65,14 @@ func init() { inputs.Add("nvidia_smi", func() telegraf.Input { return &NvidiaSMI{ BinPath: "/usr/bin/nvidia-smi", - Timeout: internal.Duration{Duration: 5 * time.Second}, + Timeout: config.Duration(5 * time.Second), } }) } func (smi *NvidiaSMI) pollSMI() ([]byte, error) { // Construct and execute metrics query - ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), smi.Timeout.Duration) + ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), time.Duration(smi.Timeout)) if err != nil { return nil, err } @@ -109,6 +113,8 @@ func (s *SMI) genTagsFields() []metric { setTagIfUsed(tags, "uuid", gpu.UUID) setTagIfUsed(tags, "compute_mode", gpu.ComputeMode) + setIfUsed("str", fields, "driver_version", s.DriverVersion) + setIfUsed("str", fields, "cuda_version", 
s.CUDAVersion) setIfUsed("int", fields, "fan_speed", gpu.FanSpeed) setIfUsed("int", fields, "memory_total", gpu.Memory.Total) setIfUsed("int", fields, "memory_used", gpu.Memory.Used) @@ -169,12 +175,18 @@ func setIfUsed(t string, m map[string]interface{}, k, v string) { m[k] = i } } + case "str": + if val != "" { + m[k] = val + } } } // SMI defines the structure for the output of _nvidia-smi -q -x_. type SMI struct { - GPU GPU `xml:"gpu"` + GPU GPU `xml:"gpu"` + DriverVersion string `xml:"driver_version"` + CUDAVersion string `xml:"cuda_version"` } // GPU defines the structure of the GPU portion of the smi output. diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index 3c191e609ade4..3c0b14d6e4559 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,7 +1,7 @@ package nvidia_smi import ( - "io/ioutil" + "os" "path/filepath" "testing" "time" @@ -69,6 +69,8 @@ func TestGatherValidXML(t *testing.T) { "clocks_current_memory": 405, "clocks_current_sm": 300, "clocks_current_video": 540, + "cuda_version": "10.1", + "driver_version": "418.43", "encoder_stats_average_fps": 0, "encoder_stats_average_latency": 0, "encoder_stats_session_count": 0, @@ -109,6 +111,8 @@ "clocks_current_memory": 405, "clocks_current_sm": 139, "clocks_current_video": 544, + "cuda_version": "10.1", + "driver_version": "418.43", "encoder_stats_average_fps": 0, "encoder_stats_average_latency": 0, "encoder_stats_session_count": 0, @@ -135,7 +139,7 @@ func TestGatherValidXML(t *testing.T) { t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator - octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) require.NoError(t, err) err = gatherNvidiaSMI(octets, &acc) diff --git a/plugins/inputs/nvidia_smi/sample.conf b/plugins/inputs/nvidia_smi/sample.conf new file mode 100644 index 0000000000000..47431718716dc --- /dev/null +++ b/plugins/inputs/nvidia_smi/sample.conf @@ -0,0 +1,9 @@ +# Pulls statistics from nvidia GPUs attached to the host +[[inputs.nvidia_smi]] + ## Optional: path to nvidia-smi binary, defaults to "/usr/bin/nvidia-smi" + ## The binary is first looked up at the explicitly specified value (or default value); + ## if it is not found there, PATH is searched (exec.LookPath); if it is still not found, an error is returned + # bin_path = "/usr/bin/nvidia-smi" + + ## Optional: timeout for GPU polling + # timeout = "5s" diff --git a/plugins/inputs/opcua/README.md b/plugins/inputs/opcua/README.md index 173d98b6fac98..3cd8e270cc77b 100644 --- a/plugins/inputs/opcua/README.md +++ b/plugins/inputs/opcua/README.md @@ -5,12 +5,13 @@ The `opcua` plugin retrieves data from OPC UA client devices. Telegraf minimum version: Telegraf 1.16 Plugin minimum tested version: 1.16 -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Retrieve data from OPCUA devices [[inputs.opcua]] - ## Device name - # name = "localhost" + ## Metric name + # name = "opcua" # ## OPC UA Endpoint URL # endpoint = "opc.tcp://localhost:4840" @@ -46,35 +47,116 @@ Plugin minimum tested version: 1.16 ## Password. Required for auth_method = "UserName" # password = "" # + ## Option to select the metric timestamp to use.
Valid options are: + ## "gather" -- uses the time of receiving the data in telegraf + ## "server" -- uses the timestamp provided by the server + ## "source" -- uses the timestamp provided by the source + # timestamp = "gather" + # ## Node ID configuration - ## name - the variable name - ## namespace - integer value 0 thru 3 - ## identifier_type - s=string, i=numeric, g=guid, b=opaque - ## identifier - tag as shown in opcua browser - ## data_type - boolean, byte, short, int, uint, uint16, int16, - ## uint32, int32, float, double, string, datetime, number + ## name - field name to use in the output + ## namespace - OPC UA namespace of the node (integer value 0 through 3) + ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) + ## identifier - OPC UA ID (tag as shown in opcua browser) + ## tags - extra tags to be added to the output metric (optional) ## Example: - ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} - nodes = [ - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - ] + ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2"]]} + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] + # + ## Node Group + ## Sets defaults for OPC UA namespace and ID type so they aren't required in + ## every node. A group can also have a metric name that overrides the main + ## plugin metric name. + ## + ## Multiple node groups are allowed + #[[inputs.opcua.group]] + ## Group Metric name. Overrides the top level name. If unset, the + ## top level name is used. + # name = + # + ## Group default namespace. If a node in the group doesn't set its + ## namespace, this is used. + # namespace = + # + ## Group default identifier type. If a node in the group doesn't set its + ## identifier type, this is used. + # identifier_type = + # + ## Node ID Configuration. Array of nodes with the same settings as above. + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] + + ## Enable workarounds required by some devices to work correctly + # [inputs.opcua.workarounds] + ## Set additional valid status codes; StatusOK (0x0) is always considered valid + # additional_valid_status_codes = ["0xC0"] ``` -### Example Node Configuration -An OPC UA node ID may resemble: "n=3,s=Temperature". In this example: +## Node Configuration + +An OPC UA node ID may resemble: "ns=3;s=Temperature". In this example: + - ns=3 indicates that the `namespace` is 3 - s=Temperature indicates that the `identifier_type` is a string and the `identifier` value is 'Temperature' -- This example temperature node has a value of 79.0, which makes the `data_type` a 'float'. 
+- This example temperature node has a value of 79.0 To gather data from this node enter the following line into the 'nodes' property above: + +```text +{name="temp", namespace="3", identifier_type="s", identifier="Temperature"}, ``` -{name="LabelName", namespace="3", identifier_type="s", identifier="Temperature", data_type="float", description="Description of node"}, + +This node configuration produces a metric like this: + +```text +opcua,id=ns\=3;s\=Temperature temp=79.0,Quality="OK (0x0)" 1597820490000000000 + ``` +## Group Configuration -### Example Output +Groups can set default values for the namespace, identifier type, and +tags settings. The default values apply to all the nodes in the +group. If a default is set, a node may omit the setting altogether. +This simplifies node configuration, especially when many nodes share +the same namespace or identifier type. + +The output metric will include tags set in the group and the node. If +a tag with the same name is set in both places, the tag value from the +node is used. + +This example group configuration has two groups with two nodes each: + +```toml + [[inputs.opcua.group]] + name="group1_metric_name" + namespace="3" + identifier_type="i" + tags=[["group1_tag", "val1"]] + nodes = [ + {name="name", identifier="1001", tags=[["node1_tag", "val2"]]}, + {name="name", identifier="1002", tags=[["node1_tag", "val3"]]}, + ] + [[inputs.opcua.group]] + name="group2_metric_name" + namespace="3" + identifier_type="i" + tags=[["group2_tag", "val3"]] + nodes = [ + {name="saw", identifier="1003", tags=[["node2_tag", "val4"]]}, + {name="sin", identifier="1004"}, + ] ``` -opcua,host=3c70aee0901e,name=Random,type=double Random=0.018158170305814902 1597820490000000000 +## Example Output + +```text +group1_metric_name,group1_tag=val1,id=ns\=3;i\=1001,node1_tag=val2 name=0,Quality="OK (0x0)" 1606893246000000000 +group1_metric_name,group1_tag=val1,id=ns\=3;i\=1002,node1_tag=val3 name=-1.389117,Quality="OK (0x0)" 1606893246000000000 +group2_metric_name,group2_tag=val3,id=ns\=3;i\=1003,node2_tag=val4 Quality="OK (0x0)",saw=-1.6 1606893246000000000 +group2_metric_name,group2_tag=val3,id=ns\=3;i\=1004 sin=1.902113,Quality="OK (0x0)" 1606893246000000000 ``` diff --git a/plugins/inputs/opcua/opcua.go b/plugins/inputs/opcua/opcua.go new file mode 100644 index 0000000000000..028de1129311b --- /dev/null +++ b/plugins/inputs/opcua/opcua.go @@ -0,0 +1,551 @@ +//go:generate ../../../tools/readme_config_includer/generator +package opcua + +import ( + "context" + _ "embed" + "fmt" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/gopcua/opcua" + "github.com/gopcua/opcua/ua" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/selfstat" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +type OpcuaWorkarounds struct { + AdditionalValidStatusCodes []string `toml:"additional_valid_status_codes"` +} + +// OpcUA type +type OpcUA struct { + MetricName string `toml:"name"` + Endpoint string `toml:"endpoint"` + SecurityPolicy string `toml:"security_policy"` + SecurityMode string `toml:"security_mode"` + Certificate string `toml:"certificate"` + PrivateKey string `toml:"private_key"` + Username string `toml:"username"` + Password string `toml:"password"` + Timestamp string `toml:"timestamp"` + AuthMethod string `toml:"auth_method"` + ConnectTimeout config.Duration `toml:"connect_timeout"` + RequestTimeout config.Duration `toml:"request_timeout"` + RootNodes []NodeSettings `toml:"nodes"` + Groups []GroupSettings `toml:"group"` + Workarounds OpcuaWorkarounds `toml:"workarounds"` + Log telegraf.Logger `toml:"-"` + + nodes []Node + nodeData []OPCData + nodeIDs []*ua.NodeID + nodeIDerror []error + state ConnectionState + + // status + ReadSuccess selfstat.Stat `toml:"-"` + ReadError selfstat.Stat `toml:"-"` + + // internal values + client *opcua.Client + req *ua.ReadRequest + opts []opcua.Option + codes []ua.StatusCode +} + +type NodeSettings struct { + FieldName string `toml:"name"` + Namespace string `toml:"namespace"` + IdentifierType string `toml:"identifier_type"` + Identifier string `toml:"identifier"` + DataType string `toml:"data_type"` // Kept for backward compatibility but was never used. + Description string `toml:"description"` // Kept for backward compatibility but was never used. + TagsSlice [][]string `toml:"tags"` +} + +type Node struct { + tag NodeSettings + idStr string + metricName string + metricTags map[string]string +} + +type GroupSettings struct { + MetricName string `toml:"name"` // Overrides plugin's setting + Namespace string `toml:"namespace"` // Can be overridden by node setting + IdentifierType string `toml:"identifier_type"` // Can be overridden by node setting + Nodes []NodeSettings `toml:"nodes"` + TagsSlice [][]string `toml:"tags"` +} + +// OPCData type +type OPCData struct { + TagName string + Value interface{} + Quality ua.StatusCode + ServerTime time.Time + SourceTime time.Time + DataType ua.TypeID +} + +// ConnectionState used for constants +type ConnectionState int + +const ( + //Disconnected constant state 0 + Disconnected ConnectionState = iota + //Connecting constant state 1 + Connecting + //Connected constant state 2 + Connected +) + +func (*OpcUA) SampleConfig() string { + return sampleConfig +} + +// Init will initialize all tags +func (o *OpcUA) Init() error { + o.state = Disconnected + + err := choice.Check(o.Timestamp, []string{"", "gather", "server", "source"}) + if err != nil { + return err + } + + err = o.validateEndpoint() + if err != nil { + return err + } + + err = o.InitNodes() + if err != nil { + return err + } + + err = o.setupOptions() + if err != nil { + return err + } + + err = o.setupWorkarounds() + if err != nil { + return err + } + + tags := map[string]string{ + "endpoint": o.Endpoint, + } + o.ReadError = selfstat.Register("opcua", "read_error", tags) + o.ReadSuccess = selfstat.Register("opcua", "read_success", tags) + + return nil +} + +func (o *OpcUA) validateEndpoint() error { + if o.MetricName == "" { + return fmt.Errorf("device name is empty") + } + + if o.Endpoint == "" { + return fmt.Errorf("endpoint url is empty") + } + + _, err := url.Parse(o.Endpoint) + if err != nil { + return fmt.Errorf("endpoint url is invalid") + } + + //search security policy type + switch 
o.SecurityPolicy { + case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": + // Valid security policy type - do nothing. + default: + return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.MetricName) + } + //search security mode type + switch o.SecurityMode { + case "None", "Sign", "SignAndEncrypt", "auto": + // Valid security mode type - do nothing. + default: + return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.MetricName) + } + return nil +} + +func tagsSliceToMap(tags [][]string) (map[string]string, error) { + m := make(map[string]string) + for i, tag := range tags { + if len(tag) != 2 { + return nil, fmt.Errorf("tag %d needs 2 values, has %d: %v", i+1, len(tag), tag) + } + if tag[0] == "" { + return nil, fmt.Errorf("tag %d has empty name", i+1) + } + if tag[1] == "" { + return nil, fmt.Errorf("tag %d has empty value", i+1) + } + if _, ok := m[tag[0]]; ok { + return nil, fmt.Errorf("tag %d has duplicate key: %v", i+1, tag[0]) + } + m[tag[0]] = tag[1] + } + return m, nil +} + +//InitNodes Method on OpcUA +func (o *OpcUA) InitNodes() error { + for _, node := range o.RootNodes { + o.nodes = append(o.nodes, Node{ + metricName: o.MetricName, + tag: node, + }) + } + + for _, group := range o.Groups { + if group.MetricName == "" { + group.MetricName = o.MetricName + } + groupTags, err := tagsSliceToMap(group.TagsSlice) + if err != nil { + return err + } + for _, node := range group.Nodes { + if node.Namespace == "" { + node.Namespace = group.Namespace + } + if node.IdentifierType == "" { + node.IdentifierType = group.IdentifierType + } + nodeTags, err := tagsSliceToMap(node.TagsSlice) + if err != nil { + return err + } + mergedTags := make(map[string]string) + for k, v := range groupTags { + mergedTags[k] = v + } + for k, v := range nodeTags { + mergedTags[k] = v + } + o.nodes = append(o.nodes, Node{ + metricName: group.MetricName, + tag: node, + metricTags: mergedTags, + }) + } + } + + err := o.validateOPCTags() + if err != nil { + return err + } + + return nil +} + +type metricParts struct { + metricName string + fieldName string + tags string // sorted by tag name and in format tag1=value1, tag2=value2 +} + +func newMP(n *Node) metricParts { + var keys []string + for key := range n.metricTags { + keys = append(keys, key) + } + sort.Strings(keys) + var sb strings.Builder + for i, key := range keys { + if i != 0 { + // Writes to a string-builder will always succeed + //nolint:errcheck,revive + sb.WriteString(", ") + } + // Writes to a string-builder will always succeed + //nolint:errcheck,revive + sb.WriteString(key) + // Writes to a string-builder will always succeed + //nolint:errcheck,revive + sb.WriteString("=") + // Writes to a string-builder will always succeed + //nolint:errcheck,revive + sb.WriteString(n.metricTags[key]) + } + x := metricParts{ + metricName: n.metricName, + fieldName: n.tag.FieldName, + tags: sb.String(), + } + return x +} + +func (o *OpcUA) validateOPCTags() error { + nameEncountered := map[metricParts]struct{}{} + for _, node := range o.nodes { + mp := newMP(&node) + //check empty name + if node.tag.FieldName == "" { + return fmt.Errorf("empty name in '%s'", node.tag.FieldName) + } + //search name duplicate + if _, ok := nameEncountered[mp]; ok { + return fmt.Errorf("name '%s' is duplicated (metric name '%s', tags '%s')", + mp.fieldName, mp.metricName, mp.tags) + } + + //add it to the set + nameEncountered[mp] = struct{}{} + + //search identifier type + switch node.tag.IdentifierType { + case "s", "i", "g", 
"b": + // Valid identifier type - do nothing. + default: + return fmt.Errorf("invalid identifier type '%s' in '%s'", node.tag.IdentifierType, node.tag.FieldName) + } + + node.idStr = BuildNodeID(node.tag) + + //parse NodeIds and NodeIds errors + nid, niderr := ua.ParseNodeID(node.idStr) + // build NodeIds and Errors + o.nodeIDs = append(o.nodeIDs, nid) + o.nodeIDerror = append(o.nodeIDerror, niderr) + // Grow NodeData for later input + o.nodeData = append(o.nodeData, OPCData{}) + } + return nil +} + +// BuildNodeID build node ID from OPC tag +func BuildNodeID(tag NodeSettings) string { + return "ns=" + tag.Namespace + ";" + tag.IdentifierType + "=" + tag.Identifier +} + +// Connect to a OPCUA device +func Connect(o *OpcUA) error { + u, err := url.Parse(o.Endpoint) + if err != nil { + return err + } + + switch u.Scheme { + case "opc.tcp": + o.state = Connecting + + if o.client != nil { + if err := o.client.Close(); err != nil { + // Only log the error but to not bail-out here as this prevents + // reconnections for multiple parties (see e.g. #9523). + o.Log.Errorf("Closing connection failed: %v", err) + } + } + + o.client = opcua.NewClient(o.Endpoint, o.opts...) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) + defer cancel() + if err := o.client.Connect(ctx); err != nil { + return fmt.Errorf("error in Client Connection: %s", err) + } + + regResp, err := o.client.RegisterNodes(&ua.RegisterNodesRequest{ + NodesToRegister: o.nodeIDs, + }) + if err != nil { + return fmt.Errorf("registerNodes failed: %v", err) + } + + o.req = &ua.ReadRequest{ + MaxAge: 2000, + NodesToRead: readvalues(regResp.RegisteredNodeIDs), + TimestampsToReturn: ua.TimestampsToReturnBoth, + } + + err = o.getData() + if err != nil { + return fmt.Errorf("get Data Failed: %v", err) + } + + default: + return fmt.Errorf("unsupported scheme %q in endpoint. 
Expected opc.tcp", u.Scheme) + } + return nil +} + +func (o *OpcUA) setupOptions() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) + defer cancel() + // Get a list of the endpoints for our target server + endpoints, err := opcua.GetEndpoints(ctx, o.Endpoint) + if err != nil { + return err + } + + if o.Certificate == "" && o.PrivateKey == "" { + if o.SecurityPolicy != "None" || o.SecurityMode != "None" { + o.Certificate, o.PrivateKey, err = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, 365*24*time.Hour) + if err != nil { + return err + } + } + } + + o.opts, err = o.generateClientOpts(endpoints) + + return err +} + +func (o *OpcUA) setupWorkarounds() error { + if len(o.Workarounds.AdditionalValidStatusCodes) != 0 { + for _, c := range o.Workarounds.AdditionalValidStatusCodes { + val, err := strconv.ParseInt(c, 0, 32) // setting 32 bits to allow for safe conversion + if err != nil { + return err + } + o.codes = append(o.codes, ua.StatusCode(uint32(val))) + } + } + return nil +} + +func (o *OpcUA) checkStatusCode(code ua.StatusCode) bool { + for _, val := range o.codes { + if val == code { + return true + } + } + return false +} + +func (o *OpcUA) getData() error { + resp, err := o.client.Read(o.req) + if err != nil { + o.ReadError.Incr(1) + return fmt.Errorf("RegisterNodes Read failed: %v", err) + } + o.ReadSuccess.Incr(1) + for i, d := range resp.Results { + o.nodeData[i].Quality = d.Status + if !o.checkStatusCode(d.Status) { + mp := newMP(&o.nodes[i]) + o.Log.Errorf("status not OK for node '%s'(metric name '%s', tags '%s')", + mp.fieldName, mp.metricName, mp.tags) + continue + } + o.nodeData[i].TagName = o.nodes[i].tag.FieldName + if d.Value != nil { + o.nodeData[i].Value = d.Value.Value() + o.nodeData[i].DataType = d.Value.Type() + } + o.nodeData[i].Quality = d.Status + o.nodeData[i].ServerTime = d.ServerTimestamp + o.nodeData[i].SourceTime = d.SourceTimestamp + } + return nil +} + +func readvalues(ids []*ua.NodeID) []*ua.ReadValueID { + rvids := make([]*ua.ReadValueID, len(ids)) + for i, v := range ids { + rvids[i] = &ua.ReadValueID{NodeID: v} + } + return rvids +} + +func disconnect(o *OpcUA) error { + u, err := url.Parse(o.Endpoint) + if err != nil { + return err + } + + switch u.Scheme { + case "opc.tcp": + o.state = Disconnected + o.client.Close() + o.client = nil + return nil + default: + return fmt.Errorf("invalid controller") + } +} + +// Gather defines what data the plugin will gather. 
+func (o *OpcUA) Gather(acc telegraf.Accumulator) error { + if o.state == Disconnected { + o.state = Connecting + err := Connect(o) + if err != nil { + o.state = Disconnected + return err + } + } + + o.state = Connected + + err := o.getData() + if err != nil && o.state == Connected { + o.state = Disconnected + // Ignore returned error to not mask the original problem + //nolint:errcheck,revive + disconnect(o) + return err + } + + for i, n := range o.nodes { + if o.checkStatusCode(o.nodeData[i].Quality) { + fields := make(map[string]interface{}) + tags := map[string]string{ + "id": n.idStr, + } + for k, v := range n.metricTags { + tags[k] = v + } + + fields[o.nodeData[i].TagName] = o.nodeData[i].Value + fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.nodeData[i].Quality)) + + switch o.Timestamp { + case "server": + acc.AddFields(n.metricName, fields, tags, o.nodeData[i].ServerTime) + case "source": + acc.AddFields(n.metricName, fields, tags, o.nodeData[i].SourceTime) + default: + acc.AddFields(n.metricName, fields, tags) + } + } + } + return nil +} + +// Add this plugin to telegraf +func init() { + inputs.Add("opcua", func() telegraf.Input { + return &OpcUA{ + MetricName: "opcua", + Endpoint: "opc.tcp://localhost:4840", + SecurityPolicy: "auto", + SecurityMode: "auto", + Timestamp: "gather", + RequestTimeout: config.Duration(5 * time.Second), + ConnectTimeout: config.Duration(10 * time.Second), + Certificate: "/etc/telegraf/cert.pem", + PrivateKey: "/etc/telegraf/key.pem", + AuthMethod: "Anonymous", + codes: []ua.StatusCode{ua.StatusOK}, + } + }) +} diff --git a/plugins/inputs/opcua/opcua_client.go b/plugins/inputs/opcua/opcua_client.go deleted file mode 100644 index 87647e2b9d5f8..0000000000000 --- a/plugins/inputs/opcua/opcua_client.go +++ /dev/null @@ -1,424 +0,0 @@ -package opcua_client - -import ( - "context" - "fmt" - "log" - "net/url" - "strings" - "time" - - "github.com/gopcua/opcua" - "github.com/gopcua/opcua/ua" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/plugins/inputs" -) - -// OpcUA type -type OpcUA struct { - Name string `toml:"name"` - Endpoint string `toml:"endpoint"` - SecurityPolicy string `toml:"security_policy"` - SecurityMode string `toml:"security_mode"` - Certificate string `toml:"certificate"` - PrivateKey string `toml:"private_key"` - Username string `toml:"username"` - Password string `toml:"password"` - AuthMethod string `toml:"auth_method"` - ConnectTimeout config.Duration `toml:"connect_timeout"` - RequestTimeout config.Duration `toml:"request_timeout"` - NodeList []OPCTag `toml:"nodes"` - - Nodes []string `toml:"-"` - NodeData []OPCData `toml:"-"` - NodeIDs []*ua.NodeID `toml:"-"` - NodeIDerror []error `toml:"-"` - state ConnectionState - - // status - ReadSuccess int `toml:"-"` - ReadError int `toml:"-"` - NumberOfTags int `toml:"-"` - - // internal values - client *opcua.Client - req *ua.ReadRequest - opts []opcua.Option -} - -// OPCTag type -type OPCTag struct { - Name string `toml:"name"` - Namespace string `toml:"namespace"` - IdentifierType string `toml:"identifier_type"` - Identifier string `toml:"identifier"` - DataType string `toml:"data_type"` - Description string `toml:"description"` -} - -// OPCData type -type OPCData struct { - TagName string - Value interface{} - Quality ua.StatusCode - TimeStamp string - Time string - DataType ua.TypeID -} - -// ConnectionState used for constants -type ConnectionState int - -const ( - //Disconnected constant state 0 - Disconnected 
ConnectionState = iota - //Connecting constant state 1 - Connecting - //Connected constant state 2 - Connected -) - -const description = `Retrieve data from OPCUA devices` -const sampleConfig = ` -[[inputs.opcua]] - ## Device name - # name = "localhost" - # - ## OPC UA Endpoint URL - # endpoint = "opc.tcp://localhost:4840" - # - ## Maximum time allowed to establish a connect to the endpoint. - # connect_timeout = "10s" - # - ## Maximum time allowed for a request over the estabilished connection. - # request_timeout = "5s" - # - ## Security policy, one of "None", "Basic128Rsa15", "Basic256", - ## "Basic256Sha256", or "auto" - # security_policy = "auto" - # - ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" - # security_mode = "auto" - # - ## Path to cert.pem. Required when security mode or policy isn't "None". - ## If cert path is not supplied, self-signed cert and key will be generated. - # certificate = "/etc/telegraf/cert.pem" - # - ## Path to private key.pem. Required when security mode or policy isn't "None". - ## If key path is not supplied, self-signed cert and key will be generated. - # private_key = "/etc/telegraf/key.pem" - # - ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To - ## authenticate using a specific ID, select 'Certificate' or 'UserName' - # auth_method = "Anonymous" - # - ## Username. Required for auth_method = "UserName" - # username = "" - # - ## Password. Required for auth_method = "UserName" - # password = "" - # - ## Node ID configuration - ## name - the variable name - ## namespace - integer value 0 thru 3 - ## identifier_type - s=string, i=numeric, g=guid, b=opaque - ## identifier - tag as shown in opcua browser - ## data_type - boolean, byte, short, int, uint, uint16, int16, - ## uint32, int32, float, double, string, datetime, number - ## Example: - ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", data_type="string", description="http://open62541.org"} - nodes = [ - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="", namespace="", identifier_type="", identifier="", data_type="", description=""}, - ] -` - -// Description will appear directly above the plugin definition in the config file -func (o *OpcUA) Description() string { - return description -} - -// SampleConfig will populate the sample configuration portion of the plugin's configuration -func (o *OpcUA) SampleConfig() string { - return sampleConfig -} - -// Init will initialize all tags -func (o *OpcUA) Init() error { - o.state = Disconnected - - err := o.validateEndpoint() - if err != nil { - return err - } - - err = o.InitNodes() - if err != nil { - return err - } - o.NumberOfTags = len(o.NodeList) - - o.setupOptions() - - return nil - -} - -func (o *OpcUA) validateEndpoint() error { - if o.Name == "" { - return fmt.Errorf("device name is empty") - } - - if o.Endpoint == "" { - return fmt.Errorf("endpoint url is empty") - } - - _, err := url.Parse(o.Endpoint) - if err != nil { - return fmt.Errorf("endpoint url is invalid") - } - - //search security policy type - switch o.SecurityPolicy { - case "None", "Basic128Rsa15", "Basic256", "Basic256Sha256", "auto": - break - default: - return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityPolicy, o.Name) - } - //search security mode type - switch o.SecurityMode { - case "None", "Sign", "SignAndEncrypt", "auto": - break - default: - return fmt.Errorf("invalid security type '%s' in '%s'", o.SecurityMode, o.Name) - } - return 
nil -} - -//InitNodes Method on OpcUA -func (o *OpcUA) InitNodes() error { - if len(o.NodeList) == 0 { - return nil - } - - err := o.validateOPCTags() - if err != nil { - return err - } - - return nil -} - -func (o *OpcUA) validateOPCTags() error { - nameEncountered := map[string]bool{} - for i, item := range o.NodeList { - //check empty name - if item.Name == "" { - return fmt.Errorf("empty name in '%s'", item.Name) - } - //search name duplicate - if nameEncountered[item.Name] { - return fmt.Errorf("name '%s' is duplicated in '%s'", item.Name, item.Name) - } else { - nameEncountered[item.Name] = true - } - //search identifier type - switch item.IdentifierType { - case "s", "i", "g", "b": - break - default: - return fmt.Errorf("invalid identifier type '%s' in '%s'", item.IdentifierType, item.Name) - } - // search data type - switch item.DataType { - case "boolean", "byte", "short", "int", "uint", "uint16", "int16", "uint32", "int32", "float", "double", "string", "datetime", "number": - break - default: - return fmt.Errorf("invalid data type '%s' in '%s'", item.DataType, item.Name) - } - - // build nodeid - o.Nodes = append(o.Nodes, BuildNodeID(item)) - - //parse NodeIds and NodeIds errors - nid, niderr := ua.ParseNodeID(o.Nodes[i]) - // build NodeIds and Errors - o.NodeIDs = append(o.NodeIDs, nid) - o.NodeIDerror = append(o.NodeIDerror, niderr) - // Grow NodeData for later input - o.NodeData = append(o.NodeData, OPCData{}) - } - return nil -} - -// BuildNodeID build node ID from OPC tag -func BuildNodeID(tag OPCTag) string { - return "ns=" + tag.Namespace + ";" + tag.IdentifierType + "=" + tag.Identifier -} - -// Connect to a OPCUA device -func Connect(o *OpcUA) error { - u, err := url.Parse(o.Endpoint) - if err != nil { - return err - } - - switch u.Scheme { - case "opc.tcp": - o.state = Connecting - - if o.client != nil { - o.client.CloseSession() - } - - o.client = opcua.NewClient(o.Endpoint, o.opts...) - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(o.ConnectTimeout)) - defer cancel() - if err := o.client.Connect(ctx); err != nil { - return fmt.Errorf("Error in Client Connection: %s", err) - } - - regResp, err := o.client.RegisterNodes(&ua.RegisterNodesRequest{ - NodesToRegister: o.NodeIDs, - }) - if err != nil { - return fmt.Errorf("RegisterNodes failed: %v", err) - } - - o.req = &ua.ReadRequest{ - MaxAge: 2000, - NodesToRead: readvalues(regResp.RegisteredNodeIDs), - TimestampsToReturn: ua.TimestampsToReturnBoth, - } - - err = o.getData() - if err != nil { - return fmt.Errorf("Get Data Failed: %v", err) - } - - default: - return fmt.Errorf("unsupported scheme %q in endpoint. 
Expected opc.tcp", u.Scheme) - } - return nil -} - -func (o *OpcUA) setupOptions() error { - - // Get a list of the endpoints for our target server - endpoints, err := opcua.GetEndpoints(o.Endpoint) - if err != nil { - log.Fatal(err) - } - - if o.Certificate == "" && o.PrivateKey == "" { - if o.SecurityPolicy != "None" || o.SecurityMode != "None" { - o.Certificate, o.PrivateKey = generateCert("urn:telegraf:gopcua:client", 2048, o.Certificate, o.PrivateKey, (365 * 24 * time.Hour)) - } - } - - o.opts = generateClientOpts(endpoints, o.Certificate, o.PrivateKey, o.SecurityPolicy, o.SecurityMode, o.AuthMethod, o.Username, o.Password, time.Duration(o.RequestTimeout)) - - return nil -} - -func (o *OpcUA) getData() error { - resp, err := o.client.Read(o.req) - if err != nil { - o.ReadError++ - return fmt.Errorf("RegisterNodes Read failed: %v", err) - } - o.ReadSuccess++ - for i, d := range resp.Results { - if d.Status != ua.StatusOK { - return fmt.Errorf("Status not OK: %v", d.Status) - } - o.NodeData[i].TagName = o.NodeList[i].Name - if d.Value != nil { - o.NodeData[i].Value = d.Value.Value() - o.NodeData[i].DataType = d.Value.Type() - } - o.NodeData[i].Quality = d.Status - o.NodeData[i].TimeStamp = d.ServerTimestamp.String() - o.NodeData[i].Time = d.SourceTimestamp.String() - } - return nil -} - -func readvalues(ids []*ua.NodeID) []*ua.ReadValueID { - rvids := make([]*ua.ReadValueID, len(ids)) - for i, v := range ids { - rvids[i] = &ua.ReadValueID{NodeID: v} - } - return rvids -} - -func disconnect(o *OpcUA) error { - u, err := url.Parse(o.Endpoint) - if err != nil { - return err - } - - o.ReadError = 0 - o.ReadSuccess = 0 - - switch u.Scheme { - case "opc.tcp": - o.state = Disconnected - o.client.Close() - return nil - default: - return fmt.Errorf("invalid controller") - } -} - -// Gather defines what data the plugin will gather. 
-func (o *OpcUA) Gather(acc telegraf.Accumulator) error { - if o.state == Disconnected { - o.state = Connecting - err := Connect(o) - if err != nil { - o.state = Disconnected - return err - } - } - - o.state = Connected - - err := o.getData() - if err != nil && o.state == Connected { - o.state = Disconnected - disconnect(o) - return err - } - - for i, n := range o.NodeList { - fields := make(map[string]interface{}) - tags := map[string]string{ - "name": n.Name, - "id": BuildNodeID(n), - } - - fields[o.NodeData[i].TagName] = o.NodeData[i].Value - fields["Quality"] = strings.TrimSpace(fmt.Sprint(o.NodeData[i].Quality)) - acc.AddFields(o.Name, fields, tags) - } - return nil -} - -// Add this plugin to telegraf -func init() { - inputs.Add("opcua", func() telegraf.Input { - return &OpcUA{ - Name: "localhost", - Endpoint: "opc.tcp://localhost:4840", - SecurityPolicy: "auto", - SecurityMode: "auto", - RequestTimeout: config.Duration(5 * time.Second), - ConnectTimeout: config.Duration(10 * time.Second), - Certificate: "/etc/telegraf/cert.pem", - PrivateKey: "/etc/telegraf/key.pem", - AuthMethod: "Anonymous", - } - }) -} diff --git a/plugins/inputs/opcua/opcua_client_test.go b/plugins/inputs/opcua/opcua_client_test.go deleted file mode 100644 index 637ac87bc0afa..0000000000000 --- a/plugins/inputs/opcua/opcua_client_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package opcua_client - -import ( - "fmt" - "reflect" - "testing" - "time" - - "github.com/influxdata/telegraf/config" - "github.com/stretchr/testify/require" -) - -type OPCTags struct { - Name string - Namespace string - IdentifierType string - Identifier string - DataType string - Want string -} - -func TestClient1(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - var testopctags = []OPCTags{ - {"ProductName", "0", "i", "2261", "string", "open62541 OPC UA Server"}, - {"ProductUri", "0", "i", "2262", "string", "http://open62541.org"}, - {"ManufacturerName", "0", "i", "2263", "string", "open62541"}, - } - - var o OpcUA - var err error - - o.Name = "testing" - o.Endpoint = "opc.tcp://opcua.rocks:4840" - o.AuthMethod = "Anonymous" - o.ConnectTimeout = config.Duration(10 * time.Second) - o.RequestTimeout = config.Duration(1 * time.Second) - o.SecurityPolicy = "None" - o.SecurityMode = "None" - for _, tags := range testopctags { - o.NodeList = append(o.NodeList, MapOPCTag(tags)) - } - err = o.Init() - if err != nil { - t.Errorf("Initialize Error: %s", err) - } - err = Connect(&o) - if err != nil { - t.Fatalf("Connect Error: %s", err) - } - - for i, v := range o.NodeData { - if v.Value != nil { - types := reflect.TypeOf(v.Value) - value := reflect.ValueOf(v.Value) - compare := fmt.Sprintf("%v", value.Interface()) - if compare != testopctags[i].Want { - t.Errorf("Tag %s: Values %v for type %s does not match record", o.NodeList[i].Name, value.Interface(), types) - } - } else { - t.Errorf("Tag: %s has value: %v", o.NodeList[i].Name, v.Value) - } - } -} - -func MapOPCTag(tags OPCTags) (out OPCTag) { - out.Name = tags.Name - out.Namespace = tags.Namespace - out.IdentifierType = tags.IdentifierType - out.Identifier = tags.Identifier - out.DataType = tags.DataType - return out -} - -func TestConfig(t *testing.T) { - toml := ` -[[inputs.opcua]] -name = "localhost" -endpoint = "opc.tcp://localhost:4840" -connect_timeout = "10s" -request_timeout = "5s" -security_policy = "auto" -security_mode = "auto" -certificate = "/etc/telegraf/cert.pem" -private_key = "/etc/telegraf/key.pem" -auth_method = "Anonymous" 
-username = "" -password = "" -nodes = [ - {name="name", namespace="", identifier_type="", identifier="", data_type="", description=""}, - {name="name2", namespace="", identifier_type="", identifier="", data_type="", description=""}, -] -` - - c := config.NewConfig() - err := c.LoadConfigData([]byte(toml)) - require.NoError(t, err) - - require.Len(t, c.Inputs, 1) - - o, ok := c.Inputs[0].Input.(*OpcUA) - require.True(t, ok) - - require.Len(t, o.NodeList, 2) - require.Equal(t, o.NodeList[0].Name, "name") - require.Equal(t, o.NodeList[1].Name, "name2") -} diff --git a/plugins/inputs/opcua/opcua_test.go b/plugins/inputs/opcua/opcua_test.go new file mode 100644 index 0000000000000..55b1bc2870856 --- /dev/null +++ b/plugins/inputs/opcua/opcua_test.go @@ -0,0 +1,357 @@ +package opcua + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/gopcua/opcua/ua" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +type OPCTags struct { + Name string + Namespace string + IdentifierType string + Identifier string + Want interface{} +} + +const servicePort = "4840" + +func TestGetDataBadNodeContainerIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + container := testutil.Container{ + Image: "open62541/open62541", + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForListeningPort(nat.Port(servicePort)), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + var testopctags = []OPCTags{ + {"ProductName", "1", "i", "2261", "open62541 OPC UA Server"}, + {"ProductUri", "0", "i", "2262", "http://open62541.org"}, + {"ManufacturerName", "0", "i", "2263", "open62541"}, + } + + var o OpcUA + o.MetricName = "testing" + o.Endpoint = fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]) + fmt.Println(o.Endpoint) + o.AuthMethod = "Anonymous" + o.ConnectTimeout = config.Duration(10 * time.Second) + o.RequestTimeout = config.Duration(1 * time.Second) + o.SecurityPolicy = "None" + o.SecurityMode = "None" + o.codes = []ua.StatusCode{ua.StatusOK} + logger := &testutil.CaptureLogger{} + o.Log = logger + + g := GroupSettings{ + MetricName: "anodic_current", + TagsSlice: [][]string{ + {"pot", "2002"}, + }, + } + + for _, tags := range testopctags { + g.Nodes = append(g.Nodes, MapOPCTag(tags)) + } + o.Groups = append(o.Groups, g) + err = o.Init() + require.NoError(t, err) + err = Connect(&o) + require.NoError(t, err) + require.Contains(t, logger.LastError, "E! 
[] status not OK for node 'ProductName'(metric name 'anodic_current', tags 'pot=2002')") +} + +func TestClient1Integration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + container := testutil.Container{ + Image: "open62541/open62541", + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForListeningPort(nat.Port(servicePort)), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + var testopctags = []OPCTags{ + {"ProductName", "0", "i", "2261", "open62541 OPC UA Server"}, + {"ProductUri", "0", "i", "2262", "http://open62541.org"}, + {"ManufacturerName", "0", "i", "2263", "open62541"}, + {"badnode", "1", "i", "1337", nil}, + {"goodnode", "1", "s", "the.answer", "42"}, + } + + var o OpcUA + o.MetricName = "testing" + o.Endpoint = fmt.Sprintf("opc.tcp://%s:%s", container.Address, container.Ports[servicePort]) + o.AuthMethod = "Anonymous" + o.ConnectTimeout = config.Duration(10 * time.Second) + o.RequestTimeout = config.Duration(1 * time.Second) + o.SecurityPolicy = "None" + o.SecurityMode = "None" + o.codes = []ua.StatusCode{ua.StatusOK} + o.Log = testutil.Logger{} + for _, tags := range testopctags { + o.RootNodes = append(o.RootNodes, MapOPCTag(tags)) + } + err = o.Init() + if err != nil { + t.Errorf("Initialize Error: %s", err) + } + err = Connect(&o) + if err != nil { + t.Fatalf("Connect Error: %s", err) + } + + for i, v := range o.nodeData { + if v.Value != nil { + types := reflect.TypeOf(v.Value) + value := reflect.ValueOf(v.Value) + compare := fmt.Sprintf("%v", value.Interface()) + if compare != testopctags[i].Want { + t.Errorf("Tag %s: Values %v for type %s does not match record", o.nodes[i].tag.FieldName, value.Interface(), types) + } + } else if testopctags[i].Want != nil { + t.Errorf("Tag: %s has value: %v", o.nodes[i].tag.FieldName, v.Value) + } + } +} + +func MapOPCTag(tags OPCTags) (out NodeSettings) { + out.FieldName = tags.Name + out.Namespace = tags.Namespace + out.IdentifierType = tags.IdentifierType + out.Identifier = tags.Identifier + return out +} + +func TestConfig(t *testing.T) { + toml := ` +[[inputs.opcua]] +name = "localhost" +endpoint = "opc.tcp://localhost:4840" +connect_timeout = "10s" +request_timeout = "5s" +security_policy = "auto" +security_mode = "auto" +certificate = "/etc/telegraf/cert.pem" +private_key = "/etc/telegraf/key.pem" +auth_method = "Anonymous" +username = "" +password = "" +nodes = [ + {name="name", namespace="1", identifier_type="s", identifier="one"}, + {name="name2", namespace="2", identifier_type="s", identifier="two"}, +] +[[inputs.opcua.group]] +name = "foo" +namespace = "3" +identifier_type = "i" +tags = [["tag1", "val1"], ["tag2", "val2"]] +nodes = [{name="name3", identifier="3000", tags=[["tag3", "val3"]]}] +[[inputs.opcua.group]] +name = "bar" +namespace = "0" +identifier_type = "i" +tags = [["tag1", "val1"], ["tag2", "val2"]] +nodes = [{name="name4", identifier="4000", tags=[["tag1", "override"]]}] + +[inputs.opcua.workarounds] +additional_valid_status_codes = ["0xC0"] +` + + c := config.NewConfig() + err := c.LoadConfigData([]byte(toml)) + require.NoError(t, err) + + require.Len(t, c.Inputs, 1) + + o, ok := c.Inputs[0].Input.(*OpcUA) + require.True(t, ok) + + require.Len(t, o.RootNodes, 2) + require.Equal(t, o.RootNodes[0].FieldName, "name") + require.Equal(t, o.RootNodes[1].FieldName, "name2") + + require.Len(t, o.Groups, 2) + require.Equal(t, 
o.Groups[0].MetricName, "foo") + require.Len(t, o.Groups[0].Nodes, 1) + require.Equal(t, o.Groups[0].Nodes[0].Identifier, "3000") + + require.NoError(t, o.InitNodes()) + require.Len(t, o.nodes, 4) + require.Len(t, o.nodes[2].metricTags, 3) + require.Len(t, o.nodes[3].metricTags, 2) + + require.Len(t, o.Workarounds.AdditionalValidStatusCodes, 1) + require.Equal(t, o.Workarounds.AdditionalValidStatusCodes[0], "0xC0") +} + +func TestTagsSliceToMap(t *testing.T) { + m, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"baz", "bat"}}) + require.NoError(t, err) + require.Len(t, m, 2) + require.Equal(t, m["foo"], "bar") + require.Equal(t, m["baz"], "bat") +} + +func TestTagsSliceToMap_twoStrings(t *testing.T) { + var err error + _, err = tagsSliceToMap([][]string{{"foo", "bar", "baz"}}) + require.Error(t, err) + _, err = tagsSliceToMap([][]string{{"foo"}}) + require.Error(t, err) +} + +func TestTagsSliceToMap_dupeKey(t *testing.T) { + _, err := tagsSliceToMap([][]string{{"foo", "bar"}, {"foo", "bat"}}) + require.Error(t, err) +} + +func TestTagsSliceToMap_empty(t *testing.T) { + _, err := tagsSliceToMap([][]string{{"foo", ""}}) + require.Equal(t, fmt.Errorf("tag 1 has empty value"), err) + _, err = tagsSliceToMap([][]string{{"", "bar"}}) + require.Equal(t, fmt.Errorf("tag 1 has empty name"), err) +} + +func TestValidateOPCTags(t *testing.T) { + tests := []struct { + name string + nodes []Node + err error + }{ + { + "same", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "v1", "t2": "v2"}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "v1", "t2": "v2"}, + }, + }, + fmt.Errorf("name 'fn' is duplicated (metric name 'mn', tags 't1=v1, t2=v2')"), + }, + { + "different metric tag names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t3": ""}, + }, + }, + nil, + }, + { + "different metric tag values", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "foo", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "bar", "t2": ""}, + }, + }, + nil, + }, + { + "different metric names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn2", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + }, + nil, + }, + { + "different field names", + []Node{ + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + { + metricName: "mn", + tag: NodeSettings{FieldName: "fn2", IdentifierType: "s"}, + metricTags: map[string]string{"t1": "", "t2": ""}, + }, + }, + nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := OpcUA{ + nodes: tt.nodes, + Log: testutil.Logger{}, + } + require.Equal(t, tt.err, o.validateOPCTags()) + }) + } +} + +func TestSetupWorkarounds(t *testing.T) { + var o OpcUA + o.codes = []ua.StatusCode{ua.StatusOK} + + o.Workarounds.AdditionalValidStatusCodes = 
[]string{"0xC0", "0x00AA0000"} + + err := o.setupWorkarounds() + require.NoError(t, err) + + require.Len(t, o.codes, 3) + require.Equal(t, o.codes[0], ua.StatusCode(0)) + require.Equal(t, o.codes[1], ua.StatusCode(192)) + require.Equal(t, o.codes[2], ua.StatusCode(11141120)) +} + +func TestCheckStatusCode(t *testing.T) { + var o OpcUA + o.codes = []ua.StatusCode{ua.StatusCode(0), ua.StatusCode(192), ua.StatusCode(11141120)} + require.Equal(t, o.checkStatusCode(ua.StatusCode(192)), true) +} diff --git a/plugins/inputs/opcua/opcua_util.go b/plugins/inputs/opcua/opcua_util.go index c0eac2483eb22..e5335babae2ad 100644 --- a/plugins/inputs/opcua/opcua_util.go +++ b/plugins/inputs/opcua/opcua_util.go @@ -1,4 +1,4 @@ -package opcua_client +package opcua import ( "crypto/ecdsa" @@ -9,8 +9,6 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "io/ioutil" - "log" "math/big" "net" "net/url" @@ -27,16 +25,15 @@ import ( // SELF SIGNED CERT FUNCTIONS func newTempDir() (string, error) { - dir, err := ioutil.TempDir("", "ssc") + dir, err := os.MkdirTemp("", "ssc") return dir, err } -func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.Duration) (string, string) { - +func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.Duration) (cert string, key string, err error) { dir, _ := newTempDir() if len(host) == 0 { - log.Fatalf("Missing required host parameter") + return "", "", fmt.Errorf("missing required host parameter") } if rsaBits == 0 { rsaBits = 2048 @@ -50,7 +47,7 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D priv, err := rsa.GenerateKey(rand.Reader, rsaBits) if err != nil { - log.Fatalf("failed to generate private key: %s", err) + return "", "", fmt.Errorf("failed to generate private key: %s", err) } notBefore := time.Now() @@ -59,7 +56,7 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - log.Fatalf("failed to generate serial number: %s", err) + return "", "", fmt.Errorf("failed to generate serial number: %s", err) } template := x509.Certificate{ @@ -89,33 +86,36 @@ func generateCert(host string, rsaBits int, certFile, keyFile string, dur time.D derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv) if err != nil { - log.Fatalf("Failed to create certificate: %s", err) + return "", "", fmt.Errorf("failed to create certificate: %s", err) } certOut, err := os.Create(certFile) if err != nil { - log.Fatalf("failed to open %s for writing: %s", certFile, err) + return "", "", fmt.Errorf("failed to open %s for writing: %s", certFile, err) } if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - log.Fatalf("failed to write data to %s: %s", certFile, err) + return "", "", fmt.Errorf("failed to write data to %s: %s", certFile, err) } if err := certOut.Close(); err != nil { - log.Fatalf("error closing %s: %s", certFile, err) + return "", "", fmt.Errorf("error closing %s: %s", certFile, err) } keyOut, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { - log.Printf("failed to open %s for writing: %s", keyFile, err) - return "", "" + return "", "", fmt.Errorf("failed to open %s for writing: %s", keyFile, err) + } + keyBlock, err := pemBlockForKey(priv) + if err != nil { + return "", "", fmt.Errorf("error generating block: %v", err) } - if 
err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil { - log.Fatalf("failed to write data to %s: %s", keyFile, err) + if err := pem.Encode(keyOut, keyBlock); err != nil { + return "", "", fmt.Errorf("failed to write data to %s: %s", keyFile, err) } if err := keyOut.Close(); err != nil { - log.Fatalf("error closing %s: %s", keyFile, err) + return "", "", fmt.Errorf("error closing %s: %s", keyFile, err) } - return certFile, keyFile + return certFile, keyFile, nil } func publicKey(priv interface{}) interface{} { @@ -129,25 +129,23 @@ func publicKey(priv interface{}) interface{} { } } -func pemBlockForKey(priv interface{}) *pem.Block { +func pemBlockForKey(priv interface{}) (*pem.Block, error) { switch k := priv.(type) { case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}, nil case *ecdsa.PrivateKey: b, err := x509.MarshalECPrivateKey(k) if err != nil { - fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err) - os.Exit(2) + return nil, fmt.Errorf("unable to marshal ECDSA private key: %v", err) } - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}, nil default: - return nil + return nil, nil } } -// OPT FUNCTIONS - -func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, policy, mode, auth, username, password string, requestTimeout time.Duration) []opcua.Option { +//revive:disable-next-line +func (o *OpcUA) generateClientOpts(endpoints []*ua.EndpointDescription) ([]opcua.Option, error) { opts := []opcua.Option{} appuri := "urn:telegraf:gopcua:client" appname := "Telegraf" @@ -155,12 +153,19 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, // ApplicationURI is automatically read from the cert so is not required if a cert if provided opts = append(opts, opcua.ApplicationURI(appuri)) opts = append(opts, opcua.ApplicationName(appname)) + opts = append(opts, opcua.RequestTimeout(time.Duration(o.RequestTimeout))) - opts = append(opts, opcua.RequestTimeout(requestTimeout)) - + certFile := o.Certificate + keyFile := o.PrivateKey + policy := o.SecurityPolicy + mode := o.SecurityMode + var err error if certFile == "" && keyFile == "" { if policy != "None" || mode != "None" { - certFile, keyFile = generateCert(appuri, 2048, certFile, keyFile, (365 * 24 * time.Hour)) + certFile, keyFile, err = generateCert(appuri, 2048, certFile, keyFile, 365*24*time.Hour) + if err != nil { + return nil, err + } } } @@ -169,11 +174,11 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, debug.Printf("Loading cert/key from %s/%s", certFile, keyFile) c, err := tls.LoadX509KeyPair(certFile, keyFile) if err != nil { - log.Printf("Failed to load certificate: %s", err) + o.Log.Warnf("Failed to load certificate: %s", err) } else { pk, ok := c.PrivateKey.(*rsa.PrivateKey) if !ok { - log.Fatalf("Invalid private key") + return nil, fmt.Errorf("invalid private key") } cert = c.Certificate[0] opts = append(opts, opcua.PrivateKey(pk), opcua.Certificate(cert)) @@ -191,11 +196,15 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, secPolicy = ua.SecurityPolicyURIPrefix + policy policy = "" default: - log.Fatalf("Invalid security policy: %s", policy) + return nil, fmt.Errorf("invalid security policy: %s", policy) } // Select the most appropriate authentication mode from server capabilities and user input - 
authMode, authOption := generateAuth(auth, cert, username, password) + authMode, authOption, err := o.generateAuth(o.AuthMethod, cert, o.Username, o.Password) + if err != nil { + return nil, err + } + opts = append(opts, authOption) var secMode ua.MessageSecurityMode @@ -211,7 +220,7 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, secMode = ua.MessageSecurityModeSignAndEncrypt mode = "" default: - log.Fatalf("Invalid security mode: %s", mode) + return nil, fmt.Errorf("invalid security mode: %s", mode) } // Allow input of only one of sec-mode,sec-policy when choosing 'None' @@ -253,24 +262,23 @@ func generateClientOpts(endpoints []*ua.EndpointDescription, certFile, keyFile, } if serverEndpoint == nil { // Didn't find an endpoint with matching policy and mode. - log.Printf("unable to find suitable server endpoint with selected sec-policy and sec-mode") - log.Fatalf("quitting") + return nil, fmt.Errorf("unable to find suitable server endpoint with selected sec-policy and sec-mode") } secPolicy = serverEndpoint.SecurityPolicyURI secMode = serverEndpoint.SecurityMode // Check that the selected endpoint is a valid combo - err := validateEndpointConfig(endpoints, secPolicy, secMode, authMode) + err = validateEndpointConfig(endpoints, secPolicy, secMode, authMode) if err != nil { - log.Fatalf("error validating input: %s", err) + return nil, fmt.Errorf("error validating input: %s", err) } opts = append(opts, opcua.SecurityFromEndpoint(serverEndpoint, authMode)) - return opts + return opts, nil } -func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option) { +func (o *OpcUA) generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua.Option, error) { var err error var authMode ua.UserTokenType @@ -285,13 +293,13 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua if un == "" { if err != nil { - log.Fatalf("error reading username input: %s", err) + return 0, nil, fmt.Errorf("error reading the username input: %s", err) } } if pw == "" { if err != nil { - log.Fatalf("error reading username input: %s", err) + return 0, nil, fmt.Errorf("error reading the password input: %s", err) } } @@ -307,13 +315,12 @@ func generateAuth(a string, cert []byte, un, pw string) (ua.UserTokenType, opcua authOption = opcua.AuthIssuedToken([]byte(nil)) default: - log.Printf("unknown auth-mode, defaulting to Anonymous") + o.Log.Warnf("unknown auth-mode, defaulting to Anonymous") authMode = ua.UserTokenTypeAnonymous authOption = opcua.AuthAnonymous() - } - return authMode, authOption + return authMode, authOption, nil } func validateEndpointConfig(endpoints []*ua.EndpointDescription, secPolicy string, secMode ua.MessageSecurityMode, authMode ua.UserTokenType) error { diff --git a/plugins/inputs/opcua/sample.conf b/plugins/inputs/opcua/sample.conf new file mode 100644 index 0000000000000..0e44d1d6c50d5 --- /dev/null +++ b/plugins/inputs/opcua/sample.conf @@ -0,0 +1,87 @@ +# Retrieve data from OPCUA devices +[[inputs.opcua]] + ## Metric name + # name = "opcua" + # + ## OPC UA Endpoint URL + # endpoint = "opc.tcp://localhost:4840" + # + ## Maximum time allowed to establish a connection to the endpoint. + # connect_timeout = "10s" + # + ## Maximum time allowed for a request over the established connection. 
+ # request_timeout = "5s" + # + ## Security policy, one of "None", "Basic128Rsa15", "Basic256", + ## "Basic256Sha256", or "auto" + # security_policy = "auto" + # + ## Security mode, one of "None", "Sign", "SignAndEncrypt", or "auto" + # security_mode = "auto" + # + ## Path to cert.pem. Required when security mode or policy isn't "None". + ## If cert path is not supplied, self-signed cert and key will be generated. + # certificate = "/etc/telegraf/cert.pem" + # + ## Path to private key.pem. Required when security mode or policy isn't "None". + ## If key path is not supplied, self-signed cert and key will be generated. + # private_key = "/etc/telegraf/key.pem" + # + ## Authentication Method, one of "Certificate", "UserName", or "Anonymous". To + ## authenticate using a specific ID, select 'Certificate' or 'UserName' + # auth_method = "Anonymous" + # + ## Username. Required for auth_method = "UserName" + # username = "" + # + ## Password. Required for auth_method = "UserName" + # password = "" + # + ## Option to select the metric timestamp to use. Valid options are: + ## "gather" -- uses the time of receiving the data in telegraf + ## "server" -- uses the timestamp provided by the server + ## "source" -- uses the timestamp provided by the source + # timestamp = "gather" + # + ## Node ID configuration + ## name - field name to use in the output + ## namespace - OPC UA namespace of the node (integer value 0 through 3) + ## identifier_type - OPC UA ID type (s=string, i=numeric, g=guid, b=opaque) + ## identifier - OPC UA ID (tag as shown in opcua browser) + ## tags - extra tags to be added to the output metric (optional) + ## Example: + ## {name="ProductUri", namespace="0", identifier_type="i", identifier="2262", tags=[["tag1","value1"],["tag2","value2"]]} + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] + # + ## Node Group + ## Sets defaults for OPC UA namespace and ID type so they aren't required in + ## every node. A group can also have a metric name that overrides the main + ## plugin metric name. + ## + ## Multiple node groups are allowed + #[[inputs.opcua.group]] + ## Group Metric name. Overrides the top level name. If unset, the + ## top level name is used. + # name = + # + ## Group default namespace. If a node in the group doesn't set its + ## namespace, this is used. + # namespace = + # + ## Group default identifier type. If a node in the group doesn't set its + ## identifier type, this is used. + # identifier_type = + # + ## Node ID Configuration. Array of nodes with the same settings as above. + # nodes = [ + # {name="", namespace="", identifier_type="", identifier=""}, + # {name="", namespace="", identifier_type="", identifier=""}, + #] + + ## Enable workarounds required by some devices to work correctly + # [inputs.opcua.workarounds] + ## Set additional valid status codes; StatusOK (0x0) is always considered valid + # additional_valid_status_codes = ["0xC0"] diff --git a/plugins/inputs/openldap/README.md b/plugins/inputs/openldap/README.md index fcb175bd430f8..7d07eb1f0eedc 100644 --- a/plugins/inputs/openldap/README.md +++ b/plugins/inputs/openldap/README.md @@ -2,11 +2,10 @@ This plugin gathers metrics from OpenLDAP's cn=Monitor backend. -### Configuration: +## Configuration -To use this plugin you must enable the [slapd monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend. 
- -```toml +```toml @sample.conf +# OpenLDAP cn=Monitor plugin [[inputs.openldap]] host = "localhost" port = 389 @@ -25,73 +24,81 @@ To use this plugin you must enable the [slapd monitoring](https://www.openldap.o # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. bind_dn = "" bind_password = "" - + # reverse metric names so they sort more naturally # Defaults to false if unset, but is set to true when generating a new config reverse_metric_names = true ``` -### Measurements & Fields: +To use this plugin you must enable the [slapd +monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend. -All **monitorCounter**, **monitoredInfo**, **monitorOpInitiated**, and **monitorOpCompleted** attributes are gathered based on this LDAP query: +## Metrics -``` +All **monitorCounter**, **monitoredInfo**, **monitorOpInitiated**, and +**monitorOpCompleted** attributes are gathered based on this LDAP query: + +```sh (|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject)) ``` -Metric names are based on their entry DN with the cn=Monitor base removed. If `reverse_metric_names` is not set, metrics are based on their DN. If `reverse_metric_names` is set to `true`, the names are reversed. This is recommended as it allows the names to sort more naturally. +Metric names are based on their entry DN with the cn=Monitor base removed. If +`reverse_metric_names` is not set, metrics are based on their DN. If +`reverse_metric_names` is set to `true`, the names are reversed. This is +recommended as it allows the names to sort more naturally. -Metrics for the **monitorOp*** attributes have **_initiated** and **_completed** added to the base name as appropriate. +Metrics for the **monitorOp*** attributes have **_initiated** and **_completed** +added to the base name as appropriate. 
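+
+For example, with `reverse_metric_names = true` the entry
+`cn=Read,cn=Waiters,cn=Monitor` becomes the field `waiters_read`; when the
+option is unset, the same entry would be named `read_waiters`.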
An OpenLDAP 2.4 server will provide these metrics: - openldap - - connections_current - - connections_max_file_descriptors - - connections_total - - operations_abandon_completed - - operations_abandon_initiated - - operations_add_completed - - operations_add_initiated - - operations_bind_completed - - operations_bind_initiated - - operations_compare_completed - - operations_compare_initiated - - operations_delete_completed - - operations_delete_initiated - - operations_extended_completed - - operations_extended_initiated - - operations_modify_completed - - operations_modify_initiated - - operations_modrdn_completed - - operations_modrdn_initiated - - operations_search_completed - - operations_search_initiated - - operations_unbind_completed - - operations_unbind_initiated - - statistics_bytes - - statistics_entries - - statistics_pdu - - statistics_referrals - - threads_active - - threads_backload - - threads_max - - threads_max_pending - - threads_open - - threads_pending - - threads_starting - - time_uptime - - waiters_read - - waiters_write - -### Tags: + - connections_current + - connections_max_file_descriptors + - connections_total + - operations_abandon_completed + - operations_abandon_initiated + - operations_add_completed + - operations_add_initiated + - operations_bind_completed + - operations_bind_initiated + - operations_compare_completed + - operations_compare_initiated + - operations_delete_completed + - operations_delete_initiated + - operations_extended_completed + - operations_extended_initiated + - operations_modify_completed + - operations_modify_initiated + - operations_modrdn_completed + - operations_modrdn_initiated + - operations_search_completed + - operations_search_initiated + - operations_unbind_completed + - operations_unbind_initiated + - statistics_bytes + - statistics_entries + - statistics_pdu + - statistics_referrals + - threads_active + - threads_backload + - threads_max + - threads_max_pending + - threads_open + - threads_pending + - threads_starting + - time_uptime + - waiters_read + - waiters_write + +### Tags - server= # value from config - port= # value from config -### Example Output: +## Example Output -``` +```shell $ telegraf -config telegraf.conf -input-filter openldap -test --debug * Plugin: inputs.openldap, Collection 1 > openldap,server=localhost,port=389,host=niska.ait.psu.edu operations_bind_initiated=10i,operations_unbind_initiated=6i,operations_modrdn_completed=0i,operations_delete_initiated=0i,operations_add_completed=2i,operations_delete_completed=0i,operations_abandon_completed=0i,statistics_entries=1516i,threads_open=2i,threads_active=1i,waiters_read=1i,operations_modify_completed=0i,operations_extended_initiated=4i,threads_pending=0i,operations_search_initiated=36i,operations_compare_initiated=0i,connections_max_file_descriptors=4096i,operations_modify_initiated=0i,operations_modrdn_initiated=0i,threads_max=16i,time_uptime=6017i,connections_total=1037i,connections_current=1i,operations_add_initiated=2i,statistics_bytes=162071i,operations_unbind_completed=6i,operations_abandon_initiated=0i,statistics_pdu=1566i,threads_max_pending=0i,threads_backload=1i,waiters_write=0i,operations_bind_completed=10i,operations_search_completed=35i,operations_compare_completed=0i,operations_extended_completed=4i,statistics_referrals=0i,threads_starting=0i 1516912070000000000 diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index d5ed7e4cc1c3f..f08d3b8f539f0 100644 --- a/plugins/inputs/openldap/openldap.go +++ 
b/plugins/inputs/openldap/openldap.go @@ -1,53 +1,36 @@ +//go:generate ../../../tools/readme_config_includer/generator package openldap import ( + _ "embed" "fmt" "strconv" "strings" + ldap "github.com/go-ldap/ldap/v3" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" - "gopkg.in/ldap.v3" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Openldap struct { Host string Port int - SSL string `toml:"ssl"` // Deprecated in 1.7; use TLS + SSL string `toml:"ssl" deprecated:"1.7.0;use 'tls' instead"` TLS string `toml:"tls"` InsecureSkipVerify bool - SSLCA string `toml:"ssl_ca"` // Deprecated in 1.7; use TLSCA + SSLCA string `toml:"ssl_ca" deprecated:"1.7.0;use 'tls_ca' instead"` TLSCA string `toml:"tls_ca"` BindDn string BindPassword string ReverseMetricNames bool } -const sampleConfig string = ` - host = "localhost" - port = 389 - - # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. - # note that port will likely need to be changed to 636 for ldaps - # valid options: "" | "starttls" | "ldaps" - tls = "" - - # skip peer certificate verification. Default is false. - insecure_skip_verify = false - - # Path to PEM-encoded Root certificate to use to verify server certificate - tls_ca = "/etc/ssl/certs.pem" - - # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. - bind_dn = "" - bind_password = "" - - # Reverse metric names so they sort more naturally. Recommended. - # This defaults to false if unset, but is set to true when generating a new config - reverse_metric_names = true -` - var searchBase = "cn=Monitor" var searchFilter = "(|(objectClass=monitorCounterObject)(objectClass=monitorOperation)(objectClass=monitoredObject))" var searchAttrs = []string{"monitorCounter", "monitorOpInitiated", "monitorOpCompleted", "monitoredInfo"} @@ -64,14 +47,6 @@ var attrTranslate = map[string]string{ "olmMDBEntries": "_mdb_entries", } -func (o *Openldap) SampleConfig() string { - return sampleConfig -} - -func (o *Openldap) Description() string { - return "OpenLDAP cn=Monitor plugin" -} - // return an initialized Openldap func NewOpenldap() *Openldap { return &Openldap{ @@ -88,6 +63,10 @@ func NewOpenldap() *Openldap { } } +func (*Openldap) SampleConfig() string { + return sampleConfig +} + // gather metrics func (o *Openldap) Gather(acc telegraf.Accumulator) error { if o.TLS == "" { @@ -110,13 +89,15 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - if o.TLS == "ldaps" { + + switch o.TLS { + case "ldaps": l, err = ldap.DialTLS("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port), tlsConfig) if err != nil { acc.AddError(err) return nil } - } else if o.TLS == "starttls" { + case "starttls": l, err = ldap.Dial("tcp", fmt.Sprintf("%s:%d", o.Host, o.Port)) if err != nil { acc.AddError(err) @@ -127,8 +108,8 @@ func (o *Openldap) Gather(acc telegraf.Accumulator) error { acc.AddError(err) return nil } - } else { - acc.AddError(fmt.Errorf("Invalid setting for ssl: %s", o.TLS)) + default: + acc.AddError(fmt.Errorf("invalid setting for ssl: %s", o.TLS)) return nil } } else { @@ -190,7 +171,6 @@ func gatherSearchResult(sr *ldap.SearchResult, o *Openldap, acc telegraf.Accumul } } acc.AddFields("openldap", fields, tags) - return } // Convert a DN to metric name, eg cn=Read,cn=Waiters,cn=Monitor becomes waiters_read @@ -200,23 +180,23 @@ func 
dnToMetric(dn string, o *Openldap) string { var metricParts []string dn = strings.Trim(dn, " ") - dn = strings.Replace(dn, " ", "_", -1) - dn = strings.Replace(dn, "cn=", "", -1) + dn = strings.ReplaceAll(dn, " ", "_") + dn = strings.ReplaceAll(dn, "cn=", "") dn = strings.ToLower(dn) metricParts = strings.Split(dn, ",") for i, j := 0, len(metricParts)-1; i < j; i, j = i+1, j-1 { metricParts[i], metricParts[j] = metricParts[j], metricParts[i] } return strings.Join(metricParts[1:], "_") - } else { - metricName := strings.Trim(dn, " ") - metricName = strings.Replace(metricName, " ", "_", -1) - metricName = strings.ToLower(metricName) - metricName = strings.TrimPrefix(metricName, "cn=") - metricName = strings.Replace(metricName, strings.ToLower("cn=Monitor"), "", -1) - metricName = strings.Replace(metricName, "cn=", "_", -1) - return strings.Replace(metricName, ",", "", -1) } + + metricName := strings.Trim(dn, " ") + metricName = strings.ReplaceAll(metricName, " ", "_") + metricName = strings.ToLower(metricName) + metricName = strings.TrimPrefix(metricName, "cn=") + metricName = strings.ReplaceAll(metricName, strings.ToLower("cn=Monitor"), "") + metricName = strings.ReplaceAll(metricName, "cn=", "_") + return strings.ReplaceAll(metricName, ",", "") } func init() { diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index 76d9cc3a9dd42..c821fe9a5b2a6 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -1,13 +1,21 @@ package openldap import ( + "path/filepath" "strconv" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/docker/go-connections/nat" + "github.com/go-ldap/ldap/v3" "github.com/stretchr/testify/require" - "gopkg.in/ldap.v3" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf/testutil" +) + +const ( + servicePort = "1389" + servicePortSecure = "1636" ) func TestOpenldapMockResult(t *testing.T) { @@ -33,7 +41,7 @@ func TestOpenldapMockResult(t *testing.T) { commonTests(t, o, &acc) } -func TestOpenldapNoConnection(t *testing.T) { +func TestOpenldapNoConnectionIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -45,127 +53,319 @@ func TestOpenldapNoConnection(t *testing.T) { var acc testutil.Accumulator err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } -func TestOpenldapGeneratesMetrics(t *testing.T) { +func TestOpenldapGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := testutil.Container{ + Image: "bitnami/openldap", + ExposedPorts: []string{servicePort}, + Env: map[string]string{ + "LDAP_ADMIN_USERNAME": "manager", + "LDAP_ADMIN_PASSWORD": "secret", + }, + WaitingFor: wait.ForAll( + wait.ForLog("slapd starting"), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + port, err := 
strconv.Atoi(container.Ports[servicePort]) + require.NoError(t, err) + o := &Openldap{ - Host: testutil.GetLocalHost(), - Port: 389, + Host: container.Address, + Port: port, + BindDn: "CN=manager,DC=example,DC=org", + BindPassword: "secret", } var acc testutil.Accumulator - err := o.Gather(&acc) + err = o.Gather(&acc) require.NoError(t, err) commonTests(t, o, &acc) } -func TestOpenldapStartTLS(t *testing.T) { +func TestOpenldapStartTLSIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + pki := testutil.NewPKI("../../../testutil/pki") + + tlsPem, err := filepath.Abs(pki.ServerCertAndKeyPath()) + require.NoError(t, err) + tlsCert, err := filepath.Abs(pki.ServerCertPath()) + require.NoError(t, err) + tlsKey, err := filepath.Abs(pki.ServerKeyPath()) + require.NoError(t, err) + + container := testutil.Container{ + Image: "bitnami/openldap", + ExposedPorts: []string{servicePort}, + Env: map[string]string{ + "LDAP_ADMIN_USERNAME": "manager", + "LDAP_ADMIN_PASSWORD": "secret", + "LDAP_ENABLE_TLS": "yes", + "LDAP_TLS_CA_FILE": "server.pem", + "LDAP_TLS_CERT_FILE": "server.crt", + "LDAP_TLS_KEY_FILE": "server.key", + }, + BindMounts: map[string]string{ + "/server.pem": tlsPem, + "/server.crt": tlsCert, + "/server.key": tlsKey, + }, + WaitingFor: wait.ForAll( + wait.ForLog("slapd starting"), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + err = container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + port, err := strconv.Atoi(container.Ports[servicePort]) + require.NoError(t, err) + + cert, err := filepath.Abs(pki.ClientCertPath()) + require.NoError(t, err) + o := &Openldap{ - Host: testutil.GetLocalHost(), - Port: 389, + Host: container.Address, + Port: port, SSL: "starttls", InsecureSkipVerify: true, + BindDn: "CN=manager,DC=example,DC=org", + BindPassword: "secret", + TLSCA: cert, } var acc testutil.Accumulator - err := o.Gather(&acc) + err = o.Gather(&acc) require.NoError(t, err) commonTests(t, o, &acc) } -func TestOpenldapLDAPS(t *testing.T) { +func TestOpenldapLDAPSIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + pki := testutil.NewPKI("../../../testutil/pki") + + tlsPem, err := filepath.Abs(pki.ServerCertAndKeyPath()) + require.NoError(t, err) + tlsCert, err := filepath.Abs(pki.ServerCertPath()) + require.NoError(t, err) + tlsKey, err := filepath.Abs(pki.ServerKeyPath()) + require.NoError(t, err) + + container := testutil.Container{ + Image: "bitnami/openldap", + ExposedPorts: []string{servicePortSecure}, + Env: map[string]string{ + "LDAP_ADMIN_USERNAME": "manager", + "LDAP_ADMIN_PASSWORD": "secret", + "LDAP_ENABLE_TLS": "yes", + "LDAP_TLS_CA_FILE": "server.pem", + "LDAP_TLS_CERT_FILE": "server.crt", + "LDAP_TLS_KEY_FILE": "server.key", + }, + BindMounts: map[string]string{ + "/server.pem": tlsPem, + "/server.crt": tlsCert, + "/server.key": tlsKey, + }, + WaitingFor: wait.ForAll( + wait.ForLog("slapd starting"), + wait.ForListeningPort(nat.Port(servicePortSecure)), + ), + } + err = container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + port, err := strconv.Atoi(container.Ports[servicePortSecure]) + require.NoError(t, err) + o := &Openldap{ - Host: testutil.GetLocalHost(), - Port: 636, + Host: container.Address, + Port: port, SSL: "ldaps", 
InsecureSkipVerify: true, + BindDn: "CN=manager,DC=example,DC=org", + BindPassword: "secret", } var acc testutil.Accumulator - err := o.Gather(&acc) + err = o.Gather(&acc) require.NoError(t, err) commonTests(t, o, &acc) } -func TestOpenldapInvalidSSL(t *testing.T) { +func TestOpenldapInvalidSSLIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + pki := testutil.NewPKI("../../../testutil/pki") + tlsPem, err := filepath.Abs(pki.ServerCertAndKeyPath()) + require.NoError(t, err) + tlsCert, err := filepath.Abs(pki.ServerCertPath()) + require.NoError(t, err) + tlsKey, err := filepath.Abs(pki.ServerKeyPath()) + require.NoError(t, err) + + container := testutil.Container{ + Image: "bitnami/openldap", + ExposedPorts: []string{servicePortSecure}, + Env: map[string]string{ + "LDAP_ADMIN_USERNAME": "manager", + "LDAP_ADMIN_PASSWORD": "secret", + "LDAP_ENABLE_TLS": "yes", + "LDAP_TLS_CA_FILE": "server.pem", + "LDAP_TLS_CERT_FILE": "server.crt", + "LDAP_TLS_KEY_FILE": "server.key", + }, + BindMounts: map[string]string{ + "/server.pem": tlsPem, + "/server.crt": tlsCert, + "/server.key": tlsKey, + }, + WaitingFor: wait.ForAll( + wait.ForLog("slapd starting"), + wait.ForListeningPort(nat.Port(servicePortSecure)), + ), + } + err = container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + port, err := strconv.Atoi(container.Ports[servicePortSecure]) + require.NoError(t, err) + o := &Openldap{ - Host: testutil.GetLocalHost(), - Port: 636, + Host: container.Address, + Port: port, SSL: "invalid", InsecureSkipVerify: true, } var acc testutil.Accumulator - err := o.Gather(&acc) - require.NoError(t, err) // test that we didn't return an error - assert.Zero(t, acc.NFields()) // test that we didn't return any fields - assert.NotEmpty(t, acc.Errors) // test that we set an error + err = o.Gather(&acc) + require.NoError(t, err) // test that we didn't return an error + require.Zero(t, acc.NFields()) // test that we didn't return any fields + require.NotEmpty(t, acc.Errors) // test that we set an error } -func TestOpenldapBind(t *testing.T) { +func TestOpenldapBindIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := testutil.Container{ + Image: "bitnami/openldap", + ExposedPorts: []string{servicePort}, + Env: map[string]string{ + "LDAP_ADMIN_USERNAME": "manager", + "LDAP_ADMIN_PASSWORD": "secret", + }, + WaitingFor: wait.ForAll( + wait.ForLog("slapd starting"), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + port, err := strconv.Atoi(container.Ports[servicePort]) + require.NoError(t, err) + o := &Openldap{ - Host: testutil.GetLocalHost(), - Port: 389, + Host: container.Address, + Port: port, SSL: "", InsecureSkipVerify: true, - BindDn: "cn=manager,cn=config", + BindDn: "CN=manager,DC=example,DC=org", BindPassword: "secret", } var acc testutil.Accumulator - err := o.Gather(&acc) + err = o.Gather(&acc) require.NoError(t, err) commonTests(t, o, &acc) } func commonTests(t *testing.T, o *Openldap, acc *testutil.Accumulator) { - assert.Empty(t, acc.Errors, "accumulator had no errors") - assert.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") - assert.Equal(t, o.Host, 
acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") - assert.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") - assert.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") + // helpful local commands to run: + // ldapwhoami -D "CN=manager,DC=example,DC=org" -H ldap://localhost:1389 -w secret + // ldapsearch -D "CN=manager,DC=example,DC=org" -H "ldap://localhost:1389" -b cn=Monitor -w secret + require.Empty(t, acc.Errors, "accumulator had no errors") + require.True(t, acc.HasMeasurement("openldap"), "Has a measurement called 'openldap'") + require.Equal(t, o.Host, acc.TagValue("openldap", "server"), "Has a tag value of server=o.Host") + require.Equal(t, strconv.Itoa(o.Port), acc.TagValue("openldap", "port"), "Has a tag value of port=o.Port") + require.True(t, acc.HasInt64Field("openldap", "total_connections"), "Has an integer field called total_connections") } -func TestOpenldapReverseMetrics(t *testing.T) { +func TestOpenldapReverseMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := testutil.Container{ + Image: "bitnami/openldap", + ExposedPorts: []string{servicePort}, + Env: map[string]string{ + "LDAP_ADMIN_USERNAME": "manager", + "LDAP_ADMIN_PASSWORD": "secret", + }, + WaitingFor: wait.ForAll( + wait.ForLog("slapd starting"), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + port, err := strconv.Atoi(container.Ports[servicePort]) + require.NoError(t, err) + o := &Openldap{ - Host: testutil.GetLocalHost(), - Port: 389, + Host: container.Address, + Port: port, SSL: "", InsecureSkipVerify: true, - BindDn: "cn=manager,cn=config", + BindDn: "CN=manager,DC=example,DC=org", BindPassword: "secret", ReverseMetricNames: true, } var acc testutil.Accumulator - err := o.Gather(&acc) + err = o.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") + require.True(t, acc.HasInt64Field("openldap", "connections_total"), "Has an integer field called connections_total") } diff --git a/plugins/inputs/openldap/sample.conf b/plugins/inputs/openldap/sample.conf new file mode 100644 index 0000000000000..9a3e747ff8f5d --- /dev/null +++ b/plugins/inputs/openldap/sample.conf @@ -0,0 +1,23 @@ +# OpenLDAP cn=Monitor plugin +[[inputs.openldap]] + host = "localhost" + port = 389 + + # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. + # note that port will likely need to be changed to 636 for ldaps + # valid options: "" | "starttls" | "ldaps" + tls = "" + + # skip peer certificate verification. Default is false. + insecure_skip_verify = false + + # Path to PEM-encoded Root certificate to use to verify server certificate + tls_ca = "/etc/ssl/certs.pem" + + # dn/password to bind with. If bind_dn is empty, an anonymous bind is performed. 
+ bind_dn = "" + bind_password = "" + + # reverse metric names so they sort more naturally + # Defaults to false if unset, but is set to true when generating a new config + reverse_metric_names = true diff --git a/plugins/inputs/openntpd/README.md b/plugins/inputs/openntpd/README.md index 877c3a46092b1..25eaddd47fdf2 100644 --- a/plugins/inputs/openntpd/README.md +++ b/plugins/inputs/openntpd/README.md @@ -20,9 +20,10 @@ the remote peer or server (RMS, milliseconds); - jitter – Mean deviation (jitter) in the time reported for that remote peer or server (RMS of difference of multiple time samples, milliseconds); -### Configuration +## Configuration -```toml +```toml @sample.conf +# Get standard NTP query metrics from OpenNTPD. [[inputs.openntpd]] ## Run ntpctl binary with sudo. # use_sudo = false @@ -34,7 +35,7 @@ server (RMS of difference of multiple time samples, milliseconds); # timeout = "5ms" ``` -### Metrics +## Metrics - ntpctl - tags: @@ -49,7 +50,7 @@ server (RMS of difference of multiple time samples, milliseconds); - wt (int) - tl (int) -### Permissions +## Permissions It's important to note that this plugin references ntpctl, which may require additional permissions to execute successfully. @@ -57,6 +58,7 @@ Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -69,12 +71,14 @@ telegraf : telegraf ntpd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.openntpd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following lines: @@ -85,9 +89,9 @@ Defaults!NTPCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -### Example Output +## Example Output -``` +```shell openntpd,remote=194.57.169.1,stratum=2,host=localhost tl=10i,poll=1007i, offset=2.295,jitter=3.896,delay=53.766,next=266i,wt=1i 1514454299000000000 ``` diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go index e7723b480a581..d954221340445 100644 --- a/plugins/inputs/openntpd/openntpd.go +++ b/plugins/inputs/openntpd/openntpd.go @@ -1,8 +1,10 @@ +//go:generate ../../../tools/readme_config_includer/generator package openntpd import ( "bufio" "bytes" + _ "embed" "fmt" "os/exec" "strconv" @@ -10,15 +12,14 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -// Mapping of ntpctl header names to tag keys -var tagHeaders = map[string]string{ - "st": "stratum", -} +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string // Mapping of the ntpctl tag key to the index in the command output var tagI = map[string]int{ @@ -40,52 +41,34 @@ var intI = map[string]int{ "poll": 4, } -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) // Openntpd is used to store configuration values type Openntpd struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/ntpctl" -var defaultTimeout = internal.Duration{Duration: 5 * time.Second} - -func (n *Openntpd) Description() string { - return "Get standard NTP query metrics from OpenNTPD." -} - -func (n *Openntpd) SampleConfig() string { - return ` - ## Run ntpctl binary with sudo. - # use_sudo = false - - ## Location of the ntpctl binary. - # binary = "/usr/sbin/ntpctl" - - ## Maximum time the ntpctl binary is allowed to run. - # timeout = "5ms" - ` -} +var defaultTimeout = config.Duration(5 * time.Second) // Shell out to ntpctl and return the output -func openntpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func openntpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { cmdArgs := []string{"-s", "peers"} cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running ntpctl: %s", err) } @@ -93,6 +76,10 @@ func openntpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*b return &out, nil } +func (*Openntpd) SampleConfig() string { + return sampleConfig +} + func (n *Openntpd) Gather(acc telegraf.Accumulator) error { out, err := n.run(n.Binary, n.Timeout, n.UseSudo) if err != nil { @@ -133,8 +120,8 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { fields = strings.Fields(line) // if there is an ntpctl state prefix, remove it and make it it's own tag - if strings.ContainsAny(string(fields[0]), "*") { - tags["state_prefix"] = string(fields[0]) + if strings.ContainsAny(fields[0], "*") { + tags["state_prefix"] = fields[0] fields = fields[1:] } @@ -156,16 +143,13 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { } if key == "next" || key == "poll" { - m, err := strconv.ParseInt(strings.TrimSuffix(fields[index], "s"), 10, 64) if err != nil { acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) continue } mFields[key] = m - } else { - m, err := strconv.ParseInt(fields[index], 10, 64) if err != nil { acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) @@ -185,23 +169,19 @@ func (n *Openntpd) Gather(acc telegraf.Accumulator) error { } if key == "offset" || key == "delay" || key == "jitter" { - m, err := strconv.ParseFloat(strings.TrimSuffix(fields[index], "ms"), 64) if err != nil { acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) continue } mFields[key] = m - } else { - m, err := strconv.ParseFloat(fields[index], 64) if err != nil { acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) continue } mFields[key] = m - } } acc.AddFields("openntpd", mFields, tags) diff --git 
a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go index d629949a533c4..ffca02b31a908 100644 --- a/plugins/inputs/openntpd/openntpd_test.go +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -3,17 +3,15 @@ package openntpd import ( "bytes" "testing" - "time" - "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool) (*bytes.Buffer, error) { +func OpenntpdCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { + return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,15 +19,15 @@ func OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(st func TestParseSimpleOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutput, TestTimeout, false), + run: OpenntpdCTL(simpleOutput), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -52,15 +50,15 @@ func TestParseSimpleOutput(t *testing.T) { func TestParseSimpleOutputwithStatePrefix(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputwithStatePrefix, TestTimeout, false), + run: OpenntpdCTL(simpleOutputwithStatePrefix), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 7) + require.Equal(t, acc.NFields(), 7) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -84,15 +82,15 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) { func TestParseSimpleOutputInvalidPeer(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false), + run: OpenntpdCTL(simpleOutputInvalidPeer), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "wt": int64(1), @@ -112,15 +110,15 @@ func TestParseSimpleOutputInvalidPeer(t *testing.T) { func TestParseSimpleOutputServersDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServersDNSError, TestTimeout, false), + run: OpenntpdCTL(simpleOutputServersDNSError), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + 
require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(2), @@ -154,15 +152,15 @@ func TestParseSimpleOutputServersDNSError(t *testing.T) { func TestParseSimpleOutputServerDNSError(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(simpleOutputServerDNSError, TestTimeout, false), + run: OpenntpdCTL(simpleOutputServerDNSError), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 4) + require.Equal(t, acc.NFields(), 4) firstpeerfields := map[string]interface{}{ "next": int64(12), @@ -182,15 +180,15 @@ func TestParseSimpleOutputServerDNSError(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Openntpd{ - run: OpenntpdCTL(fullOutput, TestTimeout, false), + run: OpenntpdCTL(fullOutput), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("openntpd")) - assert.Equal(t, acc.NMetrics(), uint64(20)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("openntpd")) + require.Equal(t, acc.NMetrics(), uint64(20)) - assert.Equal(t, acc.NFields(), 113) + require.Equal(t, acc.NFields(), 113) firstpeerfields := map[string]interface{}{ "wt": int64(1), diff --git a/plugins/inputs/openntpd/sample.conf b/plugins/inputs/openntpd/sample.conf new file mode 100644 index 0000000000000..b6538c18c0773 --- /dev/null +++ b/plugins/inputs/openntpd/sample.conf @@ -0,0 +1,10 @@ +# Get standard NTP query metrics from OpenNTPD. +[[inputs.openntpd]] + ## Run ntpctl binary with sudo. + # use_sudo = false + + ## Location of the ntpctl binary. + # binary = "/usr/sbin/ntpctl" + + ## Maximum time the ntpctl binary is allowed to run. + # timeout = "5ms" diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md index 5bbd4be89658a..2a8a5fad1f5b8 100644 --- a/plugins/inputs/opensmtpd/README.md +++ b/plugins/inputs/opensmtpd/README.md @@ -1,10 +1,12 @@ # OpenSMTPD Input Plugin -This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server-side SMTP protocol](https://www.opensmtpd.org/) +This plugin gathers stats from [OpenSMTPD - a FREE implementation of the +server-side SMTP protocol](https://www.opensmtpd.org/) -### Configuration: +## Configuration -```toml +```toml @sample.conf +# A plugin to collect stats from OpenSMTPD - a FREE implementation of the server-side SMTP protocol [[inputs.opensmtpd]] ## If running as a restricted user you can prepend sudo for additional access: #use_sudo = false @@ -16,10 +18,10 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server- #timeout = "1s" ``` -### Measurements & Fields: +## Metrics -This is the full list of stats provided by smtpctl and potentially collected by telegram -depending of your smtpctl configuration. +This is the full list of stats provided by smtpctl and potentially collected by +telegraf depending on your smtpctl configuration. - smtpctl bounce_envelope @@ -59,12 +61,15 @@ depending of your smtpctl configuration. smtp_session_local uptime -### Permissions: +## Permissions -It's important to note that this plugin references smtpctl, which may require additional permissions to execute successfully.
-Depending on the user/group permissions of the telegraf user executing this plugin, you may need to alter the group membership, set facls, or use sudo. +It's important to note that this plugin references smtpctl, which may require +additional permissions to execute successfully. Depending on the user/group +permissions of the telegraf user executing this plugin, you may need to alter +the group membership, set facls, or use sudo. **Group membership (Recommended)**: + ```bash $ groups telegraf telegraf : telegraf @@ -77,12 +82,14 @@ telegraf : telegraf opensmtpd **Sudo privileges**: If you use this method, you will need the following in your telegraf config: + ```toml [[inputs.opensmtpd]] use_sudo = true ``` You will also need to update your sudoers file: + ```bash $ visudo # Add the following line: @@ -93,9 +100,9 @@ Defaults!SMTPCTL !logfile, !syslog, !pam_session Please use the solution you see as most appropriate. -### Example Output: +## Example Output -``` +```shell telegraf --config etc/telegraf.conf --input-filter opensmtpd --test * Plugin: inputs.opensmtpd, Collection 1 > opensmtpd,host=localhost scheduler_delivery_tempfail=822,mta_host=10,mta_task_running=4,queue_bounce=13017,scheduler_delivery_permfail=51022,mta_relay=7,queue_evpcache_size=2,scheduler_envelope_expired=26,bounce_message=0,mta_domain=7,queue_evpcache_update_hit=848,smtp_session_local=12294,bounce_envelope=0,queue_evpcache_load_hit=4389703,scheduler_ramqueue_update=0,mta_route=3,scheduler_delivery_ok=2149489,smtp_session_inet4=2131997,control_session=1,scheduler_envelope_incoming=0,uptime=10346728,scheduler_ramqueue_envelope=2,smtp_session=0,bounce_session=0,mta_envelope=2,mta_session=6,mta_task=2,scheduler_ramqueue_message=2,mta_connector=7,mta_source=1,scheduler_envelope=2,scheduler_envelope_inflight=2 1510220300000000000 diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index c3f76f2efa850..c9e5ffe52431d 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -1,8 +1,10 @@ +//go:generate ../../../tools/readme_config_includer/generator package opensmtpd import ( "bufio" "bytes" + _ "embed" "fmt" "os/exec" "strconv" @@ -10,60 +12,44 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +type runner func(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) // Opensmtpd is used to store configuration values type Opensmtpd struct { Binary string - Timeout internal.Duration + Timeout config.Duration UseSudo bool - filter filter.Filter - run runner + run runner } var defaultBinary = "/usr/sbin/smtpctl" -var defaultTimeout = internal.Duration{Duration: time.Second} - -var sampleConfig = ` - ## If running as a restricted user you can prepend sudo for additional access: - #use_sudo = false - - ## The default location of the smtpctl binary can be overridden with: - binary = "/usr/sbin/smtpctl" - - ## The default timeout of 1000ms can be overridden with (in milliseconds): - timeout = 1000 -` - -func (s *Opensmtpd) Description() string { - return "A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver " -} - -// SampleConfig displays configuration instructions -func (s *Opensmtpd) SampleConfig() string { - return sampleConfig -} +var defaultTimeout = config.Duration(time.Second) // Shell out to opensmtpd_stat and return the output -func opensmtpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { +func opensmtpdRunner(cmdName string, timeout config.Duration, useSudo bool) (*bytes.Buffer, error) { cmdArgs := []string{"show", "stats"} cmd := exec.Command(cmdName, cmdArgs...) - if UseSudo { + if useSudo { cmdArgs = append([]string{cmdName}, cmdArgs...) cmd = exec.Command("sudo", cmdArgs...) } var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) + err := internal.RunTimeout(cmd, time.Duration(timeout)) if err != nil { return &out, fmt.Errorf("error running smtpctl: %s", err) } @@ -74,11 +60,15 @@ func opensmtpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (* // Gather collects the configured stats from smtpctl and adds them to the // Accumulator // +func (*Opensmtpd) SampleConfig() string { + return sampleConfig +} + // All the dots in stat name will replaced by underscores. Histogram statistics will not be collected. 
func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { // Always exclude uptime.human statistics - stat_excluded := []string{"uptime.human"} - filter_excluded, err := filter.Compile(stat_excluded) + statExcluded := []string{"uptime.human"} + filterExcluded, err := filter.Compile(statExcluded) if err != nil { return err } @@ -92,7 +82,6 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { fields := make(map[string]interface{}) scanner := bufio.NewScanner(out) for scanner.Scan() { - cols := strings.Split(scanner.Text(), "=") // Check split correctness @@ -104,16 +93,15 @@ func (s *Opensmtpd) Gather(acc telegraf.Accumulator) error { value := cols[1] // Filter value - if filter_excluded.Match(stat) { + if filterExcluded.Match(stat) { continue } - field := strings.Replace(stat, ".", "_", -1) + field := strings.ReplaceAll(stat, ".", "_") fields[field], err = strconv.ParseFloat(value, 64) if err != nil { - acc.AddError(fmt.Errorf("Expected a numerical value for %s = %v\n", - stat, value)) + acc.AddError(fmt.Errorf("expected a numerical value for %s = %v", stat, value)) } } diff --git a/plugins/inputs/opensmtpd/opensmtpd_test.go b/plugins/inputs/opensmtpd/opensmtpd_test.go index 42e978b6c34e7..3b625be51cef2 100644 --- a/plugins/inputs/opensmtpd/opensmtpd_test.go +++ b/plugins/inputs/opensmtpd/opensmtpd_test.go @@ -3,17 +3,15 @@ package opensmtpd import ( "bytes" "testing" - "time" - "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) -var TestTimeout = internal.Duration{Duration: time.Second} - -func SmtpCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool) (*bytes.Buffer, error) { +func SMTPCTL(output string) func(string, config.Duration, bool) (*bytes.Buffer, error) { + return func(string, config.Duration, bool) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,15 +19,15 @@ func SmtpCTL(output string, Timeout internal.Duration, useSudo bool) func(string func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Opensmtpd{ - run: SmtpCTL(fullOutput, TestTimeout, false), + run: SMTPCTL(fullOutput), } err := v.Gather(acc) - assert.NoError(t, err) - assert.True(t, acc.HasMeasurement("opensmtpd")) - assert.Equal(t, acc.NMetrics(), uint64(1)) + require.NoError(t, err) + require.True(t, acc.HasMeasurement("opensmtpd")) + require.Equal(t, acc.NMetrics(), uint64(1)) - assert.Equal(t, acc.NFields(), 36) + require.Equal(t, acc.NFields(), 36) acc.AssertContainsFields(t, "opensmtpd", parsedFullOutput) } diff --git a/plugins/inputs/opensmtpd/sample.conf b/plugins/inputs/opensmtpd/sample.conf new file mode 100644 index 0000000000000..b1794cdb13e0a --- /dev/null +++ b/plugins/inputs/opensmtpd/sample.conf @@ -0,0 +1,10 @@ +# A plugin to collect stats from OpenSMTPD - a FREE implementation of the server-side SMTP protocol + [[inputs.opensmtpd]] + ## If running as a restricted user you can prepend sudo for additional access: + #use_sudo = false + + ## The default location of the smtpctl binary can be overridden with: + binary = "/usr/sbin/smtpctl" + + # The default timeout of 1s can be overridden with: + #timeout = "1s" diff --git a/plugins/inputs/openstack/README.md b/plugins/inputs/openstack/README.md new file mode 100644 index 0000000000000..bc1b44bc2c50e --- /dev/null +++
b/plugins/inputs/openstack/README.md @@ -0,0 +1,371 @@ +# OpenStack Input Plugin + +Collects metrics from the following OpenStack services: + +* CINDER (Block Storage) +* GLANCE (Image service) +* HEAT (Orchestration) +* KEYSTONE (Identity service) +* NEUTRON (Networking) +* NOVA (Compute Service) + +At present this plugin requires the following APIs: + +* blockstorage v2 +* compute v2 +* identity v3 +* networking v2 +* orchestration v1 + +## Configuration and Recommendations + +### Recommendations + +Due to the large number of unique tags that this plugin generates, in order to +keep the cardinality down it is **highly recommended** to use +[modifiers](../../../docs/CONFIGURATION.md#modifiers) like `tagexclude` to +discard unwanted tags. + +For deployments with only a small number of VMs and hosts, a small polling +interval (e.g. seconds-minutes) is acceptable. For larger deployments, polling a +large number of systems will impact performance. Use the `interval` option to +change how often the plugin is run: + +`interval`: How often a metric is gathered. Setting this value at the plugin +level overrides the global agent interval setting. + +Also, consider polling OpenStack services at different intervals depending on +your requirements. This will help with load and cardinality as well. + +```toml +[[inputs.openstack]] + interval = "5m" + .... + authentication_endpoint = "https://my.openstack.cloud:5000" + ... + enabled_services = ["nova_services"] + .... + +[[inputs.openstack]] + interval = "30m" + .... + authentication_endpoint = "https://my.openstack.cloud:5000" + ... + enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + .... +``` + +### Configuration + +```toml @sample.conf +# Collects performance metrics from OpenStack services +[[inputs.openstack]] + ## The recommended interval to poll is '30m' + + ## The identity endpoint to authenticate against and get the service catalog from. + authentication_endpoint = "https://my.openstack.cloud:5000" + + ## The domain to authenticate against when using a V3 identity endpoint. + # domain = "default" + + ## The project to authenticate as. + # project = "admin" + + ## User authentication credentials. Must have admin rights. + username = "admin" + password = "password" + + ## Available services are: + ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", + ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" + # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + + ## Collect Server Diagnostics + # server_diagnotics = false + + ## output secrets (such as adminPass(for server) and UserID(for volume)). + # output_secrets = false + + ## Amount of time allowed to complete the HTTP(s) request. + # timeout = "5s" + + ## HTTP Proxy support + # http_proxy_url = "" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Options for tags received from Openstack + # tag_prefix = "openstack_tag_" + # tag_value = "true" + + ## Timestamp format for timestamp data received from Openstack. + ## If false format is unix nanoseconds.
+ # human_readable_timestamps = false + # + ## Measure Openstack call duration + # measure_openstack_requests = false +``` + +## Metrics + +* openstack_aggregate + * name + * aggregate_host [string] + * aggregate_hosts [integer] + * created_at [string] + * deleted [boolean] + * deleted_at [string] + * id [integer] + * updated_at [string] +* openstack_flavor + * is_public + * name + * disk [integer] + * ephemeral [integer] + * id [string] + * ram [integer] + * rxtx_factor [float] + * swap [integer] + * vcpus [integer] +* openstack_hypervisor + * cpu_arch + * cpu_feature_tsc + * cpu_feature_tsc-deadline + * cpu_feature_tsc_adjust + * cpu_feature_tsx-ctrl + * cpu_feature_vme + * cpu_feature_vmx + * cpu_feature_x2apic + * cpu_feature_xgetbv1 + * cpu_feature_xsave + * cpu_model + * cpu_vendor + * hypervisor_hostname + * hypervisor_type + * hypervisor_version + * service_host + * service_id + * state + * status + * cpu_topology_cores [integer] + * cpu_topology_sockets [integer] + * cpu_topology_threads [integer] + * current_workload [integer] + * disk_available_least [integer] + * free_disk_gb [integer] + * free_ram_mb [integer] + * host_ip [string] + * id [string] + * local_gb [integer] + * local_gb_used [integer] + * memory_mb [integer] + * memory_mb_used [integer] + * running_vms [integer] + * vcpus [integer] + * vcpus_used [integer] +* openstack_identity + * description + * domain_id + * name + * parent_id + * enabled [boolean] + * id [string] + * is_domain [boolean] + * projects [integer] +* openstack_network + * name + * openstack_tags_xyz + * project_id + * status + * tenant_id + * admin_state_up [boolean] + * availability_zone_hints [string] + * created_at [string] + * id [string] + * shared [boolean] + * subnet_id [string] + * subnets [integer] + * updated_at [string] +* openstack_neutron_agent + * agent_host + * agent_type + * availability_zone + * binary + * topic + * admin_state_up [boolean] + * alive [boolean] + * created_at [string] + * heartbeat_timestamp [string] + * id [string] + * resources_synced [boolean] + * started_at [string] +* openstack_nova_service + * host_machine + * name + * state + * status + * zone + * disabled_reason [string] + * forced_down [boolean] + * id [string] + * updated_at [string] +* openstack_port + * device_id + * device_owner + * name + * network_id + * project_id + * status + * tenant_id + * admin_state_up [boolean] + * allowed_address_pairs [integer] + * fixed_ips [integer] + * id [string] + * ip_address [string] + * mac_address [string] + * security_groups [string] + * subnet_id [string] +* openstack_request_duration + * agents [integer] + * aggregates [integer] + * flavors [integer] + * hypervisors [integer] + * networks [integer] + * nova_services [integer] + * ports [integer] + * projects [integer] + * servers [integer] + * stacks [integer] + * storage_pools [integer] + * subnets [integer] + * volumes [integer] +* openstack_server + * flavor + * host_id + * host_name + * image + * key_name + * name + * project + * status + * tenant_id + * user_id + * accessIPv4 [string] + * accessIPv6 [string] + * addresses [integer] + * adminPass [string] + * created [string] + * disk_gb [integer] + * fault_code [integer] + * fault_created [string] + * fault_details [string] + * fault_message [string] + * id [string] + * progress [integer] + * ram_mb [integer] + * security_groups [integer] + * updated [string] + * vcpus [integer] + * volume_id [string] + * volumes_attached [integer] +* openstack_server_diagnostics + * disk_name + * no_of_disks + * no_of_ports + * 
port_name + * server_id + * cpu0_time [float] + * cpu1_time [float] + * cpu2_time [float] + * cpu3_time [float] + * cpu4_time [float] + * cpu5_time [float] + * cpu6_time [float] + * cpu7_time [float] + * disk_errors [float] + * disk_read [float] + * disk_read_req [float] + * disk_write [float] + * disk_write_req [float] + * memory [float] + * memory-actual [float] + * memory-rss [float] + * memory-swap_in [float] + * port_rx [float] + * port_rx_drop [float] + * port_rx_errors [float] + * port_rx_packets [float] + * port_tx [float] + * port_tx_drop [float] + * port_tx_errors [float] + * port_tx_packets [float] +* openstack_service + * name + * service_enabled [boolean] + * service_id [string] +* openstack_storage_pool + * driver_version + * name + * storage_protocol + * vendor_name + * volume_backend_name + * free_capacity_gb [float] + * total_capacity_gb [float] +* openstack_subnet + * cidr + * gateway_ip + * ip_version + * name + * network_id + * openstack_tags_subnet_type_PRV + * project_id + * tenant_id + * allocation_pools [string] + * dhcp_enabled [boolean] + * dns_nameservers [string] + * id [string] +* openstack_volume + * attachment_attachment_id + * attachment_device + * attachment_host_name + * availability_zone + * bootable + * description + * name + * status + * user_id + * volume_type + * attachment_attached_at [string] + * attachment_server_id [string] + * created_at [string] + * encrypted [boolean] + * id [string] + * multiattach [boolean] + * size [integer] + * total_attachments [integer] + * updated_at [string] + +## Example Output + +```text +> openstack_neutron_agent,agent_host=vim2,agent_type=DHCP\ agent,availability_zone=nova,binary=neutron-dhcp-agent,host=telegraf_host,topic=dhcp_agent admin_state_up=true,alive=true,created_at="2021-01-07T03:40:53Z",heartbeat_timestamp="2021-10-14T07:46:40Z",id="17e1e446-d7da-4656-9e32-67d3690a306f",resources_synced=false,started_at="2021-07-02T21:47:42Z" 1634197616000000000 +> openstack_aggregate,host=telegraf_host,name=non-dpdk aggregate_host="vim3",aggregate_hosts=2i,created_at="2021-02-01T18:28:00Z",deleted=false,deleted_at="0001-01-01T00:00:00Z",id=3i,updated_at="0001-01-01T00:00:00Z" 1634197617000000000 +> openstack_flavor,host=telegraf_host,is_public=true,name=hwflavor disk=20i,ephemeral=0i,id="f89785c0-6b9f-47f5-a02e-f0fcbb223163",ram=8192i,rxtx_factor=1,swap=0i,vcpus=8i 1634197617000000000 +> openstack_hypervisor,cpu_arch=x86_64,cpu_feature_3dnowprefetch=true,cpu_feature_abm=true,cpu_feature_acpi=true,cpu_feature_adx=true,cpu_feature_aes=true,cpu_feature_apic=true,cpu_feature_xtpr=true,cpu_model=C-Server,cpu_vendor=xyz,host=telegraf_host,hypervisor_hostname=vim3,hypervisor_type=QEMU,hypervisor_version=4002000,service_host=vim3,service_id=192,state=up,status=enabled cpu_topology_cores=28i,cpu_topology_sockets=1i,cpu_topology_threads=2i,current_workload=0i,disk_available_least=2596i,free_disk_gb=2744i,free_ram_mb=374092i,host_ip="xx:xx:xx:x::xxx",id="12",local_gb=3366i,local_gb_used=622i,memory_mb=515404i,memory_mb_used=141312i,running_vms=15i,vcpus=0i,vcpus_used=72i 1634197618000000000 +> openstack_network,host=telegraf_host,name=Network\ 2,project_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,status=active,tenant_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx admin_state_up=true,availability_zone_hints="",created_at="2021-07-29T15:58:25Z",id="f5af5e71-e890-4245-a377-d4d86273c319",shared=false,subnet_id="2f7341c6-074d-42aa-9abc-71c662d9b336",subnets=1i,updated_at="2021-09-02T16:46:48Z" 1634197618000000000 +> 
openstack_nova_service,host=telegraf_host,host_machine=vim3,name=nova-compute,state=up,status=enabled,zone=nova disabled_reason="",forced_down=false,id="192",updated_at="2021-10-14T07:46:52Z" 1634197619000000000 +> openstack_port,device_id=a043b8b3-2831-462a-bba8-19088f3db45a,device_owner=compute:nova,host=telegraf_host,name=offload-port1,network_id=6b40d744-9a48-43f2-a4c8-2e0ccb45ac96,project_id=71f9bc44621234f8af99a3949258fc7b,status=ACTIVE,tenant_id=71f9bc44621234f8af99a3949258fc7b admin_state_up=true,allowed_address_pairs=0i,fixed_ips=1i,id="fb64626a-07e1-4d78-a70d-900e989537cc",ip_address="1.1.1.5",mac_address="xx:xx:xx:xx:xx:xx",security_groups="",subnet_id="eafa1eca-b318-4746-a55a-682478466689" 1634197620000000000 +> openstack_identity,domain_id=default,host=telegraf_host,name=service,parent_id=default enabled=true,id="a0877dd2ed1d4b5f952f5689bc04b0cb",is_domain=false,projects=7i 1634197621000000000 +> openstack_server,flavor=0d438971-56cf-4f86-801f-7b04b29384cb,host=telegraf_host,host_id=c0fe05b14261d35cf8748a3f5aae1234b88c2fd62b69fe24ca4a27e9,host_name=vim1,image=b295f1f3-1w23-470c-8734-197676eedd16,name=test-VM7,project=admin,status=active,tenant_id=80ac889731f540498fb1dc78e4bcd5ed,user_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx accessIPv4="",accessIPv6="",addresses=1i,adminPass="",created="2021-09-07T14:40:11Z",disk_gb=8i,fault_code=0i,fault_created="0001-01-01T00:00:00Z",fault_details="",fault_message="",id="db92ee0d-459b-458e-9fe3-2be5ec7c87e1",progress=0i,ram_mb=16384i,security_groups=1i,updated="2021-09-07T14:40:19Z",vcpus=4i,volumes_attached=0i 1634197656000000000 +> openstack_service,host=telegraf_host,name=identity service_enabled=true,service_id="ad605eff92444a158d0f78768f2c4668" 1634197656000000000 +> openstack_storage_pool,driver_version=1.0.0,host=telegraf_host,name=storage_bloack_1,storage_protocol=nfs,vendor_name=xyz,volume_backend_name=abc free_capacity_gb=4847.54,total_capacity_gb=4864 1634197658000000000 +> openstack_subnet,cidr=10.10.20.10/28,gateway_ip=10.10.20.17,host=telegraf_host,ip_version=4,name=IPv4_Subnet_2,network_id=73c6e1d3-f522-4a3f-8e3c-762a0c06d68b,openstack_tags_lab=True,project_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,tenant_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx allocation_pools="10.10.20.11-10.10.20.30",dhcp_enabled=true,dns_nameservers="",id="db69fbb2-9ca1-4370-8c78-82a27951c94b" 1634197660000000000 +> openstack_volume,attachment_attachment_id=c83ca0d6-c467-44a0-ac1f-f87d769c0c65,attachment_device=/dev/vda,attachment_host_name=vim1,availability_zone=nova,bootable=true,host=telegraf_host,status=in-use,user_id=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,volume_type=storage_bloack_1 attachment_attached_at="2021-01-12T21:02:04Z",attachment_server_id="c0c6b4af-0d26-4a0b-a6b4-4ea41fa3bb4a",created_at="2021-01-12T21:01:47Z",encrypted=false,id="d4204f1b-b1ae-1233-b25c-a57d91d2846e",multiattach=false,size=80i,total_attachments=1i,updated_at="2021-01-12T21:02:04Z" 1634197660000000000 +> openstack_request_duration,host=telegraf_host networks=703214354i 1634197660000000000 +> openstack_server_diagnostics,disk_name=vda,host=telegraf_host,no_of_disks=1,no_of_ports=2,port_name=vhu1234566c-9c,server_id=fdddb58c-bbb9-1234-894b-7ae140178909 
cpu0_time=4924220000000,cpu1_time=218809610000000,cpu2_time=218624300000000,cpu3_time=220505700000000,disk_errors=-1,disk_read=619156992,disk_read_req=35423,disk_write=8432728064,disk_write_req=882445,memory=8388608,memory-actual=8388608,memory-rss=37276,memory-swap_in=0,port_rx=410516469288,port_rx_drop=13373626,port_rx_errors=-1,port_rx_packets=52140392,port_tx=417312195654,port_tx_drop=0,port_tx_errors=0,port_tx_packets=321385978 1634197660000000000 +``` diff --git a/plugins/inputs/openstack/openstack.go b/plugins/inputs/openstack/openstack.go new file mode 100644 index 0000000000000..c8bf93b854a96 --- /dev/null +++ b/plugins/inputs/openstack/openstack.go @@ -0,0 +1,904 @@ +//go:generate ../../../tools/readme_config_includer/generator +// Package openstack implements an OpenStack input plugin for Telegraf +// +// The OpenStack input plugin is a simple two-phase metric collector. In the first +// phase a set of gatherers are run against the API to cache collections of resources. +// In the second phase the gathered resources are combined and emitted as metrics. +// +// No aggregation is performed by the input plugin; instead, queries to InfluxDB should +// be used to gather global totals of things such as tag frequency. +package openstack + +import ( + "context" + _ "embed" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diagnostics" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" + nova_services "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/openstack/identity/v3/services" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/agents" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/choice" + httpconfig "github.com/influxdata/telegraf/plugins/common/http" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +var ( + typePort = regexp.MustCompile(`_rx$|_rx_drop$|_rx_errors$|_rx_packets$|_tx$|_tx_drop$|_tx_errors$|_tx_packets$`) + typeCPU = regexp.MustCompile(`cpu[0-9]{1,2}_time$`) + typeStorage = regexp.MustCompile(`_errors$|_read$|_read_req$|_write$|_write_req$`) +) + +// volume is a structure used to unmarshal raw JSON from the API into. +type volume struct { + volumes.Volume + volumetenants.VolumeTenantExt +} + +// OpenStack is the main structure associated with a collection instance.
+type OpenStack struct { + // Configuration variables + IdentityEndpoint string `toml:"authentication_endpoint"` + Domain string `toml:"domain"` + Project string `toml:"project"` + Username string `toml:"username"` + Password string `toml:"password"` + EnabledServices []string `toml:"enabled_services"` + ServerDiagnotics bool `toml:"server_diagnotics"` + OutputSecrets bool `toml:"output_secrets"` + TagPrefix string `toml:"tag_prefix"` + TagValue string `toml:"tag_value"` + HumanReadableTS bool `toml:"human_readable_timestamps"` + MeasureRequest bool `toml:"measure_openstack_requests"` + Log telegraf.Logger `toml:"-"` + httpconfig.HTTPClientConfig + + // Locally cached clients + identity *gophercloud.ServiceClient + compute *gophercloud.ServiceClient + volume *gophercloud.ServiceClient + network *gophercloud.ServiceClient + stack *gophercloud.ServiceClient + + // Locally cached resources + openstackFlavors map[string]flavors.Flavor + openstackHypervisors []hypervisors.Hypervisor + diag map[string]interface{} + openstackProjects map[string]projects.Project + openstackServices map[string]services.Service +} + +// containsService indicates whether a service of the given type is present in the catalog +func (o *OpenStack) containsService(t string) bool { + for _, service := range o.openstackServices { + if service.Type == t { + return true + } + } + + return false +} + +// convertTimeFormat converts a timestamp to either a human-readable string or Unix nanoseconds, depending on HumanReadableTS +func (o *OpenStack) convertTimeFormat(t time.Time) interface{} { + if o.HumanReadableTS { + return t.Format("2006-01-02T15:04:05.999999999Z07:00") + } + return t.UnixNano() +} + +func (*OpenStack) SampleConfig() string { + return sampleConfig +} + +// Init performs any necessary initialization +func (o *OpenStack) Init() error { + if len(o.EnabledServices) == 0 { + o.EnabledServices = []string{"services", "projects", "hypervisors", "flavors", "networks", "volumes"} + } + if o.Username == "" || o.Password == "" { + return fmt.Errorf("username or password cannot be an empty string") + } + if o.TagValue == "" { + return fmt.Errorf("tag_value option cannot be an empty string") + } + sort.Strings(o.EnabledServices) + o.openstackFlavors = map[string]flavors.Flavor{} + o.openstackHypervisors = []hypervisors.Hypervisor{} + o.diag = map[string]interface{}{} + o.openstackProjects = map[string]projects.Project{} + o.openstackServices = map[string]services.Service{} + + // Authenticate against Keystone and get a token provider + authOption := gophercloud.AuthOptions{ + IdentityEndpoint: o.IdentityEndpoint, + DomainName: o.Domain, + TenantName: o.Project, + Username: o.Username, + Password: o.Password, + } + provider, err := openstack.NewClient(authOption.IdentityEndpoint) + if err != nil { + return fmt.Errorf("unable to create client for OpenStack endpoint %v", err) + } + + ctx := context.Background() + client, err := o.HTTPClientConfig.CreateClient(ctx, o.Log) + if err != nil { + return err + } + + provider.HTTPClient = *client + + if err := openstack.Authenticate(provider, authOption); err != nil { + return fmt.Errorf("unable to authenticate OpenStack user %v", err) + } + + // Create required clients and attach to the OpenStack struct + if o.identity, err = openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V3 identity client %v", err) + } + + if err := o.gatherServices(); err != nil { + return fmt.Errorf("failed to get resource openstack services %v", err) + } + + if o.compute, err = openstack.NewComputeV2(provider,
gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 compute client %v", err) + } + + // Create required clients and attach to the OpenStack struct + if o.network, err = openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 network client %v", err) + } + + // The Orchestration service is optional + if o.containsService("orchestration") { + if o.stack, err = openstack.NewOrchestrationV1(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V1 stack client %v", err) + } + } + + // The Cinder volume storage service is optional + if o.containsService("volumev2") { + if o.volume, err = openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{}); err != nil { + return fmt.Errorf("unable to create V2 volume client %v", err) + } + } + + return nil +} + +// Gather gathers resources from the OpenStack API and accumulates metrics. This +// implements the Input interface. +func (o *OpenStack) Gather(acc telegraf.Accumulator) error { + // Gather resources. Note service harvesting must come first as the other + // gatherers are dependent on this information. + gatherers := map[string]func(telegraf.Accumulator) error{ + "projects": o.gatherProjects, + "hypervisors": o.gatherHypervisors, + "flavors": o.gatherFlavors, + "servers": o.gatherServers, + "volumes": o.gatherVolumes, + "storage_pools": o.gatherStoragePools, + "subnets": o.gatherSubnets, + "ports": o.gatherPorts, + "networks": o.gatherNetworks, + "aggregates": o.gatherAggregates, + "nova_services": o.gatherNovaServices, + "agents": o.gatherAgents, + "stacks": o.gatherStacks, + } + + callDuration := map[string]interface{}{} + for _, service := range o.EnabledServices { + // Services are already gathered in Init(), so they only need to be accumulated here. + if service == "services" { + o.accumulateServices(acc) + continue + } + start := time.Now() + gatherer := gatherers[service] + if err := gatherer(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource %q %v", service, err)) + } + callDuration[service] = time.Since(start).Nanoseconds() + } + + if o.MeasureRequest { + for service, duration := range callDuration { + acc.AddFields("openstack_request_duration", map[string]interface{}{service: duration}, map[string]string{}) + } + } + + if o.ServerDiagnotics { + if !choice.Contains("servers", o.EnabledServices) { + if err := o.gatherServers(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource server diagnostics %v", err)) + return nil + } + } + o.accumulateServerDiagnostics(acc) + } + + return nil +} + +// gatherServices collects services from the OpenStack API. +func (o *OpenStack) gatherServices() error { + page, err := services.List(o.identity, &services.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list services %v", err) + } + extractedServices, err := services.ExtractServices(page) + if err != nil { + return fmt.Errorf("unable to extract services %v", err) + } + for _, service := range extractedServices { + o.openstackServices[service.ID] = service + } + + return nil +} + +// gatherStacks collects and accumulates stacks data from the OpenStack API.
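+// Each stack becomes a single `openstack_stack` point. User-defined stack
+// tags are passed through TagPrefix/TagValue, so with the default settings a
+// stack tag such as `lab` would surface on the point as the metric tag
+// `openstack_tag_lab=true` (illustrative example, not taken from a live
+// deployment).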
+func (o *OpenStack) gatherStacks(acc telegraf.Accumulator) error { + page, err := stacks.List(o.stack, &stacks.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list stacks %v", err) + } + extractedStacks, err := stacks.ExtractStacks(page) + if err != nil { + return fmt.Errorf("unable to extract stacks %v", err) + } + for _, stack := range extractedStacks { + tags := map[string]string{ + "description": stack.Description, + "name": stack.Name, + } + for _, stackTag := range stack.Tags { + tags[o.TagPrefix+stackTag] = o.TagValue + } + fields := map[string]interface{}{ + "status": strings.ToLower(stack.Status), + "id": stack.ID, + "status_reason": stack.StatusReason, + "creation_time": o.convertTimeFormat(stack.CreationTime), + "updated_time": o.convertTimeFormat(stack.UpdatedTime), + } + acc.AddFields("openstack_stack", fields, tags) + } + + return nil +} + +// gatherNovaServices collects and accumulates nova_services data from the OpenStack API. +func (o *OpenStack) gatherNovaServices(acc telegraf.Accumulator) error { + page, err := nova_services.List(o.compute, &nova_services.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list nova_services %v", err) + } + novaServices, err := nova_services.ExtractServices(page) + if err != nil { + return fmt.Errorf("unable to extract nova_services %v", err) + } + for _, novaService := range novaServices { + tags := map[string]string{ + "name": novaService.Binary, + "host_machine": novaService.Host, + "state": novaService.State, + "status": strings.ToLower(novaService.Status), + "zone": novaService.Zone, + } + fields := map[string]interface{}{ + "id": novaService.ID, + "disabled_reason": novaService.DisabledReason, + "forced_down": novaService.ForcedDown, + "updated_at": o.convertTimeFormat(novaService.UpdatedAt), + } + acc.AddFields("openstack_nova_service", fields, tags) + } + + return nil +} + +// gatherSubnets collects and accumulates subnets data from the OpenStack API. +func (o *OpenStack) gatherSubnets(acc telegraf.Accumulator) error { + page, err := subnets.List(o.network, &subnets.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list subnets %v", err) + } + extractedSubnets, err := subnets.ExtractSubnets(page) + if err != nil { + return fmt.Errorf("unable to extract subnets %v", err) + } + for _, subnet := range extractedSubnets { + var allocationPools []string + for _, pool := range subnet.AllocationPools { + allocationPools = append(allocationPools, pool.Start+"-"+pool.End) + } + tags := map[string]string{ + "network_id": subnet.NetworkID, + "name": subnet.Name, + "description": subnet.Description, + "ip_version": strconv.Itoa(subnet.IPVersion), + "cidr": subnet.CIDR, + "gateway_ip": subnet.GatewayIP, + "tenant_id": subnet.TenantID, + "project_id": subnet.ProjectID, + "ipv6_address_mode": subnet.IPv6AddressMode, + "ipv6_ra_mode": subnet.IPv6RAMode, + "subnet_pool_id": subnet.SubnetPoolID, + } + for _, subnetTag := range subnet.Tags { + tags[o.TagPrefix+subnetTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": subnet.ID, + "dhcp_enabled": subnet.EnableDHCP, + "dns_nameservers": strings.Join(subnet.DNSNameservers[:], ","), + "allocation_pools": strings.Join(allocationPools[:], ","), + } + acc.AddFields("openstack_subnet", fields, tags) + } + return nil +} + +// gatherPorts collects and accumulates ports data from the OpenStack API. 
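+// A port with multiple fixed IPs is emitted once per IP, re-using the shared
+// tag and field set and overwriting `subnet_id`/`ip_address` on each point;
+// a port without fixed IPs is emitted as a single point.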
+func (o *OpenStack) gatherPorts(acc telegraf.Accumulator) error { + page, err := ports.List(o.network, &ports.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list ports %v", err) + } + extractedPorts, err := ports.ExtractPorts(page) + if err != nil { + return fmt.Errorf("unable to extract ports %v", err) + } + for _, port := range extractedPorts { + tags := map[string]string{ + "network_id": port.NetworkID, + "name": port.Name, + "description": port.Description, + "status": strings.ToLower(port.Status), + "tenant_id": port.TenantID, + "project_id": port.ProjectID, + "device_owner": port.DeviceOwner, + "device_id": port.DeviceID, + } + for _, portTag := range port.Tags { + tags[o.TagPrefix+portTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": port.ID, + "mac_address": port.MACAddress, + "admin_state_up": port.AdminStateUp, + "fixed_ips": len(port.FixedIPs), + "allowed_address_pairs": len(port.AllowedAddressPairs), + "security_groups": strings.Join(port.SecurityGroups[:], ","), + } + if len(port.FixedIPs) > 0 { + for _, ip := range port.FixedIPs { + fields["subnet_id"] = ip.SubnetID + fields["ip_address"] = ip.IPAddress + acc.AddFields("openstack_port", fields, tags) + } + } else { + acc.AddFields("openstack_port", fields, tags) + } + } + return nil +} + +// gatherNetworks collects and accumulates networks data from the OpenStack API. +func (o *OpenStack) gatherNetworks(acc telegraf.Accumulator) error { + page, err := networks.List(o.network, &networks.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list networks %v", err) + } + extractedNetworks, err := networks.ExtractNetworks(page) + if err != nil { + return fmt.Errorf("unable to extract networks %v", err) + } + for _, network := range extractedNetworks { + tags := map[string]string{ + "name": network.Name, + "description": network.Description, + "status": strings.ToLower(network.Status), + "tenant_id": network.TenantID, + "project_id": network.ProjectID, + } + for _, networkTag := range network.Tags { + tags[o.TagPrefix+networkTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": network.ID, + "admin_state_up": network.AdminStateUp, + "subnets": len(network.Subnets), + "shared": network.Shared, + "availability_zone_hints": strings.Join(network.AvailabilityZoneHints[:], ","), + "updated_at": o.convertTimeFormat(network.UpdatedAt), + "created_at": o.convertTimeFormat(network.CreatedAt), + } + if len(network.Subnets) > 0 { + for _, subnet := range network.Subnets { + fields["subnet_id"] = subnet + acc.AddFields("openstack_network", fields, tags) + } + } else { + acc.AddFields("openstack_network", fields, tags) + } + } + return nil +} + +// gatherAgents collects and accumulates agents data from the OpenStack API. 
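+// Each neutron agent becomes one `openstack_neutron_agent` point, tagged by
+// agent_type, binary, availability_zone, agent_host and topic, with
+// liveness and admin state reported as fields.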
+func (o *OpenStack) gatherAgents(acc telegraf.Accumulator) error { + page, err := agents.List(o.network, &agents.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list neutron agents %v", err) + } + extractedAgents, err := agents.ExtractAgents(page) + if err != nil { + return fmt.Errorf("unable to extract neutron agents %v", err) + } + for _, agent := range extractedAgents { + tags := map[string]string{ + "agent_type": agent.AgentType, + "availability_zone": agent.AvailabilityZone, + "binary": agent.Binary, + "description": agent.Description, + "agent_host": agent.Host, + "topic": agent.Topic, + } + fields := map[string]interface{}{ + "id": agent.ID, + "admin_state_up": agent.AdminStateUp, + "alive": agent.Alive, + "resources_synced": agent.ResourcesSynced, + "created_at": o.convertTimeFormat(agent.CreatedAt), + "started_at": o.convertTimeFormat(agent.StartedAt), + "heartbeat_timestamp": o.convertTimeFormat(agent.HeartbeatTimestamp), + } + acc.AddFields("openstack_neutron_agent", fields, tags) + } + return nil +} + +// gatherAggregates collects and accumulates aggregates data from the OpenStack API. +func (o *OpenStack) gatherAggregates(acc telegraf.Accumulator) error { + page, err := aggregates.List(o.compute).AllPages() + if err != nil { + return fmt.Errorf("unable to list aggregates %v", err) + } + extractedAggregates, err := aggregates.ExtractAggregates(page) + if err != nil { + return fmt.Errorf("unable to extract aggregates %v", err) + } + for _, aggregate := range extractedAggregates { + tags := map[string]string{ + "availability_zone": aggregate.AvailabilityZone, + "name": aggregate.Name, + } + fields := map[string]interface{}{ + "id": aggregate.ID, + "aggregate_hosts": len(aggregate.Hosts), + "deleted": aggregate.Deleted, + "created_at": o.convertTimeFormat(aggregate.CreatedAt), + "updated_at": o.convertTimeFormat(aggregate.UpdatedAt), + "deleted_at": o.convertTimeFormat(aggregate.DeletedAt), + } + if len(aggregate.Hosts) > 0 { + for _, host := range aggregate.Hosts { + fields["aggregate_host"] = host + acc.AddFields("openstack_aggregate", fields, tags) + } + } else { + acc.AddFields("openstack_aggregate", fields, tags) + } + } + return nil +} + +// gatherProjects collects and accumulates projects data from the OpenStack API. +func (o *OpenStack) gatherProjects(acc telegraf.Accumulator) error { + page, err := projects.List(o.identity, &projects.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list projects %v", err) + } + extractedProjects, err := projects.ExtractProjects(page) + if err != nil { + return fmt.Errorf("unable to extract projects %v", err) + } + for _, project := range extractedProjects { + o.openstackProjects[project.ID] = project + tags := map[string]string{ + "description": project.Description, + "domain_id": project.DomainID, + "name": project.Name, + "parent_id": project.ParentID, + } + for _, projectTag := range project.Tags { + tags[o.TagPrefix+projectTag] = o.TagValue + } + fields := map[string]interface{}{ + "id": project.ID, + "is_domain": project.IsDomain, + "enabled": project.Enabled, + "projects": len(extractedProjects), + } + acc.AddFields("openstack_identity", fields, tags) + } + return nil +} + +// gatherHypervisors collects and accumulates hypervisors data from the OpenStack API. 
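+// The hypervisor list is always fetched and cached on the plugin, because
+// gatherServers iterates the cached hypervisors to scope its server listing;
+// `openstack_hypervisor` points are only emitted when "hypervisors" is
+// present in enabled_services.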
+func (o *OpenStack) gatherHypervisors(acc telegraf.Accumulator) error { + page, err := hypervisors.List(o.compute, hypervisors.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list hypervisors %v", err) + } + extractedHypervisors, err := hypervisors.ExtractHypervisors(page) + if err != nil { + return fmt.Errorf("unable to extract hypervisors %v", err) + } + o.openstackHypervisors = extractedHypervisors + if choice.Contains("hypervisors", o.EnabledServices) { + for _, hypervisor := range extractedHypervisors { + tags := map[string]string{ + "cpu_vendor": hypervisor.CPUInfo.Vendor, + "cpu_arch": hypervisor.CPUInfo.Arch, + "cpu_model": hypervisor.CPUInfo.Model, + "status": strings.ToLower(hypervisor.Status), + "state": hypervisor.State, + "hypervisor_hostname": hypervisor.HypervisorHostname, + "hypervisor_type": hypervisor.HypervisorType, + "hypervisor_version": strconv.Itoa(hypervisor.HypervisorVersion), + "service_host": hypervisor.Service.Host, + "service_id": hypervisor.Service.ID, + "service_disabled_reason": hypervisor.Service.DisabledReason, + } + for _, cpuFeature := range hypervisor.CPUInfo.Features { + tags["cpu_feature_"+cpuFeature] = "true" + } + fields := map[string]interface{}{ + "id": hypervisor.ID, + "host_ip": hypervisor.HostIP, + "cpu_topology_sockets": hypervisor.CPUInfo.Topology.Sockets, + "cpu_topology_cores": hypervisor.CPUInfo.Topology.Cores, + "cpu_topology_threads": hypervisor.CPUInfo.Topology.Threads, + "current_workload": hypervisor.CurrentWorkload, + "disk_available_least": hypervisor.DiskAvailableLeast, + "free_disk_gb": hypervisor.FreeDiskGB, + "free_ram_mb": hypervisor.FreeRamMB, + "local_gb": hypervisor.LocalGB, + "local_gb_used": hypervisor.LocalGBUsed, + "memory_mb": hypervisor.MemoryMB, + "memory_mb_used": hypervisor.MemoryMBUsed, + "running_vms": hypervisor.RunningVMs, + "vcpus": hypervisor.VCPUs, + "vcpus_used": hypervisor.VCPUsUsed, + } + acc.AddFields("openstack_hypervisor", fields, tags) + } + } + return nil +} + +// gatherFlavors collects and accumulates flavors data from the OpenStack API. +func (o *OpenStack) gatherFlavors(acc telegraf.Accumulator) error { + page, err := flavors.ListDetail(o.compute, &flavors.ListOpts{}).AllPages() + if err != nil { + return fmt.Errorf("unable to list flavors %v", err) + } + extractedflavors, err := flavors.ExtractFlavors(page) + if err != nil { + return fmt.Errorf("unable to extract flavors %v", err) + } + for _, flavor := range extractedflavors { + o.openstackFlavors[flavor.ID] = flavor + tags := map[string]string{ + "name": flavor.Name, + "is_public": strconv.FormatBool(flavor.IsPublic), + } + fields := map[string]interface{}{ + "id": flavor.ID, + "disk": flavor.Disk, + "ram": flavor.RAM, + "rxtx_factor": flavor.RxTxFactor, + "swap": flavor.Swap, + "vcpus": flavor.VCPUs, + "ephemeral": flavor.Ephemeral, + } + acc.AddFields("openstack_flavor", fields, tags) + } + return nil +} + +// gatherVolumes collects and accumulates volumes data from the OpenStack API. 
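+// As with ports, a volume with multiple attachments is emitted once per
+// attachment; the user_id tag is only added when output_secrets is enabled.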
+func (o *OpenStack) gatherVolumes(acc telegraf.Accumulator) error { + page, err := volumes.List(o.volume, &volumes.ListOpts{AllTenants: true}).AllPages() + if err != nil { + return fmt.Errorf("unable to list volumes %v", err) + } + v := []volume{} + if err := volumes.ExtractVolumesInto(page, &v); err != nil { + return fmt.Errorf("unable to extract volumes %v", err) + } + for _, volume := range v { + tags := map[string]string{ + "status": strings.ToLower(volume.Status), + "availability_zone": volume.AvailabilityZone, + "name": volume.Name, + "description": volume.Description, + "volume_type": volume.VolumeType, + "snapshot_id": volume.SnapshotID, + "source_volid": volume.SourceVolID, + "bootable": volume.Bootable, + "replication_status": strings.ToLower(volume.ReplicationStatus), + "consistency_group_id": volume.ConsistencyGroupID, + } + fields := map[string]interface{}{ + "id": volume.ID, + "size": volume.Size, + "total_attachments": len(volume.Attachments), + "encrypted": volume.Encrypted, + "multiattach": volume.Multiattach, + "created_at": o.convertTimeFormat(volume.CreatedAt), + "updated_at": o.convertTimeFormat(volume.UpdatedAt), + } + if o.OutputSecrets { + tags["user_id"] = volume.UserID + } + if len(volume.Attachments) > 0 { + for _, attachment := range volume.Attachments { + if !o.HumanReadableTS { + fields["attachment_attached_at"] = attachment.AttachedAt.UnixNano() + } else { + fields["attachment_attached_at"] = attachment.AttachedAt.Format("2006-01-02T15:04:05.999999999Z07:00") + } + tags["attachment_attachment_id"] = attachment.AttachmentID + tags["attachment_device"] = attachment.Device + tags["attachment_host_name"] = attachment.HostName + fields["attachment_server_id"] = attachment.ServerID + acc.AddFields("openstack_volume", fields, tags) + } + } else { + acc.AddFields("openstack_volume", fields, tags) + } + } + return nil +} + +// gatherStoragePools collects and accumulates storage pools data from the OpenStack API. +func (o *OpenStack) gatherStoragePools(acc telegraf.Accumulator) error { + results, err := schedulerstats.List(o.volume, &schedulerstats.ListOpts{Detail: true}).AllPages() + if err != nil { + return fmt.Errorf("unable to list storage pools %v", err) + } + storagePools, err := schedulerstats.ExtractStoragePools(results) + if err != nil { + return fmt.Errorf("unable to extract storage pools %v", err) + } + for _, storagePool := range storagePools { + tags := map[string]string{ + "name": storagePool.Capabilities.VolumeBackendName, + "driver_version": storagePool.Capabilities.DriverVersion, + "storage_protocol": storagePool.Capabilities.StorageProtocol, + "vendor_name": storagePool.Capabilities.VendorName, + "volume_backend_name": storagePool.Capabilities.VolumeBackendName, + } + fields := map[string]interface{}{ + "total_capacity_gb": storagePool.Capabilities.TotalCapacityGB, + "free_capacity_gb": storagePool.Capabilities.FreeCapacityGB, + } + acc.AddFields("openstack_storage_pool", fields, tags) + } + return nil +} + +// gatherServers collects servers from the OpenStack API. 
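+// Hypervisors are gathered first when not already enabled, since the server
+// listing is scoped per hypervisor host. With server_diagnotics enabled,
+// diagnostics are additionally fetched for every ACTIVE server and cached
+// for accumulateServerDiagnostics.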
+func (o *OpenStack) gatherServers(acc telegraf.Accumulator) error { + if !choice.Contains("hypervisors", o.EnabledServices) { + if err := o.gatherHypervisors(acc); err != nil { + acc.AddError(fmt.Errorf("failed to get resource hypervisors %v", err)) + } + } + serverGather := choice.Contains("servers", o.EnabledServices) + for _, hypervisor := range o.openstackHypervisors { + page, err := servers.List(o.compute, &servers.ListOpts{AllTenants: true, Host: hypervisor.HypervisorHostname}).AllPages() + if err != nil { + return fmt.Errorf("unable to list servers %v", err) + } + extractedServers, err := servers.ExtractServers(page) + if err != nil { + return fmt.Errorf("unable to extract servers %v", err) + } + for _, server := range extractedServers { + if serverGather { + o.accumulateServer(acc, server, hypervisor.HypervisorHostname) + } + if !o.ServerDiagnotics || server.Status != "ACTIVE" { + continue + } + diagnostic, err := diagnostics.Get(o.compute, server.ID).Extract() + if err != nil { + acc.AddError(fmt.Errorf("unable to get diagnostics for server(%v) %v", server.ID, err)) + continue + } + o.diag[server.ID] = diagnostic + } + } + return nil +} + +// accumulateServices accumulates statistics of services. +func (o *OpenStack) accumulateServices(acc telegraf.Accumulator) { + for _, service := range o.openstackServices { + tags := map[string]string{ + "name": service.Type, + } + fields := map[string]interface{}{ + "service_id": service.ID, + "service_enabled": service.Enabled, + } + acc.AddFields("openstack_service", fields, tags) + } +} + +// accumulateServer accumulates statistics of a server. +func (o *OpenStack) accumulateServer(acc telegraf.Accumulator, server servers.Server, hostName string) { + tags := map[string]string{} + // Extract the flavor details to avoid joins (ignore errors and leave as zero values) + var vcpus, ram, disk int + if flavorIDInterface, ok := server.Flavor["id"]; ok { + if flavorID, ok := flavorIDInterface.(string); ok { + tags["flavor"] = flavorID + if flavor, ok := o.openstackFlavors[flavorID]; ok { + vcpus = flavor.VCPUs + ram = flavor.RAM + disk = flavor.Disk + } + } + } + if imageIDInterface, ok := server.Image["id"]; ok { + if imageID, ok := imageIDInterface.(string); ok { + tags["image"] = imageID + } + } + // Try to derive the associated project + project := "unknown" + if p, ok := o.openstackProjects[server.TenantID]; ok { + project = p.Name + } + tags["tenant_id"] = server.TenantID + tags["name"] = server.Name + tags["host_id"] = server.HostID + tags["status"] = strings.ToLower(server.Status) + tags["key_name"] = server.KeyName + tags["host_name"] = hostName + tags["project"] = project + fields := map[string]interface{}{ + "id": server.ID, + "progress": server.Progress, + "accessIPv4": server.AccessIPv4, + "accessIPv6": server.AccessIPv6, + "addresses": len(server.Addresses), + "security_groups": len(server.SecurityGroups), + "volumes_attached": len(server.AttachedVolumes), + "fault_code": server.Fault.Code, + "fault_details": server.Fault.Details, + "fault_message": server.Fault.Message, + "vcpus": vcpus, + "ram_mb": ram, + "disk_gb": disk, + "fault_created": o.convertTimeFormat(server.Fault.Created), + "updated": o.convertTimeFormat(server.Updated), + "created": o.convertTimeFormat(server.Created), + } + if o.OutputSecrets { + tags["user_id"] = server.UserID + fields["adminPass"] = server.AdminPass + } + if len(server.AttachedVolumes) == 0 { + acc.AddFields("openstack_server", fields, tags) + } else { + for _, AttachedVolume := range
server.AttachedVolumes { + fields["volume_id"] = AttachedVolume.ID + acc.AddFields("openstack_server", fields, tags) + } + } +} + +// accumulateServerDiagnostics accumulates statistics from the compute (nova) service. +// Currently only the 'libvirt' driver is supported. +func (o *OpenStack) accumulateServerDiagnostics(acc telegraf.Accumulator) { + for serverID, diagnostic := range o.diag { + s, ok := diagnostic.(map[string]interface{}) + if !ok { + o.Log.Warnf("unknown type for diagnostics %T", diagnostic) + continue + } + tags := map[string]string{ + "server_id": serverID, + } + fields := map[string]interface{}{} + portName := make(map[string]bool) + storageName := make(map[string]bool) + memoryStats := make(map[string]interface{}) + for k, v := range s { + if typePort.MatchString(k) { + portName[strings.Split(k, "_")[0]] = true + } else if typeCPU.MatchString(k) { + fields[k] = v + } else if typeStorage.MatchString(k) { + storageName[strings.Split(k, "_")[0]] = true + } else { + memoryStats[k] = v + } + } + fields["memory"] = memoryStats["memory"] + fields["memory-actual"] = memoryStats["memory-actual"] + fields["memory-rss"] = memoryStats["memory-rss"] + fields["memory-swap_in"] = memoryStats["memory-swap_in"] + tags["no_of_ports"] = strconv.Itoa(len(portName)) + tags["no_of_disks"] = strconv.Itoa(len(storageName)) + for key := range storageName { + fields["disk_errors"] = s[key+"_errors"] + fields["disk_read"] = s[key+"_read"] + fields["disk_read_req"] = s[key+"_read_req"] + fields["disk_write"] = s[key+"_write"] + fields["disk_write_req"] = s[key+"_write_req"] + tags["disk_name"] = key + acc.AddFields("openstack_server_diagnostics", fields, tags) + } + for key := range portName { + fields["port_rx"] = s[key+"_rx"] + fields["port_rx_drop"] = s[key+"_rx_drop"] + fields["port_rx_errors"] = s[key+"_rx_errors"] + fields["port_rx_packets"] = s[key+"_rx_packets"] + fields["port_tx"] = s[key+"_tx"] + fields["port_tx_drop"] = s[key+"_tx_drop"] + fields["port_tx_errors"] = s[key+"_tx_errors"] + fields["port_tx_packets"] = s[key+"_tx_packets"] + tags["port_name"] = key + acc.AddFields("openstack_server_diagnostics", fields, tags) + } + } +} + +// init registers a callback which creates a new OpenStack input instance. +func init() { + inputs.Add("openstack", func() telegraf.Input { + return &OpenStack{ + Domain: "default", + Project: "admin", + TagPrefix: "openstack_tag_", + TagValue: "true", + } + }) +} diff --git a/plugins/inputs/openstack/sample.conf b/plugins/inputs/openstack/sample.conf new file mode 100644 index 0000000000000..1df47df4ea4e0 --- /dev/null +++ b/plugins/inputs/openstack/sample.conf @@ -0,0 +1,51 @@ +# Collects performance metrics from OpenStack services +[[inputs.openstack]] + ## The recommended interval to poll is '30m' + + ## The identity endpoint to authenticate against and get the service catalog from. + authentication_endpoint = "https://my.openstack.cloud:5000" + + ## The domain to authenticate against when using a V3 identity endpoint. + # domain = "default" + + ## The project to authenticate as. + # project = "admin" + + ## User authentication credentials. Must have admin rights.
+ username = "admin" + password = "password" + + ## Available services are: + ## "agents", "aggregates", "flavors", "hypervisors", "networks", "nova_services", + ## "ports", "projects", "servers", "services", "stacks", "storage_pools", "subnets", "volumes" + # enabled_services = ["services", "projects", "hypervisors", "flavors", "networks", "volumes"] + + ## Collect Server Diagnostics + # server_diagnotics = false + + ## Output secrets (such as adminPass (for server) and UserID (for volume)). + # output_secrets = false + + ## Amount of time allowed to complete the HTTP(S) request. + # timeout = "5s" + + ## HTTP Proxy support + # http_proxy_url = "" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Options for tags received from OpenStack + # tag_prefix = "openstack_tag_" + # tag_value = "true" + + ## Timestamp format for timestamp data received from OpenStack. + ## If false, the format is Unix nanoseconds. + # human_readable_timestamps = false + + ## Measure OpenStack call duration + # measure_openstack_requests = false diff --git a/plugins/inputs/opentelemetry/README.md b/plugins/inputs/opentelemetry/README.md new file mode 100644 index 0000000000000..9e1caefc9dd25 --- /dev/null +++ b/plugins/inputs/opentelemetry/README.md @@ -0,0 +1,104 @@ +# OpenTelemetry Input Plugin + +This plugin receives traces, metrics, and logs from +[OpenTelemetry](https://opentelemetry.io) clients and agents via gRPC. + +## Configuration + +```toml @sample.conf +# Receive OpenTelemetry traces, metrics, and logs over gRPC +[[inputs.opentelemetry]] + ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service + ## address:port + # service_address = "0.0.0.0:4317" + + ## Override the default (5s) new connection timeout + # timeout = "5s" + + ## Override the default (prometheus-v1) metrics schema. + ## Supports: "prometheus-v1", "prometheus-v2" + ## For more information about the alternatives, read the Prometheus input + ## plugin notes. + # metrics_schema = "prometheus-v1" + + ## Optional TLS Config. + ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md + ## + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Add service certificate and key. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +``` + +### Schema + +The OpenTelemetry->InfluxDB conversion [schema][1] and [implementation][2] are +hosted at <https://github.com/influxdata/influxdb-observability>. + +Spans are stored in measurement `spans`. +Logs are stored in measurement `logs`. + +For metrics, two output schemata exist. Metrics received with +`metrics_schema=prometheus-v1` are assigned measurement from the OTel field +`Metric.name`. Metrics received with `metrics_schema=prometheus-v2` are stored +in measurement `prometheus`. + +Also see the OpenTelemetry output plugin for Telegraf.
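+
+As a smoke test, an application can push a counter to this plugin using the
+same OpenTelemetry Go SDK packages that the plugin's own tests use. The
+following is a minimal, illustrative sketch (the endpoint, meter name, and
+metric name are assumptions, not part of the plugin):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
+	"go.opentelemetry.io/otel/metric/global"
+	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
+	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
+	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
+)
+
+func main() {
+	ctx := context.Background()
+
+	// Export OTLP metrics over gRPC to the plugin's default service_address.
+	exporter, err := otlpmetricgrpc.New(ctx,
+		otlpmetricgrpc.WithEndpoint("localhost:4317"),
+		otlpmetricgrpc.WithInsecure(),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Periodically push accumulated instrument values through the exporter.
+	pusher := controller.New(
+		processor.NewFactory(simple.NewWithHistogramDistribution(), exporter),
+		controller.WithExporter(exporter),
+	)
+	if err := pusher.Start(ctx); err != nil {
+		log.Fatal(err)
+	}
+	global.SetMeterProvider(pusher)
+
+	// With metrics_schema = "prometheus-v1" this counter arrives in Telegraf
+	// under the measurement "example_requests_total".
+	counter, err := global.MeterProvider().Meter("example-library").SyncInt64().Counter("example_requests_total")
+	if err != nil {
+		log.Fatal(err)
+	}
+	counter.Add(ctx, 1)
+
+	// Stop flushes any remaining metrics before exit.
+	if err := pusher.Stop(ctx); err != nil {
+		log.Fatal(err)
+	}
+}
+```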
+ +[1]: https://github.com/influxdata/influxdb-observability/blob/main/docs/index.md + +[2]: https://github.com/influxdata/influxdb-observability/tree/main/otel2influx + +## Example Output + +### Tracing Spans + +```text +spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="d5270e78d85f570f",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="4c28227be6a010e1",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689169000 +spans end_time_unix_nano="2021-02-19 20:50:25.6893952 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="d5270e78d85f570f",status_code="STATUS_CODE_OK",trace_id="7d4854815225332c9834e6dbf85b9380" 1613767825689135000 +spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="b57e98af78c3399b",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="a0643a156d7f9f7f",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689388000 +spans end_time_unix_nano="2021-02-19 20:50:25.6895667 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="lets-go",net.peer.ip="1.2.3.4",peer.service="tracegen-server",service.name="tracegen",span.kind="client",span_id="b57e98af78c3399b",status_code="STATUS_CODE_OK",trace_id="fd6b8bb5965e726c94978c644962cdc8" 1613767825689303300 +spans end_time_unix_nano="2021-02-19 20:50:25.6896741 +0000 UTC",instrumentation_library_name="tracegen",kind="SPAN_KIND_INTERNAL",name="okey-dokey",net.peer.ip="1.2.3.4",parent_span_id="6a8e6a0edcc1c966",peer.service="tracegen-client",service.name="tracegen",span.kind="server",span_id="d68f7f3b41eb8075",status_code="STATUS_CODE_OK",trace_id="651dadde186b7834c52b13a28fc27bea" 1613767825689480300 +``` + +### Metrics + +#### `prometheus-v1` + +```shell +cpu_temp,foo=bar gauge=87.332 +http_requests_total,method=post,code=200 counter=1027 +http_requests_total,method=post,code=400 counter=3 +http_request_duration_seconds 0.05=24054,0.1=33444,0.2=100392,0.5=129389,1=133988,sum=53423,count=144320 +rpc_duration_seconds 0.01=3102,0.05=3272,0.5=4773,0.9=9001,0.99=76656,sum=1.7560473e+07,count=2693 +``` + +#### `prometheus-v2` + +```shell +prometheus,foo=bar cpu_temp=87.332 +prometheus,method=post,code=200 http_requests_total=1027 +prometheus,method=post,code=400 http_requests_total=3 +prometheus,le=0.05 http_request_duration_seconds_bucket=24054 +prometheus,le=0.1 http_request_duration_seconds_bucket=33444 +prometheus,le=0.2 http_request_duration_seconds_bucket=100392 +prometheus,le=0.5 http_request_duration_seconds_bucket=129389 +prometheus,le=1 http_request_duration_seconds_bucket=133988 +prometheus http_request_duration_seconds_count=144320,http_request_duration_seconds_sum=53423 +prometheus,quantile=0.01 rpc_duration_seconds=3102 +prometheus,quantile=0.05 rpc_duration_seconds=3272 +prometheus,quantile=0.5 rpc_duration_seconds=4773 +prometheus,quantile=0.9 rpc_duration_seconds=9001 +prometheus,quantile=0.99 rpc_duration_seconds=76656 +prometheus rpc_duration_seconds_count=2693,rpc_duration_seconds_sum=1.7560473e+07 +``` + +### Logs + +```text +logs
fluent.tag="fluent.info",pid=18i,ppid=9i,worker=0i 1613769568895331700 +logs fluent.tag="fluent.debug",instance=1720i,queue_size=0i,stage_size=0i 1613769568895697200 +logs fluent.tag="fluent.info",worker=0i 1613769568896515100 +``` diff --git a/plugins/inputs/opentelemetry/grpc_services.go b/plugins/inputs/opentelemetry/grpc_services.go new file mode 100644 index 0000000000000..ee75b6f3b865f --- /dev/null +++ b/plugins/inputs/opentelemetry/grpc_services.go @@ -0,0 +1,85 @@ +package opentelemetry + +import ( + "context" + "fmt" + "go.opentelemetry.io/collector/pdata/plog/plogotlp" + "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" + + "github.com/influxdata/influxdb-observability/common" + "github.com/influxdata/influxdb-observability/otel2influx" +) + +type traceService struct { + converter *otel2influx.OtelTracesToLineProtocol + writer *writeToAccumulator +} + +var _ ptraceotlp.Server = (*traceService)(nil) + +func newTraceService(logger common.Logger, writer *writeToAccumulator) *traceService { + converter := otel2influx.NewOtelTracesToLineProtocol(logger) + return &traceService{ + converter: converter, + writer: writer, + } +} + +func (s *traceService) Export(ctx context.Context, req ptraceotlp.Request) (ptraceotlp.Response, error) { + err := s.converter.WriteTraces(ctx, req.Traces(), s.writer) + return ptraceotlp.NewResponse(), err +} + +type metricsService struct { + converter *otel2influx.OtelMetricsToLineProtocol + writer *writeToAccumulator +} + +var _ pmetricotlp.Server = (*metricsService)(nil) + +var metricsSchemata = map[string]common.MetricsSchema{ + "prometheus-v1": common.MetricsSchemaTelegrafPrometheusV1, + "prometheus-v2": common.MetricsSchemaTelegrafPrometheusV2, +} + +func newMetricsService(logger common.Logger, writer *writeToAccumulator, schema string) (*metricsService, error) { + ms, found := metricsSchemata[schema] + if !found { + return nil, fmt.Errorf("schema '%s' not recognized", schema) + } + + converter, err := otel2influx.NewOtelMetricsToLineProtocol(logger, ms) + if err != nil { + return nil, err + } + return &metricsService{ + converter: converter, + writer: writer, + }, nil +} + +func (s *metricsService) Export(ctx context.Context, req pmetricotlp.Request) (pmetricotlp.Response, error) { + err := s.converter.WriteMetrics(ctx, req.Metrics(), s.writer) + return pmetricotlp.NewResponse(), err +} + +type logsService struct { + converter *otel2influx.OtelLogsToLineProtocol + writer *writeToAccumulator +} + +var _ plogotlp.Server = (*logsService)(nil) + +func newLogsService(logger common.Logger, writer *writeToAccumulator) *logsService { + converter := otel2influx.NewOtelLogsToLineProtocol(logger) + return &logsService{ + converter: converter, + writer: writer, + } +} + +func (s *logsService) Export(ctx context.Context, req plogotlp.Request) (plogotlp.Response, error) { + err := s.converter.WriteLogs(ctx, req.Logs(), s.writer) + return plogotlp.NewResponse(), err +} diff --git a/plugins/inputs/opentelemetry/logger.go b/plugins/inputs/opentelemetry/logger.go new file mode 100644 index 0000000000000..3db3621bcc672 --- /dev/null +++ b/plugins/inputs/opentelemetry/logger.go @@ -0,0 +1,16 @@ +package opentelemetry + +import ( + "strings" + + "github.com/influxdata/telegraf" +) + +type otelLogger struct { + telegraf.Logger +} + +func (l otelLogger) Debug(msg string, kv ...interface{}) { + format := msg + strings.Repeat(" %s=%q", len(kv)/2) + l.Logger.Debugf(format, kv...) 
+} diff --git a/plugins/inputs/opentelemetry/opentelemetry.go b/plugins/inputs/opentelemetry/opentelemetry.go new file mode 100644 index 0000000000000..11c0d3092f420 --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry.go @@ -0,0 +1,107 @@ +//go:generate ../../../tools/readme_config_includer/generator +package opentelemetry + +import ( + _ "embed" + "fmt" + "net" + "sync" + "time" + + "go.opentelemetry.io/collector/pdata/plog/plogotlp" + "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +type OpenTelemetry struct { + ServiceAddress string `toml:"service_address"` + MetricsSchema string `toml:"metrics_schema"` + + tls.ServerConfig + Timeout config.Duration `toml:"timeout"` + + Log telegraf.Logger `toml:"-"` + + listener net.Listener // overridden in tests + grpcServer *grpc.Server + + wg sync.WaitGroup +} + +func (*OpenTelemetry) SampleConfig() string { + return sampleConfig +} + +func (o *OpenTelemetry) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (o *OpenTelemetry) Start(accumulator telegraf.Accumulator) error { + var grpcOptions []grpc.ServerOption + if tlsConfig, err := o.ServerConfig.TLSConfig(); err != nil { + return err + } else if tlsConfig != nil { + grpcOptions = append(grpcOptions, grpc.Creds(credentials.NewTLS(tlsConfig))) + } + if o.Timeout > 0 { + grpcOptions = append(grpcOptions, grpc.ConnectionTimeout(time.Duration(o.Timeout))) + } + + logger := &otelLogger{o.Log} + influxWriter := &writeToAccumulator{accumulator} + o.grpcServer = grpc.NewServer(grpcOptions...) 
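+	// The OTLP trace, metrics, and log services are registered next, before
+	// the server starts serving.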
+ + ptraceotlp.RegisterServer(o.grpcServer, newTraceService(logger, influxWriter)) + ms, err := newMetricsService(logger, influxWriter, o.MetricsSchema) + if err != nil { + return err + } + pmetricotlp.RegisterServer(o.grpcServer, ms) + plogotlp.RegisterServer(o.grpcServer, newLogsService(logger, influxWriter)) + + if o.listener == nil { + o.listener, err = net.Listen("tcp", o.ServiceAddress) + if err != nil { + return err + } + } + + o.wg.Add(1) + go func() { + if err := o.grpcServer.Serve(o.listener); err != nil { + accumulator.AddError(fmt.Errorf("failed to stop OpenTelemetry gRPC service: %w", err)) + } + o.wg.Done() + }() + + return nil +} + +func (o *OpenTelemetry) Stop() { + if o.grpcServer != nil { + o.grpcServer.Stop() + } + + o.wg.Wait() +} + +func init() { + inputs.Add("opentelemetry", func() telegraf.Input { + return &OpenTelemetry{ + ServiceAddress: "0.0.0.0:4317", + MetricsSchema: "prometheus-v1", + Timeout: config.Duration(5 * time.Second), + } + }) +} diff --git a/plugins/inputs/opentelemetry/opentelemetry_test.go b/plugins/inputs/opentelemetry/opentelemetry_test.go new file mode 100644 index 0000000000000..7f608bbfe7f68 --- /dev/null +++ b/plugins/inputs/opentelemetry/opentelemetry_test.go @@ -0,0 +1,82 @@ +package opentelemetry + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + "go.opentelemetry.io/otel/metric/global" + controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" + processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" + "go.opentelemetry.io/otel/sdk/metric/selector/simple" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" +) + +func TestOpenTelemetry(t *testing.T) { + mockListener := bufconn.Listen(1024 * 1024) + plugin := inputs.Inputs["opentelemetry"]().(*OpenTelemetry) + plugin.listener = mockListener + accumulator := new(testutil.Accumulator) + + err := plugin.Start(accumulator) + require.NoError(t, err) + t.Cleanup(plugin.Stop) + + metricExporter, err := otlpmetricgrpc.New(context.Background(), + otlpmetricgrpc.WithInsecure(), + otlpmetricgrpc.WithDialOption( + grpc.WithBlock(), + grpc.WithContextDialer(func(_ context.Context, _ string) (net.Conn, error) { + return mockListener.Dial() + })), + ) + require.NoError(t, err) + t.Cleanup(func() { _ = metricExporter.Shutdown(context.Background()) }) + + pusher := controller.New( + processor.NewFactory( + simple.NewWithHistogramDistribution(), + metricExporter, + ), + controller.WithExporter(metricExporter), + ) + + err = pusher.Start(context.Background()) + require.NoError(t, err) + t.Cleanup(func() { _ = pusher.Stop(context.Background()) }) + + global.SetMeterProvider(pusher) + + // write metrics + meter := global.MeterProvider().Meter("library-name") + counter, err := meter.SyncInt64().Counter("measurement-counter") + require.NoError(t, err) + counter.Add(context.Background(), 7) + + err = pusher.Stop(context.Background()) + require.NoError(t, err) + + // Shutdown + + plugin.Stop() + + err = metricExporter.Shutdown(context.Background()) + require.NoError(t, err) + + // Check + + require.Empty(t, accumulator.Errors) + + require.Len(t, accumulator.Metrics, 1) + got := accumulator.Metrics[0] + require.Equal(t, "measurement-counter", got.Measurement) + require.Equal(t, telegraf.Counter, got.Type) + require.Equal(t, "library-name", 
got.Tags["otel.library.name"]) +} diff --git a/plugins/inputs/opentelemetry/sample.conf b/plugins/inputs/opentelemetry/sample.conf new file mode 100644 index 0000000000000..5dae3afe4fd57 --- /dev/null +++ b/plugins/inputs/opentelemetry/sample.conf @@ -0,0 +1,24 @@ +# Receive OpenTelemetry traces, metrics, and logs over gRPC +[[inputs.opentelemetry]] + ## Override the default (0.0.0.0:4317) destination OpenTelemetry gRPC service + ## address:port + # service_address = "0.0.0.0:4317" + + ## Override the default (5s) new connection timeout + # timeout = "5s" + + ## Override the default (prometheus-v1) metrics schema. + ## Supports: "prometheus-v1", "prometheus-v2" + ## For more information about the alternatives, read the Prometheus input + ## plugin notes. + # metrics_schema = "prometheus-v1" + + ## Optional TLS Config. + ## For advanced options: https://github.com/influxdata/telegraf/blob/v1.18.3/docs/TLS.md + ## + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Add service certificate and key. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" diff --git a/plugins/inputs/opentelemetry/writer.go b/plugins/inputs/opentelemetry/writer.go new file mode 100644 index 0000000000000..58906e62aa391 --- /dev/null +++ b/plugins/inputs/opentelemetry/writer.go @@ -0,0 +1,32 @@ +package opentelemetry + +import ( + "context" + "fmt" + "time" + + "github.com/influxdata/influxdb-observability/common" + "github.com/influxdata/telegraf" +) + +type writeToAccumulator struct { + accumulator telegraf.Accumulator +} + +func (w *writeToAccumulator) WritePoint(_ context.Context, measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time, vType common.InfluxMetricValueType) error { + switch vType { + case common.InfluxMetricValueTypeUntyped: + w.accumulator.AddFields(measurement, fields, tags, ts) + case common.InfluxMetricValueTypeGauge: + w.accumulator.AddGauge(measurement, fields, tags, ts) + case common.InfluxMetricValueTypeSum: + w.accumulator.AddCounter(measurement, fields, tags, ts) + case common.InfluxMetricValueTypeHistogram: + w.accumulator.AddHistogram(measurement, fields, tags, ts) + case common.InfluxMetricValueTypeSummary: + w.accumulator.AddSummary(measurement, fields, tags, ts) + default: + return fmt.Errorf("unrecognized InfluxMetricValueType %q", vType) + } + return nil +} diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md index 85803f76ab046..f6884371f9ae2 100644 --- a/plugins/inputs/openweathermap/README.md +++ b/plugins/inputs/openweathermap/README.md @@ -6,13 +6,14 @@ To use this plugin you will need an [api key][] (app_id). City identifiers can be found in the [city list][]. Alternately you can [search][] by name; the `city_id` can be found as the last digits -of the URL: https://openweathermap.org/city/2643743. Language +of the URL: . Language identifiers can be found in the [lang list][]. Documentation for condition ID, icon, and main is at [weather conditions][]. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read current weather and forecasts data from openweathermap.org [[inputs.openweathermap]] ## OpenWeatherMap API key. app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -44,7 +45,7 @@ condition ID, icon, and main is at [weather conditions][]. 
interval = "10m" ``` -### Metrics +## Metrics - weather - tags: @@ -60,19 +61,20 @@ condition ID, icon, and main is at [weather conditions][]. - sunrise (int, nanoseconds since unix epoch) - sunset (int, nanoseconds since unix epoch) - temperature (float, degrees) + - feels_like (float, degrees) - visibility (int, meters, not available on forecast data) - wind_degrees (float, wind direction in degrees) - wind_speed (float, wind speed in meters/sec or miles/sec) - condition_description (string, localized long description) - condition_icon +## Example Output -### Example Output +```shell +> weather,city=San\ Francisco,city_id=5391959,condition_id=803,condition_main=Clouds,country=US,forecast=114h,host=robot pressure=1027,temperature=10.09,wind_degrees=34,wind_speed=1.24,condition_description="broken clouds",cloudiness=80i,humidity=67i,rain=0,feels_like=8.9,condition_icon="04n" 1645952400000000000 +> weather,city=San\ Francisco,city_id=5391959,condition_id=804,condition_main=Clouds,country=US,forecast=117h,host=robot humidity=65i,rain=0,temperature=10.12,wind_degrees=31,cloudiness=90i,pressure=1026,feels_like=8.88,wind_speed=1.31,condition_description="overcast clouds",condition_icon="04n" 1645963200000000000 +> weather,city=San\ Francisco,city_id=5391959,condition_id=804,condition_main=Clouds,country=US,forecast=120h,host=robot cloudiness=100i,humidity=61i,rain=0,temperature=10.28,wind_speed=1.94,condition_icon="04d",pressure=1027,feels_like=8.96,wind_degrees=16,condition_description="overcast clouds" 1645974000000000000 -``` -> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=* cloudiness=1i,condition_description="clear sky",condition_icon="01d",humidity=35i,pressure=1012,rain=0,sunrise=1570630329000000000i,sunset=1570671689000000000i,temperature=21.52,visibility=16093i,wind_degrees=280,wind_speed=5.7 1570659256000000000 -> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=3h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=41i,pressure=1010,rain=0,temperature=22.34,wind_degrees=249.393,wind_speed=2.085 1570665600000000000 -> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=6h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=50i,pressure=1012,rain=0,temperature=17.09,wind_degrees=310.754,wind_speed=3.009 1570676400000000000 ``` [api key]: https://openweathermap.org/appid diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go index 94055a6f8bb6a..ed22bde6bc7a7 100644 --- a/plugins/inputs/openweathermap/openweathermap.go +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package openweathermap import ( + _ "embed" "encoding/json" "fmt" "io" @@ -13,86 +15,55 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( // https://openweathermap.org/current#severalid // Call for several city IDs // The limit of locations is 20. 
- owmRequestSeveralCityId int = 20 + owmRequestSeveralCityID int = 20 - defaultBaseUrl = "https://api.openweathermap.org/" - defaultResponseTimeout time.Duration = time.Second * 5 - defaultUnits string = "metric" - defaultLang string = "en" + defaultBaseURL = "https://api.openweathermap.org/" + defaultResponseTimeout = time.Second * 5 + defaultUnits = "metric" + defaultLang = "en" ) type OpenWeatherMap struct { - AppId string `toml:"app_id"` - CityId []string `toml:"city_id"` - Lang string `toml:"lang"` - Fetch []string `toml:"fetch"` - BaseUrl string `toml:"base_url"` - ResponseTimeout internal.Duration `toml:"response_timeout"` - Units string `toml:"units"` - - client *http.Client - baseUrl *url.URL + AppID string `toml:"app_id"` + CityID []string `toml:"city_id"` + Lang string `toml:"lang"` + Fetch []string `toml:"fetch"` + BaseURL string `toml:"base_url"` + ResponseTimeout config.Duration `toml:"response_timeout"` + Units string `toml:"units"` + + client *http.Client + baseParsedURL *url.URL } -var sampleConfig = ` - ## OpenWeatherMap API key. - app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" - - ## City ID's to collect weather data from. - city_id = ["5391959"] - - ## Language of the description field. Can be one of "ar", "bg", - ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", - ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", - ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" - # lang = "en" - - ## APIs to fetch; can contain "weather" or "forecast". - fetch = ["weather", "forecast"] - - ## OpenWeatherMap base URL - # base_url = "https://api.openweathermap.org/" - - ## Timeout for HTTP response. - # response_timeout = "5s" - - ## Preferred unit system for temperature and wind speed. Can be one of - ## "metric", "imperial", or "standard". - # units = "metric" - - ## Query interval; OpenWeatherMap updates their weather data every 10 - ## minutes. 
- interval = "10m" -` - -func (n *OpenWeatherMap) SampleConfig() string { +func (*OpenWeatherMap) SampleConfig() string { return sampleConfig } -func (n *OpenWeatherMap) Description() string { - return "Read current weather and forecasts data from openweathermap.org" -} - func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup var strs []string for _, fetch := range n.Fetch { if fetch == "forecast" { - for _, city := range n.CityId { + for _, city := range n.CityID { addr := n.formatURL("/data/2.5/forecast", city) wg.Add(1) go func() { defer wg.Done() - status, err := n.gatherUrl(addr) + status, err := n.gatherURL(addr) if err != nil { acc.AddError(err) return @@ -103,10 +74,10 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { } } else if fetch == "weather" { j := 0 - for j < len(n.CityId) { + for j < len(n.CityID) { strs = make([]string, 0) - for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ { - strs = append(strs, n.CityId[j]) + for i := 0; j < len(n.CityID) && i < owmRequestSeveralCityID; i++ { + strs = append(strs, n.CityID[j]) j++ } cities := strings.Join(strs, ",") @@ -115,7 +86,7 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func() { defer wg.Done() - status, err := n.gatherUrl(addr) + status, err := n.gatherURL(addr) if err != nil { acc.AddError(err) return @@ -124,7 +95,6 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { gatherWeather(acc, status) }() } - } } @@ -132,20 +102,20 @@ func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { return nil } -func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { - n.ResponseTimeout.Duration = defaultResponseTimeout +func (n *OpenWeatherMap) createHTTPClient() *http.Client { + if n.ResponseTimeout < config.Duration(time.Second) { + n.ResponseTimeout = config.Duration(defaultResponseTimeout) } client := &http.Client{ Transport: &http.Transport{}, - Timeout: n.ResponseTimeout.Duration, + Timeout: time.Duration(n.ResponseTimeout), } - return client, nil + return client } -func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) { +func (n *OpenWeatherMap) gatherURL(addr string) (*Status, error) { resp, err := n.client.Get(addr) if err != nil { return nil, fmt.Errorf("error making HTTP request to %s: %s", addr, err) @@ -165,7 +135,7 @@ func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) { return nil, fmt.Errorf("%s returned unexpected content type %s", addr, mediaType) } - return gatherWeatherUrl(resp.Body) + return gatherWeatherURL(resp.Body) } type WeatherEntry struct { @@ -177,6 +147,7 @@ type WeatherEntry struct { Humidity int64 `json:"humidity"` Pressure float64 `json:"pressure"` Temp float64 `json:"temp"` + Feels float64 `json:"feels_like"` } `json:"main"` Rain struct { Rain1 float64 `json:"1h"` @@ -191,7 +162,7 @@ type WeatherEntry struct { Deg float64 `json:"deg"` Speed float64 `json:"speed"` } `json:"wind"` - Id int64 `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` Coord struct { Lat float64 `json:"lat"` @@ -213,13 +184,13 @@ type Status struct { Lon float64 `json:"lon"` } `json:"coord"` Country string `json:"country"` - Id int64 `json:"id"` + ID int64 `json:"id"` Name string `json:"name"` } `json:"city"` List []WeatherEntry `json:"list"` } -func gatherWeatherUrl(r io.Reader) (*Status, error) { +func gatherWeatherURL(r io.Reader) (*Status, error) { dec := json.NewDecoder(r) status := &Status{} if err := 
dec.Decode(status); err != nil { @@ -247,13 +218,14 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) { "sunrise": time.Unix(e.Sys.Sunrise, 0).UnixNano(), "sunset": time.Unix(e.Sys.Sunset, 0).UnixNano(), "temperature": e.Main.Temp, + "feels_like": e.Main.Feels, "visibility": e.Visibility, "wind_degrees": e.Wind.Deg, "wind_speed": e.Wind.Speed, } tags := map[string]string{ "city": e.Name, - "city_id": strconv.FormatInt(e.Id, 10), + "city_id": strconv.FormatInt(e.ID, 10), "country": e.Sys.Country, "forecast": "*", } @@ -271,7 +243,7 @@ func gatherWeather(acc telegraf.Accumulator, status *Status) { func gatherForecast(acc telegraf.Accumulator, status *Status) { tags := map[string]string{ - "city_id": strconv.FormatInt(status.City.Id, 10), + "city_id": strconv.FormatInt(status.City.ID, 10), "forecast": "*", "city": status.City.Name, "country": status.City.Country, @@ -284,6 +256,7 @@ func gatherForecast(acc telegraf.Accumulator, status *Status) { "pressure": e.Main.Pressure, "rain": gatherRain(e), "temperature": e.Main.Temp, + "feels_like": e.Main.Feels, "wind_degrees": e.Wind.Deg, "wind_speed": e.Wind.Speed, } @@ -300,29 +273,24 @@ func gatherForecast(acc telegraf.Accumulator, status *Status) { func init() { inputs.Add("openweathermap", func() telegraf.Input { - tmout := internal.Duration{ - Duration: defaultResponseTimeout, - } + tmout := config.Duration(defaultResponseTimeout) return &OpenWeatherMap{ ResponseTimeout: tmout, - BaseUrl: defaultBaseUrl, + BaseURL: defaultBaseURL, } }) } func (n *OpenWeatherMap) Init() error { var err error - n.baseUrl, err = url.Parse(n.BaseUrl) + n.baseParsedURL, err = url.Parse(n.BaseURL) if err != nil { return err } // Create an HTTP client that is re-used for each // collection interval - n.client, err = n.createHttpClient() - if err != nil { - return err - } + n.client = n.createHTTPClient() switch n.Units { case "imperial", "standard", "metric": @@ -349,7 +317,7 @@ func (n *OpenWeatherMap) Init() error { func (n *OpenWeatherMap) formatURL(path string, city string) string { v := url.Values{ "id": []string{city}, - "APPID": []string{n.AppId}, + "APPID": []string{n.AppID}, "lang": []string{n.Lang}, "units": []string{n.Units}, } @@ -359,5 +327,5 @@ func (n *OpenWeatherMap) formatURL(path string, city string) string { RawQuery: v.Encode(), } - return n.baseUrl.ResolveReference(relative).String() + return n.baseParsedURL.ResolveReference(relative).String() } diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go index 9bee1d2e96199..5684ef39b7d0b 100644 --- a/plugins/inputs/openweathermap/openweathermap_test.go +++ b/plugins/inputs/openweathermap/openweathermap_test.go @@ -43,6 +43,7 @@ const sampleStatusResponse = ` "pressure": 1018.65, "sea_level": 1030.99, "temp": 6.71, + "feels_like": 5.71, "temp_kf": -2.14 }, "rain": { @@ -76,6 +77,7 @@ const sampleStatusResponse = ` "pressure": 1032.18, "sea_level": 1044.78, "temp": 6.38, + "feels_like": 5.38, "temp_kf": 0 }, "rain": { @@ -118,7 +120,8 @@ const groupWeatherResponse = ` "main": { "humidity": 87, "pressure": 1007, - "temp": 9.25 + "temp": 9.25, + "feels_like": 8.25 }, "name": "Paris", "sys": { @@ -155,7 +158,8 @@ const rainWeatherResponse = ` "main": { "humidity": 87, "pressure": 1007, - "temp": 9.25 + "temp": 9.25, + "feels_like": 8.25 }, "name": "Paris", "sys": { @@ -189,7 +193,8 @@ const rainWeatherResponse = ` "main": { "humidity": 87, "pressure": 1007, - "temp": 9.25 + "temp": 9.25, + "feels_like": 8.25 }, "name": "Paris", 
"sys": { @@ -223,7 +228,8 @@ const rainWeatherResponse = ` "main": { "humidity": 87, "pressure": 1007, - "temp": 9.25 + "temp": 9.25, + "feels_like": 8.25 }, "name": "Paris", "sys": { @@ -258,7 +264,8 @@ const rainWeatherResponse = ` "main": { "humidity": 87, "pressure": 1007, - "temp": 9.25 + "temp": 9.25, + "feels_like": 8.25 }, "name": "Paris", "sys": { @@ -309,6 +316,7 @@ const batchWeatherResponse = ` }], "main": { "temp": 9.57, + "feels_like": 8.57, "pressure": 1014, "humidity": 46 }, @@ -344,6 +352,7 @@ const batchWeatherResponse = ` }], "main": { "temp": 19.29, + "feels_like": 18.29, "pressure": 1009, "humidity": 63 }, @@ -378,6 +387,7 @@ const batchWeatherResponse = ` }], "main": { "temp": 10.62, + "feels_like": 9.62, "pressure": 1019, "humidity": 66 }, @@ -408,26 +418,26 @@ func TestForecastGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/data/2.5/group" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"2988507"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"2988507"}, Fetch: []string{"weather", "forecast"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -445,6 +455,7 @@ func TestForecastGeneratesMetrics(t *testing.T) { "humidity": int64(91), "pressure": 1018.65, "temperature": 6.71, + "feels_like": 5.71, "rain": 0.035, "wind_degrees": 228.501, "wind_speed": 3.76, @@ -468,6 +479,7 @@ func TestForecastGeneratesMetrics(t *testing.T) { "humidity": int64(98), "pressure": 1032.18, "temperature": 6.38, + "feels_like": 5.38, "rain": 0.049999999999997, "wind_degrees": 335.005, "wind_speed": 2.66, @@ -492,26 +504,26 @@ func TestWeatherGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/data/2.5/forecast" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"2988507"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"2988507"}, Fetch: []string{"weather"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -529,6 +541,7 @@ func TestWeatherGeneratesMetrics(t *testing.T) { "humidity": int64(87), "pressure": 1007.0, "temperature": 9.25, + "feels_like": 8.25, "rain": 0.0, "sunrise": int64(1544167818000000000), "sunset": int64(1544198047000000000), @@ -552,26 +565,26 @@ func TestRainMetrics(t *testing.T) { rsp = rainWeatherResponse w.Header()["Content-Type"] = []string{"application/json"} } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"111", "222", "333", "444"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"111", "222", "333", "444"}, Fetch: []string{"weather"}, Units: "metric", } - 
n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ // City with 1h rain value @@ -590,6 +603,7 @@ func TestRainMetrics(t *testing.T) { "humidity": int64(87), "pressure": 1007.0, "temperature": 9.25, + "feels_like": 8.25, "rain": 1.0, "sunrise": int64(1544167818000000000), "sunset": int64(1544198047000000000), @@ -617,6 +631,7 @@ func TestRainMetrics(t *testing.T) { "humidity": int64(87), "pressure": 1007.0, "temperature": 9.25, + "feels_like": 8.25, "rain": 3.0, "sunrise": int64(1544167818000000000), "sunset": int64(1544198047000000000), @@ -644,6 +659,7 @@ func TestRainMetrics(t *testing.T) { "humidity": int64(87), "pressure": 1007.0, "temperature": 9.25, + "feels_like": 8.25, "rain": 1.3, "sunrise": int64(1544167818000000000), "sunset": int64(1544198047000000000), @@ -671,6 +687,7 @@ func TestRainMetrics(t *testing.T) { "humidity": int64(87), "pressure": 1007.0, "temperature": 9.25, + "feels_like": 8.25, "rain": 0.0, "sunrise": int64(1544167818000000000), "sunset": int64(1544198047000000000), @@ -695,26 +712,26 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { } else if r.URL.Path == "/data/2.5/forecast" { rsp = sampleNoContent } else { - panic("Cannot handle request") + require.Fail(t, "Cannot handle request") } - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &OpenWeatherMap{ - BaseUrl: ts.URL, - AppId: "noappid", - CityId: []string{"524901", "703448", "2643743"}, + BaseURL: ts.URL, + AppID: "noappid", + CityID: []string{"524901", "703448", "2643743"}, Fetch: []string{"weather"}, Units: "metric", } - n.Init() + require.NoError(t, n.Init()) var acc testutil.Accumulator - err := n.Gather(&acc) - require.NoError(t, err) + require.NoError(t, n.Gather(&acc)) expected := []telegraf.Metric{ testutil.MustMetric( @@ -732,6 +749,7 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { "humidity": int64(46), "pressure": 1014.0, "temperature": 9.57, + "feels_like": 8.57, "wind_degrees": 60.0, "wind_speed": 5.0, "rain": 0.0, @@ -758,6 +776,7 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { "humidity": int64(63), "pressure": 1009.0, "temperature": 19.29, + "feels_like": 18.29, "wind_degrees": 0.0, "wind_speed": 1.0, "rain": 0.0, @@ -784,6 +803,7 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { "humidity": int64(66), "pressure": 1019.0, "temperature": 10.62, + "feels_like": 9.62, "wind_degrees": 290.0, "wind_speed": 6.2, "rain": 0.072, @@ -803,28 +823,28 @@ func TestBatchWeatherGeneratesMetrics(t *testing.T) { func TestFormatURL(t *testing.T) { n := &OpenWeatherMap{ - AppId: "appid", - Units: "units", - Lang: "lang", - BaseUrl: "http://foo.com", + AppID: "appid", + Units: "metric", + Lang: "de", + BaseURL: "http://foo.com", } - n.Init() + require.NoError(t, n.Init()) require.Equal(t, - "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=lang&units=units", + "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=de&units=metric", n.formatURL("/data/2.5/forecast", "12345")) } func TestDefaultUnits(t *testing.T) { n := &OpenWeatherMap{} - n.Init() + require.NoError(t, n.Init()) require.Equal(t, "metric", n.Units) } func TestDefaultLang(t *testing.T) { n := &OpenWeatherMap{} - n.Init() + require.NoError(t, n.Init()) require.Equal(t, "en", n.Lang) } diff --git a/plugins/inputs/openweathermap/sample.conf b/plugins/inputs/openweathermap/sample.conf new file mode 100644 index 
0000000000000..174a33d9a5c40 --- /dev/null +++ b/plugins/inputs/openweathermap/sample.conf @@ -0,0 +1,30 @@ +# Read current weather and forecasts data from openweathermap.org +[[inputs.openweathermap]] + ## OpenWeatherMap API key. + app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + + ## City ID's to collect weather data from. + city_id = ["5391959"] + + ## Language of the description field. Can be one of "ar", "bg", + ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", + ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", + ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" + # lang = "en" + + ## APIs to fetch; can contain "weather" or "forecast". + fetch = ["weather", "forecast"] + + ## OpenWeatherMap base URL + # base_url = "https://api.openweathermap.org/" + + ## Timeout for HTTP response. + # response_timeout = "5s" + + ## Preferred unit system for temperature and wind speed. Can be one of + ## "metric", "imperial", or "standard". + # units = "metric" + + ## Query interval; OpenWeatherMap weather data is updated every 10 + ## minutes. + interval = "10m" diff --git a/plugins/inputs/passenger/README.md b/plugins/inputs/passenger/README.md index 688f4e69aef0d..a104ca8c99589 100644 --- a/plugins/inputs/passenger/README.md +++ b/plugins/inputs/passenger/README.md @@ -1,8 +1,9 @@ # Passenger Input Plugin -Gather [Phusion Passenger](https://www.phusionpassenger.com/) metrics using the `passenger-status` command line utility. +Gather [Phusion Passenger](https://www.phusionpassenger.com/) metrics using the +`passenger-status` command line utility. -**Series Cardinality Warning** +## Series Cardinality Warning Depending on your environment, this `passenger_process` measurement of this plugin can quickly create a high number of series which, when unchecked, can @@ -15,17 +16,14 @@ manage your series cardinality: `tagexclude` to remove the `pid` and `process_group_id` tags. - Write to a database with an appropriate [retention policy](https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/). -- Limit series cardinality in your database using the - [`max-series-per-database`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000) and - [`max-values-per-tag`](https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000) settings. - Consider using the [Time Series Index](https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/). - Monitor your databases [series cardinality](https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality). -### Configuration +## Configuration -```toml +```toml @sample.conf # Read metrics of passenger using passenger-status [[inputs.passenger]] ## Path of passenger-status. @@ -39,11 +37,12 @@ manage your series cardinality: command = "passenger-status -v --show=xml" ``` -#### Permissions: +### Permissions -Telegraf must have permission to execute the `passenger-status` command. On most systems, Telegraf runs as the `telegraf` user. +Telegraf must have permission to execute the `passenger-status` command. On +most systems, Telegraf runs as the `telegraf` user. -### Metrics: +## Metrics - passenger - tags: @@ -98,8 +97,9 @@ Telegraf must have permission to execute the `passenger-status` command. 
On mos - real_memory - vmsize -### Example Output: -``` +## Example Output + +```shell passenger,passenger_version=5.0.17 capacity_used=23i,get_wait_list_size=0i,max=23i,process_count=23i 1452984112799414257 passenger_supergroup,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i 1452984112799496977 passenger_group,app_root=/var/app/current,app_type=rack,name=/var/app/current/public capacity_used=23i,get_wait_list_size=0i,processes_being_spawned=0i 1452984112799527021 diff --git a/plugins/inputs/passenger/passenger.go b/plugins/inputs/passenger/passenger.go index f00bfc824de28..170a3325544f5 100644 --- a/plugins/inputs/passenger/passenger.go +++ b/plugins/inputs/passenger/passenger.go @@ -1,18 +1,25 @@ +//go:generate ../../../tools/readme_config_includer/generator package passenger import ( "bytes" + _ "embed" "encoding/xml" "fmt" "os/exec" "strconv" "strings" + "golang.org/x/net/html/charset" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "golang.org/x/net/html/charset" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type passenger struct { Command string } @@ -32,27 +39,27 @@ func (p *passenger) parseCommand() (string, []string) { } type info struct { - Passenger_version string `xml:"passenger_version"` - Process_count int `xml:"process_count"` - Capacity_used int `xml:"capacity_used"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Max int `xml:"max"` - Supergroups struct { + PassengerVersion string `xml:"passenger_version"` + ProcessCount int `xml:"process_count"` + CapacityUsed int `xml:"capacity_used"` + GetWaitListSize int `xml:"get_wait_list_size"` + Max int `xml:"max"` + Supergroups struct { Supergroup []struct { - Name string `xml:"name"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Capacity_used int `xml:"capacity_used"` - Group []struct { - Name string `xml:"name"` - AppRoot string `xml:"app_root"` - AppType string `xml:"app_type"` - Enabled_process_count int `xml:"enabled_process_count"` - Disabling_process_count int `xml:"disabling_process_count"` - Disabled_process_count int `xml:"disabled_process_count"` - Capacity_used int `xml:"capacity_used"` - Get_wait_list_size int `xml:"get_wait_list_size"` - Processes_being_spawned int `xml:"processes_being_spawned"` - Processes struct { + Name string `xml:"name"` + GetWaitListSize int `xml:"get_wait_list_size"` + CapacityUsed int `xml:"capacity_used"` + Group []struct { + Name string `xml:"name"` + AppRoot string `xml:"app_root"` + AppType string `xml:"app_type"` + EnabledProcessCount int `xml:"enabled_process_count"` + DisablingProcessCount int `xml:"disabling_process_count"` + DisabledProcessCount int `xml:"disabled_process_count"` + CapacityUsed int `xml:"capacity_used"` + GetWaitListSize int `xml:"get_wait_list_size"` + ProcessesBeingSpawned int `xml:"processes_being_spawned"` + Processes struct { Process []*process `xml:"process"` } `xml:"processes"` } `xml:"group"` @@ -61,28 +68,28 @@ type info struct { } type process struct { - Pid int `xml:"pid"` - Concurrency int `xml:"concurrency"` - Sessions int `xml:"sessions"` - Busyness int `xml:"busyness"` - Processed int `xml:"processed"` - Spawner_creation_time int64 `xml:"spawner_creation_time"` - Spawn_start_time int64 `xml:"spawn_start_time"` - Spawn_end_time int64 `xml:"spawn_end_time"` - Last_used int64 `xml:"last_used"` - Uptime string `xml:"uptime"` - Code_revision string `xml:"code_revision"` - Life_status string 
`xml:"life_status"` - Enabled string `xml:"enabled"` - Has_metrics bool `xml:"has_metrics"` - Cpu int64 `xml:"cpu"` - Rss int64 `xml:"rss"` - Pss int64 `xml:"pss"` - Private_dirty int64 `xml:"private_dirty"` - Swap int64 `xml:"swap"` - Real_memory int64 `xml:"real_memory"` - Vmsize int64 `xml:"vmsize"` - Process_group_id string `xml:"process_group_id"` + Pid int `xml:"pid"` + Concurrency int `xml:"concurrency"` + Sessions int `xml:"sessions"` + Busyness int `xml:"busyness"` + Processed int `xml:"processed"` + SpawnerCreationTime int64 `xml:"spawner_creation_time"` + SpawnStartTime int64 `xml:"spawn_start_time"` + SpawnEndTime int64 `xml:"spawn_end_time"` + LastUsed int64 `xml:"last_used"` + Uptime string `xml:"uptime"` + CodeRevision string `xml:"code_revision"` + LifeStatus string `xml:"life_status"` + Enabled string `xml:"enabled"` + HasMetrics bool `xml:"has_metrics"` + CPU int64 `xml:"cpu"` + Rss int64 `xml:"rss"` + Pss int64 `xml:"pss"` + PrivateDirty int64 `xml:"private_dirty"` + Swap int64 `xml:"swap"` + RealMemory int64 `xml:"real_memory"` + Vmsize int64 `xml:"vmsize"` + ProcessGroupID string `xml:"process_group_id"` } func (p *process) getUptime() int64 { @@ -125,43 +132,23 @@ func (p *process) getUptime() int64 { return uptime } -var sampleConfig = ` - ## Path of passenger-status. - ## - ## Plugin gather metric via parsing XML output of passenger-status - ## More information about the tool: - ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html - ## - ## If no path is specified, then the plugin simply execute passenger-status - ## hopefully it can be found in your PATH - command = "passenger-status -v --show=xml" -` - -func (r *passenger) SampleConfig() string { +func (*passenger) SampleConfig() string { return sampleConfig } -func (r *passenger) Description() string { - return "Read metrics of passenger using passenger-status" -} - -func (g *passenger) Gather(acc telegraf.Accumulator) error { - if g.Command == "" { - g.Command = "passenger-status -v --show=xml" +func (p *passenger) Gather(acc telegraf.Accumulator) error { + if p.Command == "" { + p.Command = "passenger-status -v --show=xml" } - cmd, args := g.parseCommand() + cmd, args := p.parseCommand() out, err := exec.Command(cmd, args...).Output() if err != nil { return err } - if err = importMetric(out, acc); err != nil { - return err - } - - return nil + return importMetric(out, acc) } func importMetric(stat []byte, acc telegraf.Accumulator) error { @@ -170,17 +157,17 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { decoder := xml.NewDecoder(bytes.NewReader(stat)) decoder.CharsetReader = charset.NewReaderLabel if err := decoder.Decode(&p); err != nil { - return fmt.Errorf("Cannot parse input with error: %v\n", err) + return fmt.Errorf("cannot parse input with error: %v", err) } tags := map[string]string{ - "passenger_version": p.Passenger_version, + "passenger_version": p.PassengerVersion, } fields := map[string]interface{}{ - "process_count": p.Process_count, + "process_count": p.ProcessCount, "max": p.Max, - "capacity_used": p.Capacity_used, - "get_wait_list_size": p.Get_wait_list_size, + "capacity_used": p.CapacityUsed, + "get_wait_list_size": p.GetWaitListSize, } acc.AddFields("passenger", fields, tags) @@ -189,8 +176,8 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "name": sg.Name, } fields := map[string]interface{}{ - "get_wait_list_size": sg.Get_wait_list_size, - "capacity_used": sg.Capacity_used, + "get_wait_list_size": sg.GetWaitListSize, + 
"capacity_used": sg.CapacityUsed, } acc.AddFields("passenger_supergroup", fields, tags) @@ -201,9 +188,9 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "app_type": group.AppType, } fields := map[string]interface{}{ - "get_wait_list_size": group.Get_wait_list_size, - "capacity_used": group.Capacity_used, - "processes_being_spawned": group.Processes_being_spawned, + "get_wait_list_size": group.GetWaitListSize, + "capacity_used": group.CapacityUsed, + "processes_being_spawned": group.ProcessesBeingSpawned, } acc.AddFields("passenger_group", fields, tags) @@ -213,26 +200,26 @@ func importMetric(stat []byte, acc telegraf.Accumulator) error { "app_root": group.AppRoot, "supergroup_name": sg.Name, "pid": fmt.Sprintf("%d", process.Pid), - "code_revision": process.Code_revision, - "life_status": process.Life_status, - "process_group_id": process.Process_group_id, + "code_revision": process.CodeRevision, + "life_status": process.LifeStatus, + "process_group_id": process.ProcessGroupID, } fields := map[string]interface{}{ "concurrency": process.Concurrency, "sessions": process.Sessions, "busyness": process.Busyness, "processed": process.Processed, - "spawner_creation_time": process.Spawner_creation_time, - "spawn_start_time": process.Spawn_start_time, - "spawn_end_time": process.Spawn_end_time, - "last_used": process.Last_used, + "spawner_creation_time": process.SpawnerCreationTime, + "spawn_start_time": process.SpawnStartTime, + "spawn_end_time": process.SpawnEndTime, + "last_used": process.LastUsed, "uptime": process.getUptime(), - "cpu": process.Cpu, + "cpu": process.CPU, "rss": process.Rss, "pss": process.Pss, - "private_dirty": process.Private_dirty, + "private_dirty": process.PrivateDirty, "swap": process.Swap, - "real_memory": process.Real_memory, + "real_memory": process.RealMemory, "vmsize": process.Vmsize, } acc.AddFields("passenger_process", fields, tags) diff --git a/plugins/inputs/passenger/passenger_test.go b/plugins/inputs/passenger/passenger_test.go index c54239d39ecfd..5578b88b77525 100644 --- a/plugins/inputs/passenger/passenger_test.go +++ b/plugins/inputs/passenger/passenger_test.go @@ -2,22 +2,41 @@ package passenger import ( "fmt" - "io/ioutil" "os" + "path/filepath" + "runtime" + "strings" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) -func fakePassengerStatus(stat string) { - content := fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) - ioutil.WriteFile("/tmp/passenger-status", []byte(content), 0700) +func fakePassengerStatus(stat string) (string, error) { + var fileExtension, content string + if runtime.GOOS == "windows" { + fileExtension = ".bat" + content = "@echo off\n" + for _, line := range strings.Split(strings.TrimSuffix(stat, "\n"), "\n") { + content += "for /f \"delims=\" %%A in (\"" + line + "\") do echo %%~A\n" //my eyes are bleeding + } + } else { + content = fmt.Sprintf("#!/bin/sh\ncat << EOF\n%s\nEOF", stat) + } + + tempFilePath := filepath.Join(os.TempDir(), "passenger-status"+fileExtension) + if err := os.WriteFile(tempFilePath, []byte(content), 0700); err != nil { + return "", err + } + + return tempFilePath, nil } -func teardown() { - os.Remove("/tmp/passenger-status") +func teardown(tempFilePath string) { + // Ignore the returned error as we want to remove the file and ignore missing file errors + //nolint:errcheck,revive + os.Remove(tempFilePath) } func Test_Invalid_Passenger_Status_Cli(t *testing.T) { 
@@ -29,51 +48,53 @@ func Test_Invalid_Passenger_Status_Cli(t *testing.T) { err := r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), `exec: "an-invalid-command": executable file not found in $PATH`) + require.Contains(t, err.Error(), `exec: "an-invalid-command": executable file not found in `) } func Test_Invalid_Xml(t *testing.T) { - fakePassengerStatus("invalid xml") - defer teardown() + tempFilePath, err := fakePassengerStatus("invalid xml") + require.NoError(t, err) + defer teardown(tempFilePath) r := &passenger{ - Command: "/tmp/passenger-status", + Command: tempFilePath, } var acc testutil.Accumulator - err := r.Gather(&acc) + err = r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), "Cannot parse input with error: EOF\n") + require.Equal(t, "cannot parse input with error: EOF", err.Error()) } // We test this by ensuring that the error message matches the path of the default cli func Test_Default_Config_Load_Default_Command(t *testing.T) { - fakePassengerStatus("invalid xml") - defer teardown() + tempFilePath, err := fakePassengerStatus("invalid xml") + require.NoError(t, err) + defer teardown(tempFilePath) r := &passenger{} var acc testutil.Accumulator - err := r.Gather(&acc) + err = r.Gather(&acc) require.Error(t, err) - assert.Equal(t, err.Error(), "exec: \"passenger-status\": executable file not found in $PATH") + require.Contains(t, err.Error(), "exec: \"passenger-status\": executable file not found in ") } func TestPassengerGenerateMetric(t *testing.T) { - fakePassengerStatus(sampleStat) - defer teardown() + tempFilePath, err := fakePassengerStatus(sampleStat) + require.NoError(t, err) + defer teardown(tempFilePath) // Now we test again against the server above, with our authentication data r := &passenger{ - Command: "/tmp/passenger-status", + Command: tempFilePath, } var acc testutil.Accumulator - err := r.Gather(&acc) - require.NoError(t, err) + require.NoError(t, r.Gather(&acc)) tags := map[string]string{ "passenger_version": "5.0.17", diff --git a/plugins/inputs/passenger/sample.conf b/plugins/inputs/passenger/sample.conf new file mode 100644 index 0000000000000..93585b1131794 --- /dev/null +++ b/plugins/inputs/passenger/sample.conf @@ -0,0 +1,11 @@ +# Read metrics of passenger using passenger-status +[[inputs.passenger]] + ## Path of passenger-status. + ## + ## The plugin gathers metrics by parsing the XML output of passenger-status + ## More information about the tool: + ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html + ## + ## If no path is specified, then the plugin simply executes passenger-status, + ## hoping it can be found in your PATH + command = "passenger-status -v --show=xml" diff --git a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md index 83215d8f62f3a..c34213fca107b 100644 --- a/plugins/inputs/pf/README.md +++ b/plugins/inputs/pf/README.md @@ -1,14 +1,20 @@ # PF Input Plugin -The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrieve information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table. +The pf plugin gathers information from the FreeBSD/OpenBSD pf +firewall. Currently it can retrieve information about the state table: the +number of current entries in the table, and counters for the number of searches, +inserts, and removals to the table. -The pf plugin retrieves this information by invoking the `pfstat` command.
The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`: +The pf plugin retrieves this information by invoking the `pfstat` command. The +`pfstat` command requires read access to the device file `/dev/pf`. You have +several options to permit telegraf to run `pfctl`: * Run telegraf as root. This is strongly discouraged. * Change the ownership and permissions for /dev/pf such that the user telegraf runs as can read the /dev/pf device file. This is probably not that good of an idea either. * Configure sudo to grant telegraf permission to run `pfctl` as root. This is the most restrictive option, but requires sudo setup. +* Add "telegraf" to the "proxy" group as /dev/pf is owned by root:proxy. -### Using sudo +## Using sudo You may edit your sudo configuration with the following: @@ -16,45 +22,49 @@ telegraf ALL=(root) NOPASSWD: /sbin/pfctl -s info ``` -### Configuration: +## Configuration -```toml - # use sudo to run pfctl +```toml @sample.conf +# Gather counters from PF +[[inputs.pf]] + ## PF requires root access on most systems. + ## Setting 'use_sudo' to true will make use of sudo to run pfctl. + ## Users must configure sudo to allow the telegraf user to run pfctl with no password. + ## pfctl can be restricted to only the list command "pfctl -s info". use_sudo = false ``` -### Measurements & Fields: +## Metrics +* pf + * entries (integer, count) + * searches (integer, count) + * inserts (integer, count) + * removals (integer, count) + * match (integer, count) + * bad-offset (integer, count) + * fragment (integer, count) + * short (integer, count) + * normalize (integer, count) + * memory (integer, count) + * bad-timestamp (integer, count) + * congestion (integer, count) + * ip-option (integer, count) + * proto-cksum (integer, count) + * state-mismatch (integer, count) + * state-insert (integer, count) + * state-limit (integer, count) + * src-limit (integer, count) + * synproxy (integer, count) -- pf - - entries (integer, count) - - searches (integer, count) - - inserts (integer, count) - - removals (integer, count) - - match (integer, count) - - bad-offset (integer, count) - - fragment (integer, count) - - short (integer, count) - - normalize (integer, count) - - memory (integer, count) - - bad-timestamp (integer, count) - - congestion (integer, count) - - ip-option (integer, count) - - proto-cksum (integer, count) - - state-mismatch (integer, count) - - state-insert (integer, count) - - state-limit (integer, count) - - src-limit (integer, count) - - synproxy (integer, count) +## Example Output -### Example Output: - -``` +```text > pfctl -s info Status: Enabled for 0 days 00:26:05 Debug: Urgent State Table Total Rate - current entries 2 + current entries 2 searches 11325 7.2/s inserts 5 0.0/s removals 3 0.0/s @@ -76,7 +86,7 @@ Counters synproxy 0 0.0/s ``` -``` +```shell > ./telegraf --config telegraf.conf --input-filter pf --test * Plugin: inputs.pf, Collection 1 > pf,host=columbia entries=3i,searches=2668i,inserts=12i,removals=9i 1510941775000000000 diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index 035c44fbe1404..b510842b32124 100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package pf import ( "bufio" + _ "embed" "fmt" "os/exec" "regexp" @@ -12,6 +14,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES!
This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const measurement = "pf" const pfctlCommand = "pfctl" @@ -23,18 +29,8 @@ type PF struct { infoFunc func() (string, error) } -func (pf *PF) Description() string { - return "Gather counters from PF" -} - -func (pf *PF) SampleConfig() string { - return ` - ## PF require root access on most systems. - ## Setting 'use_sudo' to true will make use of sudo to run pfctl. - ## Users must configure sudo to allow telegraf user to run pfctl with no password. - ## pfctl can be restricted to only list command "pfctl -s info". - use_sudo = false -` +func (*PF) SampleConfig() string { + return sampleConfig } // Gather is the entrypoint for the plugin. @@ -164,7 +160,6 @@ func parseCounterTable(lines []string, fields map[string]interface{}) error { } func storeFieldValues(lines []string, regex *regexp.Regexp, fields map[string]interface{}, entryTable []*Entry) error { - for _, v := range lines { entries := regex.FindStringSubmatch(v) if entries != nil { @@ -223,7 +218,7 @@ func (pf *PF) buildPfctlCmd() (string, []string, error) { func init() { inputs.Add("pf", func() telegraf.Input { - pf := new(PF) + pf := &PF{} pf.infoFunc = pf.callPfctl return pf }) diff --git a/plugins/inputs/pf/sample.conf b/plugins/inputs/pf/sample.conf new file mode 100644 index 0000000000000..aa5ab157dc5df --- /dev/null +++ b/plugins/inputs/pf/sample.conf @@ -0,0 +1,7 @@ +# Gather counters from PF +[[inputs.pf]] + ## PF requires root access on most systems. + ## Setting 'use_sudo' to true will make use of sudo to run pfctl. + ## Users must configure sudo to allow the telegraf user to run pfctl with no password. + ## pfctl can be restricted to only the list command "pfctl -s info". + use_sudo = false diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md index 53737a81ad098..a1f7585ca0f57 100644 --- a/plugins/inputs/pgbouncer/README.md +++ b/plugins/inputs/pgbouncer/README.md @@ -7,9 +7,10 @@ More information about the meaning of these metrics can be found in the - PgBouncer minimum tested version: 1.5 -### Configuration example +## Configuration -```toml +```toml @sample.conf +# Read metrics from one or many pgbouncer servers [[inputs.pgbouncer]] ## specify address via a url matching: ## postgres://[pqgotest[:password]]@host:port[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: ## host=localhost user=pgbouncer sslmode=disable address = "host=localhost user=pgbouncer sslmode=disable" ``` -#### `address` +### `address` Specify address via a postgresql connection string: - `host=/run/postgresql port=6432 user=telegraf database=pgbouncer` +```text +host=/run/postgresql port=6432 user=telegraf database=pgbouncer +``` Or via a url matching: - `postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full]` +```text +postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full] +``` All connection parameters are optional. -Without the dbname parameter, the driver will default to a database with the same name as the user. -This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. +Without the dbname parameter, the driver will default to a database with the +same name as the user. This dbname is just for instantiating a connection with +the server and doesn't restrict the databases we are trying to grab metrics for.
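To illustrate the two equivalent address forms, here is a small sketch that builds the URL variant with the standard library (the host, user, and password values are made up for the example, not defaults):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Compose the URL form of the pgbouncer address; the key/value DSN
	// form ("host=... user=... sslmode=...") carries the same information.
	u := url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword("pqgotest", "secret"),
		Host:     "localhost:6432",
		Path:     "/pgbouncer",
		RawQuery: url.Values{"sslmode": {"disable"}}.Encode(),
	}
	fmt.Println(u.String())
	// Output: postgres://pqgotest:secret@localhost:6432/pgbouncer?sslmode=disable
}
```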
-### Metrics +## Metrics - pgbouncer - tags: @@ -57,7 +63,7 @@ This dbname is just for instantiating a connection with the server and doesn't r - total_xact_count - total_xact_time -+ pgbouncer_pools +- pgbouncer_pools - tags: - db - pool_mode @@ -74,9 +80,9 @@ This dbname is just for instantiating a connection with the server and doesn't r - sv_tested - sv_used -### Example Output +## Example Output -``` +```shell pgbouncer,db=pgbouncer,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ avg_query_count=0i,avg_query_time=0i,avg_wait_time=0i,avg_xact_count=0i,avg_xact_time=0i,total_query_count=26i,total_query_time=0i,total_received=0i,total_sent=0i,total_wait_time=0i,total_xact_count=26i,total_xact_time=0i 1581569936000000000 pgbouncer_pools,db=pgbouncer,pool_mode=statement,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ ,user=pgbouncer cl_active=1i,cl_waiting=0i,maxwait=0i,maxwait_us=0i,sv_active=0i,sv_idle=0i,sv_login=0i,sv_tested=0i,sv_used=0i 1581569936000000000 ``` diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index 0b8c8c16acd02..5f68eddce9d21 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -1,16 +1,24 @@ +//go:generate ../../../tools/readme_config_includer/generator package pgbouncer import ( "bytes" + _ "embed" "strconv" + // Required for SQL framework driver + _ "github.com/jackc/pgx/v4/stdlib" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/postgresql" - _ "github.com/jackc/pgx/stdlib" // register driver ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type PgBouncer struct { postgresql.Service } @@ -19,26 +27,10 @@ var ignoredColumns = map[string]bool{"user": true, "database": true, "pool_mode" "avg_req": true, "avg_recv": true, "avg_sent": true, "avg_query": true, } -var sampleConfig = ` - ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]\ - ## ?sslmode=[disable|verify-ca|verify-full] - ## or a simple string: - ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production - ## - ## All connection parameters are optional. 
- ## - address = "host=localhost user=pgbouncer sslmode=disable" -` - -func (p *PgBouncer) SampleConfig() string { +func (*PgBouncer) SampleConfig() string { return sampleConfig } -func (p *PgBouncer) Description() string { - return "Read metrics from one or many pgbouncer servers" -} - func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { var ( err error @@ -61,7 +53,7 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { } for rows.Next() { - tags, columnMap, err := p.accRow(rows, acc, columns) + tags, columnMap, err := p.accRow(rows, columns) if err != nil { return err @@ -111,7 +103,7 @@ func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { } for poolRows.Next() { - tags, columnMap, err := p.accRow(poolRows, acc, columns) + tags, columnMap, err := p.accRow(poolRows, columns) if err != nil { return err } @@ -145,7 +137,7 @@ type scanner interface { Scan(dest ...interface{}) error } -func (p *PgBouncer) accRow(row scanner, acc telegraf.Accumulator, columns []string) (map[string]string, +func (p *PgBouncer) accRow(row scanner, columns []string) (map[string]string, map[string]*interface{}, error) { var columnVars []interface{} var dbname bytes.Buffer @@ -170,9 +162,13 @@ func (p *PgBouncer) accRow(row scanner, acc telegraf.Accumulator, columns []stri } if columnMap["database"] != nil { // extract the database name from the column map - dbname.WriteString((*columnMap["database"]).(string)) + if _, err := dbname.WriteString((*columnMap["database"]).(string)); err != nil { + return nil, nil, err + } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return nil, nil, err + } } var tagAddress string @@ -189,11 +185,9 @@ func init() { inputs.Add("pgbouncer", func() telegraf.Input { return &PgBouncer{ Service: postgresql.Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), IsPgBouncer: true, }, } diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go index 44e28c7f3335e..8d6d344f3a24c 100644 --- a/plugins/inputs/pgbouncer/pgbouncer_test.go +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -2,23 +2,62 @@ package pgbouncer import ( "fmt" + "testing" + + "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "testing" ) -func TestPgBouncerGeneratesMetrics(t *testing.T) { +func TestPgBouncerGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + postgresServicePort := "5432" + pgBouncerServicePort := "6432" + + backend := testutil.Container{ + Image: "postgres:alpine", + ExposedPorts: []string{postgresServicePort}, + Env: map[string]string{ + "POSTGRES_HOST_AUTH_METHOD": "trust", + }, + WaitingFor: wait.ForLog("database system is ready to accept connections"), + } + err := backend.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, backend.Terminate(), "terminating container failed") + }() + + container := testutil.Container{ + Image: "z9pascal/pgbouncer-container:1.17.0-latest", + ExposedPorts: []string{pgBouncerServicePort}, + Env: map[string]string{ + "PG_ENV_POSTGRESQL_USER": "pgbouncer", + 
"PG_ENV_POSTGRESQL_PASS": "pgbouncer", + }, + WaitingFor: wait.ForAll( + wait.ForListeningPort(nat.Port(pgBouncerServicePort)), + wait.ForLog("LOG process up"), + ), + } + err = container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + p := &PgBouncer{ Service: postgresql.Service{ Address: fmt.Sprintf( - "host=%s user=pgbouncer password=pgbouncer dbname=pgbouncer port=6432 sslmode=disable", - testutil.GetLocalHost(), + "host=%s user=pgbouncer password=pgbouncer dbname=pgbouncer port=%s sslmode=disable", + container.Address, + container.Ports[pgBouncerServicePort], ), IsPgBouncer: true, }, @@ -28,15 +67,19 @@ func TestPgBouncerGeneratesMetrics(t *testing.T) { require.NoError(t, p.Start(&acc)) require.NoError(t, p.Gather(&acc)) - intMetrics := []string{ - "total_requests", + // Return value of pgBouncer + // [pgbouncer map[db:pgbouncer server:host=localhost user=pgbouncer dbname=pgbouncer port=6432 ] map[avg_query_count:0 avg_query_time:0 avg_wait_time:0 avg_xact_count:0 avg_xact_time:0 total_query_count:3 total_query_time:0 total_received:0 total_sent:0 total_wait_time:0 total_xact_count:3 total_xact_time:0] 1620163750039747891 pgbouncer_pools map[db:pgbouncer pool_mode:statement server:host=localhost user=pgbouncer dbname=pgbouncer port=6432 user:pgbouncer] map[cl_active:1 cl_waiting:0 maxwait:0 maxwait_us:0 sv_active:0 sv_idle:0 sv_login:0 sv_tested:0 sv_used:0] 1620163750041444466] + + intMetricsPgBouncer := []string{ "total_received", "total_sent", "total_query_time", - "avg_req", - "avg_recv", - "avg_sent", - "avg_query", + "avg_query_count", + "avg_query_time", + "avg_wait_time", + } + + intMetricsPgBouncerPools := []string{ "cl_active", "cl_waiting", "sv_active", @@ -51,16 +94,21 @@ func TestPgBouncerGeneratesMetrics(t *testing.T) { metricsCounted := 0 - for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("pgbouncer", metric)) + for _, metric := range intMetricsPgBouncer { + require.True(t, acc.HasInt64Field("pgbouncer", metric)) + metricsCounted++ + } + + for _, metric := range intMetricsPgBouncerPools { + require.True(t, acc.HasInt64Field("pgbouncer_pools", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("pgbouncer", metric)) + require.True(t, acc.HasInt32Field("pgbouncer", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, len(intMetrics)+len(int32Metrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, len(intMetricsPgBouncer)+len(intMetricsPgBouncerPools)+len(int32Metrics), metricsCounted) } diff --git a/plugins/inputs/pgbouncer/sample.conf b/plugins/inputs/pgbouncer/sample.conf new file mode 100644 index 0000000000000..ed592bc8d3e89 --- /dev/null +++ b/plugins/inputs/pgbouncer/sample.conf @@ -0,0 +1,11 @@ +# Read metrics from one or many pgbouncer servers +[[inputs.pgbouncer]] + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@host:port[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. 
+ ## + address = "host=localhost user=pgbouncer sslmode=disable" diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md index b31f4b7e427bd..a1b3759c6ea63 100644 --- a/plugins/inputs/phpfpm/README.md +++ b/plugins/inputs/phpfpm/README.md @@ -2,9 +2,9 @@ Get phpfpm stats using either HTTP status page or fpm socket. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Read metrics of phpfpm, via HTTP status page or socket [[inputs.phpfpm]] ## An array of addresses to gather stats about. Specify an ip or hostname @@ -44,7 +44,7 @@ Get phpfpm stats using either HTTP status page or fpm socket. When using `unixsocket`, you have to ensure that telegraf runs on same host, and socket path is accessible to telegraf user. -### Metrics: +## Metrics - phpfpm - tags: @@ -62,9 +62,9 @@ host, and socket path is accessible to telegraf user. - max_children_reached - slow_requests -# Example Output +## Example Output -``` +```shell phpfpm,pool=www accepted_conn=13i,active_processes=2i,idle_processes=1i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083331187 phpfpm,pool=www2 accepted_conn=12i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691422 phpfpm,pool=www3 accepted_conn=11i,active_processes=1i,idle_processes=2i,listen_queue=0i,listen_queue_len=0i,max_active_processes=2i,max_children_reached=0i,max_listen_queue=0i,slow_requests=0i,total_processes=3i 1453011293083691658 diff --git a/plugins/inputs/phpfpm/child.go b/plugins/inputs/phpfpm/child.go index 2ebdf2ffbca35..b6a6f956d3bf0 100644 --- a/plugins/inputs/phpfpm/child.go +++ b/plugins/inputs/phpfpm/child.go @@ -10,7 +10,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/http/cgi" @@ -24,16 +23,16 @@ import ( // it's converted to an http.Request. type request struct { pw *io.PipeWriter - reqId uint16 + reqID uint16 params map[string]string buf [1024]byte rawParams []byte keepConn bool } -func newRequest(reqId uint16, flags uint8) *request { +func newRequest(reqID uint16, flags uint8) *request { r := &request{ - reqId: reqId, + reqID: reqID, params: map[string]string{}, keepConn: flags&flagKeepConn != 0, } @@ -79,7 +78,7 @@ func newResponse(c *child, req *request) *response { return &response{ req: req, header: http.Header{}, - w: newWriter(c.conn, typeStdout, req.reqId), + w: newWriter(c.conn, typeStdout, req.reqID), } } @@ -161,7 +160,7 @@ func (c *child) serve() { var errCloseConn = errors.New("fcgi: connection should be closed") -var emptyBody = ioutil.NopCloser(strings.NewReader("")) +var emptyBody = io.NopCloser(strings.NewReader("")) // ErrRequestAborted is returned by Read when a handler attempts to read the // body of a request that has been aborted by the web server. @@ -173,7 +172,7 @@ var ErrConnClosed = errors.New("fcgi: connection to web server closed") func (c *child) handleRecord(rec *record) error { c.mu.Lock() - req, ok := c.requests[rec.h.Id] + req, ok := c.requests[rec.h.ID] c.mu.Unlock() if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues { // The spec says to ignore unknown request IDs. 
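For orientation while reading the fcgi hunks below: every FastCGI record starts with a fixed 8-byte header, which `conn.writeRecord` serializes with `binary.Write` in big-endian order. A self-contained sketch of that framing, assuming the field layout of the plugin's `header` struct (the type value 1 is FCGI_BEGIN_REQUEST; `Type` is a `recType` in the plugin itself):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// header mirrors the plugin's 8-byte FastCGI record header
// (after the Id -> ID rename in this patch).
type header struct {
	Version       uint8
	Type          uint8 // recType in the plugin
	ID            uint16
	ContentLength uint16
	PaddingLength uint8
	Reserved      uint8
}

func main() {
	h := header{Version: 1, Type: 1, ID: 1, ContentLength: 8}
	var buf bytes.Buffer
	// FastCGI is big-endian on the wire, exactly as conn.writeRecord encodes it.
	if err := binary.Write(&buf, binary.BigEndian, h); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 01 01 00 01 00 08 00 00
}
```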
@@ -193,12 +192,11 @@ func (c *child) handleRecord(rec *record) error { return err } if br.role != roleResponder { - c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole) - return nil + return c.conn.writeEndRequest(rec.h.ID, 0, statusUnknownRole) } - req = newRequest(rec.h.Id, br.flags) + req = newRequest(rec.h.ID, br.flags) c.mu.Lock() - c.requests[rec.h.Id] = req + c.requests[rec.h.ID] = req c.mu.Unlock() return nil case typeParams: @@ -226,25 +224,32 @@ func (c *child) handleRecord(rec *record) error { if len(content) > 0 { // TODO(eds): This blocks until the handler reads from the pipe. // If the handler takes a long time, it might be a problem. - req.pw.Write(content) + if _, err := req.pw.Write(content); err != nil { + return err + } } else if req.pw != nil { - req.pw.Close() + if err := req.pw.Close(); err != nil { + return err + } } return nil case typeGetValues: values := map[string]string{"FCGI_MPXS_CONNS": "1"} - c.conn.writePairs(typeGetValuesResult, 0, values) - return nil + return c.conn.writePairs(typeGetValuesResult, 0, values) case typeData: // If the filter role is implemented, read the data stream here. return nil case typeAbortRequest: c.mu.Lock() - delete(c.requests, rec.h.Id) + delete(c.requests, rec.h.ID) c.mu.Unlock() - c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete) + if err := c.conn.writeEndRequest(rec.h.ID, 0, statusRequestComplete); err != nil { + return err + } if req.pw != nil { - req.pw.CloseWithError(ErrRequestAborted) + if err := req.pw.CloseWithError(ErrRequestAborted); err != nil { + return err + } } if !req.keepConn { // connection will close upon return @@ -254,8 +259,7 @@ func (c *child) handleRecord(rec *record) error { default: b := make([]byte, 8) b[0] = byte(rec.h.Type) - c.conn.writeRecord(typeUnknownType, 0, b) - return nil + return c.conn.writeRecord(typeUnknownType, 0, b) } } @@ -265,16 +269,22 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { if err != nil { // there was an error reading the request r.WriteHeader(http.StatusInternalServerError) - c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error())) + if err := c.conn.writeRecord(typeStderr, req.reqID, []byte(err.Error())); err != nil { + return + } } else { httpReq.Body = body c.handler.ServeHTTP(r, httpReq) } + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive r.Close() c.mu.Lock() - delete(c.requests, req.reqId) + delete(c.requests, req.reqID) c.mu.Unlock() - c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete) + if err := c.conn.writeEndRequest(req.reqID, 0, statusRequestComplete); err != nil { + return + } // Consume the entire body, so the host isn't still writing to // us when we close the socket below in the !keepConn case, @@ -283,10 +293,14 @@ func (c *child) serveRequest(req *request, body io.ReadCloser) { // some sort of abort request to the host, so the host // can properly cut off the client sending all the data. 
// For now just bound it a little and - io.CopyN(ioutil.Discard, body, 100<<20) + //nolint:errcheck,revive + io.CopyN(io.Discard, body, 100<<20) + //nolint:errcheck,revive body.Close() if !req.keepConn { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.conn.Close() } } @@ -298,6 +312,8 @@ func (c *child) cleanUp() { if req.pw != nil { // race with call to Close in c.serveRequest doesn't matter because // Pipe(Reader|Writer).Close are idempotent + // Ignore the returned error as we continue in the loop anyway + //nolint:errcheck,revive req.pw.CloseWithError(ErrConnClosed) } } diff --git a/plugins/inputs/phpfpm/fcgi.go b/plugins/inputs/phpfpm/fcgi.go index 689660ea093c3..45248329efda6 100644 --- a/plugins/inputs/phpfpm/fcgi.go +++ b/plugins/inputs/phpfpm/fcgi.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package fcgi implements the FastCGI protocol. +// Package phpfpm implements the FastCGI protocol. // Currently only the responder role is supported. // The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 package phpfpm @@ -45,12 +45,14 @@ const ( maxPad = 255 ) +//nolint:varcheck // For having proper order const ( roleResponder = iota + 1 // only Responders are implemented. roleAuthorizer roleFilter ) +//nolint:varcheck // For having proper order const ( statusRequestComplete = iota statusCantMultiplex @@ -58,12 +60,10 @@ const ( statusUnknownRole ) -const headerLen = 8 - type header struct { Version uint8 Type recType - Id uint16 + ID uint16 ContentLength uint16 PaddingLength uint8 Reserved uint8 @@ -72,7 +72,7 @@ type header struct { type beginRequest struct { role uint16 flags uint8 - reserved [5]uint8 + reserved [5]uint8 //nolint:unused // Memory reservation } func (br *beginRequest) read(content []byte) error { @@ -88,10 +88,10 @@ func (br *beginRequest) read(content []byte) error { // not synchronized because we don't care what the contents are var pad [maxPad]byte -func (h *header) init(recType recType, reqId uint16, contentLength int) { +func (h *header) init(recType recType, reqID uint16, contentLength int) { h.Version = 1 h.Type = recType - h.Id = reqId + h.ID = reqID h.ContentLength = uint16(contentLength) h.PaddingLength = uint8(-contentLength & 7) } @@ -135,16 +135,16 @@ func (rec *record) read(r io.Reader) (err error) { return nil } -func (r *record) content() []byte { - return r.buf[:r.h.ContentLength] +func (rec *record) content() []byte { + return rec.buf[:rec.h.ContentLength] } // writeRecord writes and sends a single record. 
-func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { +func (c *conn) writeRecord(recType recType, reqID uint16, b []byte) error { c.mutex.Lock() defer c.mutex.Unlock() c.buf.Reset() - c.h.init(recType, reqId, len(b)) + c.h.init(recType, reqID, len(b)) if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { return err } @@ -158,20 +158,20 @@ func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { return err } -func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { +func (c *conn) writeBeginRequest(reqID uint16, role uint16, flags uint8) error { b := [8]byte{byte(role >> 8), byte(role), flags} - return c.writeRecord(typeBeginRequest, reqId, b[:]) + return c.writeRecord(typeBeginRequest, reqID, b[:]) } -func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { +func (c *conn) writeEndRequest(reqID uint16, appStatus int, protocolStatus uint8) error { b := make([]byte, 8) binary.BigEndian.PutUint32(b, uint32(appStatus)) b[4] = protocolStatus - return c.writeRecord(typeEndRequest, reqId, b) + return c.writeRecord(typeEndRequest, reqID, b) } -func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error { - w := newWriter(c, recType, reqId) +func (c *conn) writePairs(recType recType, reqID uint16, pairs map[string]string) error { + w := newWriter(c, recType, reqID) b := make([]byte, 8) for k, v := range pairs { n := encodeSize(b, uint32(len(k))) @@ -186,8 +186,7 @@ func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string return err } } - w.Close() - return nil + return w.Close() } func readSize(s []byte) (uint32, int) { @@ -232,14 +231,16 @@ type bufWriter struct { func (w *bufWriter) Close() error { if err := w.Writer.Flush(); err != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive w.closer.Close() return err } return w.closer.Close() } -func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { - s := &streamWriter{c: c, recType: recType, reqId: reqId} +func newWriter(c *conn, recType recType, reqID uint16) *bufWriter { + s := &streamWriter{c: c, recType: recType, reqID: reqID} w := bufio.NewWriterSize(s, maxWrite) return &bufWriter{s, w} } @@ -249,7 +250,7 @@ func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { type streamWriter struct { c *conn recType recType - reqId uint16 + reqID uint16 } func (w *streamWriter) Write(p []byte) (int, error) { @@ -259,7 +260,7 @@ func (w *streamWriter) Write(p []byte) (int, error) { if n > maxWrite { n = maxWrite } - if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil { + if err := w.c.writeRecord(w.recType, w.reqID, p[:n]); err != nil { return nn, err } nn += n @@ -270,5 +271,5 @@ func (w *streamWriter) Write(p []byte) (int, error) { func (w *streamWriter) Close() error { // send empty record to close the stream - return w.c.writeRecord(w.recType, w.reqId, nil) + return w.c.writeRecord(w.recType, w.reqID, nil) } diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 9b42d91bd961a..b34b8a3063b52 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -24,7 +24,7 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) { laddr := net.UnixAddr{Name: args[0].(string), Net: h} con, err = net.DialUnix(h, nil, &laddr) default: - err = errors.New("fcgi: we only accept int (port) or string (socket) params.") + err = 
errors.New("fcgi: we only accept int (port) or string (socket) params") } fcgi := &conn{ rwc: con, @@ -33,26 +33,23 @@ func newFcgiClient(h string, args ...interface{}) (*conn, error) { return fcgi, err } -func (client *conn) Request( - env map[string]string, - requestData string, -) (retout []byte, reterr []byte, err error) { - defer client.rwc.Close() - var reqId uint16 = 1 +func (c *conn) Request(env map[string]string, requestData string) (retout []byte, reterr []byte, err error) { + defer c.rwc.Close() + var reqID uint16 = 1 - err = client.writeBeginRequest(reqId, uint16(roleResponder), 0) + err = c.writeBeginRequest(reqID, uint16(roleResponder), 0) if err != nil { - return + return nil, nil, err } - err = client.writePairs(typeParams, reqId, env) + err = c.writePairs(typeParams, reqID, env) if err != nil { - return + return nil, nil, err } if len(requestData) > 0 { - if err = client.writeRecord(typeStdin, reqId, []byte(requestData)); err != nil { - return + if err = c.writeRecord(typeStdin, reqID, []byte(requestData)); err != nil { + return nil, nil, err } } @@ -62,7 +59,7 @@ func (client *conn) Request( // receive until EOF or FCGI_END_REQUEST READ_LOOP: for { - err1 = rec.read(client.rwc) + err1 = rec.read(c.rwc) if err1 != nil && strings.Contains(err1.Error(), "use of closed network connection") { if err1 != io.EOF { err = err1 @@ -82,5 +79,5 @@ READ_LOOP: } } - return + return retout, reterr, err } diff --git a/plugins/inputs/phpfpm/fcgi_test.go b/plugins/inputs/phpfpm/fcgi_test.go index 15e0030a77151..7211c0c3971e1 100644 --- a/plugins/inputs/phpfpm/fcgi_test.go +++ b/plugins/inputs/phpfpm/fcgi_test.go @@ -8,11 +8,12 @@ import ( "bytes" "errors" "io" - "io/ioutil" "net/http" "testing" ) +const requestID uint16 = 1 + var sizeTests = []struct { size uint32 bytes []byte @@ -44,7 +45,7 @@ func TestSize(t *testing.T) { var streamTests = []struct { desc string recType recType - reqId uint16 + reqID uint16 content []byte raw []byte }{ @@ -90,8 +91,8 @@ outer: t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType) continue } - if rec.h.Id != test.reqId { - t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId) + if rec.h.ID != test.reqID { + t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.ID, test.reqID) continue } if !bytes.Equal(content, test.content) { @@ -100,7 +101,7 @@ outer: } buf.Reset() c := newConn(&nilCloser{buf}) - w := newWriter(c, test.recType, test.reqId) + w := newWriter(c, test.recType, test.reqID) if _, err := w.Write(test.content); err != nil { t.Errorf("%s: error writing record: %v", test.desc, err) continue @@ -124,7 +125,7 @@ func (c *writeOnlyConn) Write(p []byte) (int, error) { return len(p), nil } -func (c *writeOnlyConn) Read(p []byte) (int, error) { +func (c *writeOnlyConn) Read(_ []byte) (int, error) { return 0, errors.New("conn is write-only") } @@ -164,17 +165,16 @@ func nameValuePair11(nameData, valueData string) []byte { func makeRecord( recordType recType, - requestId uint16, contentData []byte, ) []byte { - requestIdB1 := byte(requestId >> 8) - requestIdB0 := byte(requestId) + requestIDB1 := byte(requestID >> 8) + requestIDB0 := byte(requestID) contentLength := len(contentData) contentLengthB1 := byte(contentLength >> 8) contentLengthB0 := byte(contentLength) return bytes.Join([][]byte{ - {1, byte(recordType), requestIdB1, requestIdB0, contentLengthB1, + {1, byte(recordType), requestIDB1, requestIDB0, contentLengthB1, contentLengthB0, 0, 0}, contentData, }, @@ -185,14 +185,13 @@ func 
makeRecord( // request body var streamBeginTypeStdin = bytes.Join([][]byte{ // set up request 1 - makeRecord(typeBeginRequest, 1, - []byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}), + makeRecord(typeBeginRequest, []byte{0, byte(roleResponder), 0, 0, 0, 0, 0, 0}), // add required parameters to request 1 - makeRecord(typeParams, 1, nameValuePair11("REQUEST_METHOD", "GET")), - makeRecord(typeParams, 1, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")), - makeRecord(typeParams, 1, nil), + makeRecord(typeParams, nameValuePair11("REQUEST_METHOD", "GET")), + makeRecord(typeParams, nameValuePair11("SERVER_PROTOCOL", "HTTP/1.1")), + makeRecord(typeParams, nil), // begin sending body of request 1 - makeRecord(typeStdin, 1, []byte("0123456789abcdef")), + makeRecord(typeStdin, []byte("0123456789abcdef")), }, nil) @@ -204,7 +203,7 @@ var cleanUpTests = []struct { { bytes.Join([][]byte{ streamBeginTypeStdin, - makeRecord(typeAbortRequest, 1, nil), + makeRecord(typeAbortRequest, nil), }, nil), ErrRequestAborted, @@ -242,7 +241,7 @@ func TestChildServeCleansUp(t *testing.T) { r *http.Request, ) { // block on reading body of request - _, err := io.Copy(ioutil.Discard, r.Body) + _, err := io.Copy(io.Discard, r.Body) if err != tt.err { t.Errorf("Expected %#v, got %#v", tt.err, err) } @@ -265,7 +264,7 @@ func (rwNopCloser) Close() error { } // Verifies it doesn't crash. Issue 11824. -func TestMalformedParams(t *testing.T) { +func TestMalformedParams(_ *testing.T) { input := []byte{ // beginRequest, requestId=1, contentLength=8, role=1, keepConn=1 1, 1, 0, 1, 0, 8, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, @@ -274,7 +273,7 @@ func TestMalformedParams(t *testing.T) { // end of params 1, 4, 0, 1, 0, 0, 0, 0, } - rw := rwNopCloser{bytes.NewReader(input), ioutil.Discard} + rw := rwNopCloser{bytes.NewReader(input), io.Discard} c := newChild(rw, http.DefaultServeMux) c.serve() } diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index 52907bb50749e..bba87a793fd03 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -1,38 +1,44 @@ +//go:generate ../../../tools/readme_config_includer/generator package phpfpm import ( "bufio" "bytes" + _ "embed" "fmt" "io" "net/http" "net/url" - "os" "strconv" "strings" "sync" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + const ( - PF_POOL = "pool" - PF_PROCESS_MANAGER = "process manager" - PF_START_SINCE = "start since" - PF_ACCEPTED_CONN = "accepted conn" - PF_LISTEN_QUEUE = "listen queue" - PF_MAX_LISTEN_QUEUE = "max listen queue" - PF_LISTEN_QUEUE_LEN = "listen queue len" - PF_IDLE_PROCESSES = "idle processes" - PF_ACTIVE_PROCESSES = "active processes" - PF_TOTAL_PROCESSES = "total processes" - PF_MAX_ACTIVE_PROCESSES = "max active processes" - PF_MAX_CHILDREN_REACHED = "max children reached" - PF_SLOW_REQUESTS = "slow requests" + PfPool = "pool" + PfProcessManager = "process manager" + PfStartSince = "start since" + PfAcceptedConn = "accepted conn" + PfListenQueue = "listen queue" + PfMaxListenQueue = "max listen queue" + PfListenQueueLen = "listen queue len" + PfIdleProcesses = "idle processes" + PfActiveProcesses = "active processes" + PfTotalProcesses = "total processes" + PfMaxActiveProcesses = "max active processes" + PfMaxChildrenReached = "max children reached" + PfSlowRequests = "slow requests" ) type metric map[string]int64 @@ -40,53 +46,16 @@ type poolStat map[string]metric type phpfpm struct { Urls []string - Timeout internal.Duration + Timeout config.Duration tls.ClientConfig client *http.Client } -var sampleConfig = ` - ## An array of addresses to gather stats about. Specify an ip or hostname - ## with optional port and path - ## - ## Plugin can be configured in three modes (either can be used): - ## - http: the URL must start with http:// or https://, ie: - ## "http://localhost/status" - ## "http://192.168.130.1/status?full" - ## - ## - unixsocket: path to fpm socket, ie: - ## "/var/run/php5-fpm.sock" - ## or using a custom fpm status path: - ## "/var/run/php5-fpm.sock:fpm-custom-status-path" - ## - ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: - ## "fcgi://10.0.0.12:9000/status" - ## "cgi://10.0.10.12:9001/status" - ## - ## Example of multiple gathering from local socket and remote host - ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] - urls = ["http://localhost/status"] - - ## Duration allowed to complete HTTP requests. 
- # timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (p *phpfpm) SampleConfig() string { +func (*phpfpm) SampleConfig() string { return sampleConfig } -func (p *phpfpm) Description() string { - return "Read metrics of phpfpm, via HTTP status page or socket" -} - func (p *phpfpm) Init() error { tlsCfg, err := p.ClientConfig.TLSConfig() if err != nil { @@ -97,7 +66,7 @@ func (p *phpfpm) Init() error { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: p.Timeout.Duration, + Timeout: time.Duration(p.Timeout), } return nil } @@ -132,7 +101,7 @@ func (p *phpfpm) Gather(acc telegraf.Accumulator) error { // Request status page to get stat raw data and import it func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { - return p.gatherHttp(addr, acc) + return p.gatherHTTP(addr, acc) } var ( @@ -145,12 +114,12 @@ func (p *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") { u, err := url.Parse(addr) if err != nil { - return fmt.Errorf("Unable parse server address '%s': %s", addr, err) + return fmt.Errorf("unable parse server address '%s': %s", addr, err) } socketAddr := strings.Split(u.Host, ":") - fcgiIp := socketAddr[0] + fcgiIP := socketAddr[0] fcgiPort, _ := strconv.Atoi(socketAddr[1]) - fcgi, err = newFcgiClient(fcgiIp, fcgiPort) + fcgi, err = newFcgiClient(fcgiIP, fcgiPort) if err != nil { return err } @@ -189,13 +158,12 @@ func (p *phpfpm) gatherFcgi(fcgi *conn, statusPath string, acc telegraf.Accumula if len(fpmErr) == 0 && err == nil { importMetric(bytes.NewReader(fpmOutput), acc, addr) return nil - } else { - return fmt.Errorf("Unable parse phpfpm status. 
Error: %v %v", string(fpmErr), err) } + return fmt.Errorf("unable parse phpfpm status, error: %v %v", string(fpmErr), err) } // Gather stat using http protocol -func (p *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error { +func (p *phpfpm) gatherHTTP(addr string, acc telegraf.Accumulator) error { u, err := url.Parse(addr) if err != nil { return fmt.Errorf("unable parse server address '%s': %v", addr, err) @@ -221,7 +189,7 @@ func (p *phpfpm) gatherHttp(addr string, acc telegraf.Accumulator) error { } // Import stat data into Telegraf system -func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { +func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) { stats := make(poolStat) var currentPool string @@ -235,7 +203,7 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { } fieldName := strings.Trim(keyvalue[0], " ") // We start to gather data for a new pool here - if fieldName == PF_POOL { + if fieldName == PfPool { currentPool = strings.Trim(keyvalue[1], " ") stats[currentPool] = make(metric) continue @@ -243,17 +211,17 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { // Start to parse metric for current pool switch fieldName { - case PF_START_SINCE, - PF_ACCEPTED_CONN, - PF_LISTEN_QUEUE, - PF_MAX_LISTEN_QUEUE, - PF_LISTEN_QUEUE_LEN, - PF_IDLE_PROCESSES, - PF_ACTIVE_PROCESSES, - PF_TOTAL_PROCESSES, - PF_MAX_ACTIVE_PROCESSES, - PF_MAX_CHILDREN_REACHED, - PF_SLOW_REQUESTS: + case PfStartSince, + PfAcceptedConn, + PfListenQueue, + PfMaxListenQueue, + PfListenQueueLen, + PfIdleProcesses, + PfActiveProcesses, + PfTotalProcesses, + PfMaxActiveProcesses, + PfMaxChildrenReached, + PfSlowRequests: fieldValue, err := strconv.ParseInt(strings.Trim(keyvalue[1], " "), 10, 64) if err == nil { stats[currentPool][fieldName] = fieldValue @@ -269,22 +237,20 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) poolStat { } fields := make(map[string]interface{}) for k, v := range stats[pool] { - fields[strings.Replace(k, " ", "_", -1)] = v + fields[strings.ReplaceAll(k, " ", "_")] = v } acc.AddFields("phpfpm", fields, tags) } - - return stats } func expandUrls(urls []string) ([]string, error) { addrs := make([]string, 0, len(urls)) - for _, url := range urls { - if isNetworkURL(url) { - addrs = append(addrs, url) + for _, address := range urls { + if isNetworkURL(address) { + addrs = append(addrs, address) continue } - paths, err := globUnixSocket(url) + paths, err := globUnixSocket(address) if err != nil { return nil, err } @@ -293,38 +259,29 @@ func expandUrls(urls []string) ([]string, error) { return addrs, nil } -func globUnixSocket(url string) ([]string, error) { - pattern, status := unixSocketPaths(url) +func globUnixSocket(address string) ([]string, error) { + pattern, status := unixSocketPaths(address) glob, err := globpath.Compile(pattern) if err != nil { return nil, fmt.Errorf("could not compile glob %q: %v", pattern, err) } paths := glob.Match() if len(paths) == 0 { - if _, err := os.Stat(paths[0]); err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Socket doesn't exist '%s': %s", pattern, err) - } - return nil, err - } - return nil, nil + return nil, fmt.Errorf("socket doesn't exist %q", pattern) } - addrs := make([]string, 0, len(paths)) - + addresses := make([]string, 0, len(paths)) for _, path := range paths { if status != "" { path = path + ":" + status } - addrs = append(addrs, path) + addresses = append(addresses, path) } - return addrs, nil 
+ return addresses, nil } -func unixSocketPaths(addr string) (string, string) { - var socketPath, statusPath string - +func unixSocketPaths(addr string) (socketPath string, statusPath string) { socketAddr := strings.Split(addr, ":") if len(socketAddr) >= 2 { socketPath = socketAddr[0] diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 7be2e6a27dbf8..14d03dd3fafe7 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -1,3 +1,9 @@ +//go:build !windows +// +build !windows + +// TODO: Windows - should be enabled for Windows when super asterisk is fixed on Windows +// https://github.com/influxdata/telegraf/issues/6248 + package phpfpm import ( @@ -10,17 +16,19 @@ import ( "net/http/httptest" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} // We create a fake server to return test data -func (s statServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (s statServer) ServeHTTP(w http.ResponseWriter, _ *http.Request) { w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) + // Ignore the returned error as the tests will fail anyway + //nolint:errcheck,revive fmt.Fprint(w, outputSample) } @@ -29,7 +37,8 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { require.Equal(t, "ok", r.URL.Query().Get("test")) w.Header().Set("Content-Type", "text/plain") w.Header().Set("Content-Length", fmt.Sprint(len(outputSample))) - fmt.Fprint(w, outputSample) + _, err := fmt.Fprint(w, outputSample) + require.NoError(t, err) })) defer ts.Close() @@ -38,13 +47,11 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { Urls: []string{url}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -71,12 +78,11 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { // Let OS find an available port tcp, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal("Cannot initialize test server") - } + require.NoError(t, err, "Cannot initialize test server") defer tcp.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) //Now we tested again above server @@ -84,12 +90,10 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { Urls: []string{"fcgi://" + tcp.Addr().String() + "/status"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -118,27 +122,24 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { // removing of socket fail when system restart /tmp is clear so // we don't have junk files around var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp.Close() s := 
statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) r := &phpfpm{ Urls: []string{tcp.Addr().String()}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -167,40 +168,35 @@ func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) { // removing of socket fail when system restart /tmp is clear so // we don't have junk files around var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) socket1 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) tcp1, err := net.Listen("unix", socket1) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp1.Close() - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) socket2 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) tcp2, err := net.Listen("unix", socket2) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp2.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp1, s) + //nolint:errcheck,revive go fcgi.Serve(tcp2, s) r := &phpfpm{ Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc1, acc2 testutil.Accumulator - err = acc1.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc1.GatherError(r.Gather)) - err = acc2.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc2.GatherError(r.Gather)) tags1 := map[string]string{ "pool": "www", @@ -235,27 +231,24 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { // removing of socket fail we won't have junk files around. 
Cuz when system // restart, it clears out /tmp var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + require.NoError(t, binary.Read(rand.Reader, binary.LittleEndian, &randomNumber)) tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) - if err != nil { - t.Fatal("Cannot initialize server on port ") - } + require.NoError(t, err, "Cannot initialize server on port ") defer tcp.Close() s := statServer{} + //nolint:errcheck,revive go fcgi.Serve(tcp, s) r := &phpfpm{ Urls: []string{tcp.Addr().String() + ":custom-status-path"}, } - err = r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(r.Gather)) tags := map[string]string{ "pool": "www", @@ -282,32 +275,34 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { //When not passing server config, we default to localhost //We just want to make sure we did request stat from localhost func TestPhpFpmDefaultGetFromLocalhost(t *testing.T) { - r := &phpfpm{} + r := &phpfpm{Urls: []string{"http://bad.localhost:62001/status"}} - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), "127.0.0.1/status") + require.Contains(t, err.Error(), "/status") } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t *testing.T) { + if testing.Short() { + t.Skip("Skipping long test in short mode") + } + r := &phpfpm{ Urls: []string{"http://aninvalidone"}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) - assert.Contains(t, err.Error(), `lookup aninvalidone`) + require.Contains(t, err.Error(), `unable to connect to phpfpm status page 'http://aninvalidone'`) + require.Contains(t, err.Error(), `lookup aninvalidone`) } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { @@ -315,15 +310,13 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi Urls: []string{"/tmp/invalid.sock"}, } - err := r.Init() - require.NoError(t, err) + require.NoError(t, r.Init()) var acc testutil.Accumulator - err = acc.GatherError(r.Gather) + err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Equal(t, `dial unix /tmp/invalid.sock: connect: no such file or directory`, err.Error()) - + require.Equal(t, `socket doesn't exist "/tmp/invalid.sock"`, err.Error()) } const outputSample = ` diff --git a/plugins/inputs/phpfpm/sample.conf b/plugins/inputs/phpfpm/sample.conf new file mode 100644 index 0000000000000..62dbf501e53f2 --- /dev/null +++ b/plugins/inputs/phpfpm/sample.conf @@ -0,0 +1,34 @@ +# Read metrics of phpfpm, via HTTP status page or socket +[[inputs.phpfpm]] + ## An array of addresses to gather stats about. 
Specify an ip or hostname + ## with optional port and path + ## + ## Plugin can be configured in three modes (either can be used): + ## - http: the URL must start with http:// or https://, ie: + ## "http://localhost/status" + ## "http://192.168.130.1/status?full" + ## + ## - unixsocket: path to fpm socket, ie: + ## "/var/run/php5-fpm.sock" + ## or using a custom fpm status path: + ## "/var/run/php5-fpm.sock:fpm-custom-status-path" + ## glob patterns are also supported: + ## "/var/run/php*.sock" + ## + ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: + ## "fcgi://10.0.0.12:9000/status" + ## "cgi://10.0.10.12:9001/status" + ## + ## Example of multiple gathering from local socket and remote host + ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] + urls = ["http://localhost/status"] + + ## Duration allowed to complete HTTP requests. + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 91af1b2ae33ed..64a496a8755e1 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -1,6 +1,7 @@ # Ping Input Plugin -Sends a ping message by executing the system ping command and reports the results. +Sends a ping message by executing the system ping command and reports the +results. This plugin has two main methods of operation: `exec` and `native`. The recommended method is `native`, which has greater system compatibility and @@ -13,7 +14,8 @@ ping packets. Most ping command implementations are supported, one notable exception being that there is currently no support for GNU Inetutils ping. You may instead use the iputils-ping implementation: -``` + +```sh apt-get install iputils-ping ``` @@ -21,9 +23,10 @@ When using `method = "native"` a ping is sent and the results are reported in native Go by the Telegraf process, eliminating the need to execute the system `ping` command. -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Ping given url(s) and return statistics [[inputs.ping]] ## Hosts to send ping packets to. urls = ["example.org"] @@ -57,6 +60,9 @@ native Go by the Telegraf process, eliminating the need to execute the system ## option of the ping command. # interface = "" + ## Percentiles to calculate. This only works with the native method. + # percentiles = [50, 95, 99] + ## Specify the ping executable binary. # binary = "ping" @@ -67,9 +73,13 @@ native Go by the Telegraf process, eliminating the need to execute the system ## Use only IPv6 addresses when resolving a hostname. # ipv6 = false + + ## Number of data bytes to be sent. Corresponds to the "-s" + ## option of the ping command. This only works with the native method. + # size = 56 ``` -#### File Limit +### File Limit Since this plugin runs the ping command, it may need to open multiple files per host. The number of files used is lessened with the `native` option but still @@ -81,42 +91,50 @@ use the "drop-in directory", usually located at `/etc/systemd/system/telegraf.service.d`. 
You can create or edit a drop-in file in the correct location using: + ```sh -$ systemctl edit telegraf +systemctl edit telegraf ``` Increase the number of open files: + ```ini [Service] LimitNOFILE=8192 ``` Restart Telegraf: + ```sh -$ systemctl edit telegraf +systemctl restart telegraf ``` -#### Linux Permissions +### Linux Permissions -When using `method = "native"`, Telegraf will attempt to use privileged raw -ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities. +When using `method = "native"`, Telegraf will attempt to use privileged raw ICMP +sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities or for +Telegraf to be run as root. With systemd: + ```sh -$ systemctl edit telegraf +systemctl edit telegraf ``` + ```ini [Service] CapabilityBoundingSet=CAP_NET_RAW AmbientCapabilities=CAP_NET_RAW ``` + ```sh -$ systemctl restart telegraf +systemctl restart telegraf ``` Without systemd: + ```sh -$ setcap cap_net_raw=eip /usr/bin/telegraf +setcap cap_net_raw=eip /usr/bin/telegraf ``` Reference [`man 7 capabilities`][man 7 capabilities] for more information about @@ -124,20 +142,12 @@ setting capabilities. [man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html -When Telegraf cannot listen on a privileged ICMP socket it will attempt to use -ICMP echo sockets. If you wish to use this method you must ensure Telegraf's -group, usually `telegraf`, is allowed to use ICMP echo sockets: +### Other OS Permissions -```sh -$ sysctl -w net.ipv4.ping_group_range="GROUP_ID_LOW GROUP_ID_HIGH" -``` +When using `method = "native"`, you will need permissions similar to the +executable ping program for your OS. -Reference [`man 7 icmp`][man 7 icmp] for more information about ICMP echo -sockets and the `ping_group_range` setting. - -[man 7 icmp]: http://man7.org/linux/man-pages/man7/icmp.7.html - -### Metrics +## Metrics - ping - tags: @@ -147,28 +157,29 @@ sockets and the `ping_group_range` setting. - packets_received (integer) - percent_packet_loss (float) - ttl (integer, Not available on Windows) - - average_response_ms (integer) - - minimum_response_ms (integer) - - maximum_response_ms (integer) - - standard_deviation_ms (integer, Available on Windows only with native ping) + - average_response_ms (float) + - minimum_response_ms (float) + - maximum_response_ms (float) + - standard_deviation_ms (float, Available on Windows only with method = "native") + - percentile\<N\>_ms (float, Where `<N>` is the percentile specified in `percentiles`. Available with method = "native" only) - errors (float, Windows only) - reply_received (integer, Windows with method = "exec" only) - percent_reply_loss (float, Windows with method = "exec" only) - result_code (int, success = 0, no such host = 1, ping error = 2) -##### reply_received vs packets_received +### reply_received vs packets_received -On Windows systems with `method = "exec"`, the "Destination net unreachable" reply will increment `packets_received` but not `reply_received`*. +On Windows systems with `method = "exec"`, the "Destination net unreachable" +reply will increment `packets_received` but not `reply_received`*.
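The `percentile<N>_ms` fields above use the R7 (Hyndman and Fan, 1996) definition that this patch adds to ping.go. A self-contained sketch of that interpolation, restructured slightly (this helper sorts its own input) and fed the same five round-trip times as the new unit test:

```go
package main

import (
	"fmt"
	"math"
	"sort"
	"time"
)

// Sketch of the R7 interpolation behind the percentile<N>_ms fields:
// rank = perc/100 * (n-1), then interpolate linearly between the two
// neighboring sorted round-trip times.
func percentile(values []time.Duration, perc int) time.Duration {
	if len(values) == 0 {
		return 0
	}
	sort.Slice(values, func(i, j int) bool { return values[i] < values[j] })
	rank := float64(perc) / 100.0 * float64(len(values)-1)
	lower := int(rank)
	frac := rank - math.Floor(rank)
	if lower >= len(values)-1 {
		return values[len(values)-1]
	}
	return values[lower] + time.Duration(frac*float64(values[lower+1]-values[lower]))
}

func main() {
	// Same RTTs the new unit test feeds in, deliberately unsorted.
	rtts := []time.Duration{
		3 * time.Millisecond, 4 * time.Millisecond, 1 * time.Millisecond,
		5 * time.Millisecond, 2 * time.Millisecond,
	}
	for _, p := range []int{50, 95, 99} {
		ms := float64(percentile(rtts, p).Nanoseconds()) / float64(time.Millisecond)
		fmt.Printf("percentile%d_ms = %v\n", p, ms) // 3, 4.799999, 4.96
	}
}
```

The expected outputs (3, 4.799999, 4.96) match the assertions added in TestPingGatherNative further down in this patch; the 4.799999 comes from float64 rounding of `95/100.0` followed by `time.Duration` truncation.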
-##### ttl +### ttl There is currently no support for TTL on windows with `"native"`; track -progress at https://github.com/golang/go/issues/7175 and -https://github.com/golang/go/issues/7174 +progress at <https://github.com/golang/go/issues/7175> and +<https://github.com/golang/go/issues/7174> +## Example Output -### Example Output - -``` +```shell ping,url=example.org average_response_ms=23.066,ttl=63,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 ``` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 008cfceacc5b9..42a7cbbef27d8 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -1,35 +1,51 @@ +//go:generate ../../../tools/readme_config_includer/generator package ping import ( - "context" + _ "embed" "errors" - "log" + "fmt" "math" "net" "os/exec" "runtime" + "sort" "strings" "sync" "time" - "github.com/glinton/ping" + "github.com/go-ping/ping" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +const ( + defaultPingDataBytesSize = 56 +) + // HostPinger is a function that runs the "ping" function using a list of // passed arguments. This can be easily switched with a mocked ping function // for unit test purposes (see ping_test.go) type HostPinger func(binary string, timeout float64, args ...string) (string, error) -type HostResolver func(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) - -type IsCorrectNetwork func(ip net.IPAddr) bool - type Ping struct { + // wg is used to wait for ping with multiple URLs wg sync.WaitGroup + // Pre-calculated interval and timeout + calcInterval time.Duration + calcTimeout time.Duration + + sourceAddress string + + Log telegraf.Logger `toml:"-"` + // Interval at which to ping (ping -i <INTERVAL>) PingInterval float64 `toml:"ping_interval"` @@ -64,71 +80,34 @@ type Ping struct { // host ping function pingHost HostPinger - // resolve host function - resolveHost HostResolver + nativePingFunc NativePingFunc - // listenAddr is the address associated with the interface defined. - listenAddr string -} + // Calculate the given percentiles when using native method + Percentiles []int -func (*Ping) Description() string { - return "Ping given url(s) and return statistics" + // Packet size + Size *int } -const sampleConfig = ` - ## Hosts to send ping packets to. - urls = ["example.org"] - - ## Method used for sending pings, can be either "exec" or "native". When set - ## to "exec" the systems ping command will be executed. When set to "native" - ## the plugin will send pings directly. - ## - ## While the default is "exec" for backwards compatibility, new deployments - ## are encouraged to use the "native" method for improved compatibility and - ## performance. - # method = "exec" - - ## Number of ping packets to send per interval. Corresponds to the "-c" - ## option of the ping command. - # count = 1 - - ## Time to wait between sending ping packets in seconds. Operates like the - ## "-i" option of the ping command. - # ping_interval = 1.0 - - ## If set, the time to wait for a ping response in seconds. Operates like - ## the "-W" option of the ping command. - # timeout = 1.0 - - ## If set, the total ping deadline, in seconds. Operates like the -w option - ## of the ping command.
- # deadline = 10 - - ## Interface or source address to send ping from. Operates like the -I or -S - ## option of the ping command. - # interface = "" - - ## Specify the ping executable binary. - # binary = "ping" - - ## Arguments for ping command. When arguments is not empty, the command from - ## the binary option will be used and other options (ping_interval, timeout, - ## etc) will be ignored. - # arguments = ["-c", "3"] +type roundTripTimeStats struct { + min float64 + avg float64 + max float64 + stddev float64 +} - ## Use only IPv6 addresses when resolving a hostname. - # ipv6 = false -` +type stats struct { + trans int + recv int + ttl int + roundTripTimeStats +} func (*Ping) SampleConfig() string { return sampleConfig } func (p *Ping) Gather(acc telegraf.Accumulator) error { - if p.Interface != "" && p.listenAddr == "" { - p.listenAddr = getAddr(p.Interface) - } - for _, host := range p.Urls { p.wg.Add(1) go func(host string) { @@ -148,268 +127,158 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { return nil } -func getAddr(iface string) string { - if addr := net.ParseIP(iface); addr != nil { - return addr.String() - } +type pingStats struct { + ping.Statistics + ttl int +} - ifaces, err := net.Interfaces() - if err != nil { - return "" - } +type NativePingFunc func(destination string) (*pingStats, error) - var ip net.IP - for i := range ifaces { - if ifaces[i].Name == iface { - addrs, err := ifaces[i].Addrs() - if err != nil { - return "" - } - if len(addrs) > 0 { - switch v := addrs[0].(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - } - if len(ip) == 0 { - return "" - } - return ip.String() - } - } +func (p *Ping) nativePing(destination string) (*pingStats, error) { + ps := &pingStats{} + + pinger, err := ping.NewPinger(destination) + if err != nil { + return nil, fmt.Errorf("failed to create new pinger: %w", err) } - return "" -} + pinger.SetPrivileged(true) -func hostPinger(binary string, timeout float64, args ...string) (string, error) { - bin, err := exec.LookPath(binary) - if err != nil { - return "", err + if p.IPv6 { + pinger.SetNetwork("ip6") } - c := exec.Command(bin, args...) 
- out, err := internal.CombinedOutputTimeout(c, - time.Second*time.Duration(timeout+5)) - return string(out), err -} -func filterIPs(addrs []net.IPAddr, filterFunc IsCorrectNetwork) []net.IPAddr { - n := 0 - for _, x := range addrs { - if filterFunc(x) { - addrs[n] = x - n++ + if p.Method == "native" { + pinger.Size = defaultPingDataBytesSize + if p.Size != nil { + pinger.Size = *p.Size } } - return addrs[:n] -} -func hostResolver(ctx context.Context, ipv6 bool, destination string) (*net.IPAddr, error) { - resolver := &net.Resolver{} - ips, err := resolver.LookupIPAddr(ctx, destination) + pinger.Source = p.sourceAddress + pinger.Interval = p.calcInterval - if err != nil { - return nil, err + if p.Deadline > 0 { + pinger.Timeout = time.Duration(p.Deadline) * time.Second } - if ipv6 { - ips = filterIPs(ips, isV6) - } else { - ips = filterIPs(ips, isV4) + // Get Time to live (TTL) of first response, matching original implementation + once := &sync.Once{} + pinger.OnRecv = func(pkt *ping.Packet) { + once.Do(func() { + ps.ttl = pkt.Ttl + }) } - if len(ips) == 0 { - return nil, errors.New("Cannot resolve ip address") + pinger.Count = p.Count + err = pinger.Run() + if err != nil { + if strings.Contains(err.Error(), "operation not permitted") { + if runtime.GOOS == "linux" { + return nil, fmt.Errorf("permission changes required, enable CAP_NET_RAW capabilities (refer to the ping plugin's README.md for more info)") + } + + return nil, fmt.Errorf("permission changes required, refer to the ping plugin's README.md for more info") + } + return nil, fmt.Errorf("%w", err) } - return &ips[0], err -} -func isV4(ip net.IPAddr) bool { - return ip.IP.To4() != nil -} + ps.Statistics = *pinger.Statistics() -func isV6(ip net.IPAddr) bool { - return !isV4(ip) + return ps, nil } func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { - ctx := context.Background() - interval := p.PingInterval - if interval < 0.2 { - interval = 0.2 - } + tags := map[string]string{"url": destination} + fields := map[string]interface{}{} - timeout := p.Timeout - if timeout == 0 { - timeout = 5 + stats, err := p.nativePingFunc(destination) + if err != nil { + p.Log.Errorf("ping failed: %s", err.Error()) + if strings.Contains(err.Error(), "unknown") { + fields["result_code"] = 1 + } else { + fields["result_code"] = 2 + } + acc.AddFields("ping", fields, tags) + return } - tick := time.NewTicker(time.Duration(interval * float64(time.Second))) - defer tick.Stop() - - if p.Deadline > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(p.Deadline)*time.Second) - defer cancel() + fields = map[string]interface{}{ + "result_code": 0, + "packets_transmitted": stats.PacketsSent, + "packets_received": stats.PacketsRecv, } - host, err := p.resolveHost(ctx, p.IPv6, destination) - if err != nil { - acc.AddFields( - "ping", - map[string]interface{}{"result_code": 1}, - map[string]string{"url": destination}, - ) - acc.AddError(err) + if stats.PacketsSent == 0 { + p.Log.Debug("no packets sent") + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) return } - resps := make(chan *ping.Response) - rsps := []*ping.Response{} - - r := &sync.WaitGroup{} - r.Add(1) - go func() { - for res := range resps { - rsps = append(rsps, res) - } - r.Done() - }() - - wg := &sync.WaitGroup{} - c := ping.Client{} - - var doErr error - var packetsSent int - - type sentReq struct { - err error - sent bool + if stats.PacketsRecv == 0 { + p.Log.Debug("no packets received") + fields["result_code"] = 1 + 
fields["percent_packet_loss"] = float64(100) + acc.AddFields("ping", fields, tags) + return } - sents := make(chan sentReq) - r.Add(1) - go func() { - for sent := range sents { - if sent.err != nil { - doErr = sent.err - } - if sent.sent { - packetsSent++ - } - } - r.Done() - }() - - for i := 0; i < p.Count; i++ { - select { - case <-ctx.Done(): - goto finish - case <-tick.C: - ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout*float64(time.Second))) - defer cancel() - - wg.Add(1) - go func(seq int) { - defer wg.Done() - resp, err := c.Do(ctx, &ping.Request{ - Dst: net.ParseIP(host.String()), - Src: net.ParseIP(p.listenAddr), - Seq: seq, - }) - - sent := sentReq{err: err, sent: true} - if err != nil { - if strings.Contains(err.Error(), "not permitted") { - sent.sent = false - } - sents <- sent - return - } - - resps <- resp - sents <- sent - }(i + 1) - } + sort.Sort(durationSlice(stats.Rtts)) + for _, perc := range p.Percentiles { + var value = percentile(stats.Rtts, perc) + var field = fmt.Sprintf("percentile%v_ms", perc) + fields[field] = float64(value.Nanoseconds()) / float64(time.Millisecond) } -finish: - wg.Wait() - close(resps) - close(sents) - - r.Wait() - - if doErr != nil && strings.Contains(doErr.Error(), "not permitted") { - log.Printf("D! [inputs.ping] %s", doErr.Error()) + // Set TTL only on supported platform. See golang.org/x/net/ipv4/payload_cmsg.go + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + fields["ttl"] = stats.ttl } - tags, fields := onFin(packetsSent, rsps, doErr, destination) + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 + fields["percent_packet_loss"] = float64(stats.PacketLoss) + fields["minimum_response_ms"] = float64(stats.MinRtt) / float64(time.Millisecond) + fields["average_response_ms"] = float64(stats.AvgRtt) / float64(time.Millisecond) + fields["maximum_response_ms"] = float64(stats.MaxRtt) / float64(time.Millisecond) + fields["standard_deviation_ms"] = float64(stats.StdDevRtt) / float64(time.Millisecond) + acc.AddFields("ping", fields, tags) } -func onFin(packetsSent int, resps []*ping.Response, err error, destination string) (map[string]string, map[string]interface{}) { - packetsRcvd := len(resps) +type durationSlice []time.Duration - tags := map[string]string{"url": destination} - fields := map[string]interface{}{ - "result_code": 0, - "packets_transmitted": packetsSent, - "packets_received": packetsRcvd, - } +func (p durationSlice) Len() int { return len(p) } +func (p durationSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p durationSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - if packetsSent == 0 { - if err != nil { - fields["result_code"] = 2 - } - return tags, fields +// R7 from Hyndman and Fan (1996), which matches Excel +func percentile(values durationSlice, perc int) time.Duration { + if len(values) == 0 { + return 0 } - - if packetsRcvd == 0 { - if err != nil { - fields["result_code"] = 1 - } - fields["percent_packet_loss"] = float64(100) - return tags, fields + if perc < 0 { + perc = 0 } - - fields["percent_packet_loss"] = float64(packetsSent-packetsRcvd) / float64(packetsSent) * 100 - ttl := resps[0].TTL - - var min, max, avg, total time.Duration - min = resps[0].RTT - max = resps[0].RTT - - for _, res := range resps { - if res.RTT < min { - min = res.RTT - } - if res.RTT > max { - max = res.RTT - } - total += res.RTT + if perc > 100 { + perc = 100 } + var percFloat = float64(perc) / 100.0 
- avg = total / time.Duration(packetsRcvd) - var sumsquares time.Duration - for _, res := range resps { - sumsquares += (res.RTT - avg) * (res.RTT - avg) - } - stdDev := time.Duration(math.Sqrt(float64(sumsquares / time.Duration(packetsRcvd)))) + var count = len(values) + var rank = percFloat * float64(count-1) + var rankInteger = int(rank) + var rankFraction = rank - math.Floor(rank) - // Set TTL only on supported platform. See golang.org/x/net/ipv4/payload_cmsg.go - switch runtime.GOOS { - case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - fields["ttl"] = ttl + if rankInteger >= count-1 { + return values[count-1] } - fields["minimum_response_ms"] = float64(min.Nanoseconds()) / float64(time.Millisecond) - fields["average_response_ms"] = float64(avg.Nanoseconds()) / float64(time.Millisecond) - fields["maximum_response_ms"] = float64(max.Nanoseconds()) / float64(time.Millisecond) - fields["standard_deviation_ms"] = float64(stdDev.Nanoseconds()) / float64(time.Millisecond) - - return tags, fields + upper := values[rankInteger+1] + lower := values[rankInteger] + return lower + time.Duration(rankFraction*float64(upper-lower)) } // Init ensures the plugin is configured correctly. @@ -418,14 +287,55 @@ func (p *Ping) Init() error { return errors.New("bad number of packets to transmit") } + // The interval cannot be below 0.2 seconds, matching ping implementation: https://linux.die.net/man/8/ping + if p.PingInterval < 0.2 { + p.calcInterval = time.Duration(.2 * float64(time.Second)) + } else { + p.calcInterval = time.Duration(p.PingInterval * float64(time.Second)) + } + + // If no timeout is given default to 5 seconds, matching original implementation + if p.Timeout == 0 { + p.calcTimeout = time.Duration(5) * time.Second + } else { + p.calcTimeout = time.Duration(p.Timeout) * time.Second + } + + // Support either an IP address or interface name + if p.Interface != "" { + if addr := net.ParseIP(p.Interface); addr != nil { + p.sourceAddress = p.Interface + } else { + i, err := net.InterfaceByName(p.Interface) + if err != nil { + return fmt.Errorf("failed to get interface: %w", err) + } + addrs, err := i.Addrs() + if err != nil { + return fmt.Errorf("failed to get the address of interface: %w", err) + } + p.sourceAddress = addrs[0].(*net.IPNet).IP.String() + } + } + return nil } +func hostPinger(binary string, timeout float64, args ...string) (string, error) { + bin, err := exec.LookPath(binary) + if err != nil { + return "", err + } + c := exec.Command(bin, args...) 
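+ // Allow the external ping binary 5 extra seconds beyond the configured
+ // timeout before CombinedOutputTimeout force-kills it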
+ out, err := internal.CombinedOutputTimeout(c, + time.Second*time.Duration(timeout+5)) + return string(out), err +} + func init() { inputs.Add("ping", func() telegraf.Input { - return &Ping{ + p := &Ping{ pingHost: hostPinger, - resolveHost: hostResolver, PingInterval: 1.0, Count: 1, Timeout: 1.0, @@ -433,6 +343,9 @@ func init() { Method: "exec", Binary: "ping", Arguments: []string{}, + Percentiles: []int{}, } + p.nativePingFunc = p.nativePing + return p }) } diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go index a014a8237e8e7..c09c4a3fcd359 100644 --- a/plugins/inputs/ping/ping_notwindows.go +++ b/plugins/inputs/ping/ping_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package ping @@ -56,7 +57,7 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { return } } - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out) + stats, err := processPingOutput(out) if err != nil { // fatal error acc.AddError(fmt.Errorf("%s: %s", err, u)) @@ -66,25 +67,25 @@ func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { } // Calculate packet loss percentage - loss := float64(trans-rec) / float64(trans) * 100.0 + loss := float64(stats.trans-stats.recv) / float64(stats.trans) * 100.0 - fields["packets_transmitted"] = trans - fields["packets_received"] = rec + fields["packets_transmitted"] = stats.trans + fields["packets_received"] = stats.recv fields["percent_packet_loss"] = loss - if ttl >= 0 { - fields["ttl"] = ttl + if stats.ttl >= 0 { + fields["ttl"] = stats.ttl } - if min >= 0 { - fields["minimum_response_ms"] = min + if stats.min >= 0 { + fields["minimum_response_ms"] = stats.min } - if avg >= 0 { - fields["average_response_ms"] = avg + if stats.avg >= 0 { + fields["average_response_ms"] = stats.avg } - if max >= 0 { - fields["maximum_response_ms"] = max + if stats.max >= 0 { + fields["maximum_response_ms"] = stats.max } - if stddev >= 0 { - fields["standard_deviation_ms"] = stddev + if stats.stddev >= 0 { + fields["standard_deviation_ms"] = stats.stddev } acc.AddFields("ping", fields, tags) } @@ -164,36 +165,47 @@ func (p *Ping) args(url string, system string) []string { // round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms // // It returns (<transmitted packets>, <received packets>, <average response>) -func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) { - var trans, recv, ttl int = 0, 0, -1 - var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0 +func processPingOutput(out string) (stats, error) { + stats := stats{ + trans: 0, + recv: 0, + ttl: -1, + roundTripTimeStats: roundTripTimeStats{ + min: -1.0, + avg: -1.0, + max: -1.0, + stddev: -1.0, + }, + } + // Set this error to nil if we find a 'transmitted' line - err := errors.New("Fatal error processing ping output") + err := errors.New("fatal error processing ping output") lines := strings.Split(out, "\n") for _, line := range lines { // Reading only first TTL, ignoring other TTL messages - if ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) { - ttl, err = getTTL(line) - } else if strings.Contains(line, "transmitted") && - strings.Contains(line, "received") { - trans, recv, err = getPacketStats(line, trans, recv) + if stats.ttl == -1 && (strings.Contains(line, "ttl=") || strings.Contains(line, "hlim=")) { + stats.ttl, err = getTTL(line) + } else if strings.Contains(line, "transmitted") && strings.Contains(line, "received") { + stats.trans, stats.recv, err = getPacketStats(line) + if err != nil { - return trans, recv,
ttl, min, avg, max, stddev, err + return stats, err } } else if strings.Contains(line, "min/avg/max") { - min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev) + stats.roundTripTimeStats, err = checkRoundTripTimeStats(line) if err != nil { - return trans, recv, ttl, min, avg, max, stddev, err + return stats, err } } } - return trans, recv, ttl, min, avg, max, stddev, err + return stats, err } -func getPacketStats(line string, trans, recv int) (int, int, error) { +func getPacketStats(line string) (trans int, recv int, err error) { + trans, recv = 0, 0 + stats := strings.Split(line, ", ") // Transmitted packets - trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0]) + trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0]) if err != nil { return trans, recv, err } @@ -208,28 +220,35 @@ func getTTL(line string) (int, error) { return strconv.Atoi(ttlMatch[2]) } -func checkRoundTripTimeStats(line string, min, avg, max, - stddev float64) (float64, float64, float64, float64, error) { +func checkRoundTripTimeStats(line string) (roundTripTimeStats, error) { + roundTripTimeStats := roundTripTimeStats{ + min: -1.0, + avg: -1.0, + max: -1.0, + stddev: -1.0, + } + stats := strings.Split(line, " ")[3] data := strings.Split(stats, "/") - min, err := strconv.ParseFloat(data[0], 64) + var err error + roundTripTimeStats.min, err = strconv.ParseFloat(data[0], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } - avg, err = strconv.ParseFloat(data[1], 64) + roundTripTimeStats.avg, err = strconv.ParseFloat(data[1], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } - max, err = strconv.ParseFloat(data[2], 64) + roundTripTimeStats.max, err = strconv.ParseFloat(data[2], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } if len(data) == 4 { - stddev, err = strconv.ParseFloat(data[3], 64) + roundTripTimeStats.stddev, err = strconv.ParseFloat(data[3], 64) if err != nil { - return min, avg, max, stddev, err + return roundTripTimeStats, err } } - return min, avg, max, stddev, err + return roundTripTimeStats, err } diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 0c8cfb0939daa..94a65075e651a 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -1,18 +1,21 @@ +//go:build !windows // +build !windows package ping import ( - "context" "errors" - "net" + "fmt" "reflect" "sort" "testing" + "time" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/go-ping/ping" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" ) // BSD/Darwin ping output @@ -77,45 +80,45 @@ ping: -i interval too short: Operation not permitted // Test that ping command output is processed properly func TestProcessPingOutput(t *testing.T) { - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(bsdPingOutput) - assert.NoError(t, err) - assert.Equal(t, 55, ttl, "ttl value is 55") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 15.087, min, 0.001) - assert.InDelta(t, 20.224, avg, 0.001) - assert.InDelta(t, 27.263, max, 0.001) - assert.InDelta(t, 4.076, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(freebsdPing6Output) - assert.NoError(t, err) - assert.Equal(t, 117, ttl, "ttl value 
is 117") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 35.727, min, 0.001) - assert.InDelta(t, 53.211, avg, 0.001) - assert.InDelta(t, 93.870, max, 0.001) - assert.InDelta(t, 22.000, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(linuxPingOutput) - assert.NoError(t, err) - assert.Equal(t, 63, ttl, "ttl value is 63") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were received") - assert.InDelta(t, 35.225, min, 0.001) - assert.InDelta(t, 43.628, avg, 0.001) - assert.InDelta(t, 51.806, max, 0.001) - assert.InDelta(t, 5.325, stddev, 0.001) - - trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput) - assert.NoError(t, err) - assert.Equal(t, 56, ttl, "ttl value is 56") - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, rec, "4 packets were received") - assert.InDelta(t, 15.810, min, 0.001) - assert.InDelta(t, 17.611, avg, 0.001) - assert.InDelta(t, 22.559, max, 0.001) - assert.InDelta(t, -1.0, stddev, 0.001) + stats, err := processPingOutput(bsdPingOutput) + require.NoError(t, err) + require.Equal(t, 55, stats.ttl, "ttl value is 55") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 15.087, stats.min, 0.001) + require.InDelta(t, 20.224, stats.avg, 0.001) + require.InDelta(t, 27.263, stats.max, 0.001) + require.InDelta(t, 4.076, stats.stddev, 0.001) + + stats, err = processPingOutput(freebsdPing6Output) + require.NoError(t, err) + require.Equal(t, 117, stats.ttl, "ttl value is 117") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 35.727, stats.min, 0.001) + require.InDelta(t, 53.211, stats.avg, 0.001) + require.InDelta(t, 93.870, stats.max, 0.001) + require.InDelta(t, 22.000, stats.stddev, 0.001) + + stats, err = processPingOutput(linuxPingOutput) + require.NoError(t, err) + require.Equal(t, 63, stats.ttl, "ttl value is 63") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were received") + require.InDelta(t, 35.225, stats.min, 0.001) + require.InDelta(t, 43.628, stats.avg, 0.001) + require.InDelta(t, 51.806, stats.max, 0.001) + require.InDelta(t, 5.325, stats.stddev, 0.001) + + stats, err = processPingOutput(busyBoxPingOutput) + require.NoError(t, err) + require.Equal(t, 56, stats.ttl, "ttl value is 56") + require.Equal(t, 4, stats.trans, "4 packets were transmitted") + require.Equal(t, 4, stats.recv, "4 packets were received") + require.InDelta(t, 15.810, stats.min, 0.001) + require.InDelta(t, 17.611, stats.avg, 0.001) + require.InDelta(t, 22.559, stats.max, 0.001) + require.InDelta(t, -1.0, stats.stddev, 0.001) } // Linux ping output with varying TTL @@ -134,22 +137,22 @@ rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms // Test that ping command output is processed properly func TestProcessPingOutputWithVaryingTTL(t *testing.T) { - trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(linuxPingOutputWithVaryingTTL) - assert.NoError(t, err) - assert.Equal(t, 63, ttl, "ttl value is 63") - assert.Equal(t, 5, trans, "5 packets were transmitted") - assert.Equal(t, 5, rec, "5 packets were transmitted") - assert.InDelta(t, 35.225, min, 0.001) - assert.InDelta(t, 43.628, avg, 0.001) - assert.InDelta(t, 51.806, 
max, 0.001) - assert.InDelta(t, 5.325, stddev, 0.001) + stats, err := processPingOutput(linuxPingOutputWithVaryingTTL) + require.NoError(t, err) + require.Equal(t, 63, stats.ttl, "ttl value is 63") + require.Equal(t, 5, stats.trans, "5 packets were transmitted") + require.Equal(t, 5, stats.recv, "5 packets were transmitted") + require.InDelta(t, 35.225, stats.min, 0.001) + require.InDelta(t, 43.628, stats.avg, 0.001) + require.InDelta(t, 51.806, stats.max, 0.001) + require.InDelta(t, 5.325, stats.stddev, 0.001) } // Test that processPingOutput returns an error when 'ping' fails to run, such // as when an invalid argument is provided func TestErrorProcessPingOutput(t *testing.T) { - _, _, _, _, _, _, _, err := processPingOutput(fatalPingOutput) - assert.Error(t, err, "Error was expected from processPingOutput") + _, err := processPingOutput(fatalPingOutput) + require.Error(t, err, "Error was expected from processPingOutput") } // Test that default arg lists are created correctly @@ -227,7 +230,7 @@ func TestArguments(t *testing.T) { } } -func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockHostPinger(_ string, _ float64, _ ...string) (string, error) { return linuxPingOutput, nil } @@ -239,7 +242,7 @@ func TestPingGather(t *testing.T) { pingHost: mockHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "localhost"} fields := map[string]interface{}{ "packets_transmitted": 5, @@ -258,6 +261,22 @@ func TestPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } +func TestPingGatherIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode, retrieves systems ping utility") + } + + var acc testutil.Accumulator + p, ok := inputs.Inputs["ping"]().(*Ping) + p.Log = testutil.Logger{} + require.True(t, ok) + p.Urls = []string{"localhost", "influxdata.com"} + require.NoError(t, acc.GatherError(p.Gather)) + + require.Equal(t, 0, acc.Metrics[0].Fields["result_code"]) + require.Equal(t, 0, acc.Metrics[1].Fields["result_code"]) +} + var lossyPingOutput = ` PING www.google.com (216.58.218.164) 56(84) bytes of data. 64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms @@ -269,7 +288,7 @@ PING www.google.com (216.58.218.164) 56(84) bytes of data. 
rtt min/avg/max/mdev = 35.225/44.033/51.806/5.325 ms ` -func mockLossyHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockLossyHostPinger(_ string, _ float64, _ ...string) (string, error) { return lossyPingOutput, nil } @@ -281,7 +300,7 @@ func TestLossyPingGather(t *testing.T) { pingHost: mockLossyHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "www.google.com"} fields := map[string]interface{}{ "packets_transmitted": 5, @@ -305,7 +324,7 @@ Request timeout for icmp_seq 0 2 packets transmitted, 0 packets received, 100.0% packet loss ` -func mockErrorHostPinger(binary string, timeout float64, args ...string) (string, error) { +func mockErrorHostPinger(_ string, _ float64, _ ...string) (string, error) { // This error will not trigger correct error paths return errorPingOutput, nil } @@ -319,7 +338,7 @@ func TestBadPingGather(t *testing.T) { pingHost: mockErrorHostPinger, } - acc.GatherError(p.Gather) + require.NoError(t, acc.GatherError(p.Gather)) tags := map[string]string{"url": "www.amazon.com"} fields := map[string]interface{}{ "packets_transmitted": 2, @@ -330,8 +349,8 @@ func TestBadPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } -func mockFatalHostPinger(binary string, timeout float64, args ...string) (string, error) { - return fatalPingOutput, errors.New("So very bad") +func mockFatalHostPinger(_ string, _ float64, _ ...string) (string, error) { + return fatalPingOutput, errors.New("so very bad") } // Test that a fatal ping command does not gather any statistics. @@ -342,20 +361,22 @@ func TestFatalPingGather(t *testing.T) { pingHost: mockFatalHostPinger, } - acc.GatherError(p.Gather) - assert.False(t, acc.HasMeasurement("packets_transmitted"), + err := acc.GatherError(p.Gather) + require.Error(t, err) + require.EqualValues(t, err.Error(), "host www.amazon.com: ping: -i interval too short: Operation not permitted, so very bad") + require.False(t, acc.HasMeasurement("packets_transmitted"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("packets_received"), + require.False(t, acc.HasMeasurement("packets_received"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("percent_packet_loss"), + require.False(t, acc.HasMeasurement("percent_packet_loss"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("ttl"), + require.False(t, acc.HasMeasurement("ttl"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("minimum_response_ms"), + require.False(t, acc.HasMeasurement("minimum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("average_response_ms"), + require.False(t, acc.HasMeasurement("average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasMeasurement("maximum_response_ms"), + require.False(t, acc.HasMeasurement("maximum_response_ms"), "Fatal ping should not have packet measurements") } @@ -364,8 +385,8 @@ func TestErrorWithHostNamePingGather(t *testing.T) { out string error error }{ - {"", errors.New("host www.amazon.com: So very bad")}, - {"so bad", errors.New("host www.amazon.com: so bad, So very bad")}, + {"", errors.New("host www.amazon.com: so very bad")}, + {"so bad", errors.New("host www.amazon.com: so bad, so very bad")}, } for _, param := range params { @@ -373,12 +394,12 @@ func 
TestErrorWithHostNamePingGather(t *testing.T) { p := Ping{ Urls: []string{"www.amazon.com"}, pingHost: func(binary string, timeout float64, args ...string) (string, error) { - return param.out, errors.New("So very bad") + return param.out, errors.New("so very bad") }, } - acc.GatherError(p.Gather) - assert.True(t, len(acc.Errors) > 0) - assert.Contains(t, acc.Errors, param.error) + require.Error(t, acc.GatherError(p.Gather)) + require.True(t, len(acc.Errors) > 0) + require.Contains(t, acc.Errors, param.error) } } @@ -388,56 +409,128 @@ func TestPingBinary(t *testing.T) { Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { - assert.True(t, binary == "ping6") + require.True(t, binary == "ping6") return "", nil }, } - acc.GatherError(p.Gather) -} - -func mockHostResolver(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { - ipaddr := net.IPAddr{} - ipaddr.IP = net.IPv4(127, 0, 0, 1) - return &ipaddr, nil + err := acc.GatherError(p.Gather) + require.Error(t, err) + require.EqualValues(t, err.Error(), "fatal error processing ping output: www.google.com") } // Test that Gather function works using native ping func TestPingGatherNative(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to permission requirements.") + type test struct { + P *Ping } - var acc testutil.Accumulator - p := Ping{ + fakePingFunc := func(destination string) (*pingStats, error) { + s := &pingStats{ + Statistics: ping.Statistics{ + PacketsSent: 5, + PacketsRecv: 5, + Rtts: []time.Duration{ + 3 * time.Millisecond, + 4 * time.Millisecond, + 1 * time.Millisecond, + 5 * time.Millisecond, + 2 * time.Millisecond, + }, + }, + ttl: 1, + } + + return s, nil + } + + tests := []test{ + { + P: &Ping{ + Urls: []string{"localhost", "127.0.0.2"}, + Method: "native", + Count: 5, + Percentiles: []int{50, 95, 99}, + nativePingFunc: fakePingFunc, + }, + }, + { + P: &Ping{ + Urls: []string{"localhost", "127.0.0.2"}, + Method: "native", + Count: 5, + PingInterval: 1, + Percentiles: []int{50, 95, 99}, + nativePingFunc: fakePingFunc, + }, + }, + } + + for _, tc := range tests { + var acc testutil.Accumulator + require.NoError(t, tc.P.Init()) + require.NoError(t, acc.GatherError(tc.P.Gather)) + require.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) + require.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) + require.True(t, acc.HasField("ping", "percentile50_ms")) + require.Equal(t, float64(3), acc.Metrics[0].Fields["percentile50_ms"]) + require.True(t, acc.HasField("ping", "percentile95_ms")) + require.Equal(t, float64(4.799999), acc.Metrics[0].Fields["percentile95_ms"]) + require.True(t, acc.HasField("ping", "percentile99_ms")) + require.Equal(t, float64(4.96), acc.Metrics[0].Fields["percentile99_ms"]) + require.True(t, acc.HasField("ping", "percent_packet_loss")) + require.True(t, acc.HasField("ping", "minimum_response_ms")) + require.True(t, acc.HasField("ping", "average_response_ms")) + require.True(t, acc.HasField("ping", "maximum_response_ms")) + require.True(t, acc.HasField("ping", "standard_deviation_ms")) + } +} + +func TestNoPacketsSent(t *testing.T) { + p := &Ping{ + Log: testutil.Logger{}, Urls: []string{"localhost", "127.0.0.2"}, Method: "native", Count: 5, - resolveHost: mockHostResolver, + Percentiles: []int{50, 95, 99}, + nativePingFunc: func(destination string) (*pingStats, error) { + s := &pingStats{ + Statistics: 
ping.Statistics{ + PacketsSent: 0, + PacketsRecv: 0, + }, + } + + return s, nil + }, } - assert.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_transmitted", 5)) - assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) -} + var testAcc testutil.Accumulator + require.NoError(t, p.Init()) -func mockHostResolverError(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { - return nil, errors.New("myMock error") + p.pingToURLNative("localhost", &testAcc) + require.Zero(t, testAcc.Errors) + require.True(t, testAcc.HasField("ping", "result_code")) + require.Equal(t, 2, testAcc.Metrics[0].Fields["result_code"]) } // Test failed DNS resolutions func TestDNSLookupError(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to permission requirements.") + p := &Ping{ + Count: 1, + Log: testutil.Logger{}, + Urls: []string{"localhost"}, + Method: "native", + IPv6: false, + nativePingFunc: func(destination string) (*pingStats, error) { + return nil, fmt.Errorf("unknown") + }, } - var acc testutil.Accumulator - p := Ping{ - Urls: []string{"localhost"}, - Method: "native", - IPv6: false, - resolveHost: mockHostResolverError, - } + var testAcc testutil.Accumulator + require.NoError(t, p.Init()) - acc.GatherError(p.Gather) - assert.True(t, len(acc.Errors) > 0) + p.pingToURLNative("localhost", &testAcc) + require.Zero(t, testAcc.Errors) + require.True(t, testAcc.HasField("ping", "result_code")) + require.Equal(t, 1, testAcc.Metrics[0].Fields["result_code"]) } diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index f53d6f09a7373..1d3d933e7736b 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 4618ec4db4942..77137b1700ef6 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ping @@ -7,9 +8,9 @@ import ( "reflect" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) // Windows ping format ( should support multilanguage ?) 
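(Editor's aside on the percentile expectations in TestPingGatherNative above: the expected values of 3, roughly 4.8, and 4.96 ms are consistent with linear interpolation over the five sorted RTTs of 1 to 5 ms supplied by fakePingFunc. Below is a minimal sketch of that scheme under that assumption; the helper name is illustrative, not the plugin's actual code.)

```go
package main

import (
	"fmt"
	"math"
	"sort"
	"time"
)

// percentile linearly interpolates the p-th percentile of a set of RTTs and
// returns it in milliseconds. (Hypothetical helper: one scheme consistent
// with the expected values in the test; the plugin's code may differ.)
func percentile(rtts []time.Duration, p float64) float64 {
	sorted := append([]time.Duration(nil), rtts...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })

	rank := p / 100 * float64(len(sorted)-1) // fractional index into the sorted slice
	lo := int(math.Floor(rank))
	hi := int(math.Ceil(rank))
	loMs := float64(sorted[lo]) / float64(time.Millisecond)
	hiMs := float64(sorted[hi]) / float64(time.Millisecond)
	return loMs + (rank-float64(lo))*(hiMs-loMs)
}

func main() {
	// The five RTTs from fakePingFunc above: 3, 4, 1, 5, 2 ms.
	rtts := []time.Duration{
		3 * time.Millisecond, 4 * time.Millisecond, 1 * time.Millisecond,
		5 * time.Millisecond, 2 * time.Millisecond,
	}
	for _, p := range []float64{50, 95, 99} {
		// prints approx. 3, 4.8 and 4.96, up to float rounding
		fmt.Printf("p%.0f = %g ms\n", p, percentile(rtts, p))
	}
}
```

The test's 4.799999 expectation for percentile95_ms suggests the plugin's own arithmetic accumulates a little floating-point rounding on top of the same interpolation.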
@@ -43,22 +44,22 @@ Approximate round trip times in milli-seconds: func TestHost(t *testing.T) { trans, recReply, recPacket, avg, min, max, err := processPingOutput(winPLPingOutput) - assert.NoError(t, err) - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, recReply, "4 packets were reply") - assert.Equal(t, 4, recPacket, "4 packets were received") - assert.Equal(t, 50, avg, "Average 50") - assert.Equal(t, 46, min, "Min 46") - assert.Equal(t, 57, max, "max 57") + require.NoError(t, err) + require.Equal(t, 4, trans, "4 packets were transmitted") + require.Equal(t, 4, recReply, "4 packets were reply") + require.Equal(t, 4, recPacket, "4 packets were received") + require.Equal(t, 50, avg, "Average 50") + require.Equal(t, 46, min, "Min 46") + require.Equal(t, 57, max, "max 57") trans, recReply, recPacket, avg, min, max, err = processPingOutput(winENPingOutput) - assert.NoError(t, err) - assert.Equal(t, 4, trans, "4 packets were transmitted") - assert.Equal(t, 4, recReply, "4 packets were reply") - assert.Equal(t, 4, recPacket, "4 packets were received") - assert.Equal(t, 50, avg, "Average 50") - assert.Equal(t, 50, min, "Min 50") - assert.Equal(t, 52, max, "Max 52") + require.NoError(t, err) + require.Equal(t, 4, trans, "4 packets were transmitted") + require.Equal(t, 4, recReply, "4 packets were reply") + require.Equal(t, 4, recPacket, "4 packets were received") + require.Equal(t, 50, avg, "Average 50") + require.Equal(t, 50, min, "Min 50") + require.Equal(t, 52, max, "Max 52") } func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { @@ -113,6 +114,7 @@ func mockErrorHostPinger(binary string, timeout float64, args ...string) (string func TestBadPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.amazon.com"}, pingHost: mockErrorHostPinger, } @@ -133,6 +135,7 @@ func TestBadPingGather(t *testing.T) { func TestArguments(t *testing.T) { arguments := []string{"-c", "3"} p := Ping{ + Log: testutil.Logger{}, Count: 2, Timeout: 12.0, Arguments: arguments, @@ -169,6 +172,7 @@ func mockLossyHostPinger(binary string, timeout float64, args ...string) (string func TestLossyPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockLossyHostPinger, } @@ -229,26 +233,27 @@ func mockFatalHostPinger(binary string, timeout float64, args ...string) (string func TestFatalPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.amazon.com"}, pingHost: mockFatalHostPinger, } acc.GatherError(p.Gather) - assert.True(t, acc.HasFloatField("ping", "errors"), + require.True(t, acc.HasFloatField("ping", "errors"), "Fatal ping should have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "packets_transmitted"), + require.False(t, acc.HasInt64Field("ping", "packets_transmitted"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "packets_received"), + require.False(t, acc.HasInt64Field("ping", "packets_received"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasFloatField("ping", "percent_packet_loss"), + require.False(t, acc.HasFloatField("ping", "percent_packet_loss"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasFloatField("ping", "percent_reply_loss"), + require.False(t, acc.HasFloatField("ping", "percent_reply_loss"), "Fatal ping should not have 
packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } @@ -274,6 +279,7 @@ func mockUnreachableHostPinger(binary string, timeout float64, args ...string) ( func TestUnreachablePingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockUnreachableHostPinger, } @@ -291,13 +297,13 @@ func TestUnreachablePingGather(t *testing.T) { } acc.AssertContainsTaggedFields(t, "ping", fields, tags) - assert.False(t, acc.HasFloatField("ping", "errors"), + require.False(t, acc.HasFloatField("ping", "errors"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } @@ -321,6 +327,7 @@ func mockTTLExpiredPinger(binary string, timeout float64, args ...string) (strin func TestTTLExpiredPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, pingHost: mockTTLExpiredPinger, } @@ -338,23 +345,24 @@ func TestTTLExpiredPingGather(t *testing.T) { } acc.AssertContainsTaggedFields(t, "ping", fields, tags) - assert.False(t, acc.HasFloatField("ping", "errors"), + require.False(t, acc.HasFloatField("ping", "errors"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "average_response_ms"), + require.False(t, acc.HasInt64Field("ping", "average_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "maximum_response_ms"), "Fatal ping should not have packet measurements") - assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), + require.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } func TestPingBinary(t *testing.T) { var acc testutil.Accumulator p := Ping{ + Log: testutil.Logger{}, Urls: []string{"www.google.com"}, Binary: "ping6", pingHost: func(binary string, timeout float64, args ...string) (string, error) { - assert.True(t, binary == "ping6") + require.True(t, binary == "ping6") return "", nil }, } diff --git a/plugins/inputs/ping/sample.conf b/plugins/inputs/ping/sample.conf new file mode 100644 index 0000000000000..976d813b0b876 --- /dev/null +++ b/plugins/inputs/ping/sample.conf @@ -0,0 +1,51 @@ +# Ping given url(s) and return statistics +[[inputs.ping]] + ## Hosts to send ping packets to. 
+ urls = ["example.org"] + + ## Method used for sending pings, can be either "exec" or "native". When set + ## to "exec" the system's ping command will be executed. When set to "native" + ## the plugin will send pings directly. + ## + ## While the default is "exec" for backwards compatibility, new deployments + ## are encouraged to use the "native" method for improved compatibility and + ## performance. + # method = "exec" + + ## Number of ping packets to send per interval. Corresponds to the "-c" + ## option of the ping command. + # count = 1 + + ## Time to wait between sending ping packets in seconds. Operates like the + ## "-i" option of the ping command. + # ping_interval = 1.0 + + ## If set, the time to wait for a ping response in seconds. Operates like + ## the "-W" option of the ping command. + # timeout = 1.0 + + ## If set, the total ping deadline, in seconds. Operates like the -w option + ## of the ping command. + # deadline = 10 + + ## Interface or source address to send ping from. Operates like the -I or -S + ## option of the ping command. + # interface = "" + + ## Percentiles to calculate. This only works with the native method. + # percentiles = [50, 95, 99] + + ## Specify the ping executable binary. + # binary = "ping" + + ## Arguments for ping command. When arguments is not empty, the command from + ## the binary option will be used and other options (ping_interval, timeout, + ## etc) will be ignored. + # arguments = ["-c", "3"] + + ## Use only IPv6 addresses when resolving a hostname. + # ipv6 = false + + ## Number of data bytes to be sent. Corresponds to the "-s" + ## option of the ping command. This only works with the native method. + # size = 56 diff --git a/plugins/inputs/postfix/README.md b/plugins/inputs/postfix/README.md index 2fdfacd9d193c..069bd74538629 100644 --- a/plugins/inputs/postfix/README.md +++ b/plugins/inputs/postfix/README.md @@ -3,20 +3,21 @@ The postfix plugin reports metrics on the postfix queues. For each of the active, hold, incoming, maildrop, and deferred queues -(http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue +(<http://www.postfix.org/QSHAPE_README.html#queues>), it will report the queue length (number of items), size (bytes used by items), and age (age of oldest item in seconds). -### Configuration +## Configuration -```toml +```toml @sample.conf +# Measure postfix queue statistics [[inputs.postfix]] ## Postfix queue directory. If not provided, telegraf will try to use ## 'postconf -h queue_directory' to determine it. # queue_directory = "/var/spool/postfix" ``` -#### Permissions +### Permissions Telegraf will need read access to the files in the queue directory.
You may need to alter the permissions of these directories to provide access to the @@ -26,20 +27,22 @@ This can be setup either using standard unix permissions or with Posix ACLs, you will only need to use one method: Unix permissions: + ```sh -$ sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred} -$ sudo chmod -R g+rXs /var/spool/postfix/{active,hold,incoming,deferred} -$ sudo usermod -a -G postdrop telegraf -$ sudo chmod g+r /var/spool/postfix/maildrop +sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred} +sudo chmod -R g+rXs /var/spool/postfix/{active,hold,incoming,deferred} +sudo usermod -a -G postdrop telegraf +sudo chmod g+r /var/spool/postfix/maildrop ``` Posix ACL: + ```sh -$ sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/ -$ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ +sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/ +sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ ``` -### Metrics +## Metrics - postfix_queue - tags: @@ -49,10 +52,9 @@ $ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ - size (integer, bytes) - age (integer, seconds) +## Example Output -### Example Output - -``` +```shell postfix_queue,queue=active length=3,size=12345,age=9 postfix_queue,queue=hold length=0,size=0,age=0 postfix_queue,queue=maildrop length=1,size=2000,age=2 diff --git a/plugins/inputs/postfix/postfix.go b/plugins/inputs/postfix/postfix.go index 8700362d0d63f..fd6c149b2f34f 100644 --- a/plugins/inputs/postfix/postfix.go +++ b/plugins/inputs/postfix/postfix.go @@ -1,6 +1,13 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build !windows +// +build !windows + +// postfix doesn't aim for Windows + package postfix import ( + _ "embed" "fmt" "os" "os/exec" @@ -12,13 +19,9 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -const sampleConfig = ` - ## Postfix queue directory. If not provided, telegraf will try to use - ## 'postconf -h queue_directory' to determine it. - # queue_directory = "/var/spool/postfix" -` - -const description = "Measure postfix queue statistics" +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
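// Editorial note on the embedding pattern introduced here, assuming standard
// go:embed semantics: a //go:embed directive binds to the variable declaration
// that follows it (only blank lines and // comments may sit between them), and
// the file must blank-import the "embed" package (here as `_ "embed"`). That
// is why the generator warns not to remove the two lines below.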
+//go:embed sample.conf +var sampleConfig string func getQueueDirectory() (string, error) { qd, err := exec.Command("postconf", "-h", "queue_directory").Output() @@ -28,9 +31,10 @@ func getQueueDirectory() (string, error) { return strings.TrimSpace(string(qd)), nil } -func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { +func qScan(path string, acc telegraf.Accumulator) (map[string]interface{}, error) { var length, size int64 var oldest time.Time + err := filepath.Walk(path, func(_ string, finfo os.FileInfo, err error) error { if err != nil { acc.AddError(fmt.Errorf("error scanning %s: %s", path, err)) @@ -52,23 +56,35 @@ func qScan(path string, acc telegraf.Accumulator) (int64, int64, int64, error) { } return nil }) + if err != nil { - return 0, 0, 0, err + return nil, err } + var age int64 if !oldest.IsZero() { - age = int64(time.Now().Sub(oldest) / time.Second) + age = int64(time.Since(oldest) / time.Second) } else if length != 0 { // system doesn't support ctime age = -1 } - return length, size, age, nil + + fields := map[string]interface{}{"length": length, "size": size} + if age != -1 { + fields["age"] = age + } + + return fields, nil } type Postfix struct { QueueDirectory string } +func (*Postfix) SampleConfig() string { + return sampleConfig +} + func (p *Postfix) Gather(acc telegraf.Accumulator) error { if p.QueueDirectory == "" { var err error @@ -79,29 +95,18 @@ func (p *Postfix) Gather(acc telegraf.Accumulator) error { } for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred"} { - length, size, age, err := qScan(filepath.Join(p.QueueDirectory, q), acc) + fields, err := qScan(filepath.Join(p.QueueDirectory, q), acc) if err != nil { acc.AddError(fmt.Errorf("error scanning queue %s: %s", q, err)) continue } - fields := map[string]interface{}{"length": length, "size": size} - if age != -1 { - fields["age"] = age - } + acc.AddFields("postfix_queue", fields, map[string]string{"queue": q}) } return nil } -func (p *Postfix) SampleConfig() string { - return sampleConfig -} - -func (p *Postfix) Description() string { - return description -} - func init() { inputs.Add("postfix", func() telegraf.Input { return &Postfix{ diff --git a/plugins/inputs/postfix/postfix_test.go b/plugins/inputs/postfix/postfix_test.go index 5dbc91d13e23f..5215fe291d633 100644 --- a/plugins/inputs/postfix/postfix_test.go +++ b/plugins/inputs/postfix/postfix_test.go @@ -1,31 +1,31 @@ +//go:build !windows +// +build !windows + package postfix import ( - "io/ioutil" "os" "path/filepath" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestGather(t *testing.T) { - td, err := ioutil.TempDir("", "") - require.NoError(t, err) - defer os.RemoveAll(td) + td := t.TempDir() for _, q := range []string{"active", "hold", "incoming", "maildrop", "deferred/0/0", "deferred/F/F"} { require.NoError(t, os.MkdirAll(filepath.FromSlash(td+"/"+q), 0755)) } - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) - require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) - 
require.NoError(t, ioutil.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/active/02"), []byte("defg"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/hold/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/incoming/01"), []byte("abcd"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/0/0/01"), []byte("abc"), 0644)) + require.NoError(t, os.WriteFile(filepath.FromSlash(td+"/deferred/F/F/F1"), []byte("abc"), 0644)) p := Postfix{ QueueDirectory: td, @@ -39,20 +39,20 @@ func TestGather(t *testing.T) { metrics[m.Tags["queue"]] = m } - assert.Equal(t, int64(2), metrics["active"].Fields["length"]) - assert.Equal(t, int64(7), metrics["active"].Fields["size"]) - assert.InDelta(t, 0, metrics["active"].Fields["age"], 10) + require.Equal(t, int64(2), metrics["active"].Fields["length"]) + require.Equal(t, int64(7), metrics["active"].Fields["size"]) + require.InDelta(t, 0, metrics["active"].Fields["age"], 10) - assert.Equal(t, int64(1), metrics["hold"].Fields["length"]) - assert.Equal(t, int64(3), metrics["hold"].Fields["size"]) + require.Equal(t, int64(1), metrics["hold"].Fields["length"]) + require.Equal(t, int64(3), metrics["hold"].Fields["size"]) - assert.Equal(t, int64(1), metrics["incoming"].Fields["length"]) - assert.Equal(t, int64(4), metrics["incoming"].Fields["size"]) + require.Equal(t, int64(1), metrics["incoming"].Fields["length"]) + require.Equal(t, int64(4), metrics["incoming"].Fields["size"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["length"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["size"]) - assert.Equal(t, int64(0), metrics["maildrop"].Fields["age"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["length"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["size"]) + require.Equal(t, int64(0), metrics["maildrop"].Fields["age"]) - assert.Equal(t, int64(2), metrics["deferred"].Fields["length"]) - assert.Equal(t, int64(6), metrics["deferred"].Fields["size"]) + require.Equal(t, int64(2), metrics["deferred"].Fields["length"]) + require.Equal(t, int64(6), metrics["deferred"].Fields["size"]) } diff --git a/plugins/inputs/postfix/postfix_windows.go b/plugins/inputs/postfix/postfix_windows.go new file mode 100644 index 0000000000000..3a2c5e5cb3619 --- /dev/null +++ b/plugins/inputs/postfix/postfix_windows.go @@ -0,0 +1,4 @@ +//go:build windows +// +build windows + +package postfix diff --git a/plugins/inputs/postfix/sample.conf b/plugins/inputs/postfix/sample.conf new file mode 100644 index 0000000000000..72e4d1c186bab --- /dev/null +++ b/plugins/inputs/postfix/sample.conf @@ -0,0 +1,5 @@ +# Measure postfix queue statistics +[[inputs.postfix]] + ## Postfix queue directory. If not provided, telegraf will try to use + ## 'postconf -h queue_directory' to determine it. 
+ # queue_directory = "/var/spool/postfix" diff --git a/plugins/inputs/postfix/stat_ctim.go b/plugins/inputs/postfix/stat_ctim.go index 456df5ffd4dd2..06ddccb178fce 100644 --- a/plugins/inputs/postfix/stat_ctim.go +++ b/plugins/inputs/postfix/stat_ctim.go @@ -1,3 +1,4 @@ +//go:build dragonfly || linux || netbsd || openbsd || solaris // +build dragonfly linux netbsd openbsd solaris package postfix diff --git a/plugins/inputs/postfix/stat_ctimespec.go b/plugins/inputs/postfix/stat_ctimespec.go index 40e0de6cc4a40..03f4e0a435f2c 100644 --- a/plugins/inputs/postfix/stat_ctimespec.go +++ b/plugins/inputs/postfix/stat_ctimespec.go @@ -1,3 +1,4 @@ +//go:build darwin || freebsd // +build darwin freebsd package postfix diff --git a/plugins/inputs/postfix/stat_none.go b/plugins/inputs/postfix/stat_none.go index d9b67b1663af8..c1ca6a41c662f 100644 --- a/plugins/inputs/postfix/stat_none.go +++ b/plugins/inputs/postfix/stat_none.go @@ -1,3 +1,4 @@ +//go:build !dragonfly && !linux && !netbsd && !openbsd && !solaris && !darwin && !freebsd // +build !dragonfly,!linux,!netbsd,!openbsd,!solaris,!darwin,!freebsd package postfix diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index 627fd2dbbfa88..940dedbba0337 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -1,7 +1,11 @@ # PostgreSQL Input Plugin -This postgresql plugin provides metrics for your postgres database. It currently works with postgres versions 8.1+. It uses data from the built in _pg_stat_database_ and pg_stat_bgwriter views. The metrics recorded depend on your version of postgres. See table: -``` +This postgresql plugin provides metrics for your postgres database. It currently +works with postgres versions 8.1+. It uses data from the built in +_pg_stat_database_ and pg_stat_bgwriter views. The metrics recorded depend on +your version of postgres. See table: + +```sh pg version 9.2+ 9.1 8.3-9.0 8.1-8.2 7.4-8.0(unsupported) --- --- --- ------- ------- ------- datid x x x x @@ -27,38 +31,88 @@ stats_reset* x x _* value ignored and therefore not recorded._ +More information about the meaning of these metrics can be found in the +[PostgreSQL Documentation][1]. -More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) +[1]: http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW ## Configuration + +```toml @sample.conf +# Read metrics from one or many postgresql servers +[[inputs.postgresql]] + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. + ## + ## Without the dbname parameter, the driver will default to a database + ## with the same name as the user. This dbname is just for instantiating a + ## connection with the server and doesn't restrict the databases we are trying + ## to grab metrics for. + ## + address = "host=localhost user=postgres sslmode=disable" + ## A custom name for the database that will be used as the "server" tag in the + ## measurement output. If not specified, a default one generated from + ## the connection address is used. + # outputaddress = "db01" + + ## connection configuration. + ## maxlifetime - specify the maximum lifetime of a connection. 
+ ## default is forever (0s) + # max_lifetime = "0s" + + ## A list of databases to explicitly ignore. If not specified, metrics for all + ## databases are gathered. Do NOT use with the 'databases' option. + # ignored_databases = ["postgres", "template0", "template1"] + + ## A list of databases to pull metrics about. If not specified, metrics for all + ## databases are gathered. Do NOT use with the 'ignored_databases' option. + # databases = ["app_production", "testing"] + + ## Whether to use prepared statements when connecting to the database. + ## This should be set to false when connecting through a PgBouncer instance + ## with pool_mode set to transaction. + prepared_statements = true +``` + Specify address via a postgresql connection string: - `host=localhost port=5432 user=telegraf database=telegraf` +```text +host=localhost port=5432 user=telegraf database=telegraf +``` Or via an url matching: - `postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full]` +```text +postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full] +``` -All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. +All connection parameters are optional. Without the dbname parameter, the driver +will default to a database with the same name as the user. This dbname is just +for instantiating a connection with the server and doesn't restrict the +databases we are trying to grab metrics for. -A list of databases to explicitly ignore. If not specified, metrics for all databases are gathered. Do NOT use with the 'databases' option. +A list of databases to explicitly ignore. If not specified, metrics for all +databases are gathered. Do NOT use with the 'databases' option. - `ignored_databases = ["postgres", "template0", "template1"]` +```text +ignored_databases = ["postgres", "template0", "template1"] +``` -A list of databases to pull metrics about. If not specified, metrics for all databases are gathered. Do NOT use with the 'ignored_databases' option. +A list of databases to pull metrics about. If not specified, metrics for all +databases are gathered. Do NOT use with the 'ignored_databases' option. - `databases = ["app_production", "testing"]` +```text +databases = ["app_production", "testing"] +``` ### TLS Configuration Add the `sslkey`, `sslcert` and `sslrootcert` options to your DSN: -``` -host=localhost user=pgotest dbname=app_production sslmode=require sslkey=/etc/telegraf/key.pem sslcert=/etc/telegraf/cert.pem sslrootcert=/etc/telegraf/ca.pem -``` -### Configuration example -```toml -[[inputs.postgresql]] - address = "postgres://telegraf@localhost/someDB" - ignored_databases = ["template0", "template1"] +```shell +host=localhost user=pgotest dbname=app_production sslmode=require sslkey=/etc/telegraf/key.pem sslcert=/etc/telegraf/cert.pem sslrootcert=/etc/telegraf/ca.pem ``` diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 0911b20ce7184..d99ee2db8aba6 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -1,72 +1,45 @@ +//go:generate ../../../tools/readme_config_includer/generator package postgresql import ( "bytes" + _ "embed" "fmt" "strings" - // register in driver.
- _ "github.com/jackc/pgx/stdlib" + _ "github.com/jackc/pgx/v4/stdlib" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Postgresql struct { Service - Databases []string - IgnoredDatabases []string + Databases []string `toml:"databases"` + IgnoredDatabases []string `toml:"ignored_databases"` + PreparedStatements bool `toml:"prepared_statements"` } var ignoredColumns = map[string]bool{"stats_reset": true} -var sampleConfig = ` - ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]\ - ## ?sslmode=[disable|verify-ca|verify-full] - ## or a simple string: - ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production - ## - ## All connection parameters are optional. - ## - ## Without the dbname parameter, the driver will default to a database - ## with the same name as the user. This dbname is just for instantiating a - ## connection with the server and doesn't restrict the databases we are trying - ## to grab metrics for. - ## - address = "host=localhost user=postgres sslmode=disable" - ## A custom name for the database that will be used as the "server" tag in the - ## measurement output. If not specified, a default one generated from - ## the connection address is used. - # outputaddress = "db01" - - ## connection configuration. - ## maxlifetime - specify the maximum lifetime of a connection. - ## default is forever (0s) - max_lifetime = "0s" - - ## A list of databases to explicitly ignore. If not specified, metrics for all - ## databases are gathered. Do NOT use with the 'databases' option. - # ignored_databases = ["postgres", "template0", "template1"] - - ## A list of databases to pull metrics about. If not specified, metrics for all - ## databases are gathered. Do NOT use with the 'ignored_databases' option. 
- # databases = ["app_production", "testing"] -` - -func (p *Postgresql) SampleConfig() string { +func (*Postgresql) SampleConfig() string { return sampleConfig } -func (p *Postgresql) Description() string { - return "Read metrics from one or many postgresql servers" -} - func (p *Postgresql) IgnoredColumns() map[string]bool { return ignoredColumns } +func (p *Postgresql) Init() error { + p.Service.IsPgBouncer = !p.PreparedStatements + return nil +} + func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var ( err error @@ -105,26 +78,26 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { query = `SELECT * FROM pg_stat_bgwriter` - bg_writer_row, err := p.DB.Query(query) + bgWriterRow, err := p.DB.Query(query) if err != nil { return err } - defer bg_writer_row.Close() + defer bgWriterRow.Close() // grab the column information from the result - if columns, err = bg_writer_row.Columns(); err != nil { + if columns, err = bgWriterRow.Columns(); err != nil { return err } - for bg_writer_row.Next() { - err = p.accRow(bg_writer_row, acc, columns) + for bgWriterRow.Next() { + err = p.accRow(bgWriterRow, acc, columns) if err != nil { return err } } - return bg_writer_row.Err() + return bgWriterRow.Err() } type scanner interface { @@ -156,13 +129,19 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []str if columnMap["datname"] != nil { // extract the database name from the column map if dbNameStr, ok := (*columnMap["datname"]).(string); ok { - dbname.WriteString(dbNameStr) + if _, err := dbname.WriteString(dbNameStr); err != nil { + return err + } } else { // PG 12 adds tracking of global objects to pg_stat_database - dbname.WriteString("postgres_global") + if _, err := dbname.WriteString("postgres_global"); err != nil { + return err + } } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } var tagAddress string @@ -189,13 +168,11 @@ func init() { inputs.Add("postgresql", func() telegraf.Input { return &Postgresql{ Service: Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, - IsPgBouncer: false, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), }, + PreparedStatements: true, } }) } diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index b23321019f5f8..98e5c0ee26065 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -4,21 +4,52 @@ import ( "fmt" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/docker/go-connections/nat" "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf/testutil" ) -func TestPostgresqlGeneratesMetrics(t *testing.T) { +const servicePort = "5432" + +func launchTestContainer(t *testing.T) *testutil.Container { + container := testutil.Container{ + Image: "postgres:alpine", + ExposedPorts: []string{servicePort}, + Env: map[string]string{ + "POSTGRES_HOST_AUTH_METHOD": "trust", + }, + WaitingFor: wait.ForAll( + // the database comes up twice, once right away, then again a second + // time after the docker entrypoint starts configuration + wait.ForLog("database system is ready to accept connections").WithOccurrence(2), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + + err := container.Start() + require.NoError(t, err, "failed to start container") + + return
&container +} + +func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := launchTestContainer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + p := &Postgresql{ Service: Service{ Address: fmt.Sprintf( - "host=%s user=postgres sslmode=disable", - testutil.GetLocalHost(), + "host=%s port=%s user=postgres sslmode=disable", + container.Address, + container.Ports[servicePort], ), IsPgBouncer: false, }, @@ -71,39 +102,45 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { metricsCounted := 0 for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("postgresql", metric)) + require.True(t, acc.HasInt64Field("postgresql", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("postgresql", metric)) + require.True(t, acc.HasInt32Field("postgresql", metric)) metricsCounted++ } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("postgresql", metric)) + require.True(t, acc.HasFloatField("postgresql", metric)) metricsCounted++ } for _, metric := range stringMetrics { - assert.True(t, acc.HasStringField("postgresql", metric)) + require.True(t, acc.HasStringField("postgresql", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } -func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { +func TestPostgresqlTagsMetricsWithDatabaseNameIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := launchTestContainer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + p := &Postgresql{ Service: Service{ Address: fmt.Sprintf( - "host=%s user=postgres sslmode=disable", - testutil.GetLocalHost(), + "host=%s port=%s user=postgres sslmode=disable", + container.Address, + container.Ports[servicePort], ), }, Databases: []string{"postgres"}, @@ -117,19 +154,25 @@ func TestPostgresqlTagsMetricsWithDatabaseName(t *testing.T) { point, ok := acc.Get("postgresql") require.True(t, ok) - assert.Equal(t, "postgres", point.Tags["db"]) + require.Equal(t, "postgres", point.Tags["db"]) } -func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { +func TestPostgresqlDefaultsToAllDatabasesIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := launchTestContainer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + p := &Postgresql{ Service: Service{ Address: fmt.Sprintf( - "host=%s user=postgres sslmode=disable", - testutil.GetLocalHost(), + "host=%s port=%s user=postgres sslmode=disable", + container.Address, + container.Ports[servicePort], ), }, } @@ -150,19 +193,25 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) { } } - assert.True(t, found) + require.True(t, found) } -func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { +func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := launchTestContainer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + p := 
&Postgresql{ Service: Service{ Address: fmt.Sprintf( - "host=%s user=postgres sslmode=disable", - testutil.GetLocalHost(), + "host=%s port=%s user=postgres sslmode=disable", + container.Address, + container.Ports[servicePort], ), }, } @@ -172,20 +221,26 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { require.NoError(t, p.Gather(&acc)) for col := range p.IgnoredColumns() { - assert.False(t, acc.HasMeasurement(col)) + require.False(t, acc.HasMeasurement(col)) } } -func TestPostgresqlDatabaseWhitelistTest(t *testing.T) { +func TestPostgresqlDatabaseWhitelistTestIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := launchTestContainer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + p := &Postgresql{ Service: Service{ Address: fmt.Sprintf( - "host=%s user=postgres sslmode=disable", - testutil.GetLocalHost(), + "host=%s port=%s user=postgres sslmode=disable", + container.Address, + container.Ports[servicePort], ), }, Databases: []string{"template0"}, @@ -212,20 +267,26 @@ func TestPostgresqlDatabaseWhitelistTest(t *testing.T) { } } - assert.True(t, foundTemplate0) - assert.False(t, foundTemplate1) + require.True(t, foundTemplate0) + require.False(t, foundTemplate1) } -func TestPostgresqlDatabaseBlacklistTest(t *testing.T) { +func TestPostgresqlDatabaseBlacklistTestIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } + container := launchTestContainer(t) + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + p := &Postgresql{ Service: Service{ Address: fmt.Sprintf( - "host=%s user=postgres sslmode=disable", - testutil.GetLocalHost(), + "host=%s port=%s user=postgres sslmode=disable", + container.Address, + container.Ports[servicePort], ), }, IgnoredDatabases: []string{"template0"}, @@ -251,6 +312,6 @@ func TestPostgresqlDatabaseBlacklistTest(t *testing.T) { } } - assert.False(t, foundTemplate0) - assert.True(t, foundTemplate1) + require.False(t, foundTemplate0) + require.True(t, foundTemplate1) } diff --git a/plugins/inputs/postgresql/sample.conf b/plugins/inputs/postgresql/sample.conf new file mode 100644 index 0000000000000..e49d8786555af --- /dev/null +++ b/plugins/inputs/postgresql/sample.conf @@ -0,0 +1,37 @@ +# Read metrics from one or many postgresql servers +[[inputs.postgresql]] + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. + ## + ## Without the dbname parameter, the driver will default to a database + ## with the same name as the user. This dbname is just for instantiating a + ## connection with the server and doesn't restrict the databases we are trying + ## to grab metrics for. + ## + address = "host=localhost user=postgres sslmode=disable" + ## A custom name for the database that will be used as the "server" tag in the + ## measurement output. If not specified, a default one generated from + ## the connection address is used. + # outputaddress = "db01" + + ## connection configuration. + ## maxlifetime - specify the maximum lifetime of a connection. + ## default is forever (0s) + # max_lifetime = "0s" + + ## A list of databases to explicitly ignore. If not specified, metrics for all + ## databases are gathered. 
Do NOT use with the 'databases' option. + # ignored_databases = ["postgres", "template0", "template1"] + + ## A list of databases to pull metrics about. If not specified, metrics for all + ## databases are gathered. Do NOT use with the 'ignored_databases' option. + # databases = ["app_production", "testing"] + + ## Whether to use prepared statements when connecting to the database. + ## This should be set to false when connecting through a PgBouncer instance + ## with pool_mode set to transaction. + prepared_statements = true diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index 96a9a63175658..2ef65617d8a49 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -3,17 +3,18 @@ package postgresql import ( "database/sql" "fmt" - "github.com/jackc/pgx" - "github.com/jackc/pgx/pgtype" - "github.com/jackc/pgx/stdlib" "net" "net/url" "regexp" "sort" "strings" + "time" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/stdlib" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" ) // pulled from lib/pq @@ -88,14 +89,16 @@ func parseURL(uri string) (string, error) { // packages. type Service struct { Address string - Outputaddress string + OutputAddress string MaxIdle int MaxOpen int - MaxLifetime internal.Duration + MaxLifetime config.Duration DB *sql.DB - IsPgBouncer bool + IsPgBouncer bool `toml:"-"` } +var socketRegexp = regexp.MustCompile(`/\.s\.PGSQL\.\d+$`) + // Start starts the ServiceInput's service, whatever that may be func (p *Service) Start(telegraf.Accumulator) (err error) { const localhost = "host=localhost sslmode=disable" @@ -104,57 +107,42 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { p.Address = localhost } - connectionString := p.Address + connConfig, err := pgx.ParseConfig(p.Address) + if err != nil { + return err + } + + // Remove the socket name from the path + connConfig.Host = socketRegexp.ReplaceAllLiteralString(connConfig.Host, "") // Specific support to make it work with PgBouncer too // See https://github.com/influxdata/telegraf/issues/3253#issuecomment-357505343 if p.IsPgBouncer { - d := &stdlib.DriverConfig{ - ConnConfig: pgx.ConnConfig{ - PreferSimpleProtocol: true, - RuntimeParams: map[string]string{ - "client_encoding": "UTF8", - }, - CustomConnInfo: func(c *pgx.Conn) (*pgtype.ConnInfo, error) { - info := c.ConnInfo.DeepCopy() - info.RegisterDataType(pgtype.DataType{ - Value: &pgtype.OIDValue{}, - Name: "int8OID", - OID: pgtype.Int8OID, - }) - // Newer versions of pgbouncer need this defined. 
See the discussion here: - // https://github.com/jackc/pgx/issues/649 - info.RegisterDataType(pgtype.DataType{ - Value: &pgtype.OIDValue{}, - Name: "numericOID", - OID: pgtype.NumericOID, - }) - - return info, nil - }, - }, - } - stdlib.RegisterDriverConfig(d) - connectionString = d.ConnectionString(p.Address) + // Remove DriveConfig and revert it by the ParseConfig method + // See https://github.com/influxdata/telegraf/issues/9134 + connConfig.PreferSimpleProtocol = true } + connectionString := stdlib.RegisterConnConfig(connConfig) if p.DB, err = sql.Open("pgx", connectionString); err != nil { return err } p.DB.SetMaxOpenConns(p.MaxOpen) p.DB.SetMaxIdleConns(p.MaxIdle) - p.DB.SetConnMaxLifetime(p.MaxLifetime.Duration) + p.DB.SetConnMaxLifetime(time.Duration(p.MaxLifetime)) return nil } // Stop stops the services and closes any necessary channels and connections func (p *Service) Stop() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive p.DB.Close() } -var kvMatcher, _ = regexp.Compile("(password|sslcert|sslkey|sslmode|sslrootcert)=\\S+ ?") +var kvMatcher, _ = regexp.Compile(`(password|sslcert|sslkey|sslmode|sslrootcert)=\S+ ?`) // SanitizedAddress utility function to strip sensitive information from the connection string. func (p *Service) SanitizedAddress() (sanitizedAddress string, err error) { @@ -162,8 +150,8 @@ func (p *Service) SanitizedAddress() (sanitizedAddress string, err error) { canonicalizedAddress string ) - if p.Outputaddress != "" { - return p.Outputaddress, nil + if p.OutputAddress != "" { + return p.OutputAddress, nil } if strings.HasPrefix(p.Address, "postgres://") || strings.HasPrefix(p.Address, "postgresql://") { diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index abbdd07f43d1b..d2f706606ded6 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -11,7 +11,10 @@ The example below has two queries are specified, with the following parameters: * The name of the measurement * A list of the columns to be defined as tags -```toml +## Configuration + +```toml @sample.conf +# Read metrics from one or many postgresql servers [[inputs.postgresql_extensible]] # specify address via a url matching: # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=... @@ -25,25 +28,17 @@ The example below has two queries are specified, with the following parameters: # to grab metrics for. # address = "host=localhost user=postgres sslmode=disable" - # A list of databases to pull metrics about. If not specified, metrics for all - # databases are gathered. + + ## A list of databases to pull metrics about. + ## deprecated in 1.22.3; use the sqlquery option to specify database to use # databases = ["app_production", "testing"] - # + + ## Whether to use prepared statements when connecting to the database. + ## This should be set to false when connecting through a PgBouncer instance + ## with pool_mode set to transaction. + prepared_statements = true + # Define the toml config where the sql queries are stored - # New queries can be added, if the withdbname is set to true and there is no - # databases defined in the 'databases field', the sql query is ended by a 'is - # not null' in order to make the query succeed. 
- # Be careful that the sqlquery must contain the where clause with a part of - # the filtering, the plugin will add a 'IN (dbname list)' clause if the - # withdbname is set to true - # Example : - # The sqlquery : "SELECT * FROM pg_stat_database where datname" become - # "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" - # because the databases variable was set to ['postgres', 'pgbench' ] and the - # withdbname was true. - # Be careful that if the withdbname is set to false you don't have to define - # the where clause (aka with the dbname) - # # The script option can be used to specify the .sql file path. # If script and sqlquery options specified at same time, sqlquery will be used # @@ -52,12 +47,17 @@ The example below has two queries are specified, with the following parameters: # defined tags. The values in these columns must be of a string-type, # a number-type or a blob-type. # + # The timestamp field is used to override the data points timestamp value. By + # default, all rows inserted with current time. By setting a timestamp column, + # the row will be inserted with that column's value. + # # Structure : # [[inputs.postgresql_extensible.query]] # sqlquery string # version string # withdbname boolean # tagvalue string (coma separated) + # timestamp string [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database where datname" version=901 @@ -71,11 +71,20 @@ The example below has two queries are specified, with the following parameters: ``` The system can be easily extended using homemade metrics collection tools or -using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) +using postgresql extensions ([pg_stat_statements][1], [pg_proctab][2] or +[powa][3]) + +[1]: http://www.postgresql.org/docs/current/static/pgstatstatements.html + +[2]: https://github.com/markwkm/pg_proctab -# Sample Queries : -- telegraf.conf postgresql_extensible queries (assuming that you have configured +[3]: http://dalibo.github.io/powa/ + +## Sample Queries + +* telegraf.conf postgresql_extensible queries (assuming that you have configured correctly your connection) + ```toml [[inputs.postgresql_extensible.query]] sqlquery="SELECT * FROM pg_stat_database" @@ -127,27 +136,33 @@ using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs tagvalue="type,enabled" ``` -# Postgresql Side +## Postgresql Side + postgresql.conf : -``` + +```sql shared_preload_libraries = 'pg_stat_statements,pg_stat_kcache' ``` Please follow the requirements to setup those extensions. 
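(Stepping back to the service.go change earlier in this patch: the new connection path parses the DSN with pgx v4's ParseConfig, prefers the simple query protocol when talking to PgBouncer, then registers the config with the stdlib driver and opens through database/sql. A condensed, self-contained sketch of that flow follows; the `open` helper is illustrative, not the plugin's API.)

```go
package main

import (
	"database/sql"
	"log"

	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/stdlib"
)

// open mirrors the flow of Service.Start after this patch: parse the DSN
// into a pgx v4 ConnConfig, flip PreferSimpleProtocol for PgBouncer, then
// register the config and open through database/sql.
// (open is an illustrative name, not the plugin's API.)
func open(dsn string, isPgBouncer bool) (*sql.DB, error) {
	connConfig, err := pgx.ParseConfig(dsn)
	if err != nil {
		return nil, err
	}
	if isPgBouncer {
		// PgBouncer in transaction pool_mode cannot reuse server-side
		// prepared statements across pooled connections.
		connConfig.PreferSimpleProtocol = true
	}
	connectionString := stdlib.RegisterConnConfig(connConfig)
	return sql.Open("pgx", connectionString)
}

func main() {
	db, err := open("host=localhost user=postgres sslmode=disable", true)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```

This is also why the new `prepared_statements` option exists: setting it to false routes through the same PgBouncer-safe path.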
In the database (can be a specific monitoring db) -``` + +```sql create extension pg_stat_statements; create extension pg_stat_kcache; create extension pg_proctab; ``` + (assuming that the extension is installed on the OS Layer) - - pg_stat_kcache is available on the postgresql.org yum repo - - pg_proctab is available at : https://github.com/markwkm/pg_proctab +* pg_stat_kcache is available on the postgresql.org yum repo +* pg_proctab is available at : <https://github.com/markwkm/pg_proctab> + +## Views + +* Blocking sessions - - ## Views - - Blocking sessions ```sql CREATE OR REPLACE VIEW public.blocking_procs AS SELECT a.datname AS db, @@ -171,7 +186,9 @@ CREATE OR REPLACE VIEW public.blocking_procs AS WHERE kl.granted AND NOT bl.granted ORDER BY a.query_start; ``` - - Sessions Statistics + +* Sessions Statistics + ```sql CREATE OR REPLACE VIEW public.sessions AS WITH proctab AS ( diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index f91feaf407d49..3109f1d6aa218 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -1,26 +1,36 @@ +//go:generate ../../../tools/readme_config_includer/generator package postgresql_extensible import ( "bytes" + _ "embed" "fmt" - "io/ioutil" + "io" "os" "strings" + "time" - _ "github.com/jackc/pgx/stdlib" + // Required for SQL framework driver + _ "github.com/jackc/pgx/v4/stdlib" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/postgresql" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Postgresql struct { postgresql.Service - Databases []string - AdditionalTags []string - Query query - Debug bool + Databases []string `deprecated:"1.22.4;use the sqlquery option to specify database to use"` + AdditionalTags []string + Timestamp string + Query query + Debug bool + PreparedStatements bool `toml:"prepared_statements"` Log telegraf.Logger } @@ -29,78 +39,17 @@ type query []struct { Sqlquery string Script string Version int - Withdbname bool + Withdbname bool `deprecated:"1.22.4;use the sqlquery option to specify database to use"` Tagvalue string Measurement string + Timestamp string } var ignoredColumns = map[string]bool{"stats_reset": true} -var sampleConfig = ` - ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]\ - ## ?sslmode=[disable|verify-ca|verify-full] - ## or a simple string: - ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production - # - ## All connection parameters are optional. # - ## Without the dbname parameter, the driver will default to a database - ## with the same name as the user. This dbname is just for instantiating a - ## connection with the server and doesn't restrict the databases we are trying - ## to grab metrics for. - # - address = "host=localhost user=postgres sslmode=disable" - - ## connection configuration. - ## maxlifetime - specify the maximum lifetime of a connection. - ## default is forever (0s) - max_lifetime = "0s" - - ## A list of databases to pull metrics about. If not specified, metrics for all - ## databases are gathered.
- ## databases = ["app_production", "testing"] - # - ## A custom name for the database that will be used as the "server" tag in the - ## measurement output. If not specified, a default one generated from - ## the connection address is used. - # outputaddress = "db01" - # - ## Define the toml config where the sql queries are stored - ## New queries can be added, if the withdbname is set to true and there is no - ## databases defined in the 'databases field', the sql query is ended by a - ## 'is not null' in order to make the query succeed. - ## Example : - ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become - ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" - ## because the databases variable was set to ['postgres', 'pgbench' ] and the - ## withdbname was true. Be careful that if the withdbname is set to false you - ## don't have to define the where clause (aka with the dbname) the tagvalue - ## field is used to define custom tags (separated by commas) - ## The optional "measurement" value can be used to override the default - ## output measurement name ("postgresql"). - ## - ## The script option can be used to specify the .sql file path. - ## If script and sqlquery options specified at same time, sqlquery will be used - ## - ## Structure : - ## [[inputs.postgresql_extensible.query]] - ## sqlquery string - ## version string - ## withdbname boolean - ## tagvalue string (comma separated) - ## measurement string - [[inputs.postgresql_extensible.query]] - sqlquery="SELECT * FROM pg_stat_database" - version=901 - withdbname=false - tagvalue="" - measurement="" - [[inputs.postgresql_extensible.query]] - sqlquery="SELECT * FROM pg_stat_bgwriter" - version=901 - withdbname=false - tagvalue="postgresql.stats" -` +func (*Postgresql) SampleConfig() string { + return sampleConfig +} func (p *Postgresql) Init() error { var err error @@ -112,17 +61,10 @@ func (p *Postgresql) Init() error { } } } + p.Service.IsPgBouncer = !p.PreparedStatements return nil } -func (p *Postgresql) SampleConfig() string { - return sampleConfig -} - -func (p *Postgresql) Description() string { - return "Read metrics from one or many postgresql servers" -} - func (p *Postgresql) IgnoredColumns() map[string]bool { return ignoredColumns } @@ -134,7 +76,7 @@ func ReadQueryFromFile(filePath string) (string, error) { } defer file.Close() - query, err := ioutil.ReadAll(file) + query, err := io.ReadAll(file) if err != nil { return "", err } @@ -143,91 +85,96 @@ func ReadQueryFromFile(filePath string) (string, error) { func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var ( - err error - sql_query string - query_addon string - db_version int - query string - tag_value string - meas_name string - columns []string + err error + sqlQuery string + queryAddon string + dbVersion int + query string + measName string ) // Retrieving the database version query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'` - if err = p.DB.QueryRow(query).Scan(&db_version); err != nil { - db_version = 0 + if err = p.DB.QueryRow(query).Scan(&dbVersion); err != nil { + dbVersion = 0 } // We loop in order to process each query // Query is not run if Database version does not match the query version. 
for i := range p.Query { - sql_query = p.Query[i].Sqlquery - tag_value = p.Query[i].Tagvalue + sqlQuery = p.Query[i].Sqlquery if p.Query[i].Measurement != "" { - meas_name = p.Query[i].Measurement + measName = p.Query[i].Measurement } else { - meas_name = "postgresql" + measName = "postgresql" } if p.Query[i].Withdbname { if len(p.Databases) != 0 { - query_addon = fmt.Sprintf(` IN ('%s')`, - strings.Join(p.Databases, "','")) + queryAddon = fmt.Sprintf(` IN ('%s')`, strings.Join(p.Databases, "','")) } else { - query_addon = " is not null" + queryAddon = " is not null" } } else { - query_addon = "" + queryAddon = "" } - sql_query += query_addon + sqlQuery += queryAddon - if p.Query[i].Version <= db_version { - rows, err := p.DB.Query(sql_query) - if err != nil { - p.Log.Error(err.Error()) - continue - } + if p.Query[i].Version <= dbVersion { + p.gatherMetricsFromQuery(acc, sqlQuery, p.Query[i].Tagvalue, p.Query[i].Timestamp, measName) + } + } + return nil +} - defer rows.Close() +func (p *Postgresql) gatherMetricsFromQuery(acc telegraf.Accumulator, sqlQuery string, tagValue string, timestamp string, measName string) { + var columns []string - // grab the column information from the result - if columns, err = rows.Columns(); err != nil { - p.Log.Error(err.Error()) - continue - } + rows, err := p.DB.Query(sqlQuery) + if err != nil { + acc.AddError(err) + return + } - p.AdditionalTags = nil - if tag_value != "" { - tag_list := strings.Split(tag_value, ",") - for t := range tag_list { - p.AdditionalTags = append(p.AdditionalTags, tag_list[t]) - } - } + defer rows.Close() - for rows.Next() { - err = p.accRow(meas_name, rows, acc, columns) - if err != nil { - p.Log.Error(err.Error()) - break - } - } + // grab the column information from the result + if columns, err = rows.Columns(); err != nil { + acc.AddError(err) + return + } + + p.AdditionalTags = nil + if tagValue != "" { + tagList := strings.Split(tagValue, ",") + for t := range tagList { + p.AdditionalTags = append(p.AdditionalTags, tagList[t]) + } + } + + p.Timestamp = timestamp + + for rows.Next() { + err = p.accRow(measName, rows, acc, columns) + if err != nil { + acc.AddError(err) + break } } - return nil } type scanner interface { Scan(dest ...interface{}) error } -func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumulator, columns []string) error { +func (p *Postgresql) accRow(measName string, row scanner, acc telegraf.Accumulator, columns []string) error { var ( err error columnVars []interface{} dbname bytes.Buffer tagAddress string + timestamp time.Time ) // this is where we'll store the column name with its *interface{} @@ -251,12 +198,18 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula // extract the database name from the column map switch datname := (*c).(type) { case string: - dbname.WriteString(datname) + if _, err := dbname.WriteString(datname); err != nil { + return err + } default: - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } } else { - dbname.WriteString("postgres") + if _, err := dbname.WriteString("postgres"); err != nil { + return err + } } if tagAddress, err = p.SanitizedAddress(); err != nil { @@ -269,6 +222,9 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula "db": dbname.String(), } + // set default timestamp to Now + timestamp = time.Now() + fields := make(map[string]interface{}) COLUMN: for col, val := range columnMap { @@ -278,6 +234,13 @@ COLUMN: continue 
} + if col == p.Timestamp { + if v, ok := (*val).(time.Time); ok { + timestamp = v + } + continue + } + for _, tag := range p.AdditionalTags { if col != tag { continue @@ -301,7 +264,7 @@ COLUMN: fields[col] = *val } } - acc.AddFields(meas_name, fields, tags) + acc.AddFields(measName, fields, tags, timestamp) return nil } @@ -309,13 +272,12 @@ func init() { inputs.Add("postgresql_extensible", func() telegraf.Input { return &Postgresql{ Service: postgresql.Service{ - MaxIdle: 1, - MaxOpen: 1, - MaxLifetime: internal.Duration{ - Duration: 0, - }, + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: config.Duration(0), IsPgBouncer: false, }, + PreparedStatements: true, } }) } diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index bca009f167cf7..9550a52dd31a8 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -4,34 +4,58 @@ import ( "errors" "fmt" "testing" + "time" + + "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" "github.com/influxdata/telegraf/plugins/inputs/postgresql" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func queryRunner(t *testing.T, q query) *testutil.Accumulator { + servicePort := "5432" + container := testutil.Container{ + Image: "postgres:alpine", + ExposedPorts: []string{servicePort}, + Env: map[string]string{ + "POSTGRES_HOST_AUTH_METHOD": "trust", + }, + WaitingFor: wait.ForAll( + wait.ForLog("database system is ready to accept connections"), + wait.ForListeningPort(nat.Port(servicePort)), + ), + } + + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + p := &Postgresql{ Log: testutil.Logger{}, Service: postgresql.Service{ Address: fmt.Sprintf( - "host=%s user=postgres sslmode=disable", - testutil.GetLocalHost(), + "host=%s port=%s user=postgres sslmode=disable", + container.Address, + container.Ports[servicePort], ), IsPgBouncer: false, }, Databases: []string{"postgres"}, Query: q, } + var acc testutil.Accumulator - p.Start(&acc) - p.Init() + require.NoError(t, p.Init()) + require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) return &acc } -func TestPostgresqlGeneratesMetrics(t *testing.T) { +func TestPostgresqlGeneratesMetricsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -75,30 +99,30 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { metricsCounted := 0 for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("postgresql", metric)) + require.True(t, acc.HasInt64Field("postgresql", metric)) metricsCounted++ } for _, metric := range int32Metrics { - assert.True(t, acc.HasInt32Field("postgresql", metric)) + require.True(t, acc.HasInt32Field("postgresql", metric)) metricsCounted++ } for _, metric := range floatMetrics { - assert.True(t, acc.HasFloatField("postgresql", metric)) + require.True(t, acc.HasFloatField("postgresql", metric)) metricsCounted++ } for _, metric := range stringMetrics { - assert.True(t, acc.HasStringField("postgresql", metric)) + require.True(t, acc.HasStringField("postgresql", metric)) metricsCounted++ } - assert.True(t, metricsCounted > 0) - assert.Equal(t, 
len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) + require.True(t, metricsCounted > 0) + require.Equal(t, len(floatMetrics)+len(intMetrics)+len(int32Metrics)+len(stringMetrics), metricsCounted) } -func TestPostgresqlQueryOutputTests(t *testing.T) { +func TestPostgresqlQueryOutputTestsIntegration(t *testing.T) { const measurement = "postgresql" if testing.Short() { @@ -108,23 +132,30 @@ func TestPostgresqlQueryOutputTests(t *testing.T) { examples := map[string]func(*testutil.Accumulator){ "SELECT 10.0::float AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.FloatField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, 10.0, v) + require.True(t, found) + require.Equal(t, 10.0, v) }, "SELECT 10.0 AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.StringField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, "10.0", v) + require.True(t, found) + require.Equal(t, "10.0", v) }, "SELECT 'hello world' AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.StringField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, "hello world", v) + require.True(t, found) + require.Equal(t, "hello world", v) }, "SELECT true AS myvalue": func(acc *testutil.Accumulator) { v, found := acc.BoolField(measurement, "myvalue") - assert.True(t, found) - assert.Equal(t, true, v) + require.True(t, found) + require.Equal(t, true, v) + }, + "SELECT timestamp'1980-07-23' as ts, true AS myvalue": func(acc *testutil.Accumulator) { + expectedTime := time.Date(1980, 7, 23, 0, 0, 0, 0, time.UTC) + v, found := acc.BoolField(measurement, "myvalue") + require.True(t, found) + require.Equal(t, true, v) + require.True(t, acc.HasTimestamp(measurement, expectedTime)) }, } @@ -134,12 +165,13 @@ func TestPostgresqlQueryOutputTests(t *testing.T) { Version: 901, Withdbname: false, Tagvalue: "", + Timestamp: "ts", }}) assertions(acc) } } -func TestPostgresqlFieldOutput(t *testing.T) { +func TestPostgresqlFieldOutputIntegration(t *testing.T) { const measurement = "postgresql" if testing.Short() { t.Skip("Skipping integration test in short mode") @@ -183,22 +215,22 @@ func TestPostgresqlFieldOutput(t *testing.T) { for _, field := range intMetrics { _, found := acc.Int64Field(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be an integer", field)) + require.True(t, found, fmt.Sprintf("expected %s to be an integer", field)) } for _, field := range int32Metrics { _, found := acc.Int32Field(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be an int32", field)) + require.True(t, found, fmt.Sprintf("expected %s to be an int32", field)) } for _, field := range floatMetrics { _, found := acc.FloatField(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be a float64", field)) + require.True(t, found, fmt.Sprintf("expected %s to be a float64", field)) } for _, field := range stringMetrics { _, found := acc.StringField(measurement, field) - assert.True(t, found, fmt.Sprintf("expected %s to be a str", field)) + require.True(t, found, fmt.Sprintf("expected %s to be a str", field)) } } @@ -222,13 +254,13 @@ func TestPostgresqlSqlScript(t *testing.T) { Query: q, } var acc testutil.Accumulator - p.Start(&acc) - p.Init() + require.NoError(t, p.Init()) + require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) } -func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { +func TestPostgresqlIgnoresUnwantedColumnsIntegration(t *testing.T) { if 
testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -247,9 +279,9 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { require.NoError(t, p.Start(&acc)) require.NoError(t, acc.GatherError(p.Gather)) - assert.NotEmpty(t, p.IgnoredColumns()) + require.NotEmpty(t, p.IgnoredColumns()) for col := range p.IgnoredColumns() { - assert.False(t, acc.HasMeasurement(col)) + require.False(t, acc.HasMeasurement(col)) } } @@ -280,15 +312,15 @@ type fakeRow struct { func (f fakeRow) Scan(dest ...interface{}) error { if len(f.fields) != len(dest) { - return errors.New("Nada matchy buddy") + return errors.New("nada matchy buddy") } for i, d := range dest { - switch d.(type) { - case (*interface{}): - *d.(*interface{}) = f.fields[i] + switch d := d.(type) { + case *interface{}: + *d = f.fields[i] default: - return fmt.Errorf("Bad type %T", d) + return fmt.Errorf("bad type %T", d) } } return nil diff --git a/plugins/inputs/postgresql_extensible/sample.conf b/plugins/inputs/postgresql_extensible/sample.conf new file mode 100644 index 0000000000000..e9c26c10d9697 --- /dev/null +++ b/plugins/inputs/postgresql_extensible/sample.conf @@ -0,0 +1,54 @@ +# Read metrics from one or many postgresql servers +[[inputs.postgresql_extensible]] + # specify address via a url matching: + # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=... + # or a simple string: + # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production + # + # All connection parameters are optional. + # Without the dbname parameter, the driver will default to a database + # with the same name as the user. This dbname is just for instantiating a + # connection with the server and doesn't restrict the databases we are trying + # to grab metrics for. + # + address = "host=localhost user=postgres sslmode=disable" + + ## A list of databases to pull metrics about. + ## deprecated in 1.22.3; use the sqlquery option to specify database to use + # databases = ["app_production", "testing"] + + ## Whether to use prepared statements when connecting to the database. + ## This should be set to false when connecting through a PgBouncer instance + ## with pool_mode set to transaction. + prepared_statements = true + + # Define the toml config where the sql queries are stored + # The script option can be used to specify the .sql file path. + # If script and sqlquery options are specified at the same time, sqlquery will be used + # + # the tagvalue field is used to define custom tags (separated by commas). + # the query is expected to return columns which match the names of the + # defined tags. The values in these columns must be of a string-type, + # a number-type or a blob-type. + # + # The timestamp field is used to override the data point's timestamp value. By + # default, all rows are inserted with the current time. By setting a timestamp + # column, the row will be inserted with that column's value.
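Tying the two new options together, a minimal configuration sketch (the query mirrors the integration test added above; the literal `timestamp'1980-07-23'` is purely illustrative):

```toml
[[inputs.postgresql_extensible]]
  address = "host=localhost user=postgres sslmode=disable"
  ## Keep true unless connecting through PgBouncer with pool_mode = transaction
  prepared_statements = true

  [[inputs.postgresql_extensible.query]]
    ## 'ts' is consumed as the metric timestamp and is not emitted as a field
    sqlquery="SELECT timestamp'1980-07-23' AS ts, true AS myvalue"
    version=901
    withdbname=false
    tagvalue=""
    timestamp="ts"
```

With `timestamp="ts"` each row is emitted with the row's own time rather than the collection time, which is exactly what the `HasTimestamp` assertion in the new test checks.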
+ # + # Structure : + # [[inputs.postgresql_extensible.query]] + # sqlquery string + # version string + # withdbname boolean + # tagvalue string (comma separated) + # timestamp string + [[inputs.postgresql_extensible.query]] + sqlquery="SELECT * FROM pg_stat_database where datname" + version=901 + withdbname=false + tagvalue="" + [[inputs.postgresql_extensible.query]] + script="your_sql-filepath.sql" + version=901 + withdbname=false + tagvalue="" diff --git a/plugins/inputs/powerdns/README.md b/plugins/inputs/powerdns/README.md index a6bad660fc37b..56e1efcdb1446 100644 --- a/plugins/inputs/powerdns/README.md +++ b/plugins/inputs/powerdns/README.md @@ -2,10 +2,10 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket. -### Configuration: +## Configuration -```toml -# Description +```toml @sample.conf +# Read metrics from one or many PowerDNS servers [[inputs.powerdns]] # An array of sockets to gather stats about. # Specify a path to unix socket. @@ -14,17 +14,18 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket. unix_sockets = ["/var/run/pdns.controlsocket"] ``` -#### Permissions +### Permissions Telegraf will need read access to the powerdns control socket. On many systems this can be accomplished by adding the `telegraf` user to the `pdns` group: -``` + +```sh usermod telegraf -a -G pdns ``` -### Measurements & Fields: +## Measurements & Fields - powerdns - corrupt-packets @@ -66,13 +67,13 @@ usermod telegraf -a -G pdns - uptime - user-msec -### Tags: +## Tags - tags: `server=socket` -### Example Output: +## Example Output -``` +```sh $ ./telegraf --config telegraf.conf --input-filter powerdns --test > powerdns,server=/var/run/pdns.controlsocket corrupt-packets=0i,deferred-cache-inserts=0i,deferred-cache-lookup=0i,dnsupdate-answers=0i,dnsupdate-changes=0i,dnsupdate-queries=0i,dnsupdate-refused=0i,key-cache-size=0i,latency=26i,meta-cache-size=0i,packetcache-hit=0i,packetcache-miss=1i,packetcache-size=0i,qsize-q=0i,query-cache-hit=0i,query-cache-miss=6i,rd-queries=1i,recursing-answers=0i,recursing-questions=0i,recursion-unanswered=0i,security-status=3i,servfail-packets=0i,signature-cache-size=0i,signatures=0i,sys-msec=4349i,tcp-answers=0i,tcp-queries=0i,timedout-packets=0i,udp-answers=1i,udp-answers-bytes=50i,udp-do-queries=0i,udp-queries=0i,udp4-answers=1i,udp4-queries=1i,udp6-answers=0i,udp6-queries=0i,uptime=166738i,user-msec=3036i 1454078624932715706 ``` diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index 3c661990cee4c..2a5abce09fdeb 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -1,10 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package powerdns import ( "bufio" + _ "embed" "fmt" "io" - "log" "net" "strconv" "strings" @@ -14,26 +15,22 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Powerdns struct { UnixSockets []string -} -var sampleConfig = ` - ## An array of sockets to gather stats about. - ## Specify a path to unix socket.
- unix_sockets = ["/var/run/pdns.controlsocket"] -` + Log telegraf.Logger `toml:"-"` +} var defaultTimeout = 5 * time.Second -func (p *Powerdns) SampleConfig() string { +func (*Powerdns) SampleConfig() string { return sampleConfig } -func (p *Powerdns) Description() string { - return "Read metrics from one or many PowerDNS servers" -} - func (p *Powerdns) Gather(acc telegraf.Accumulator) error { if len(p.UnixSockets) == 0 { return p.gatherServer("/var/run/pdns.controlsocket", acc) @@ -56,14 +53,16 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error defer conn.Close() - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) // Send command if _, err := fmt.Fprint(conn, "show * \n"); err != nil { - return nil + return err } if err := rw.Flush(); err != nil { return err @@ -87,7 +86,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error metrics := string(buf) // Process data - fields := parseResponse(metrics) + fields := p.parseResponse(metrics) // Add server socket as a tag tags := map[string]string{"server": address} @@ -97,7 +96,7 @@ func (p *Powerdns) gatherServer(address string, acc telegraf.Accumulator) error return nil } -func parseResponse(metrics string) map[string]interface{} { +func (p *Powerdns) parseResponse(metrics string) map[string]interface{} { values := make(map[string]interface{}) s := strings.Split(metrics, ",") @@ -110,8 +109,7 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! [inputs.powerdns] error parsing integer for metric %q: %s", - metric, err.Error()) + p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index fe64be5db62eb..5afa9008ae124 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -3,11 +3,13 @@ package powerdns import ( "fmt" "net" + "os" + "path/filepath" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) type statServer struct{} @@ -48,7 +50,6 @@ var intOverflowMetrics = "corrupt-packets=18446744073709550195,deferred-cache-in "signature-cache-size=0,sys-msec=2889,uptime=86317,user-msec=2167," func (s statServer) serverSocket(l net.Listener) { - for { conn, err := l.Accept() if err != nil { @@ -61,7 +62,11 @@ func (s statServer) serverSocket(l net.Listener) { data := buf[:n] if string(data) == "show * \n" { + // Ignore the returned error as we need to close the socket anyway + //nolint:errcheck,revive c.Write([]byte(metrics)) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() } }(conn) @@ -71,7 +76,8 @@ func (s statServer) serverSocket(l net.Listener) { func TestPowerdnsGeneratesMetrics(t *testing.T) { // We create a fake server to return test data randomNumber := int64(5239846799706671610) - socket, err := net.Listen("unix", fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)) + sockname := filepath.Join(os.TempDir(), fmt.Sprintf("pdns%d.controlsocket", randomNumber)) + socket, err := net.Listen("unix", sockname) if err != nil { t.Fatal("Cannot 
initialize server on port ") } @@ -82,11 +88,10 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) { go s.serverSocket(socket) p := &Powerdns{ - UnixSockets: []string{fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)}, + UnixSockets: []string{sockname}, } var acc testutil.Accumulator - err = acc.GatherError(p.Gather) require.NoError(t, err) @@ -102,12 +107,16 @@ func TestPowerdnsGeneratesMetrics(t *testing.T) { "meta-cache-size", "qsize-q", "signature-cache-size", "sys-msec", "uptime", "user-msec"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("powerdns", metric), metric) + require.True(t, acc.HasInt64Field("powerdns", metric), metric) } } func TestPowerdnsParseMetrics(t *testing.T) { - values := parseResponse(metrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(metrics) tests := []struct { key string @@ -167,7 +176,11 @@ func TestPowerdnsParseMetrics(t *testing.T) { } func TestPowerdnsParseCorruptMetrics(t *testing.T) { - values := parseResponse(corruptMetrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(corruptMetrics) tests := []struct { key string @@ -226,7 +239,11 @@ func TestPowerdnsParseCorruptMetrics(t *testing.T) { } func TestPowerdnsParseIntOverflowMetrics(t *testing.T) { - values := parseResponse(intOverflowMetrics) + p := &Powerdns{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(intOverflowMetrics) tests := []struct { key string diff --git a/plugins/inputs/powerdns/sample.conf b/plugins/inputs/powerdns/sample.conf new file mode 100644 index 0000000000000..8751206a350a3 --- /dev/null +++ b/plugins/inputs/powerdns/sample.conf @@ -0,0 +1,7 @@ +# Read metrics from one or many PowerDNS servers +[[inputs.powerdns]] + # An array of sockets to gather stats about. + # Specify a path to unix socket. + # + # If no servers are specified, then '/var/run/pdns.controlsocket' is used as the path. + unix_sockets = ["/var/run/pdns.controlsocket"] diff --git a/plugins/inputs/powerdns_recursor/README.md b/plugins/inputs/powerdns_recursor/README.md index 09192db35ad2b..377e1ef0e8ce3 100644 --- a/plugins/inputs/powerdns_recursor/README.md +++ b/plugins/inputs/powerdns_recursor/README.md @@ -3,9 +3,10 @@ The `powerdns_recursor` plugin gathers metrics about PowerDNS Recursor using the unix controlsocket. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read metrics from one or many PowerDNS Recursor servers [[inputs.powerdns_recursor]] ## Path to the Recursor control socket. unix_sockets = ["/var/run/pdns_recursor.controlsocket"] @@ -17,7 +18,7 @@ the unix controlsocket. # socket_mode = "0666" ``` -#### Permissions +### Permissions Telegraf will need read/write access to the control socket and to the `socket_dir`. PowerDNS will need to be able to write to the `socket_dir`. @@ -27,25 +28,28 @@ adapted for other systems. First change permissions on the controlsocket in the PowerDNS recursor configuration, usually in `/etc/powerdns/recursor.conf`: -``` + +```sh socket-mode = 660 ``` Then place the `telegraf` user into the `pdns` group: -``` + +```sh usermod telegraf -a -G pdns ``` Since `telegraf` cannot write to to the default `/var/run` socket directory, create a subdirectory and adjust permissions for this directory so that both users can access it. 
+ ```sh -$ mkdir /var/run/pdns -$ chown root:pdns /var/run/pdns -$ chmod 770 /var/run/pdns +mkdir /var/run/pdns +chown root:pdns /var/run/pdns +chmod 770 /var/run/pdns ``` -### Metrics +## Metrics - powerdns_recursor - tags: @@ -156,8 +160,8 @@ $ chmod 770 /var/run/pdns - x-ourtime4-8 - x-ourtime8-16 -### Example Output +## Example Output -``` +```shell powerdns_recursor,server=/var/run/pdns_recursor.controlsocket all-outqueries=3631810i,answers-slow=36863i,answers0-1=179612i,answers1-10=1223305i,answers10-100=1252199i,answers100-1000=408357i,auth-zone-queries=4i,auth4-answers-slow=44758i,auth4-answers0-1=59721i,auth4-answers1-10=1766787i,auth4-answers10-100=1329638i,auth4-answers100-1000=430372i,auth6-answers-slow=0i,auth6-answers0-1=0i,auth6-answers1-10=0i,auth6-answers10-100=0i,auth6-answers100-1000=0i,cache-entries=296689i,cache-hits=150654i,cache-misses=2949682i,case-mismatches=0i,chain-resends=420004i,client-parse-errors=0i,concurrent-queries=0i,dlg-only-drops=0i,dnssec-queries=152970i,dnssec-result-bogus=0i,dnssec-result-indeterminate=0i,dnssec-result-insecure=0i,dnssec-result-nta=0i,dnssec-result-secure=47i,dnssec-validations=47i,dont-outqueries=62i,ecs-queries=0i,ecs-responses=0i,edns-ping-matches=0i,edns-ping-mismatches=0i,failed-host-entries=21i,fd-usage=32i,ignored-packets=0i,ipv6-outqueries=0i,ipv6-questions=0i,malloc-bytes=0i,max-cache-entries=1000000i,max-mthread-stack=33747i,max-packetcache-entries=500000i,negcache-entries=100019i,no-packet-error=0i,noedns-outqueries=73341i,noerror-answers=25453808i,noping-outqueries=0i,nsset-invalidations=2398i,nsspeeds-entries=3966i,nxdomain-answers=3341302i,outgoing-timeouts=44384i,outgoing4-timeouts=44384i,outgoing6-timeouts=0i,over-capacity-drops=0i,packetcache-entries=78258i,packetcache-hits=25999027i,packetcache-misses=3100179i,policy-drops=0i,policy-result-custom=0i,policy-result-drop=0i,policy-result-noaction=3100336i,policy-result-nodata=0i,policy-result-nxdomain=0i,policy-result-truncate=0i,qa-latency=6553i,query-pipe-full-drops=0i,questions=29099363i,real-memory-usage=280494080i,resource-limits=0i,security-status=1i,server-parse-errors=0i,servfail-answers=304253i,spoof-prevents=0i,sys-msec=1312600i,tcp-client-overflow=0i,tcp-clients=0i,tcp-outqueries=116i,tcp-questions=133i,throttle-entries=21i,throttled-out=13296i,throttled-outqueries=13296i,too-old-drops=2i,udp-in-errors=4i,udp-noport-errors=2918i,udp-recvbuf-errors=0i,udp-sndbuf-errors=0i,unauthorized-tcp=0i,unauthorized-udp=0i,unexpected-packets=0i,unreachables=1708i,uptime=167482i,user-msec=1282640i,x-our-latency=19i,x-ourtime-slow=642i,x-ourtime0-1=3095566i,x-ourtime1-2=3401i,x-ourtime16-32=201i,x-ourtime2-4=304i,x-ourtime4-8=198i,x-ourtime8-16=24i 1533903879000000000 ``` diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go index d040d8355329d..61637bd46c89c 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -1,10 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package powerdns_recursor import ( "bufio" + _ "embed" "errors" "fmt" - "log" "math/rand" "net" "os" @@ -17,35 +18,26 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type PowerdnsRecursor struct { UnixSockets []string `toml:"unix_sockets"` SocketDir string `toml:"socket_dir"` SocketMode string `toml:"socket_mode"` + Log telegraf.Logger `toml:"-"` + mode uint32 } var defaultTimeout = 5 * time.Second -var sampleConfig = ` - ## Path to the Recursor control socket. - unix_sockets = ["/var/run/pdns_recursor.controlsocket"] - - ## Directory to create receive socket. This default is likely not writable, - ## please reference the full plugin documentation for a recommended setup. - # socket_dir = "/var/run/" - ## Socket permissions for the receive socket. - # socket_mode = "0666" -` - -func (p *PowerdnsRecursor) SampleConfig() string { +func (*PowerdnsRecursor) SampleConfig() string { return sampleConfig } -func (p *PowerdnsRecursor) Description() string { - return "Read metrics from one or many PowerDNS Recursor servers" -} - func (p *PowerdnsRecursor) Init() error { if p.SocketMode != "" { mode, err := strconv.ParseUint(p.SocketMode, 8, 32) @@ -97,14 +89,16 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator } defer conn.Close() - conn.SetDeadline(time.Now().Add(defaultTimeout)) + if err := conn.SetDeadline(time.Now().Add(defaultTimeout)); err != nil { + return err + } // Read and write buffer rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) // Send command if _, err := fmt.Fprint(rw, "get-all\n"); err != nil { - return nil + return err } if err := rw.Flush(); err != nil { return err @@ -123,19 +117,17 @@ func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator metrics := string(buf) // Process data - fields := parseResponse(metrics) + fields := p.parseResponse(metrics) // Add server socket as a tag tags := map[string]string{"server": address} acc.AddFields("powerdns_recursor", fields, tags) - conn.Close() - - return nil + return conn.Close() } -func parseResponse(metrics string) map[string]interface{} { +func (p *PowerdnsRecursor) parseResponse(metrics string) map[string]interface{} { values := make(map[string]interface{}) s := strings.Split(metrics, "\n") @@ -148,8 +140,7 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! 
[inputs.powerdns_recursor] error parsing integer for metric %q: %s", - metric, err.Error()) + p.Log.Errorf("error parsing integer for metric %q: %s", metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go index d0f5690cc31cb..a4fe9586cd8df 100644 --- a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -8,12 +8,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" -) -type statServer struct{} + "github.com/influxdata/telegraf/testutil" +) var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + @@ -99,25 +97,26 @@ var intOverflowMetrics = "all-outqueries\t18446744073709550195\nanswers-slow\t36 "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { - if runtime.GOOS == "darwin" { - t.Skip("Skipping test on darwin") + if runtime.GOOS == "darwin" || runtime.GOOS == "windows" { + t.Skip("Skipping on windows and darwin, as unixgram sockets are not supported") } // We create a fake server to return test data controlSocket := "/tmp/pdns5724354148158589552.controlsocket" addr, err := net.ResolveUnixAddr("unixgram", controlSocket) - if err != nil { - t.Fatal("Cannot parse unix socket") - } + require.NoError(t, err, "Cannot parse unix socket") socket, err := net.ListenUnixgram("unixgram", addr) - if err != nil { - t.Fatal("Cannot initialize server on port") - } + require.NoError(t, err, "Cannot initialize server on port") var wg sync.WaitGroup wg.Add(1) go func() { defer func() { + // Ignore the returned error as we need to remove the socket file anyway + //nolint:errcheck,revive socket.Close() + // Ignore the returned error as we want to remove the file and ignore + // no-such-file errors + //nolint:errcheck,revive os.Remove(controlSocket) wg.Done() }() @@ -126,13 +125,19 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { buf := make([]byte, 1024) n, remote, err := socket.ReadFromUnix(buf) if err != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive socket.Close() return } data := buf[:n] if string(data) == "get-all\n" { + // Ignore the returned error as we need to close the socket anyway + //nolint:errcheck,revive socket.WriteToUnix([]byte(metrics), remote) + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive socket.Close() } @@ -145,13 +150,11 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { SocketDir: "/tmp", SocketMode: "0666", } - err = p.Init() - require.NoError(t, err) + require.NoError(t, p.Init()) var acc testutil.Accumulator - err = acc.GatherError(p.Gather) - require.NoError(t, err) + require.NoError(t, acc.GatherError(p.Gather)) wg.Wait() @@ -180,12 +183,16 @@ func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { "x-ourtime2-4", "x-ourtime4-8", "x-ourtime8-16"} for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric) + require.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric) } } func TestPowerdnsRecursorParseMetrics(t *testing.T) { - values := parseResponse(metrics) + p := 
&PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(metrics) tests := []struct { key string @@ -299,19 +306,17 @@ func TestPowerdnsRecursorParseMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) - continue - } - if value != test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { - values := parseResponse(corruptMetrics) + p := &PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(corruptMetrics) tests := []struct { key string @@ -424,19 +429,17 @@ func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) - continue - } - if value != test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { - values := parseResponse(intOverflowMetrics) + p := &PowerdnsRecursor{ + Log: testutil.Logger{}, + } + + values := p.parseResponse(intOverflowMetrics) tests := []struct { key string @@ -549,13 +552,7 @@ func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { for _, test := range tests { value, ok := values[test.key] - if !ok { - t.Errorf("Did not find key for metric %s in values", test.key) - continue - } - if value != test.value { - t.Errorf("Metric: %s, Expected: %d, actual: %d", - test.key, test.value, value) - } + require.Truef(t, ok, "Did not find key for metric %s in values", test.key) + require.EqualValuesf(t, value, test.value, "Metric: %s, Expected: %d, actual: %d", test.key, test.value, value) } } diff --git a/plugins/inputs/powerdns_recursor/sample.conf b/plugins/inputs/powerdns_recursor/sample.conf new file mode 100644 index 0000000000000..da42beb3ea3d7 --- /dev/null +++ b/plugins/inputs/powerdns_recursor/sample.conf @@ -0,0 +1,10 @@ +# Read metrics from one or many PowerDNS Recursor servers +[[inputs.powerdns_recursor]] + ## Path to the Recursor control socket. + unix_sockets = ["/var/run/pdns_recursor.controlsocket"] + + ## Directory to create receive socket. This default is likely not writable, + ## please reference the full plugin documentation for a recommended setup. + # socket_dir = "/var/run/" + ## Socket permissions for the receive socket. + # socket_mode = "0666" diff --git a/plugins/inputs/processes/README.md b/plugins/inputs/processes/README.md index 756326d75246d..12caba4eca684 100644 --- a/plugins/inputs/processes/README.md +++ b/plugins/inputs/processes/README.md @@ -8,20 +8,21 @@ it requires access to execute `ps`. **Supported Platforms**: Linux, FreeBSD, Darwin -### Configuration +## Configuration -```toml +```toml @sample.conf # Get the number of processes and group them by status [[inputs.processes]] # no configuration ``` -Another possible configuration is to define an alternative path for resolving the /proc location. 
-Using the environment variable `HOST_PROC` the plugin will retrieve process information from the specified location. +Another possible configuration is to define an alternative path for resolving +the /proc location. Using the environment variable `HOST_PROC` the plugin will +retrieve process information from the specified location. `docker run -v /proc:/rootfs/proc:ro -e HOST_PROC=/rootfs/proc` -### Metrics +## Metrics - processes - fields: @@ -38,13 +39,13 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info - parked (linux only) - total_threads (linux only) -### Process State Mappings +## Process State Mappings Different OSes use slightly different State codes for their processes, these state codes are documented in `man ps`, and I will give a mapping of what major OS state codes correspond to in telegraf metrics: -``` +```sh Linux FreeBSD Darwin meaning R R R running S S S sleeping @@ -56,8 +57,8 @@ Linux FreeBSD Darwin meaning W W none paging (linux kernel < 2.6 only), wait (freebsd) ``` -### Example Output +## Example Output -``` +```shell processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042 ``` diff --git a/plugins/inputs/processes/processes.go b/plugins/inputs/processes/processes.go index 9ee583dbacecf..2f5551e11215a 100644 --- a/plugins/inputs/processes/processes.go +++ b/plugins/inputs/processes/processes.go @@ -1,7 +1,11 @@ package processes -func (p *Processes) Description() string { - return "Get the number of processes and group them by status" -} +import _ "embed" + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string -func (p *Processes) SampleConfig() string { return "" } +func (*Processes) SampleConfig() string { + return sampleConfig +} diff --git a/plugins/inputs/processes/processes_notwindows.go b/plugins/inputs/processes/processes_notwindows.go index 9faec83afa7d0..9395d34436238 100644 --- a/plugins/inputs/processes/processes_notwindows.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -1,3 +1,5 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build !windows // +build !windows package processes @@ -5,7 +7,6 @@ package processes import ( "bytes" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -151,7 +152,7 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { stats := bytes.Fields(data) if len(stats) < 3 { - return fmt.Errorf("Something is terribly wrong with %s", filename) + return fmt.Errorf("something is terribly wrong with %s", filename) } switch stats[0][0] { case 'R': @@ -191,7 +192,7 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { } func readProcFile(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { if os.IsNotExist(err) { return nil, nil diff --git a/plugins/inputs/processes/processes_test.go b/plugins/inputs/processes/processes_test.go index ca74bd0f59442..7fc0a76dac036 100644 --- a/plugins/inputs/processes/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package processes @@ -8,10 +9,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func 
TestProcesses(t *testing.T) { @@ -26,13 +27,13 @@ func TestProcesses(t *testing.T) { err := processes.Gather(&acc) require.NoError(t, err) - assert.True(t, acc.HasInt64Field("processes", "running")) - assert.True(t, acc.HasInt64Field("processes", "sleeping")) - assert.True(t, acc.HasInt64Field("processes", "stopped")) - assert.True(t, acc.HasInt64Field("processes", "total")) + require.True(t, acc.HasInt64Field("processes", "running")) + require.True(t, acc.HasInt64Field("processes", "sleeping")) + require.True(t, acc.HasInt64Field("processes", "stopped")) + require.True(t, acc.HasInt64Field("processes", "total")) total, ok := acc.Get("processes") require.True(t, ok) - assert.True(t, total.Fields["total"].(int64) > 0) + require.True(t, total.Fields["total"].(int64) > 0) } func TestFromPS(t *testing.T) { @@ -189,7 +190,7 @@ func (t *tester) testProcFile2(_ string) ([]byte, error) { } func testExecPSError() ([]byte, error) { - return []byte("\nSTAT\nD\nI\nL\nR\nR+\nS\nS+\nSNs\nSs\nU\nZ\n"), fmt.Errorf("ERROR!") + return []byte("\nSTAT\nD\nI\nL\nR\nR+\nS\nS+\nSNs\nSs\nU\nZ\n"), fmt.Errorf("error") } const testProcStat = `10 (rcuob/0) %s 2 0 0 0 -1 2129984 0 0 0 0 0 0 0 0 20 0 %s 0 11 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go index 567373c7c7260..f798a1668c738 100644 --- a/plugins/inputs/processes/processes_windows.go +++ b/plugins/inputs/processes/processes_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package processes diff --git a/plugins/inputs/processes/sample.conf b/plugins/inputs/processes/sample.conf new file mode 100644 index 0000000000000..4c39c02eb433d --- /dev/null +++ b/plugins/inputs/processes/sample.conf @@ -0,0 +1,3 @@ +# Get the number of processes and group them by status +[[inputs.processes]] + # no configuration diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index 3803215697ec7..e59549267efa3 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -1,10 +1,11 @@ # Procstat Input Plugin -The procstat plugin can be used to monitor the system resource usage of one or more processes. -The procstat_lookup metric displays the query information, +The procstat plugin can be used to monitor the system resource usage of one or +more processes. The procstat_lookup metric displays the query information, specifically the number of PIDs returned on a search Processes can be selected for monitoring using one of several methods: + - pidfile - exe - pattern @@ -13,9 +14,9 @@ Processes can be selected for monitoring using one of several methods: - cgroup - win_service -### Configuration: +## Configuration -```toml +```toml @sample.conf # Monitor process cpu and memory usage [[inputs.procstat]] ## PID file to monitor process @@ -26,9 +27,10 @@ Processes can be selected for monitoring using one of several methods: # pattern = "nginx" ## user as argument for pgrep (ie, pgrep -u ) # user = "nginx" - ## Systemd unit name + ## Systemd unit name, supports globs when include_systemd_children is set to true # systemd_unit = "nginx.service" - ## CGroup name or path + # include_systemd_children = false + ## CGroup name or path, supports globs # cgroup = "systemd/system.slice/nginx.service" ## Windows service name @@ -44,6 +46,9 @@ Processes can be selected for monitoring using one of several methods: ## When true add the full cmdline as a tag. 
# cmdline_tag = false + ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. + # mode = "irix" + ## Add the PID as a tag instead of as a field. When collecting multiple ## processes with otherwise matching tags this setting should be enabled to ## ensure each process has a unique identity. @@ -59,21 +64,12 @@ Processes can be selected for monitoring using one of several methods: # pid_finder = "pgrep" ``` -#### Windows support +### Windows support Preliminary support for Windows has been added, however you may prefer using the `win_perf_counters` input plugin as a more mature alternative. -When using the `pid_finder = "native"` in Windows, the pattern lookup method is -implemented as a WMI query. The pattern allows fuzzy matching using only -[WMI query patterns](https://msdn.microsoft.com/en-us/library/aa392263(v=vs.85).aspx): -```toml -[[inputs.procstat]] - pattern = "%influx%" - pid_finder = "native" -``` - -### Metrics: +## Metrics - procstat - tags: @@ -86,6 +82,7 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - user (when selected) - systemd_unit (when defined) - cgroup (when defined) + - cgroup_full (when cgroup or systemd_unit is used with glob) - win_service (when defined) - fields: - child_major_faults (int) @@ -165,9 +162,9 @@ implemented as a WMI query. The pattern allows fuzzy matching using only *NOTE: Resource limit > 2147483647 will be reported as 2147483647.* -### Example Output: +## Example Output -``` +```shell procstat_lookup,host=prash-laptop,pattern=influxd,pid_finder=pgrep,result=success pid_count=1i,running=1i,result_code=0i 1582089700000000000 procstat,host=prash-laptop,pattern=influxd,process_name=influxd,user=root involuntary_context_switches=151496i,child_minor_faults=1061i,child_major_faults=8i,cpu_time_user=2564.81,cpu_time_idle=0,cpu_time_irq=0,cpu_time_guest=0,pid=32025i,major_faults=8609i,created_at=1580107536000000000i,voluntary_context_switches=1058996i,cpu_time_system=616.98,cpu_time_steal=0,cpu_time_guest_nice=0,memory_swap=0i,memory_locked=0i,memory_usage=1.7797634601593018,num_threads=18i,cpu_time_nice=0,cpu_time_iowait=0,cpu_time_soft_irq=0,memory_rss=148643840i,memory_vms=1435688960i,memory_data=0i,memory_stack=0i,minor_faults=1856550i 1582089700000000000 ``` diff --git a/plugins/inputs/procstat/native_finder.go b/plugins/inputs/procstat/native_finder.go index 5f286dd64a63e..041e2cae91888 100644 --- a/plugins/inputs/procstat/native_finder.go +++ b/plugins/inputs/procstat/native_finder.go @@ -2,12 +2,12 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/process" ) //NativeFinder uses gopsutil to find processes @@ -20,7 +20,7 @@ func NewNativeFinder() (PIDFinder, error) { } //Uid will return all pids for the given user -func (pg *NativeFinder) Uid(user string) ([]PID, error) { +func (pg *NativeFinder) UID(user string) ([]PID, error) { var dst []PID procs, err := process.Processes() if err != nil { @@ -43,7 +43,7 @@ func (pg *NativeFinder) Uid(user string) ([]PID, error) { //PidFile returns the pid from the pid file given. func (pg *NativeFinder) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. 
Error: '%s'", path, err) @@ -54,7 +54,6 @@ func (pg *NativeFinder) PidFile(path string) ([]PID, error) { } pids = append(pids, PID(pid)) return pids, nil - } //FullPattern matches on the command line when the process was executed diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go index 9d7409ba1df8e..528b083ae628b 100644 --- a/plugins/inputs/procstat/native_finder_notwindows.go +++ b/plugins/inputs/procstat/native_finder_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go index ef9c5ffb11523..f6068ac268e0e 100644 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ b/plugins/inputs/procstat/native_finder_windows_test.go @@ -2,15 +2,13 @@ package procstat import ( "fmt" - "testing" - "os/user" + "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestGather_RealPattern(t *testing.T) { +func TestGather_RealPatternIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -19,10 +17,10 @@ func TestGather_RealPattern(t *testing.T) { pids, err := pg.Pattern(`procstat`) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } -func TestGather_RealFullPattern(t *testing.T) { +func TestGather_RealFullPatternIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -31,10 +29,10 @@ func TestGather_RealFullPattern(t *testing.T) { pids, err := pg.FullPattern(`%procstat%`) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } -func TestGather_RealUser(t *testing.T) { +func TestGather_RealUserIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } @@ -42,8 +40,8 @@ func TestGather_RealUser(t *testing.T) { require.NoError(t, err) pg, err := NewNativeFinder() require.NoError(t, err) - pids, err := pg.Uid(user.Username) + pids, err := pg.UID(user.Username) require.NoError(t, err) fmt.Println(pids) - assert.Equal(t, len(pids) > 0, true) + require.Equal(t, len(pids) > 0, true) } diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 37f9dfc3f67a9..34c44e0b2fefb 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -2,7 +2,7 @@ package procstat import ( "fmt" - "io/ioutil" + "os" "os/exec" "strconv" "strings" @@ -25,7 +25,7 @@ func NewPgrep() (PIDFinder, error) { func (pg *Pgrep) PidFile(path string) ([]PID, error) { var pids []PID - pidString, err := ioutil.ReadFile(path) + pidString, err := os.ReadFile(path) if err != nil { return pids, fmt.Errorf("Failed to read pidfile '%s'. 
Error: '%s'", path, err) @@ -43,7 +43,7 @@ func (pg *Pgrep) Pattern(pattern string) ([]PID, error) { return find(pg.path, args) } -func (pg *Pgrep) Uid(user string) ([]PID, error) { +func (pg *Pgrep) UID(user string) ([]PID, error) { args := []string{"-u", user} return find(pg.path, args) } diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 042929f0864cf..f31cef4abe1c6 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/process" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/process" ) type Process interface { @@ -26,12 +26,13 @@ type Process interface { RlimitUsage(bool) ([]process.RlimitStat, error) Username() (string, error) CreateTime() (int64, error) + Ppid() (int32, error) } type PIDFinder interface { PidFile(path string) ([]PID, error) Pattern(pattern string) ([]PID, error) - Uid(user string) ([]PID, error) + UID(user string) ([]PID, error) FullPattern(path string) ([]PID, error) } @@ -42,13 +43,13 @@ type Proc struct { } func NewProc(pid PID) (Process, error) { - process, err := process.NewProcess(int32(pid)) + p, err := process.NewProcess(int32(pid)) if err != nil { return nil, err } proc := &Proc{ - Process: process, + Process: p, hasCPUTimes: false, tags: make(map[string]string), } @@ -67,11 +68,11 @@ func (p *Proc) Username() (string, error) { return p.Process.Username() } -func (p *Proc) Percent(interval time.Duration) (float64, error) { - cpu_perc, err := p.Process.Percent(time.Duration(0)) +func (p *Proc) Percent(_ time.Duration) (float64, error) { + cpuPerc, err := p.Process.Percent(time.Duration(0)) if !p.hasCPUTimes && err == nil { p.hasCPUTimes = true - return 0, fmt.Errorf("Must call Percent twice to compute percent cpu.") + return 0, fmt.Errorf("must call Percent twice to compute percent cpu") } - return cpu_perc, err + return cpuPerc, err } diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 1d6af5df42246..79047cfa80aab 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -1,19 +1,28 @@ +//go:generate ../../../tools/readme_config_includer/generator package procstat import ( "bytes" + _ "embed" "fmt" - "io/ioutil" + "os" "os/exec" "path/filepath" + "runtime" "strconv" + "strings" "time" + "github.com/shirou/gopsutil/v3/process" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/shirou/gopsutil/process" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + var ( defaultPIDFinder = NewPgrep defaultProcess = NewProc @@ -22,18 +31,22 @@ var ( type PID int32 type Procstat struct { - PidFinder string `toml:"pid_finder"` - PidFile string `toml:"pid_file"` - Exe string - Pattern string - Prefix string - CmdLineTag bool `toml:"cmdline_tag"` - ProcessName string - User string - SystemdUnit string - CGroup string `toml:"cgroup"` - PidTag bool - WinService string `toml:"win_service"` + PidFinder string `toml:"pid_finder"` + PidFile string `toml:"pid_file"` + Exe string + Pattern string + Prefix string + CmdLineTag bool `toml:"cmdline_tag"` + ProcessName string + User string + SystemdUnit string `toml:"systemd_unit"` + IncludeSystemdChildren bool `toml:"include_systemd_children"` + CGroup string `toml:"cgroup"` + PidTag bool + WinService string `toml:"win_service"` + Mode string + + solarisMode bool finder PIDFinder @@ -42,54 +55,14 @@ type Procstat struct { createProcess func(PID) (Process, error) } -var sampleConfig = ` - ## PID file to monitor process - pid_file = "/var/run/nginx.pid" - ## executable name (ie, pgrep ) - # exe = "nginx" - ## pattern as argument for pgrep (ie, pgrep -f ) - # pattern = "nginx" - ## user as argument for pgrep (ie, pgrep -u ) - # user = "nginx" - ## Systemd unit name - # systemd_unit = "nginx.service" - ## CGroup name or path - # cgroup = "systemd/system.slice/nginx.service" - - ## Windows service name - # win_service = "" - - ## override for process_name - ## This is optional; default is sourced from /proc//status - # process_name = "bar" - - ## Field name prefix - # prefix = "" - - ## When true add the full cmdline as a tag. - # cmdline_tag = false - - ## Add the PID as a tag instead of as a field. When collecting multiple - ## processes with otherwise matching tags this setting should be enabled to - ## ensure each process has a unique identity. - ## - ## Enabling this option may result in a large number of series, especially - ## when processes have a short lifetime. - # pid_tag = false - - ## Method to use when finding process IDs. Can be one of 'pgrep', or - ## 'native'. The pgrep finder calls the pgrep executable in the PATH while - ## the native finder performs the search directly in a manor dependent on the - ## platform. 
Default is 'pgrep' - # pid_finder = "pgrep" -` - -func (_ *Procstat) SampleConfig() string { - return sampleConfig +type PidsTags struct { + PIDS []PID + Tags map[string]string + Err error } -func (_ *Procstat) Description() string { - return "Monitor process cpu and memory usage" +func (*Procstat) SampleConfig() string { + return sampleConfig } func (p *Procstat) Gather(acc telegraf.Accumulator) error { @@ -103,52 +76,68 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.PidFinder = "pgrep" p.createPIDFinder = defaultPIDFinder } - } if p.createProcess == nil { p.createProcess = defaultProcess } - pids, tags, err := p.findPids(acc) - if err != nil { - fields := map[string]interface{}{ - "pid_count": 0, - "running": 0, - "result_code": 1, + pidCount := 0 + now := time.Now() + newProcs := make(map[PID]Process, len(p.procs)) + pidTags := p.findPids() + for _, pidTag := range pidTags { + pids := pidTag.PIDS + tags := pidTag.Tags + err := pidTag.Err + pidCount += len(pids) + if err != nil { + fields := map[string]interface{}{ + "pid_count": 0, + "running": 0, + "result_code": 1, + } + tags := map[string]string{ + "pid_finder": p.PidFinder, + "result": "lookup_error", + } + acc.AddFields("procstat_lookup", fields, tags, now) + return err } - tags := map[string]string{ - "pid_finder": p.PidFinder, - "result": "lookup_error", + + err = p.updateProcesses(pids, tags, p.procs, newProcs) + if err != nil { + acc.AddError(fmt.Errorf("procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", + p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) } - acc.AddFields("procstat_lookup", fields, tags) - return err } - procs, err := p.updateProcesses(pids, tags, p.procs) - if err != nil { - acc.AddError(fmt.Errorf("E! Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", - p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) + p.procs = newProcs + for _, proc := range p.procs { + p.addMetric(proc, acc, now) } - p.procs = procs - for _, proc := range p.procs { - p.addMetric(proc, acc) + tags := make(map[string]string) + for _, pidTag := range pidTags { + for key, value := range pidTag.Tags { + tags[key] = value + } } fields := map[string]interface{}{ - "pid_count": len(pids), - "running": len(procs), + "pid_count": pidCount, + "running": len(p.procs), "result_code": 0, } + tags["pid_finder"] = p.PidFinder tags["result"] = "success" - acc.AddFields("procstat_lookup", fields, tags) + acc.AddFields("procstat_lookup", fields, tags, now) return nil } // Add metrics a single Process -func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { +func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator, t time.Time) { var prefix string if p.Prefix != "" { prefix = p.Prefix + "_" @@ -180,9 +169,9 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { //If cmd_line tag is true and it is not already set add cmdline as a tag if p.CmdLineTag { if _, ok := proc.Tags()["cmdline"]; !ok { - Cmdline, err := proc.Cmdline() + cmdline, err := proc.Cmdline() if err == nil { - proc.Tags()["cmdline"] = Cmdline + proc.Tags()["cmdline"] = cmdline } } } @@ -224,23 +213,27 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns } - cpu_time, err := proc.Times() + cpuTime, err := proc.Times() if err == nil { - fields[prefix+"cpu_time_user"] = cpu_time.User - fields[prefix+"cpu_time_system"] = cpu_time.System - fields[prefix+"cpu_time_idle"] = 
cpu_time.Idle - fields[prefix+"cpu_time_nice"] = cpu_time.Nice - fields[prefix+"cpu_time_iowait"] = cpu_time.Iowait - fields[prefix+"cpu_time_irq"] = cpu_time.Irq - fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq - fields[prefix+"cpu_time_steal"] = cpu_time.Steal - fields[prefix+"cpu_time_guest"] = cpu_time.Guest - fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice - } - - cpu_perc, err := proc.Percent(time.Duration(0)) + fields[prefix+"cpu_time_user"] = cpuTime.User + fields[prefix+"cpu_time_system"] = cpuTime.System + fields[prefix+"cpu_time_idle"] = cpuTime.Idle + fields[prefix+"cpu_time_nice"] = cpuTime.Nice + fields[prefix+"cpu_time_iowait"] = cpuTime.Iowait + fields[prefix+"cpu_time_irq"] = cpuTime.Irq + fields[prefix+"cpu_time_soft_irq"] = cpuTime.Softirq + fields[prefix+"cpu_time_steal"] = cpuTime.Steal + fields[prefix+"cpu_time_guest"] = cpuTime.Guest + fields[prefix+"cpu_time_guest_nice"] = cpuTime.GuestNice + } + + cpuPerc, err := proc.Percent(time.Duration(0)) if err == nil { - fields[prefix+"cpu_usage"] = cpu_perc + if p.solarisMode { + fields[prefix+"cpu_usage"] = cpuPerc / float64(runtime.NumCPU()) + } else { + fields[prefix+"cpu_usage"] = cpuPerc + } } mem, err := proc.MemoryInfo() @@ -253,9 +246,9 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { fields[prefix+"memory_locked"] = mem.Locked } - mem_perc, err := proc.MemoryPercent() + memPerc, err := proc.MemoryPercent() if err == nil { - fields[prefix+"memory_usage"] = mem_perc + fields[prefix+"memory_usage"] = memPerc } rlims, err := proc.RlimitUsage(true) @@ -297,13 +290,16 @@ func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { } } - acc.AddFields("procstat", fields, proc.Tags()) + ppid, err := proc.Ppid() + if err == nil { + fields[prefix+"ppid"] = ppid + } + + acc.AddFields("procstat", fields, proc.Tags(), t) } // Update monitored Processes -func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process) (map[PID]Process, error) { - procs := make(map[PID]Process, len(prevInfo)) - +func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process, procs map[PID]Process) error { for _, pid := range pids { info, ok := prevInfo[pid] if ok { @@ -338,7 +334,7 @@ func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo } } } - return procs, nil + return nil } // Create and return PIDGatherer lazily @@ -354,16 +350,34 @@ func (p *Procstat) getPIDFinder() (PIDFinder, error) { } // Get matching PIDs and their initial tags -func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, error) { +func (p *Procstat) findPids() []PidsTags { + var pidTags []PidsTags + + if p.SystemdUnit != "" { + groups := p.systemdUnitPIDs() + return groups + } else if p.CGroup != "" { + groups := p.cgroupPIDs() + return groups + } else { + f, err := p.getPIDFinder() + if err != nil { + pidTags = append(pidTags, PidsTags{nil, nil, err}) + return pidTags + } + pids, tags, err := p.SimpleFindPids(f) + pidTags = append(pidTags, PidsTags{pids, tags, err}) + } + + return pidTags +} + +// Get matching PIDs and their initial tags +func (p *Procstat) SimpleFindPids(f PIDFinder) ([]PID, map[string]string, error) { var pids []PID tags := make(map[string]string) var err error - f, err := p.getPIDFinder() - if err != nil { - return nil, nil, err - } - if p.PidFile != "" { pids, err = f.PidFile(p.PidFile) tags = map[string]string{"pidfile": p.PidFile} @@ -374,19 +388,13 @@ func (p *Procstat) findPids(acc 
telegraf.Accumulator) ([]PID, map[string]string, pids, err = f.FullPattern(p.Pattern) tags = map[string]string{"pattern": p.Pattern} } else if p.User != "" { - pids, err = f.Uid(p.User) + pids, err = f.UID(p.User) tags = map[string]string{"user": p.User} - } else if p.SystemdUnit != "" { - pids, err = p.systemdUnitPIDs() - tags = map[string]string{"systemd_unit": p.SystemdUnit} - } else if p.CGroup != "" { - pids, err = p.cgroupPIDs() - tags = map[string]string{"cgroup": p.CGroup} } else if p.WinService != "" { pids, err = p.winServicePIDs() tags = map[string]string{"win_service": p.WinService} } else { - err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") + err = fmt.Errorf("either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") } return pids, tags, err @@ -395,8 +403,23 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, // execCommand is so tests can mock out exec.Command usage. var execCommand = exec.Command -func (p *Procstat) systemdUnitPIDs() ([]PID, error) { +func (p *Procstat) systemdUnitPIDs() []PidsTags { + if p.IncludeSystemdChildren { + p.CGroup = fmt.Sprintf("systemd/system.slice/%s", p.SystemdUnit) + return p.cgroupPIDs() + } + + var pidTags []PidsTags + + pids, err := p.simpleSystemdUnitPIDs() + tags := map[string]string{"systemd_unit": p.SystemdUnit} + pidTags = append(pidTags, PidsTags{pids, tags, err}) + return pidTags +} + +func (p *Procstat) simpleSystemdUnitPIDs() ([]PID, error) { var pids []PID + cmd := execCommand("systemctl", "show", p.SystemdUnit) out, err := cmd.Output() if err != nil { @@ -419,18 +442,43 @@ func (p *Procstat) systemdUnitPIDs() ([]PID, error) { } pids = append(pids, PID(pid)) } + return pids, nil } -func (p *Procstat) cgroupPIDs() ([]PID, error) { - var pids []PID +func (p *Procstat) cgroupPIDs() []PidsTags { + var pidTags []PidsTags procsPath := p.CGroup if procsPath[0] != '/' { procsPath = "/sys/fs/cgroup/" + procsPath } - procsPath = filepath.Join(procsPath, "cgroup.procs") - out, err := ioutil.ReadFile(procsPath) + items, err := filepath.Glob(procsPath) + if err != nil { + pidTags = append(pidTags, PidsTags{nil, nil, fmt.Errorf("glob failed '%s'", err)}) + return pidTags + } + for _, item := range items { + pids, err := p.singleCgroupPIDs(item) + tags := map[string]string{"cgroup": p.CGroup, "cgroup_full": item} + pidTags = append(pidTags, PidsTags{pids, tags, err}) + } + + return pidTags +} + +func (p *Procstat) singleCgroupPIDs(path string) ([]PID, error) { + var pids []PID + + ok, err := isDir(path) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("not a directory %s", path) + } + procsPath := filepath.Join(path, "cgroup.procs") + out, err := os.ReadFile(procsPath) if err != nil { return nil, err } @@ -448,6 +496,14 @@ func (p *Procstat) cgroupPIDs() ([]PID, error) { return pids, nil } +func isDir(path string) (bool, error) { + result, err := os.Stat(path) + if err != nil { + return false, err + } + return result.IsDir(), nil +} + func (p *Procstat) winServicePIDs() ([]PID, error) { var pids []PID @@ -461,6 +517,14 @@ func (p *Procstat) winServicePIDs() ([]PID, error) { return pids, nil } +func (p *Procstat) Init() error { + if strings.ToLower(p.Mode) == "solaris" { + p.solarisMode = true + } + + return nil +} + func init() { inputs.Add("procstat", func() telegraf.Input { return &Procstat{} diff --git a/plugins/inputs/procstat/procstat_test.go 
b/plugins/inputs/procstat/procstat_test.go index e1ee8ab921841..b42356eeba0d9 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -2,7 +2,6 @@ package procstat import ( "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -11,11 +10,11 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/process" - "github.com/stretchr/testify/assert" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/process" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func init() { @@ -27,17 +26,17 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd { cmd.Stderr = os.Stderr return cmd } -func TestMockExecCommand(t *testing.T) { +func TestMockExecCommand(_ *testing.T) { var cmd []string for _, arg := range os.Args { - if string(arg) == "--" { + if arg == "--" { cmd = []string{} continue } if cmd == nil { continue } - cmd = append(cmd, string(arg)) + cmd = append(cmd, arg) } if cmd == nil { return @@ -45,16 +44,20 @@ func TestMockExecCommand(t *testing.T) { cmdline := strings.Join(cmd, " ") if cmdline == "systemctl show TestGather_systemdUnitPIDs" { + //nolint:errcheck,revive fmt.Printf(`PIDFile= GuessMainPID=yes MainPID=11408 ControlPID=0 ExecMainPID=11408 `) + //nolint:revive // error code is important for this "test" os.Exit(0) } + //nolint:errcheck,revive fmt.Printf("command not found\n") + //nolint:revive // error code is important for this "test" os.Exit(1) } @@ -63,16 +66,16 @@ type testPgrep struct { err error } -func pidFinder(pids []PID, err error) func() (PIDFinder, error) { +func pidFinder(pids []PID) func() (PIDFinder, error) { return func() (PIDFinder, error) { return &testPgrep{ pids: pids, - err: err, + err: nil, }, nil } } -func (pg *testPgrep) PidFile(path string) ([]PID, error) { +func (pg *testPgrep) PidFile(_ string) ([]PID, error) { return pg.pids, pg.err } @@ -80,15 +83,15 @@ func (p *testProc) Cmdline() (string, error) { return "test_proc", nil } -func (pg *testPgrep) Pattern(pattern string) ([]PID, error) { +func (pg *testPgrep) Pattern(_ string) ([]PID, error) { return pg.pids, pg.err } -func (pg *testPgrep) Uid(user string) ([]PID, error) { +func (pg *testPgrep) UID(_ string) ([]PID, error) { return pg.pids, pg.err } -func (pg *testPgrep) FullPattern(pattern string) ([]PID, error) { +func (pg *testPgrep) FullPattern(_ string) ([]PID, error) { return pg.pids, pg.err } @@ -97,7 +100,7 @@ type testProc struct { tags map[string]string } -func newTestProc(pid PID) (Process, error) { +func newTestProc(_ PID) (Process, error) { proc := &testProc{ tags: make(map[string]string), } @@ -144,7 +147,7 @@ func (p *testProc) NumThreads() (int32, error) { return 0, nil } -func (p *testProc) Percent(interval time.Duration) (float64, error) { +func (p *testProc) Percent(_ time.Duration) (float64, error) { return 0, nil } @@ -160,19 +163,23 @@ func (p *testProc) Times() (*cpu.TimesStat, error) { return &cpu.TimesStat{}, nil } -func (p *testProc) RlimitUsage(gatherUsage bool) ([]process.RlimitStat, error) { +func (p *testProc) RlimitUsage(_ bool) ([]process.RlimitStat, error) { return []process.RlimitStat{}, nil } -var pid PID = PID(42) -var exe string = "foo" +func (p *testProc) Ppid() (int32, error) { + return 0, nil +} + +var pid = PID(42) +var exe = "foo" func TestGather_CreateProcessErrorOk(t *testing.T) { var acc testutil.Accumulator p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, 
nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: func(PID) (Process, error) { return nil, fmt.Errorf("createProcess error") }, @@ -198,12 +205,12 @@ func TestGather_ProcessName(t *testing.T) { p := Procstat{ Exe: exe, ProcessName: "custom_name", - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) + require.Equal(t, "custom_name", acc.TagValue("procstat", "process_name")) } func TestGather_NoProcessNameUsesReal(t *testing.T) { @@ -212,12 +219,12 @@ func TestGather_NoProcessNameUsesReal(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasTag("procstat", "process_name")) + require.True(t, acc.HasTag("procstat", "process_name")) } func TestGather_NoPidTag(t *testing.T) { @@ -225,12 +232,12 @@ func TestGather_NoPidTag(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasInt32Field("procstat", "pid")) - assert.False(t, acc.HasTag("procstat", "pid")) + require.True(t, acc.HasInt32Field("procstat", "pid")) + require.False(t, acc.HasTag("procstat", "pid")) } func TestGather_PidTag(t *testing.T) { @@ -239,12 +246,12 @@ func TestGather_PidTag(t *testing.T) { p := Procstat{ Exe: exe, PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, "42", acc.TagValue("procstat", "pid")) - assert.False(t, acc.HasInt32Field("procstat", "pid")) + require.Equal(t, "42", acc.TagValue("procstat", "pid")) + require.False(t, acc.HasInt32Field("procstat", "pid")) } func TestGather_Prefix(t *testing.T) { @@ -253,11 +260,11 @@ func TestGather_Prefix(t *testing.T) { p := Procstat{ Exe: exe, Prefix: "custom_prefix", - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds")) + require.True(t, acc.HasInt32Field("procstat", "custom_prefix_num_fds")) } func TestGather_Exe(t *testing.T) { @@ -265,12 +272,12 @@ func TestGather_Exe(t *testing.T) { p := Procstat{ Exe: exe, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, exe, acc.TagValue("procstat", "exe")) + require.Equal(t, exe, acc.TagValue("procstat", "exe")) } func TestGather_User(t *testing.T) { @@ -279,12 +286,12 @@ func TestGather_User(t *testing.T) { p := Procstat{ User: user, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, user, acc.TagValue("procstat", "user")) + require.Equal(t, user, acc.TagValue("procstat", "user")) } func TestGather_Pattern(t *testing.T) { @@ -293,19 +300,19 @@ func TestGather_Pattern(t *testing.T) { p := Procstat{ Pattern: pattern, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), 
createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, pattern, acc.TagValue("procstat", "pattern")) + require.Equal(t, pattern, acc.TagValue("procstat", "pattern")) } func TestGather_MissingPidMethod(t *testing.T) { var acc testutil.Accumulator p := Procstat{ - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.Error(t, acc.GatherError(p.Gather)) @@ -317,12 +324,12 @@ func TestGather_PidFile(t *testing.T) { p := Procstat{ PidFile: pidfile, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: newTestProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.Equal(t, pidfile, acc.TagValue("procstat", "pidfile")) + require.Equal(t, pidfile, acc.TagValue("procstat", "pidfile")) } func TestGather_PercentFirstPass(t *testing.T) { @@ -332,13 +339,13 @@ func TestGather_PercentFirstPass(t *testing.T) { p := Procstat{ Pattern: "foo", PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: NewProc, } require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) - assert.False(t, acc.HasFloatField("procstat", "cpu_usage")) + require.True(t, acc.HasFloatField("procstat", "cpu_time_user")) + require.False(t, acc.HasFloatField("procstat", "cpu_usage")) } func TestGather_PercentSecondPass(t *testing.T) { @@ -348,26 +355,30 @@ func TestGather_PercentSecondPass(t *testing.T) { p := Procstat{ Pattern: "foo", PidTag: true, - createPIDFinder: pidFinder([]PID{pid}, nil), + createPIDFinder: pidFinder([]PID{pid}), createProcess: NewProc, } require.NoError(t, acc.GatherError(p.Gather)) require.NoError(t, acc.GatherError(p.Gather)) - assert.True(t, acc.HasFloatField("procstat", "cpu_time_user")) - assert.True(t, acc.HasFloatField("procstat", "cpu_usage")) + require.True(t, acc.HasFloatField("procstat", "cpu_time_user")) + require.True(t, acc.HasFloatField("procstat", "cpu_usage")) } func TestGather_systemdUnitPIDs(t *testing.T) { p := Procstat{ - createPIDFinder: pidFinder([]PID{}, nil), + createPIDFinder: pidFinder([]PID{}), SystemdUnit: "TestGather_systemdUnitPIDs", } - var acc testutil.Accumulator - pids, tags, err := p.findPids(&acc) - require.NoError(t, err) - assert.Equal(t, []PID{11408}, pids) - assert.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + pidsTags := p.findPids() + for _, pidsTag := range pidsTags { + pids := pidsTag.PIDS + tags := pidsTag.Tags + err := pidsTag.Err + require.NoError(t, err) + require.Equal(t, []PID{11408}, pids) + require.Equal(t, "TestGather_systemdUnitPIDs", tags["systemd_unit"]) + } } func TestGather_cgroupPIDs(t *testing.T) { @@ -375,26 +386,28 @@ func TestGather_cgroupPIDs(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("no cgroups in windows") } - td, err := ioutil.TempDir("", "") - require.NoError(t, err) - defer os.RemoveAll(td) - err = ioutil.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) + td := t.TempDir() + err := os.WriteFile(filepath.Join(td, "cgroup.procs"), []byte("1234\n5678\n"), 0644) require.NoError(t, err) p := Procstat{ - createPIDFinder: pidFinder([]PID{}, nil), + createPIDFinder: pidFinder([]PID{}), CGroup: td, } - var acc testutil.Accumulator - pids, tags, err := p.findPids(&acc) - require.NoError(t, err) - assert.Equal(t, []PID{1234, 5678}, pids) - assert.Equal(t, td, tags["cgroup"]) + pidsTags := p.findPids() + for _, pidsTag := 
range pidsTags { + pids := pidsTag.PIDS + tags := pidsTag.Tags + err := pidsTag.Err + require.NoError(t, err) + require.Equal(t, []PID{1234, 5678}, pids) + require.Equal(t, td, tags["cgroup"]) + } } func TestProcstatLookupMetric(t *testing.T) { p := Procstat{ - createPIDFinder: pidFinder([]PID{543}, nil), + createPIDFinder: pidFinder([]PID{543}), Exe: "-Gsys", } var acc testutil.Accumulator @@ -402,3 +415,20 @@ func TestProcstatLookupMetric(t *testing.T) { require.NoError(t, err) require.Equal(t, len(p.procs)+1, len(acc.Metrics)) } + +func TestGather_SameTimestamps(t *testing.T) { + var acc testutil.Accumulator + pidfile := "/path/to/pidfile" + + p := Procstat{ + PidFile: pidfile, + createPIDFinder: pidFinder([]PID{pid}), + createProcess: newTestProc, + } + require.NoError(t, acc.GatherError(p.Gather)) + + procstat, _ := acc.Get("procstat") + procstatLookup, _ := acc.Get("procstat_lookup") + + require.Equal(t, procstat.Time, procstatLookup.Time) +} diff --git a/plugins/inputs/procstat/sample.conf b/plugins/inputs/procstat/sample.conf new file mode 100644 index 0000000000000..f2d45545c36dc --- /dev/null +++ b/plugins/inputs/procstat/sample.conf @@ -0,0 +1,45 @@ +# Monitor process cpu and memory usage +[[inputs.procstat]] + ## PID file to monitor process + pid_file = "/var/run/nginx.pid" + ## executable name (ie, pgrep <exe>) + # exe = "nginx" + ## pattern as argument for pgrep (ie, pgrep -f <pattern>) + # pattern = "nginx" + ## user as argument for pgrep (ie, pgrep -u <user>) + # user = "nginx" + ## Systemd unit name, supports globs when include_systemd_children is set to true + # systemd_unit = "nginx.service" + # include_systemd_children = false + ## CGroup name or path, supports globs + # cgroup = "systemd/system.slice/nginx.service" + + ## Windows service name + # win_service = "" + + ## override for process_name + ## This is optional; default is sourced from /proc/<pid>/status + # process_name = "bar" + + ## Field name prefix + # prefix = "" + + ## When true add the full cmdline as a tag. + # cmdline_tag = false + + ## Mode to use when calculating CPU usage. Can be one of 'solaris' or 'irix'. + # mode = "irix" + + ## Add the PID as a tag instead of as a field. When collecting multiple + ## processes with otherwise matching tags this setting should be enabled to + ## ensure each process has a unique identity. + ## + ## Enabling this option may result in a large number of series, especially + ## when processes have a short lifetime. + # pid_tag = false + + ## Method to use when finding process IDs. Can be one of 'pgrep', or + ## 'native'. The pgrep finder calls the pgrep executable in the PATH while + ## the native finder performs the search directly in a manner dependent on the + ## platform.
Default is 'pgrep' + # pid_finder = "pgrep" diff --git a/plugins/inputs/procstat/win_service_notwindows.go b/plugins/inputs/procstat/win_service_notwindows.go index 3d539d9f9918c..b7efcee17cdc1 100644 --- a/plugins/inputs/procstat/win_service_notwindows.go +++ b/plugins/inputs/procstat/win_service_notwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package procstat @@ -6,6 +7,6 @@ import ( "fmt" ) -func queryPidWithWinServiceName(winServiceName string) (uint32, error) { +func queryPidWithWinServiceName(_ string) (uint32, error) { return 0, fmt.Errorf("os not support win_service option") } diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/win_service_windows.go index 06dffc8472089..5d9c196e388c0 100644 --- a/plugins/inputs/procstat/win_service_windows.go +++ b/plugins/inputs/procstat/win_service_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package procstat diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index e9dd119cc12d4..3393b79c7b62b 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -3,36 +3,53 @@ The prometheus input plugin gathers metrics from HTTP servers exposing metrics in Prometheus format. -### Configuration: +## Configuration -```toml +```toml @sample.conf # Read metrics from one or many prometheus clients [[inputs.prometheus]] ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] - - ## Metric version controls the mapping from Prometheus metrics into - ## Telegraf metrics. When using the prometheus_client output, use the same - ## value in both plugins to ensure metrics are round-tripped without - ## modification. - ## - ## example: metric_version = 1; deprecated in 1.13 - ## metric_version = 2; recommended version + + ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. + ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. + ## Valid options: 1, 2 # metric_version = 1 - + + ## Url tag name (tag containing scraped url. optional, default is "url") + # url_tag = "url" + + ## Whether the timestamp of the scraped metrics will be ignored. + ## If set to true, the gather time will be used. + # ignore_timestamp = false + ## An array of Kubernetes services to scrape metrics from. + # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to - ## set this to `https` & most likely set the tls config. + ## set this to 'https' & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation # monitor_kubernetes_pods = true + + ## Get the list of pods to scrape with either the scope of + ## - cluster: the kubernetes watch api (default, no need to specify) + ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. + # pod_scrape_scope = "cluster" + + ## Only for node scrape scope: node IP of the node that telegraf is running on. + ## Either this config or the environment variable NODE_IP must be set.
+ # node_ip = "10.180.1.1" + + ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. + ## Default is 60 seconds. + # pod_scrape_interval = 60 + ## Restricts Kubernetes monitoring to a single namespace ## ex: monitor_kubernetes_pods_namespace = "default" # monitor_kubernetes_pods_namespace = "" @@ -42,77 +59,214 @@ in Prometheus format. # eg. To scrape pods on a specific node # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + # cache refresh interval to set the interval for re-sync of pods list. + # Default is 60 minutes. + # cache_refresh_interval = 60 + + ## Scrape Services available in Consul Catalog + # [inputs.prometheus.consul] + # enabled = true + # agent = "http://localhost:8500" + # query_interval = "5m" + + # [[inputs.prometheus.consul.query]] + # name = "a service name" + # tag = "a service tag" + # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' + # [inputs.prometheus.consul.query.tags] + # host = "{{.Node}}" + ## Use bearer token for authorization. ('bearer_token' takes priority) # bearer_token = "/path/to/bearer/token" ## OR # bearer_token_string = "abc_123" - + ## HTTP Basic Authentication username and password. ('bearer_token' and ## 'bearer_token_string' take priority) # username = "" # password = "" - + ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" - + ## Optional TLS Config # tls_ca = /path/to/cafile # tls_cert = /path/to/certfile # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification # insecure_skip_verify = false ``` -`urls` can contain a unix socket as well. If a different path is required (default is `/metrics` for both http[s] and unix) for a unix socket, add `path` as a query parameter as follows: `unix:///var/run/prometheus.sock?path=/custom/metrics` +`urls` can contain a unix socket as well. If a different path is required +(default is `/metrics` for both http[s] and unix) for a unix socket, add `path` +as a query parameter as follows: +`unix:///var/run/prometheus.sock?path=/custom/metrics` + +### Metric Format Configuration + +The `metric_version` setting controls how telegraf translates prometheus format +metrics to telegraf metrics. There are two options. + +With `metric_version = 1`, the prometheus metric name becomes the telegraf +metric name. Prometheus labels become telegraf tags. Prometheus values become +telegraf field values. The fields have generic keys based on the type of the +prometheus metric. This option produces metrics that are dense (not +sparse). Denseness is a useful property for some outputs, including those that +are more efficient with row-oriented data. + +`metric_version = 2` differs in a few ways. The prometheus metric name becomes a +telegraf field key. Metrics hold more than one value and the field keys aren't +generic. The resulting metrics are sparse, but for some outputs they may be +easier to process or query, including those that are more efficient with +column-oriented data. The telegraf metric name is the same for all metrics in +the input instance. It can be set with the `name_override` setting and defaults +to "prometheus". To have multiple metric names, you can use multiple instances +of the plugin, each with its own `name_override`. 
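To make the dense vs. sparse distinction concrete, here is a small, self-contained Go sketch (an editorial illustration, not part of this patch) of roughly how one scraped sample, e.g. `go_goroutines 21`, is shaped under each setting; the `printLP` helper and the sample values are invented for illustration:

```go
package main

import "fmt"

// printLP renders a metric in a rough line-protocol-like form: name,tags fields.
func printLP(name string, tags map[string]string, fields map[string]float64) {
	out := name
	for k, v := range tags {
		out += fmt.Sprintf(",%s=%s", k, v)
	}
	sep := " "
	for k, v := range fields {
		out += fmt.Sprintf("%s%s=%g", sep, k, v)
		sep = ","
	}
	fmt.Println(out)
}

func main() {
	tags := map[string]string{"url": "http://localhost:9100/metrics"}

	// metric_version = 1: the measurement is the prometheus metric name and
	// the value lands in a generic field keyed by the metric type ("gauge").
	printLP("go_goroutines", tags, map[string]float64{"gauge": 21})

	// metric_version = 2: the measurement is "prometheus" (or name_override)
	// and the prometheus metric name becomes the field key.
	printLP("prometheus", tags, map[string]float64{"go_goroutines": 21})
}
```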
+ +`metric_version = 2` uses the same histogram format as the [histogram +aggregator](../../aggregators/histogram/README.md). + +The Example Output sections show examples for both options. + +When using this plugin along with the prometheus_client output, use the same +option in both to ensure metrics are round-tripped without modification. -#### Kubernetes Service Discovery +### Kubernetes Service Discovery -URLs listed in the `kubernetes_services` parameter will be expanded -by looking up all A records assigned to the hostname as described in -[Kubernetes DNS service discovery](https://kubernetes.io/docs/concepts/services-networking/service/#dns). +URLs listed in the `kubernetes_services` parameter will be expanded by looking +up all A records assigned to the hostname as described in [Kubernetes DNS +service discovery][serv-disc]. -This method can be used to locate all -[Kubernetes headless services](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services). +This method can be used to locate all [Kubernetes headless services][headless]. -#### Kubernetes scraping +[serv-disc]: https://kubernetes.io/docs/concepts/services-networking/service/#dns -Enabling this option will allow the plugin to scrape for prometheus annotation on Kubernetes -pods. Currently, you can run this plugin in your kubernetes cluster, or we use the kubeconfig -file to determine where to monitor. -Currently the following annotation are supported: +[headless]: https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + +### Kubernetes scraping + +Enabling this option will allow the plugin to scrape for prometheus annotations +on Kubernetes pods. You can run this plugin inside your kubernetes cluster, or +it can use a kubeconfig file to determine where to monitor. Currently the +following annotations are supported: * `prometheus.io/scrape` Enable scraping for this pod. * `prometheus.io/scheme` If the metrics endpoint is secured then you will need to set this to `https` & most likely set the tls config. (default 'http') * `prometheus.io/path` Override the path for the metrics endpoint on the service. (default '/metrics') * `prometheus.io/port` Used to override the port. (default 9102) -Using the `monitor_kubernetes_pods_namespace` option allows you to limit which pods you are scraping. +Using the `monitor_kubernetes_pods_namespace` option allows you to limit which +pods you are scraping. + +Using `pod_scrape_scope = "node"` allows more scalable scraping: only pods on +the node that telegraf is running on are scraped, and the pod list is fetched +locally from the node's kubelet. This requires running Telegraf on every node +of the cluster. Note that either `node_ip` must be specified in the config or +the environment variable `NODE_IP` must be set to the host IP. The latter can +be done in the yaml of the pod running telegraf: + +```yaml +env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP +``` + +If using node level scrape scope, `pod_scrape_interval` specifies how often (in +seconds) the pod list for scraping should be updated. If not specified, the +default is 60 seconds. A rough sketch of the kubelet request made in this mode +follows the RBAC notes below. + +The pod running telegraf will need to have the proper rbac configuration in +order to be allowed to call the k8s api to discover and watch pods in the +cluster. A typical configuration will create a service account, a cluster role +with the appropriate rules and a cluster role binding to tie the cluster role to +the service account.
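As referenced above, in node scrape scope the plugin fetches the pod list from the local kubelet rather than watching the API server. Below is a rough sketch of that request, mirroring the cAdvisor code added later in this patch; the service-account token path and kubelet port 10250 are conventional in-cluster defaults and are assumptions of this sketch:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Assumes NODE_IP is set, as the README requires for node scrape scope.
	nodeIP := os.Getenv("NODE_IP")

	// Conventional in-cluster service-account token location (assumption).
	token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		panic(err)
	}

	// The kubelet serves the pod list on port 10250, the same endpoint the
	// plugin's cAdvisor code queries.
	req, err := http.NewRequest("GET", fmt.Sprintf("https://%s:10250/pods", nodeIP), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+string(token))
	req.Header.Add("Accept", "application/json")

	// Like the patch, skip verification of the kubelet's self-signed certificate.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("kubelet /pods:", resp.Status)
}
```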
Example of configuration for cluster level discovery: + +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: telegraf-k8s-role-{{.Release.Name}} +rules: +- apiGroups: [""] + resources: + - nodes + - nodes/proxy + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +--- +# Rolebinding for namespace to cluster-admin +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: telegraf-k8s-role-{{.Release.Name}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: telegraf-k8s-role-{{.Release.Name}} +subjects: +- kind: ServiceAccount + name: telegraf-k8s-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: telegraf-k8s-{{ .Release.Name }} +``` + +### Consul Service Discovery + +Enabling this option and configuring the consul `agent` url will allow the plugin +to query the consul catalog for available services. Using `query_interval` the +plugin will periodically query the consul catalog for services with `name` and +`tag` and refresh the list of scraped urls. It can use the information from the +catalog to build the scraped url and additional tags from a template. + +Multiple consul queries can be configured, each for a different service. +The following example fields can be used in url or tag templates: + +* Node +* Address +* NodeMeta +* ServicePort +* ServiceAddress +* ServiceTags +* ServiceMeta -#### Bearer Token +For a full list of available fields and their types, see the `CatalogService` +struct in the Consul API package (`github.com/hashicorp/consul/api`). + +### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on each interval and its contents will be appended to the Bearer string in the Authorization header. -### Usage for Caddy HTTP server +## Usage for Caddy HTTP server -If you want to monitor Caddy, you need to use Caddy with its Prometheus plugin: +Steps to monitor Caddy with Telegraf's Prometheus input plugin: -* Download Caddy+Prometheus plugin [here](https://caddyserver.com/download/linux/amd64?plugins=http.prometheus) -* Add the `prometheus` directive in your `CaddyFile` +* Download [Caddy](https://caddyserver.com/download) +* Download Prometheus and set up [monitoring Caddy with Prometheus metrics](https://caddyserver.com/docs/metrics#monitoring-caddy-with-prometheus-metrics) * Restart Caddy * Configure Telegraf to fetch metrics on it: ```toml [[inputs.prometheus]] # ## An array of urls to scrape metrics from. - urls = ["http://localhost:9180/metrics"] + urls = ["http://localhost:2019/metrics"] ``` -> This is the default URL where Caddy Prometheus plugin will send data. +> This is the default URL where Caddy will send data. > For more details, please read the [Caddy Prometheus documentation](https://github.com/miekg/caddy-prometheus/blob/master/README.md). -### Metrics: +## Metrics Measurement names are based on the Metric Family and tags are created for each label. The value is added to a field named based on the metric type. @@ -121,10 +275,11 @@ All metrics receive the `url` tag indicating the related URL specified in the Telegraf configuration. If using Kubernetes service discovery the `address` tag is also added indicating the discovered ip address. -### Example Output: +## Example Output -**Source** -``` +### Source + +```shell # HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 7.4545e-05 @@ -145,8 +300,9 @@ cpu_usage_user{cpu="cpu2"} 2.0161290322588776 cpu_usage_user{cpu="cpu3"} 1.5045135406226022 ``` -**Output** -``` +### Output + +```shell go_gc_duration_seconds,url=http://example.org:9273/metrics 1=0.001336611,count=14,sum=0.004527551,0=0.000057965,0.25=0.000083812,0.5=0.000286537,0.75=0.000365303 1505776733000000000 go_goroutines,url=http://example.org:9273/metrics gauge=21 1505776695000000000 cpu_usage_user,cpu=cpu0,url=http://example.org:9273/metrics gauge=1.513622603430151 1505776751000000000 @@ -155,8 +311,9 @@ cpu_usage_user,cpu=cpu2,url=http://example.org:9273/metrics gauge=2.119071644805 cpu_usage_user,cpu=cpu3,url=http://example.org:9273/metrics gauge=1.5228426395944945 1505776751000000000 ``` -**Output (when metric_version = 2)** -``` +### Output (when metric_version = 2) + +```shell prometheus,quantile=1,url=http://example.org:9273/metrics go_gc_duration_seconds=0.005574303 1556075100000000000 prometheus,quantile=0.75,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0001046 1556075100000000000 prometheus,quantile=0.5,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000719 1556075100000000000 diff --git a/plugins/inputs/prometheus/consul.go b/plugins/inputs/prometheus/consul.go new file mode 100644 index 0000000000000..2f008a495c09b --- /dev/null +++ b/plugins/inputs/prometheus/consul.go @@ -0,0 +1,208 @@ +package prometheus + +import ( + "bytes" + "context" + "fmt" + "net/url" + "strings" + "text/template" + "time" + + "github.com/hashicorp/consul/api" + "github.com/influxdata/telegraf/config" +) + +type ConsulConfig struct { + // Address of the Consul agent. The address must contain a hostname or an IP address + // and optionally a port (format: "host:port"). + Enabled bool `toml:"enabled"` + Agent string `toml:"agent"` + QueryInterval config.Duration `toml:"query_interval"` + Queries []*ConsulQuery `toml:"query"` +} + +// One Consul service discovery query +type ConsulQuery struct { + // A name of the searched services (not ID) + ServiceName string `toml:"name"` + + // A tag of the searched services + ServiceTag string `toml:"tag"` + + // A DC of the searched services + ServiceDc string `toml:"dc"` + + // A template URL of the Prometheus gathering interface. The hostname part + // of the URL will be replaced by the discovered address and port. + ServiceURL string `toml:"url"` + + // Extra tags to add to metrics found in Consul + ServiceExtraTags map[string]string `toml:"tags"` + + serviceURLTemplate *template.Template + serviceExtraTagsTemplate map[string]*template.Template + + // Store last error status and change log level depending on repeated occurrence + lastQueryFailed bool +} + +func (p *Prometheus) startConsul(ctx context.Context) error { + consulAPIConfig := api.DefaultConfig() + if p.ConsulConfig.Agent != "" { + consulAPIConfig.Address = p.ConsulConfig.Agent + } + + consul, err := api.NewClient(consulAPIConfig) + if err != nil { + return fmt.Errorf("cannot connect to the Consul agent: %v", err) + } + + // Parse the template for metrics URL, drop queries with template parse errors + i := 0 + for _, q := range p.ConsulConfig.Queries { + serviceURLTemplate, err := template.New("URL").Parse(q.ServiceURL) + if err != nil { + p.Log.Errorf("Could not parse the Consul query URL template (%s), skipping it.
Error: %s", q.ServiceURL, err) + continue + } + q.serviceURLTemplate = serviceURLTemplate + + // Allow to use join function in tags + templateFunctions := template.FuncMap{"join": strings.Join} + // Parse the tag value templates + q.serviceExtraTagsTemplate = make(map[string]*template.Template) + for tagName, tagTemplateString := range q.ServiceExtraTags { + tagTemplate, err := template.New(tagName).Funcs(templateFunctions).Parse(tagTemplateString) + if err != nil { + p.Log.Errorf("Could not parse the Consul query Extra Tag template (%s), skipping it. Error: %s", tagTemplateString, err) + continue + } + q.serviceExtraTagsTemplate[tagName] = tagTemplate + } + p.ConsulConfig.Queries[i] = q + i++ + } + // Prevent memory leak by erasing truncated values + for j := i; j < len(p.ConsulConfig.Queries); j++ { + p.ConsulConfig.Queries[j] = nil + } + p.ConsulConfig.Queries = p.ConsulConfig.Queries[:i] + + catalog := consul.Catalog() + + p.wg.Add(1) + go func() { + // Store last error status and change log level depending on repeated occurence + var refreshFailed = false + defer p.wg.Done() + err := p.refreshConsulServices(catalog) + if err != nil { + refreshFailed = true + p.Log.Errorf("Unable to refreh Consul services: %v", err) + } + for { + select { + case <-ctx.Done(): + return + case <-time.After(time.Duration(p.ConsulConfig.QueryInterval)): + err := p.refreshConsulServices(catalog) + if err != nil { + message := fmt.Sprintf("Unable to refreh Consul services: %v", err) + if refreshFailed { + p.Log.Debug(message) + } else { + p.Log.Warn(message) + } + refreshFailed = true + } else if refreshFailed { + refreshFailed = false + p.Log.Info("Successfully refreshed Consul services after previous errors") + } + } + } + }() + + return nil +} + +func (p *Prometheus) refreshConsulServices(c *api.Catalog) error { + consulServiceURLs := make(map[string]URLAndAddress) + + p.Log.Debugf("Refreshing Consul services") + + for _, q := range p.ConsulConfig.Queries { + queryOptions := api.QueryOptions{} + if q.ServiceDc != "" { + queryOptions.Datacenter = q.ServiceDc + } + + // Request services from Consul + consulServices, _, err := c.Service(q.ServiceName, q.ServiceTag, &queryOptions) + if err != nil { + return err + } + if len(consulServices) == 0 { + p.Log.Debugf("Queried Consul for Service (%s, %s) but did not find any instances", q.ServiceName, q.ServiceTag) + continue + } + p.Log.Debugf("Queried Consul for Service (%s, %s) and found %d instances", q.ServiceName, q.ServiceTag, len(consulServices)) + + for _, consulService := range consulServices { + uaa, err := p.getConsulServiceURL(q, consulService) + if err != nil { + message := fmt.Sprintf("Unable to get scrape URLs from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, err) + if q.lastQueryFailed { + p.Log.Debug(message) + } else { + p.Log.Warn(message) + } + q.lastQueryFailed = true + break + } + if q.lastQueryFailed { + p.Log.Infof("Created scrape URLs from Consul for Service (%s, %s)", q.ServiceName, q.ServiceTag) + } + q.lastQueryFailed = false + p.Log.Debugf("Adding scrape URL from Consul for Service (%s, %s): %s", q.ServiceName, q.ServiceTag, uaa.URL.String()) + consulServiceURLs[uaa.URL.String()] = *uaa + } + } + + p.lock.Lock() + p.consulServices = consulServiceURLs + p.lock.Unlock() + + return nil +} + +func (p *Prometheus) getConsulServiceURL(q *ConsulQuery, s *api.CatalogService) (*URLAndAddress, error) { + var buffer bytes.Buffer + buffer.Reset() + err := q.serviceURLTemplate.Execute(&buffer, s) + if err != nil { + return nil, err 
+ } + serviceURL, err := url.Parse(buffer.String()) + if err != nil { + return nil, err + } + + extraTags := make(map[string]string) + for tagName, tagTemplate := range q.serviceExtraTagsTemplate { + buffer.Reset() + err = tagTemplate.Execute(&buffer, s) + if err != nil { + return nil, err + } + extraTags[tagName] = buffer.String() + } + + p.Log.Debugf("Will scrape metrics from Consul Service %s", serviceURL.String()) + + return &URLAndAddress{ + URL: serviceURL, + OriginalURL: serviceURL, + Tags: extraTags, + }, nil +} diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go index 16f69cbd14228..f7a78dd8c0497 100644 --- a/plugins/inputs/prometheus/kubernetes.go +++ b/plugins/inputs/prometheus/kubernetes.go @@ -2,48 +2,69 @@ package prometheus import ( "context" + "crypto/tls" + "encoding/json" "fmt" - "io/ioutil" - "log" "net" + "net/http" "net/url" + "os" "os/user" "path/filepath" - "sync" "time" - "github.com/ericchiang/k8s" - corev1 "github.com/ericchiang/k8s/apis/core/v1" "github.com/ghodss/yaml" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" ) -type payload struct { - eventype string - pod *corev1.Pod +type podMetadata struct { + ResourceVersion string `json:"resourceVersion"` + SelfLink string `json:"selfLink"` } +type podResponse struct { + Kind string `json:"kind"` + APIVersion string `json:"apiVersion"` + Metadata podMetadata `json:"metadata"` + Items []*corev1.Pod `json:"items,string,omitempty"` +} + +const cAdvisorPodListDefaultInterval = 60 + // loadClient parses a kubeconfig from a file and returns a Kubernetes // client. It does not support extensions or client auth providers. -func loadClient(kubeconfigPath string) (*k8s.Client, error) { - data, err := ioutil.ReadFile(kubeconfigPath) +func loadClient(kubeconfigPath string) (*kubernetes.Clientset, error) { + data, err := os.ReadFile(kubeconfigPath) if err != nil { return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err) } // Unmarshal YAML into a Kubernetes config object. 
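+	// The kubeconfig YAML is decoded directly into a client-go rest.Config,
+	// not the clientcmd kubeconfig schema, consistent with the function
+	// comment above: extensions and client auth providers are not supported.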
- var config k8s.Config + var config rest.Config if err := yaml.Unmarshal(data, &config); err != nil { return nil, err } - return k8s.NewClient(&config) + return kubernetes.NewForConfig(&config) } -func (p *Prometheus) start(ctx context.Context) error { - client, err := k8s.NewInClusterClient() +func (p *Prometheus) startK8s(ctx context.Context) error { + config, err := rest.InClusterConfig() + if err != nil { + return fmt.Errorf("failed to get InClusterConfig - %v", err) + } + client, err := kubernetes.NewForConfig(config) if err != nil { u, err := user.Current() if err != nil { - return fmt.Errorf("Failed to get current user - %v", err) + return fmt.Errorf("failed to get current user - %v", err) } configLocation := filepath.Join(u.HomeDir, ".kube/config") @@ -56,8 +77,6 @@ func (p *Prometheus) start(ctx context.Context) error { } } - p.wg = sync.WaitGroup{} - p.wg.Add(1) go func() { defer p.wg.Done() @@ -66,9 +85,13 @@ func (p *Prometheus) start(ctx context.Context) error { case <-ctx.Done(): return case <-time.After(time.Second): - err := p.watch(ctx, client) - if err != nil { - p.Log.Errorf("Unable to watch resources: %s", err.Error()) + if p.isNodeScrapeScope { + err = p.cAdvisor(ctx, config.BearerToken) + if err != nil { + p.Log.Errorf("Unable to monitor pods with node scrape scope: %s", err.Error()) + } + } else { + p.watchPod(ctx, client) } } } @@ -81,126 +104,308 @@ func (p *Prometheus) start(ctx context.Context) error { // (without the scrape annotations). K8s may re-assign the old pod ip to the non-scrape // pod, causing errors in the logs. This is only true if the pod going offline is not // directed to do so by K8s. -func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { +func (p *Prometheus) watchPod(ctx context.Context, clientset *kubernetes.Clientset) { + var resyncinterval time.Duration - selectors := podSelector(p) - - pod := &corev1.Pod{} - watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}, selectors...) - if err != nil { - return err + if p.CacheRefreshInterval != 0 { + resyncinterval = time.Duration(p.CacheRefreshInterval) * time.Minute + } else { + resyncinterval = 60 * time.Minute } - defer watcher.Close() - for { - select { - case <-ctx.Done(): - return nil - default: - pod = &corev1.Pod{} - // An error here means we need to reconnect the watcher. - eventType, err := watcher.Next(pod) + informerfactory := informers.NewSharedInformerFactory(clientset, resyncinterval) + + podinformer := informerfactory.Core().V1().Pods() + podinformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(newObj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(newObj) if err != nil { - return err + p.Log.Errorf("getting key from cache %s\n", err.Error()) } - // If the pod is not "ready", there will be no ip associated with it. 
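+	// Each informer handler below (add, update, delete) re-checks the scrape
+	// annotation, readiness, namespace, and label/field selectors before
+	// registering or unregistering a pod.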
- if pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] != "true" || - !podReady(pod.Status.GetContainerStatuses()) { - continue + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + p.Log.Errorf("splitting key into namespace and name %s\n", err.Error()) } - switch eventType { - case k8s.EventAdded: + pod, _ := clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + + if pod.Annotations["prometheus.io/scrape"] == "true" && + podReady(pod.Status.ContainerStatuses) && + podHasMatchingNamespace(pod, p) && + podHasMatchingLabelSelector(pod, p.podLabelSelector) && + podHasMatchingFieldSelector(pod, p.podFieldSelector) { registerPod(pod, p) - case k8s.EventModified: - // To avoid multiple actions for each event, unregister on the first event - // in the delete sequence, when the containers are still "ready". - if pod.Metadata.GetDeletionTimestamp() != nil { + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + newKey, err := cache.MetaNamespaceKeyFunc(newObj) + if err != nil { + p.Log.Errorf("getting key from cache %s\n", err.Error()) + } + + newNamespace, newName, err := cache.SplitMetaNamespaceKey(newKey) + if err != nil { + p.Log.Errorf("splitting key into namespace and name %s\n", err.Error()) + } + + newPod, _ := clientset.CoreV1().Pods(newNamespace).Get(ctx, newName, metav1.GetOptions{}) + + if newPod.Annotations["prometheus.io/scrape"] == "true" && + podReady(newPod.Status.ContainerStatuses) && + podHasMatchingNamespace(newPod, p) && + podHasMatchingLabelSelector(newPod, p.podLabelSelector) && + podHasMatchingFieldSelector(newPod, p.podFieldSelector) { + if newPod.GetDeletionTimestamp() == nil { + registerPod(newPod, p) + } + } + + oldKey, err := cache.MetaNamespaceKeyFunc(oldObj) + if err != nil { + p.Log.Errorf("getting key from cache %s\n", err.Error()) + } + + oldNamespace, oldName, err := cache.SplitMetaNamespaceKey(oldKey) + if err != nil { + p.Log.Errorf("splitting key into namespace and name %s\n", err.Error()) + } + + oldPod, _ := clientset.CoreV1().Pods(oldNamespace).Get(ctx, oldName, metav1.GetOptions{}) + + if oldPod.Annotations["prometheus.io/scrape"] == "true" && + podReady(oldPod.Status.ContainerStatuses) && + podHasMatchingNamespace(oldPod, p) && + podHasMatchingLabelSelector(oldPod, p.podLabelSelector) && + podHasMatchingFieldSelector(oldPod, p.podFieldSelector) { + if oldPod.GetDeletionTimestamp() != nil { + unregisterPod(oldPod, p) + } + } + }, + DeleteFunc: func(oldObj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(oldObj) + if err != nil { + p.Log.Errorf("getting key from cache %s", err.Error()) + } + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + p.Log.Errorf("splitting key into namespace and name %s\n", err.Error()) + } + + pod, _ := clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{}) + + if pod.Annotations["prometheus.io/scrape"] == "true" && + podReady(pod.Status.ContainerStatuses) && + podHasMatchingNamespace(pod, p) && + podHasMatchingLabelSelector(pod, p.podLabelSelector) && + podHasMatchingFieldSelector(pod, p.podFieldSelector) { + if pod.GetDeletionTimestamp() != nil { unregisterPod(pod, p) - } else { - registerPod(pod, p) } } + }, + }) + + informerfactory.Start(wait.NeverStop) + informerfactory.WaitForCacheSync(wait.NeverStop) + + <-ctx.Done() +} + +func (p *Prometheus) cAdvisor(ctx context.Context, bearerToken string) error { + // The request will be the same each time + podsURL := fmt.Sprintf("https://%s:10250/pods", p.NodeIP) + req, err := 
http.NewRequest("GET", podsURL, nil) + if err != nil { + return fmt.Errorf("error when creating request to %s to get pod list: %w", podsURL, err) + } + req.Header.Set("Authorization", "Bearer "+bearerToken) + req.Header.Add("Accept", "application/json") + + // Update right away so code is not waiting the length of the specified scrape interval initially + err = updateCadvisorPodList(p, req) + if err != nil { + return fmt.Errorf("error initially updating pod list: %w", err) + } + + scrapeInterval := cAdvisorPodListDefaultInterval + if p.PodScrapeInterval != 0 { + scrapeInterval = p.PodScrapeInterval + } + + for { + select { + case <-ctx.Done(): + return nil + case <-time.After(time.Duration(scrapeInterval) * time.Second): + err := updateCadvisorPodList(p, req) + if err != nil { + return fmt.Errorf("error updating pod list: %w", err) + } } } } -func podReady(statuss []*corev1.ContainerStatus) bool { - if len(statuss) == 0 { - return false +func updateCadvisorPodList(p *Prometheus, req *http.Request) error { + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + httpClient := http.Client{} + + resp, err := httpClient.Do(req) + if err != nil { + return fmt.Errorf("error when making request for pod list: %w", err) } - for _, cs := range statuss { - if !cs.GetReady() { - return false + + // If err is nil, still check response code + if resp.StatusCode != 200 { + return fmt.Errorf("error when making request for pod list with status %s", resp.Status) + } + + defer resp.Body.Close() + + cadvisorPodsResponse := podResponse{} + + // Will have expected type errors for some parts of corev1.Pod struct for some unused fields + // Instead have nil checks for every used field in case of incorrect decoding + if err := json.NewDecoder(resp.Body).Decode(&cadvisorPodsResponse); err != nil { + return fmt.Errorf("decoding response failed: %v", err) + } + pods := cadvisorPodsResponse.Items + + // Updating pod list to be latest cadvisor response + p.lock.Lock() + p.kubernetesPods = make(map[string]URLAndAddress) + + // Register pod only if it has an annotation to scrape, if it is ready, + // and if namespace and selectors are specified and match + for _, pod := range pods { + if necessaryPodFieldsArePresent(pod) && + pod.Annotations["prometheus.io/scrape"] == "true" && + podReady(pod.Status.ContainerStatuses) && + podHasMatchingNamespace(pod, p) && + podHasMatchingLabelSelector(pod, p.podLabelSelector) && + podHasMatchingFieldSelector(pod, p.podFieldSelector) { + registerPod(pod, p) } } - return true + p.lock.Unlock() + + // No errors + return nil } -func podSelector(p *Prometheus) []k8s.Option { - options := []k8s.Option{} +func necessaryPodFieldsArePresent(pod *corev1.Pod) bool { + return pod.Annotations != nil && + pod.Labels != nil && + pod.Status.ContainerStatuses != nil +} - if len(p.KubernetesLabelSelector) > 0 { - options = append(options, k8s.QueryParam("labelSelector", p.KubernetesLabelSelector)) +/* See the docs on kubernetes label selectors: + * https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + */ +func podHasMatchingLabelSelector(pod *corev1.Pod, labelSelector labels.Selector) bool { + if labelSelector == nil { + return true } - if len(p.KubernetesFieldSelector) > 0 { - options = append(options, k8s.QueryParam("fieldSelector", p.KubernetesFieldSelector)) + var labelsSet labels.Set = pod.Labels + return labelSelector.Matches(labelsSet) +} + +/* See ToSelectableFields() for list of fields that are selectable: + * 
https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go + * See docs on kubernetes field selectors: + * https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ + */ +func podHasMatchingFieldSelector(pod *corev1.Pod, fieldSelector fields.Selector) bool { + if fieldSelector == nil { + return true } - return options + fieldsSet := make(fields.Set) + fieldsSet["spec.nodeName"] = pod.Spec.NodeName + fieldsSet["spec.restartPolicy"] = string(pod.Spec.RestartPolicy) + fieldsSet["spec.schedulerName"] = pod.Spec.SchedulerName + fieldsSet["spec.serviceAccountName"] = pod.Spec.ServiceAccountName + fieldsSet["status.phase"] = string(pod.Status.Phase) + fieldsSet["status.podIP"] = pod.Status.PodIP + fieldsSet["status.nominatedNodeName"] = pod.Status.NominatedNodeName + + return fieldSelector.Matches(fieldsSet) +} + +/* + * If a namespace is specified and the pod doesn't have that namespace, return false + * Else return true + */ +func podHasMatchingNamespace(pod *corev1.Pod, p *Prometheus) bool { + return !(p.PodNamespace != "" && pod.Namespace != p.PodNamespace) +} + +func podReady(statuses []corev1.ContainerStatus) bool { + if len(statuses) == 0 { + return false + } + for _, cs := range statuses { + if !cs.Ready { + return false + } + } + return true } func registerPod(pod *corev1.Pod, p *Prometheus) { if p.kubernetesPods == nil { p.kubernetesPods = map[string]URLAndAddress{} } - targetURL := getScrapeURL(pod) - if targetURL == nil { + targetURL, err := getScrapeURL(pod) + if err != nil { + p.Log.Errorf("could not parse URL: %s", err) + return + } else if targetURL == nil { return } - log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL) + p.Log.Debugf("will scrape metrics from %q", targetURL.String()) // add annotation as metrics tags - tags := pod.GetMetadata().GetAnnotations() + tags := pod.Annotations if tags == nil { tags = map[string]string{} } - tags["pod_name"] = pod.GetMetadata().GetName() - tags["namespace"] = pod.GetMetadata().GetNamespace() + tags["pod_name"] = pod.Name + tags["namespace"] = pod.Namespace // add labels as metrics tags - for k, v := range pod.GetMetadata().GetLabels() { + for k, v := range pod.Labels { tags[k] = v } - URL, err := url.Parse(*targetURL) - if err != nil { - log.Printf("E! 
[inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error()) - return + podURL := p.AddressToURL(targetURL, targetURL.Hostname()) + + // Locks earlier if using cAdvisor calls - makes a new list each time + // rather than updating and removing from the same list + if !p.isNodeScrapeScope { + p.lock.Lock() + defer p.lock.Unlock() } - podURL := p.AddressToURL(URL, URL.Hostname()) - p.lock.Lock() p.kubernetesPods[podURL.String()] = URLAndAddress{ URL: podURL, - Address: URL.Hostname(), - OriginalURL: URL, + Address: targetURL.Hostname(), + OriginalURL: targetURL, Tags: tags, } - p.lock.Unlock() } -func getScrapeURL(pod *corev1.Pod) *string { - ip := pod.Status.GetPodIP() +func getScrapeURL(pod *corev1.Pod) (*url.URL, error) { + ip := pod.Status.PodIP if ip == "" { // return as if scrape was disabled, we will be notified again once the pod // has an IP - return nil + return nil, nil } - scheme := pod.GetMetadata().GetAnnotations()["prometheus.io/scheme"] - path := pod.GetMetadata().GetAnnotations()["prometheus.io/path"] - port := pod.GetMetadata().GetAnnotations()["prometheus.io/port"] + scheme := pod.Annotations["prometheus.io/scheme"] + pathAndQuery := pod.Annotations["prometheus.io/path"] + port := pod.Annotations["prometheus.io/port"] if scheme == "" { scheme = "http" @@ -208,34 +413,35 @@ func getScrapeURL(pod *corev1.Pod) *string { if port == "" { port = "9102" } - if path == "" { - path = "/metrics" + if pathAndQuery == "" { + pathAndQuery = "/metrics" } - u := &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(ip, port), - Path: path, + base, err := url.Parse(pathAndQuery) + if err != nil { + return nil, err } - x := u.String() + base.Scheme = scheme + base.Host = net.JoinHostPort(ip, port) - return &x + return base, nil } func unregisterPod(pod *corev1.Pod, p *Prometheus) { - url := getScrapeURL(pod) - if url == nil { + targetURL, err := getScrapeURL(pod) + if err != nil { + p.Log.Errorf("failed to parse url: %s", err) + return + } else if targetURL == nil { return } - log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q", - pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace()) - p.lock.Lock() defer p.lock.Unlock() - if _, ok := p.kubernetesPods[*url]; ok { - delete(p.kubernetesPods, *url) - log.Printf("D! 
[inputs.prometheus] will stop scraping for %q", *url) + if _, ok := p.kubernetesPods[targetURL.String()]; ok { + p.Log.Debugf("registered a delete request for %q in namespace %q", pod.Name, pod.Namespace) + delete(p.kubernetesPods, targetURL.String()) + p.Log.Debugf("will stop scraping for %q", targetURL.String()) } } diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go index 8568ac946437e..b763cd14825b2 100644 --- a/plugins/inputs/prometheus/kubernetes_test.go +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -1,155 +1,184 @@ package prometheus import ( - "github.com/ericchiang/k8s" "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" - v1 "github.com/ericchiang/k8s/apis/core/v1" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" ) func TestScrapeURLNoAnnotations(t *testing.T) { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} - p.GetMetadata().Annotations = map[string]string{} - url := getScrapeURL(p) - assert.Nil(t, url) + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} + p.Annotations = map[string]string{} + url, err := getScrapeURL(p) + require.NoError(t, err) + require.Nil(t, url) } func TestScrapeURLAnnotationsNoScrape(t *testing.T) { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} - p.Metadata.Name = str("myPod") - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "false"} - url := getScrapeURL(p) - assert.Nil(t, url) + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}} + p.Name = "myPod" + p.Annotations = map[string]string{"prometheus.io/scrape": "false"} + url, err := getScrapeURL(p) + require.NoError(t, err) + require.Nil(t, url) } func TestScrapeURLAnnotations(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/metrics", *url) + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} + url, err := getScrapeURL(p) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPort(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9000/metrics", *url) + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} + url, err := getScrapeURL(p) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9000/metrics", url.String()) } func TestScrapeURLAnnotationsCustomPath(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} + url, err := getScrapeURL(p) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) } func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} - url := getScrapeURL(p) - assert.Equal(t, "http://127.0.0.1:9102/mymetrics", 
*url) + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} + url, err := getScrapeURL(p) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/mymetrics", url.String()) +} + +func TestScrapeURLAnnotationsCustomPathWithQueryParameters(t *testing.T) { + p := pod() + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics?format=prometheus"} + url, err := getScrapeURL(p) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics?format=prometheus", url.String()) +} + +func TestScrapeURLAnnotationsCustomPathWithFragment(t *testing.T) { + p := pod() + p.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/v1/agent/metrics#prometheus"} + url, err := getScrapeURL(p) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:9102/v1/agent/metrics#prometheus", url.String()) } func TestAddPod(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - assert.Equal(t, 1, len(prom.kubernetesPods)) + require.Equal(t, 1, len(prom.kubernetesPods)) } func TestAddMultipleDuplicatePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - p.Metadata.Name = str("Pod2") + p.Name = "Pod2" registerPod(p, prom) - assert.Equal(t, 1, len(prom.kubernetesPods)) + require.Equal(t, 1, len(prom.kubernetesPods)) } func TestAddMultiplePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) - p.Metadata.Name = str("Pod2") - p.Status.PodIP = str("127.0.0.2") + p.Name = "Pod2" + p.Status.PodIP = "127.0.0.2" registerPod(p, prom) - assert.Equal(t, 2, len(prom.kubernetesPods)) + require.Equal(t, 2, len(prom.kubernetesPods)) } func TestDeletePods(t *testing.T) { prom := &Prometheus{Log: testutil.Logger{}} p := pod() - p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + p.Annotations = map[string]string{"prometheus.io/scrape": "true"} registerPod(p, prom) unregisterPod(p, prom) - assert.Equal(t, 0, len(prom.kubernetesPods)) -} - -func TestPodSelector(t *testing.T) { - - cases := []struct { - expected []k8s.Option - labelselector string - fieldselector string - }{ - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1=val1,key2=val2,key3"), - k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"), - }, - labelselector: "key1=val1,key2=val2,key3", - fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com", - }, - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1"), - k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"), - }, - labelselector: "key1", - fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com", - }, - { - expected: []k8s.Option{ - k8s.QueryParam("labelSelector", "key1"), - k8s.QueryParam("fieldSelector", "somefield"), - }, - labelselector: "key1", - fieldselector: "somefield", - }, - } - - for _, c := range cases { - prom := &Prometheus{ - Log: testutil.Logger{}, - KubernetesLabelSelector: c.labelselector, - 
KubernetesFieldSelector: c.fieldselector, - } - - output := podSelector(prom) - - assert.Equal(t, len(output), len(c.expected)) - } -} - -func pod() *v1.Pod { - p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}, Spec: &v1.PodSpec{}} - p.Status.PodIP = str("127.0.0.1") - p.Metadata.Name = str("myPod") - p.Metadata.Namespace = str("default") - return p + require.Equal(t, 0, len(prom.kubernetesPods)) +} + +func TestPodHasMatchingNamespace(t *testing.T) { + prom := &Prometheus{Log: testutil.Logger{}, PodNamespace: "default"} + + pod := pod() + pod.Name = "Pod1" + pod.Namespace = "default" + shouldMatch := podHasMatchingNamespace(pod, prom) + require.Equal(t, true, shouldMatch) + + pod.Name = "Pod2" + pod.Namespace = "namespace" + shouldNotMatch := podHasMatchingNamespace(pod, prom) + require.Equal(t, false, shouldNotMatch) +} + +func TestPodHasMatchingLabelSelector(t *testing.T) { + labelSelectorString := "label0==label0,label1=label1,label2!=label,label3 in (label1,label2, label3),label4 notin (label1, label2,label3),label5,!label6" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesLabelSelector: labelSelectorString} + + pod := pod() + pod.Labels = make(map[string]string) + pod.Labels["label0"] = "label0" + pod.Labels["label1"] = "label1" + pod.Labels["label2"] = "label2" + pod.Labels["label3"] = "label3" + pod.Labels["label4"] = "label4" + pod.Labels["label5"] = "label5" + + labelSelector, err := labels.Parse(prom.KubernetesLabelSelector) + require.Equal(t, err, nil) + require.Equal(t, true, podHasMatchingLabelSelector(pod, labelSelector)) } -func str(x string) *string { - return &x +func TestPodHasMatchingFieldSelector(t *testing.T) { + fieldSelectorString := "status.podIP=127.0.0.1,spec.restartPolicy=Always,spec.NodeName!=nodeName" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + pod := pod() + pod.Spec.RestartPolicy = "Always" + pod.Spec.NodeName = "node1000" + + fieldSelector, err := fields.ParseSelector(prom.KubernetesFieldSelector) + require.Equal(t, err, nil) + require.Equal(t, true, podHasMatchingFieldSelector(pod, fieldSelector)) +} + +func TestInvalidFieldSelector(t *testing.T) { + fieldSelectorString := "status.podIP=127.0.0.1,spec.restartPolicy=Always,spec.NodeName!=nodeName,spec.nodeName" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + pod := pod() + pod.Spec.RestartPolicy = "Always" + pod.Spec.NodeName = "node1000" + + _, err := fields.ParseSelector(prom.KubernetesFieldSelector) + require.NotEqual(t, err, nil) +} + +func pod() *corev1.Pod { + p := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{}, Status: corev1.PodStatus{}, Spec: corev1.PodSpec{}} + p.Status.PodIP = "127.0.0.1" + p.Name = "myPod" + p.Namespace = "default" + return p } diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 0726c87713b0a..49bfa2afa4d27 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -1,8 +1,5 @@ package prometheus -// Parser inspired from -// https://github.com/prometheus/prom2json/blob/master/main.go - import ( "bufio" "bytes" @@ -13,170 +10,29 @@ import ( "net/http" "time" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + 
"github.com/influxdata/telegraf/plugins/parsers/prometheus/common" ) -// Parse returns a slice of Metrics from a text representation of a -// metrics -func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) { - var metrics []telegraf.Metric +func Parse(buf []byte, header http.Header, ignoreTimestamp bool) ([]telegraf.Metric, error) { var parser expfmt.TextParser - // parse even if the buffer begins with a newline - buf = bytes.TrimPrefix(buf, []byte("\n")) - // Read raw data - buffer := bytes.NewBuffer(buf) - reader := bufio.NewReader(buffer) - - mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) - // Prepare output - metricFamilies := make(map[string]*dto.MetricFamily) - - if err == nil && mediatype == "application/vnd.google.protobuf" && - params["encoding"] == "delimited" && - params["proto"] == "io.prometheus.client.MetricFamily" { - for { - mf := &dto.MetricFamily{} - if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { - if ierr == io.EOF { - break - } - return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr) - } - metricFamilies[mf.GetName()] = mf - } - } else { - metricFamilies, err = parser.TextToMetricFamilies(reader) - if err != nil { - return nil, fmt.Errorf("reading text format failed: %s", err) - } - } - - // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds - now := time.Now() - // read metrics - for metricName, mf := range metricFamilies { - for _, m := range mf.Metric { - // reading tags - tags := makeLabels(m) - - if mf.GetType() == dto.MetricType_SUMMARY { - // summary metric - telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType(), now) - metrics = append(metrics, telegrafMetrics...) - } else if mf.GetType() == dto.MetricType_HISTOGRAM { - // histogram metric - telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType(), now) - metrics = append(metrics, telegrafMetrics...) 
- } else { - // standard metric - // reading fields - fields := getNameAndValueV2(m, metricName) - // converting to telegraf metric - if len(fields) > 0 { - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType())) - if err == nil { - metrics = append(metrics, metric) - } - } - } - } - } - - return metrics, err -} - -// Get Quantiles for summary metric & Buckets for histogram -func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { var metrics []telegraf.Metric - fields := make(map[string]interface{}) - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) - fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) - met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, met) - } - - for _, q := range m.GetSummary().Quantile { - newTags := tags - fields = make(map[string]interface{}) - - newTags["quantile"] = fmt.Sprint(q.GetQuantile()) - fields[metricName] = float64(q.GetValue()) - - quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, quantileMetric) - } - } - return metrics -} - -// Get Buckets from histogram metric -func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { - var metrics []telegraf.Metric - fields := make(map[string]interface{}) - var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { - t = time.Unix(0, *m.TimestampMs*1000000) - } else { - t = now - } - fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) - fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) - - met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, met) - } - - for _, b := range m.GetHistogram().Bucket { - newTags := tags - fields = make(map[string]interface{}) - newTags["le"] = fmt.Sprint(b.GetUpperBound()) - fields[metricName+"_bucket"] = float64(b.GetCumulativeCount()) - - histogramMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) - if err == nil { - metrics = append(metrics, histogramMetric) - } - } - return metrics -} - -// Parse returns a slice of Metrics from a text representation of a -// metrics -func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { - var metrics []telegraf.Metric - var parser expfmt.TextParser + var err error // parse even if the buffer begins with a newline buf = bytes.TrimPrefix(buf, []byte("\n")) // Read raw data buffer := bytes.NewBuffer(buf) reader := bufio.NewReader(buffer) - mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) // Prepare output metricFamilies := make(map[string]*dto.MetricFamily) - if err == nil && mediatype == "application/vnd.google.protobuf" && - params["encoding"] == "delimited" && - params["proto"] == "io.prometheus.client.MetricFamily" { + if isProtobuf(header) { for { mf := &dto.MetricFamily{} if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { @@ -194,26 +50,27 @@ func Parse(buf []byte, header http.Header) 
([]telegraf.Metric, error) { } } - // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds now := time.Now() // read metrics for metricName, mf := range metricFamilies { for _, m := range mf.Metric { // reading tags - tags := makeLabels(m) + tags := common.MakeLabels(m, nil) + // reading fields var fields map[string]interface{} if mf.GetType() == dto.MetricType_SUMMARY { // summary metric fields = makeQuantiles(m) fields["count"] = float64(m.GetSummary().GetSampleCount()) + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["sum"] = float64(m.GetSummary().GetSampleSum()) } else if mf.GetType() == dto.MetricType_HISTOGRAM { // histogram metric fields = makeBuckets(m) fields["count"] = float64(m.GetHistogram().GetSampleCount()) + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["sum"] = float64(m.GetHistogram().GetSampleSum()) - } else { // standard metric fields = getNameAndValue(m) @@ -221,15 +78,13 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { // converting to telegraf metric if len(fields) > 0 { var t time.Time - if m.TimestampMs != nil && *m.TimestampMs > 0 { + if !ignoreTimestamp && m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { t = now } - metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType())) - if err == nil { - metrics = append(metrics, metric) - } + m := metric.New(metricName, tags, fields, t, common.ValueType(mf.GetType())) + metrics = append(metrics, m) } } } @@ -237,19 +92,15 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { return metrics, err } -func valueType(mt dto.MetricType) telegraf.ValueType { - switch mt { - case dto.MetricType_COUNTER: - return telegraf.Counter - case dto.MetricType_GAUGE: - return telegraf.Gauge - case dto.MetricType_SUMMARY: - return telegraf.Summary - case dto.MetricType_HISTOGRAM: - return telegraf.Histogram - default: - return telegraf.Untyped +func isProtobuf(header http.Header) bool { + mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) + if err != nil { + return false } + + return mediatype == "application/vnd.google.protobuf" && + params["encoding"] == "delimited" && + params["proto"] == "io.prometheus.client.MetricFamily" } // Get Quantiles from summary metric @@ -257,6 +108,7 @@ func makeQuantiles(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) for _, q := range m.GetSummary().Quantile { if !math.IsNaN(q.GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields[fmt.Sprint(q.GetQuantile())] = float64(q.GetValue()) } } @@ -272,49 +124,24 @@ func makeBuckets(m *dto.Metric) map[string]interface{} { return fields } -// Get labels from metric -func makeLabels(m *dto.Metric) map[string]string { - result := map[string]string{} - for _, lp := range m.Label { - result[lp.GetName()] = lp.GetValue() - } - return result -} - // Get name and value from metric func getNameAndValue(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) if m.Gauge != nil { if !math.IsNaN(m.GetGauge().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["gauge"] = float64(m.GetGauge().GetValue()) } } else if m.Counter != nil { if 
!math.IsNaN(m.GetCounter().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["counter"] = float64(m.GetCounter().GetValue()) } } else if m.Untyped != nil { if !math.IsNaN(m.GetUntyped().GetValue()) { + //nolint:unconvert // Conversion may be needed for float64 https://github.com/mdempsky/unconvert/issues/40 fields["value"] = float64(m.GetUntyped().GetValue()) } } return fields } - -// Get name and value from metric -func getNameAndValueV2(m *dto.Metric, metricName string) map[string]interface{} { - fields := make(map[string]interface{}) - if m.Gauge != nil { - if !math.IsNaN(m.GetGauge().GetValue()) { - fields[metricName] = float64(m.GetGauge().GetValue()) - } - } else if m.Counter != nil { - if !math.IsNaN(m.GetCounter().GetValue()) { - fields[metricName] = float64(m.GetCounter().GetValue()) - } - } else if m.Untyped != nil { - if !math.IsNaN(m.GetUntyped().GetValue()) { - fields[metricName] = float64(m.GetUntyped().GetValue()) - } - } - return fields -} diff --git a/plugins/inputs/prometheus/parser_test.go b/plugins/inputs/prometheus/parser_test.go index 7b2bfeca2e128..24470a441a6b3 100644 --- a/plugins/inputs/prometheus/parser_test.go +++ b/plugins/inputs/prometheus/parser_test.go @@ -1,15 +1,14 @@ package prometheus import ( + "fmt" "net/http" "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -var exptime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - const validUniqueGauge = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. # TYPE cadvisor_version_info gauge cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 @@ -20,9 +19,6 @@ const validUniqueCounter = `# HELP get_token_fail_count Counter of failed Token( get_token_fail_count 0 ` -const validUniqueLine = `# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source -` - const validUniqueSummary = `# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. # TYPE http_request_duration_microseconds summary http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 @@ -46,71 +42,16 @@ apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 ` -const validData = `# HELP cadvisor_version_info A metric with a constant '1' value labeled by kernel version, OS version, docker version, cadvisor version & cadvisor revision. -# TYPE cadvisor_version_info gauge -cadvisor_version_info{cadvisorRevision="",cadvisorVersion="",dockerVersion="1.8.2",kernelVersion="3.10.0-229.20.1.el7.x86_64",osVersion="CentOS Linux 7 (Core)"} 1 -# HELP go_gc_duration_seconds A summary of the GC invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 0.013534896000000001 -go_gc_duration_seconds{quantile="0.25"} 0.02469263 -go_gc_duration_seconds{quantile="0.5"} 0.033727822000000005 -go_gc_duration_seconds{quantile="0.75"} 0.03840335 -go_gc_duration_seconds{quantile="1"} 0.049956604 -go_gc_duration_seconds_sum 1970.341293002 -go_gc_duration_seconds_count 65952 -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
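The isProtobuf helper above replaces the inline media-type check. A self-contained sketch of the header shape it accepts; the Content-Type string is assembled by hand here purely for illustration:

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// The delimited-protobuf exposition Content-Type that isProtobuf matches.
	ct := "application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited"
	mediatype, params, err := mime.ParseMediaType(ct)
	if err != nil {
		panic(err)
	}
	// Prints "true": mediatype and both parameters match.
	fmt.Println(mediatype == "application/vnd.google.protobuf" &&
		params["encoding"] == "delimited" &&
		params["proto"] == "io.prometheus.client.MetricFamily")
}
```

Any parse error or mismatch falls back to the text parser, which is why isProtobuf simply returns false instead of propagating the error.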
-# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 552048.506 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 5.876804288e+06 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 5.876804288e+06 -http_request_duration_microseconds_sum{handler="prometheus"} 1.8909097205e+07 -http_request_duration_microseconds_count{handler="prometheus"} 9 -# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source -# TYPE get_token_fail_count counter -get_token_fail_count 0 -# HELP apiserver_request_latencies Response latency distribution in microseconds for each verb, resource and client. -# TYPE apiserver_request_latencies histogram -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="125000"} 1994 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="250000"} 1997 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="500000"} 2000 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="1e+06"} 2005 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="2e+06"} 2012 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="4e+06"} 2017 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="8e+06"} 2024 -apiserver_request_latencies_bucket{resource="bindings",verb="POST",le="+Inf"} 2025 -apiserver_request_latencies_sum{resource="bindings",verb="POST"} 1.02726334e+08 -apiserver_request_latencies_count{resource="bindings",verb="POST"} 2025 -` - -const prometheusMulti = ` -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - -const prometheusMultiSomeInvalid = ` -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu3, host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -cpu,cpu=cpu4 , usage_idle=99,usage_busy=1 -cpu,host=foo,datacenter=us-east usage_idle=99,usage_busy=1 -` - func TestParseValidPrometheus(t *testing.T) { // Gauge value - metrics, err := Parse([]byte(validUniqueGauge), http.Header{}) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "cadvisor_version_info", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + metrics, err := Parse([]byte(validUniqueGauge), http.Header{}, false) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "cadvisor_version_info", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "gauge": float64(1), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "osVersion": "CentOS Linux 7 (Core)", "cadvisorRevision": "", "cadvisorVersion": "", @@ -119,36 +60,36 @@ func TestParseValidPrometheus(t *testing.T) { }, metrics[0].Tags()) // Counter value - metrics, err = Parse([]byte(validUniqueCounter), http.Header{}) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "get_token_fail_count", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + metrics, err = 
Parse([]byte(validUniqueCounter), http.Header{}, false) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "get_token_fail_count", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "counter": float64(0), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Summary data //SetDefaultTags(map[string]string{}) - metrics, err = Parse([]byte(validUniqueSummary), http.Header{}) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + metrics, err = Parse([]byte(validUniqueSummary), http.Header{}, false) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "http_request_duration_microseconds", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "0.5": 552048.506, "0.9": 5.876804288e+06, "0.99": 5.876804288e+06, "count": 9.0, "sum": 1.8909097205e+07, }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"handler": "prometheus"}, metrics[0].Tags()) // histogram data - metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "apiserver_request_latencies", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + metrics, err = Parse([]byte(validUniqueHistogram), http.Header{}, false) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "apiserver_request_latencies", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "500000": 2000.0, "count": 2025.0, "sum": 1.02726334e+08, @@ -160,8 +101,42 @@ func TestParseValidPrometheus(t *testing.T) { "125000": 1994.0, "1e+06": 2005.0, }, metrics[0].Fields()) - assert.Equal(t, + require.Equal(t, map[string]string{"verb": "POST", "resource": "bindings"}, metrics[0].Tags()) +} +func TestMetricsWithTimestamp(t *testing.T) { + testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC) + testTimeUnix := testTime.UnixNano() / int64(time.Millisecond) + metricsWithTimestamps := fmt.Sprintf(` +# TYPE test_counter counter +test_counter{label="test"} 1 %d +`, testTimeUnix) + + // IgnoreTimestamp is false + metrics, err := Parse([]byte(metricsWithTimestamps), http.Header{}, false) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "test_counter", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ + "counter": float64(1), + }, metrics[0].Fields()) + require.Equal(t, map[string]string{ + "label": "test", + }, metrics[0].Tags()) + require.Equal(t, testTime, metrics[0].Time().UTC()) + + // IgnoreTimestamp is true + metrics, err = Parse([]byte(metricsWithTimestamps), http.Header{}, true) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "test_counter", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ + "counter": float64(1), + }, metrics[0].Fields()) + require.Equal(t, map[string]string{ + "label": "test", + }, metrics[0].Tags()) + require.WithinDuration(t, time.Now(), metrics[0].Time().UTC(), 5*time.Second) } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 70d72e0b0a379..4e6c8517dc99b 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -1,23 +1,36 @@ +//go:generate ../../../tools/readme_config_includer/generator package prometheus 
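TestMetricsWithTimestamp above builds its fixture by appending a millisecond timestamp after the sample value, which is the exposition format's optional trailing timestamp. A standalone sketch of that construction, reusing the test's own values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Exposition-format timestamps are Unix milliseconds after the value.
	testTime := time.Date(2020, time.October, 4, 17, 0, 0, 0, time.UTC)
	testTimeUnix := testTime.UnixNano() / int64(time.Millisecond)
	fmt.Printf("# TYPE test_counter counter\ntest_counter{label=%q} 1 %d\n", "test", testTimeUnix)
}
```

With ignore_timestamp = false the parser returns testTime for this sample; with ignore_timestamp = true it stamps the metric with the gather time instead.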
import ( "context" + _ "embed" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" + "os" + "strings" "sync" "time" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" + parserV2 "github.com/influxdata/telegraf/plugins/parsers/prometheus" ) -const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3` type Prometheus struct { // An array of urls to scrape metrics from. @@ -35,6 +48,9 @@ type Prometheus struct { // Field Selector/s for Kubernetes KubernetesFieldSelector string `toml:"kubernetes_field_selector"` + // Consul SD configuration + ConsulConfig ConsulConfig `toml:"consul"` + // Bearer Token authorization file path BearerToken string `toml:"bearer_token"` BearerTokenString string `toml:"bearer_token_string"` @@ -43,104 +59,88 @@ type Prometheus struct { Username string `toml:"username"` Password string `toml:"password"` - ResponseTimeout internal.Duration `toml:"response_timeout"` + ResponseTimeout config.Duration `toml:"response_timeout"` MetricVersion int `toml:"metric_version"` URLTag string `toml:"url_tag"` + IgnoreTimestamp bool `toml:"ignore_timestamp"` + tls.ClientConfig Log telegraf.Logger - client *http.Client + client *http.Client + headers map[string]string // Should we scrape Kubernetes services for prometheus annotations - MonitorPods bool `toml:"monitor_kubernetes_pods"` - PodNamespace string `toml:"monitor_kubernetes_pods_namespace"` - lock sync.Mutex - kubernetesPods map[string]URLAndAddress - cancel context.CancelFunc - wg sync.WaitGroup + MonitorPods bool `toml:"monitor_kubernetes_pods"` + PodScrapeScope string `toml:"pod_scrape_scope"` + NodeIP string `toml:"node_ip"` + PodScrapeInterval int `toml:"pod_scrape_interval"` + PodNamespace string `toml:"monitor_kubernetes_pods_namespace"` + lock sync.Mutex + kubernetesPods map[string]URLAndAddress + cancel context.CancelFunc + wg sync.WaitGroup + + // Only for monitor_kubernetes_pods=true and pod_scrape_scope="node" + podLabelSelector labels.Selector + podFieldSelector fields.Selector + isNodeScrapeScope bool + + // Only for monitor_kubernetes_pods=true + CacheRefreshInterval int `toml:"cache_refresh_interval"` + + // List of consul services to scrape + consulServices map[string]URLAndAddress } -var sampleConfig = ` - ## An array of urls to scrape metrics from. - urls = ["http://localhost:9100/metrics"] - - ## Metric version controls the mapping from Prometheus metrics into - ## Telegraf metrics. When using the prometheus_client output, use the same - ## value in both plugins to ensure metrics are round-tripped without - ## modification. - ## - ## example: metric_version = 1; deprecated in 1.13 - ## metric_version = 2; recommended version - # metric_version = 1 - - ## Url tag name (tag containing scrapped url. optional, default is "url") - # url_tag = "scrapeUrl" - - ## An array of Kubernetes services to scrape metrics from. 
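The new podLabelSelector and podFieldSelector struct fields hold selectors compiled once from the kubernetes_label_selector and kubernetes_field_selector strings and reused for every pod. A small sketch of that parse-and-match flow with k8s.io/apimachinery; the selector values are illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Parse once (Init does this), match many times (per pod).
	labelSelector, err := labels.Parse("env=dev,app=nginx")
	if err != nil {
		panic(err)
	}
	fieldSelector, err := fields.ParseSelector("spec.nodeName=node1")
	if err != nil {
		panic(err)
	}
	fmt.Println(labelSelector.Matches(labels.Set{"env": "dev", "app": "nginx"})) // true
	fmt.Println(fieldSelector.Matches(fields.Set{"spec.nodeName": "node1"}))     // true
}
```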
- # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - - ## Kubernetes config file to create client from. - # kube_config = "/path/to/kubernetes.config" - - ## Scrape Kubernetes pods for the following prometheus annotations: - ## - prometheus.io/scrape: Enable scraping for this pod - ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to - ## set this to 'https' & most likely set the tls config. - ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. - ## - prometheus.io/port: If port is not 9102 use this annotation - # monitor_kubernetes_pods = true - ## Restricts Kubernetes monitoring to a single namespace - ## ex: monitor_kubernetes_pods_namespace = "default" - # monitor_kubernetes_pods_namespace = "" - # label selector to target pods which have the label - # kubernetes_label_selector = "env=dev,app=nginx" - # field selector to target pods - # eg. To scrape pods on a specific node - # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" - - ## Use bearer token for authorization. ('bearer_token' takes priority) - # bearer_token = "/path/to/bearer/token" - ## OR - # bearer_token_string = "abc_123" - - ## HTTP Basic Authentication username and password. ('bearer_token' and - ## 'bearer_token_string' take priority) - # username = "" - # password = "" - - ## Specify timeout duration for slower prometheus clients (default is 3s) - # response_timeout = "3s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` - -func (p *Prometheus) SampleConfig() string { +func (*Prometheus) SampleConfig() string { return sampleConfig } -func (p *Prometheus) Description() string { - return "Read metrics from one or many prometheus clients" -} - func (p *Prometheus) Init() error { - if p.MetricVersion != 2 { - p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'") + // Config processing for node scrape scope for monitor_kubernetes_pods + p.isNodeScrapeScope = strings.EqualFold(p.PodScrapeScope, "node") + if p.isNodeScrapeScope { + // Need node IP to make cAdvisor call for pod list. Check if set in config and valid IP address + if p.NodeIP == "" || net.ParseIP(p.NodeIP) == nil { + p.Log.Infof("The config node_ip is empty or invalid. 
Using NODE_IP env var as default.") + + // Check if set as env var and is valid IP address + envVarNodeIP := os.Getenv("NODE_IP") + if envVarNodeIP == "" || net.ParseIP(envVarNodeIP) == nil { + return errors.New("the node_ip config and the environment variable NODE_IP are not set or invalid; " + + "cannot get pod list for monitor_kubernetes_pods using node scrape scope") + } + + p.NodeIP = envVarNodeIP + } + + // Parse label and field selectors - will be used to filter pods after cAdvisor call + var err error + p.podLabelSelector, err = labels.Parse(p.KubernetesLabelSelector) + if err != nil { + return fmt.Errorf("error parsing the specified label selector(s): %s", err.Error()) + } + p.podFieldSelector, err = fields.ParseSelector(p.KubernetesFieldSelector) + if err != nil { + return fmt.Errorf("error parsing the specified field selector(s): %s", err.Error()) + } + isValid, invalidSelector := fieldSelectorIsSupported(p.podFieldSelector) + if !isValid { + return fmt.Errorf("the field selector %s is not supported for pods", invalidSelector) + } + + p.Log.Infof("Using pod scrape scope at node level to get pod list using cAdvisor.") + p.Log.Infof("Using the label selector: %v and field selector: %v", p.podLabelSelector, p.podFieldSelector) } return nil } -var ErrProtocolError = errors.New("prometheus protocol error") - func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL { host := address if u.Port() != "" { @@ -168,40 +168,44 @@ type URLAndAddress struct { } func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { - allURLs := make(map[string]URLAndAddress, 0) + allURLs := make(map[string]URLAndAddress) for _, u := range p.URLs { - URL, err := url.Parse(u) + address, err := url.Parse(u) if err != nil { p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error()) continue } - allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL} + allURLs[address.String()] = URLAndAddress{URL: address, OriginalURL: address} } p.lock.Lock() defer p.lock.Unlock() + // add all services collected from consul + for k, v := range p.consulServices { + allURLs[k] = v + } // loop through all pods scraped via the prometheus annotation on the pods for k, v := range p.kubernetesPods { allURLs[k] = v } for _, service := range p.KubernetesServices { - URL, err := url.Parse(service) + address, err := url.Parse(service) if err != nil { return nil, err } - resolvedAddresses, err := net.LookupHost(URL.Hostname()) + resolvedAddresses, err := net.LookupHost(address.Hostname()) if err != nil { - p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error()) + p.Log.Errorf("Could not resolve %q, skipping it. 
Error: %s", address.Host, err.Error()) continue } for _, resolved := range resolvedAddresses { - serviceURL := p.AddressToURL(URL, resolved) + serviceURL := p.AddressToURL(address, resolved) allURLs[serviceURL.String()] = URLAndAddress{ URL: serviceURL, Address: resolved, - OriginalURL: URL, + OriginalURL: address, } } } @@ -217,6 +221,10 @@ func (p *Prometheus) Gather(acc telegraf.Accumulator) error { return err } p.client = client + p.headers = map[string]string{ + "User-Agent": internal.ProductToken(), + "Accept": acceptHeader, + } } var wg sync.WaitGroup @@ -249,7 +257,7 @@ func (p *Prometheus) createHTTPClient() (*http.Client, error) { TLSClientConfig: tlsCfg, DisableKeepAlives: true, }, - Timeout: p.ResponseTimeout.Duration, + Timeout: time.Duration(p.ResponseTimeout), } return client, nil @@ -282,7 +290,7 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return c, err }, }, - Timeout: p.ResponseTimeout.Duration, + Timeout: time.Duration(p.ResponseTimeout), } } else { if u.URL.Path == "" { @@ -294,10 +302,10 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error } } - req.Header.Add("Accept", acceptHeader) + p.addHeaders(req) if p.BearerToken != "" { - token, err := ioutil.ReadFile(p.BearerToken) + token, err := os.ReadFile(p.BearerToken) if err != nil { return err } @@ -310,8 +318,10 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error var resp *http.Response if u.URL.Scheme != "unix" { + //nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer` resp, err = p.client.Do(req) } else { + //nolint:bodyclose // False positive (because of if-else) - body will be closed in `defer` resp, err = uClient.Do(req) } if err != nil { @@ -323,15 +333,19 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("error reading body: %s", err) } if p.MetricVersion == 2 { - metrics, err = ParseV2(body, resp.Header) + parser := parserV2.Parser{ + Header: resp.Header, + IgnoreTimestamp: p.IgnoreTimestamp, + } + metrics, err = parser.Parse(body) } else { - metrics, err = Parse(body, resp.Header) + metrics, err = Parse(body, resp.Header, p.IgnoreTimestamp) } if err != nil { @@ -370,28 +384,66 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return nil } -// Start will start the Kubernetes scraping if enabled in the configuration -func (p *Prometheus) Start(a telegraf.Accumulator) error { +func (p *Prometheus) addHeaders(req *http.Request) { + for header, value := range p.headers { + req.Header.Add(header, value) + } +} + +/* Check if the field selector specified is valid. 
+ * See ToSelectableFields() for list of fields that are selectable: + * https://github.com/kubernetes/kubernetes/release-1.20/pkg/registry/core/pod/strategy.go + */ +func fieldSelectorIsSupported(fieldSelector fields.Selector) (bool, string) { + supportedFieldsToSelect := map[string]bool{ + "spec.nodeName": true, + "spec.restartPolicy": true, + "spec.schedulerName": true, + "spec.serviceAccountName": true, + "status.phase": true, + "status.podIP": true, + "status.nominatedNodeName": true, + } + + for _, requirement := range fieldSelector.Requirements() { + if !supportedFieldsToSelect[requirement.Field] { + return false, requirement.Field + } + } + + return true, "" +} + +// Start will start the Kubernetes and/or Consul scraping if enabled in the configuration +func (p *Prometheus) Start(_ telegraf.Accumulator) error { + var ctx context.Context + p.wg = sync.WaitGroup{} + ctx, p.cancel = context.WithCancel(context.Background()) + + if p.ConsulConfig.Enabled && len(p.ConsulConfig.Queries) > 0 { + if err := p.startConsul(ctx); err != nil { + return err + } + } if p.MonitorPods { - var ctx context.Context - ctx, p.cancel = context.WithCancel(context.Background()) - return p.start(ctx) + if err := p.startK8s(ctx); err != nil { + return err + } } return nil } func (p *Prometheus) Stop() { - if p.MonitorPods { - p.cancel() - } + p.cancel() p.wg.Wait() } func init() { inputs.Add("prometheus", func() telegraf.Input { return &Prometheus{ - ResponseTimeout: internal.Duration{Duration: time.Second * 3}, + ResponseTimeout: config.Duration(time.Second * 3), kubernetesPods: map[string]URLAndAddress{}, + consulServices: map[string]URLAndAddress{}, URLTag: "url", } }) diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index d33cba273c276..0e1049051ced3 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -6,13 +6,15 @@ import ( "net/http" "net/http/httptest" "net/url" + "os" "testing" "time" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/fields" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const sampleTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. @@ -29,8 +31,8 @@ go_gc_duration_seconds_count 7 go_goroutines 15 # HELP test_metric An untyped metric with a timestamp # TYPE test_metric untyped -test_metric{label="value"} 1.0 1490802350000 -` +test_metric{label="value"} 1.0 1490802350000` + const sampleSummaryTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 0.00010425500000000001 @@ -39,17 +41,17 @@ go_gc_duration_seconds{quantile="0.5"} 0.00015749400000000002 go_gc_duration_seconds{quantile="0.75"} 0.000331463 go_gc_duration_seconds{quantile="1"} 0.000667154 go_gc_duration_seconds_sum 0.0018183950000000002 -go_gc_duration_seconds_count 7 -` +go_gc_duration_seconds_count 7` + const sampleGaugeTextFormat = ` # HELP go_goroutines Number of goroutines that currently exist. 
# TYPE go_goroutines gauge -go_goroutines 15 1490802350000 -` +go_goroutines 15 1490802350000` func TestPrometheusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -64,17 +66,18 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) - assert.False(t, acc.HasTag("test_metric", "address")) - assert.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics") + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.False(t, acc.HasTag("test_metric", "address")) + require.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics") } func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -91,21 +94,22 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) - assert.True(t, acc.TagValue("test_metric", "address") == tsAddress) - assert.True(t, acc.TagValue("test_metric", "url") == ts.URL) + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.True(t, acc.TagValue("test_metric", "address") == tsAddress) + require.True(t, acc.TagValue("test_metric", "url") == ts.URL) } -func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { +func TestPrometheusGeneratesMetricsAlthoughFirstDNSFailsIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleTextFormat) + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -120,15 +124,16 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) - assert.True(t, acc.HasFloatField("go_goroutines", "gauge")) - assert.True(t, acc.HasFloatField("test_metric", "value")) - assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) + require.True(t, acc.HasFloatField("go_gc_duration_seconds", "count")) + require.True(t, acc.HasFloatField("go_goroutines", "gauge")) + 
require.True(t, acc.HasFloatField("test_metric", "value")) + require.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) } func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleSummaryTextFormat) + _, err := fmt.Fprintln(w, sampleSummaryTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -143,11 +148,10 @@ func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.TagSetValue("prometheus", "quantile") == "0") - assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) - assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) - assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") - + require.True(t, acc.TagSetValue("prometheus", "quantile") == "0") + require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) + require.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) + require.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") } func TestSummaryMayContainNaN(t *testing.T) { @@ -156,10 +160,11 @@ func TestSummaryMayContainNaN(t *testing.T) { go_gc_duration_seconds{quantile="0"} NaN go_gc_duration_seconds{quantile="1"} NaN go_gc_duration_seconds_sum 42.0 -go_gc_duration_seconds_count 42 -` +go_gc_duration_seconds_count 42` + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, data) + _, err := fmt.Fprintln(w, data) + require.NoError(t, err) })) defer ts.Close() @@ -215,7 +220,8 @@ go_gc_duration_seconds_count 42 func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, sampleGaugeTextFormat) + _, err := fmt.Fprintln(w, sampleGaugeTextFormat) + require.NoError(t, err) })) defer ts.Close() @@ -230,7 +236,77 @@ func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { err := acc.GatherError(p.Gather) require.NoError(t, err) - assert.True(t, acc.HasFloatField("prometheus", "go_goroutines")) - assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") - assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) + require.True(t, acc.HasFloatField("prometheus", "go_goroutines")) + require.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + require.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) +} + +func TestPrometheusGeneratesMetricsWithIgnoreTimestamp(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, err := fmt.Fprintln(w, sampleTextFormat) + require.NoError(t, err) + })) + defer ts.Close() + + p := &Prometheus{ + Log: testutil.Logger{}, + URLs: []string{ts.URL}, + URLTag: "url", + IgnoreTimestamp: true, + } + + var acc testutil.Accumulator + + err := acc.GatherError(p.Gather) + require.NoError(t, err) + + m, _ := acc.Get("test_metric") + require.WithinDuration(t, time.Now(), m.Time, 5*time.Second) +} + +func TestUnsupportedFieldSelector(t *testing.T) { + fieldSelectorString := "spec.containerName=container" + prom := &Prometheus{Log: testutil.Logger{}, KubernetesFieldSelector: fieldSelectorString} + + fieldSelector, _ := fields.ParseSelector(prom.KubernetesFieldSelector) + isValid, invalidSelector := fieldSelectorIsSupported(fieldSelector) + require.Equal(t, false, isValid) + 
require.Equal(t, "spec.containerName", invalidSelector) +} + +func TestInitConfigErrors(t *testing.T) { + p := &Prometheus{ + MetricVersion: 2, + Log: testutil.Logger{}, + URLs: nil, + URLTag: "url", + MonitorPods: true, + PodScrapeScope: "node", + PodScrapeInterval: 60, + } + + // Both invalid IP addresses + p.NodeIP = "10.240.0.0.0" + require.NoError(t, os.Setenv("NODE_IP", "10.000.0.0.0")) + err := p.Init() + require.Error(t, err) + expectedMessage := "the node_ip config and the environment variable NODE_IP are not set or invalid; cannot get pod list for monitor_kubernetes_pods using node scrape scope" + require.Equal(t, expectedMessage, err.Error()) + require.NoError(t, os.Setenv("NODE_IP", "10.000.0.0")) + + p.KubernetesLabelSelector = "label0==label0, label0 in (=)" + err = p.Init() + expectedMessage = "error parsing the specified label selector(s): unable to parse requirement: found '=', expected: ',', ')' or identifier" + require.Error(t, err, expectedMessage) + p.KubernetesLabelSelector = "label0==label" + + p.KubernetesFieldSelector = "field," + err = p.Init() + expectedMessage = "error parsing the specified field selector(s): invalid selector: 'field,'; can't understand 'field'" + require.Error(t, err, expectedMessage) + + p.KubernetesFieldSelector = "spec.containerNames=containerNames" + err = p.Init() + expectedMessage = "the field selector spec.containerNames is not supported for pods" + require.Error(t, err, expectedMessage) } diff --git a/plugins/inputs/prometheus/sample.conf b/plugins/inputs/prometheus/sample.conf new file mode 100644 index 0000000000000..34945169aa47b --- /dev/null +++ b/plugins/inputs/prometheus/sample.conf @@ -0,0 +1,90 @@ +# Read metrics from one or many prometheus clients +[[inputs.prometheus]] + ## An array of urls to scrape metrics from. + urls = ["http://localhost:9100/metrics"] + + ## Metric version controls the mapping from Prometheus metrics into Telegraf metrics. + ## See "Metric Format Configuration" in plugins/inputs/prometheus/README.md for details. + ## Valid options: 1, 2 + # metric_version = 1 + + ## Url tag name (tag containing scrapped url. optional, default is "url") + # url_tag = "url" + + ## Whether the timestamp of the scraped metrics will be ignored. + ## If set to true, the gather time will be used. + # ignore_timestamp = false + + ## An array of Kubernetes services to scrape metrics from. + # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to 'https' & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + + ## Get the list of pods to scrape with either the scope of + ## - cluster: the kubernetes watch api (default, no need to specify) + ## - node: the local cadvisor api; for scalability. Note that the config node_ip or the environment variable NODE_IP must be set to the host IP. + # pod_scrape_scope = "cluster" + + ## Only for node scrape scope: node IP of the node that telegraf is running on. + ## Either this config or the environment variable NODE_IP must be set. 
+ # node_ip = "10.180.1.1" + + ## Only for node scrape scope: interval in seconds for how often to get updated pod list for scraping. + ## Default is 60 seconds. + # pod_scrape_interval = 60 + + ## Restricts Kubernetes monitoring to a single namespace + ## ex: monitor_kubernetes_pods_namespace = "default" + # monitor_kubernetes_pods_namespace = "" + # label selector to target pods which have the label + # kubernetes_label_selector = "env=dev,app=nginx" + # field selector to target pods + # eg. To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + + # cache refresh interval to set the interval for re-sync of pods list. + # Default is 60 minutes. + # cache_refresh_interval = 60 + + ## Scrape Services available in Consul Catalog + # [inputs.prometheus.consul] + # enabled = true + # agent = "http://localhost:8500" + # query_interval = "5m" + + # [[inputs.prometheus.consul.query]] + # name = "a service name" + # tag = "a service tag" + # url = 'http://{{if ne .ServiceAddress ""}}{{.ServiceAddress}}{{else}}{{.Address}}{{end}}:{{.ServicePort}}/{{with .ServiceMeta.metrics_path}}{{.}}{{else}}metrics{{end}}' + # [inputs.prometheus.consul.query.tags] + # host = "{{.Node}}" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## HTTP Basic Authentication username and password. ('bearer_token' and + ## 'bearer_token_string' take priority) + # username = "" + # password = "" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + # response_timeout = "3s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/proxmox/README.md b/plugins/inputs/proxmox/README.md index 24e39ade24ea3..56aba3d528260 100644 --- a/plugins/inputs/proxmox/README.md +++ b/plugins/inputs/proxmox/README.md @@ -1,18 +1,20 @@ # Proxmox Input Plugin -The proxmox plugin gathers metrics about containers and VMs using the Proxmox API. +The proxmox plugin gathers metrics about containers and VMs using the Proxmox +API. Telegraf minimum version: Telegraf 1.16.0 -### Configuration: +## Configuration -```toml +```toml @sample.conf +# Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). [[inputs.proxmox]] ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. base_url = "https://localhost:8006/api2/json" api_token = "USER@REALM!TOKENID=UUID" - ## Optional node name config - # node_name = "localhost" + ## Node name, defaults to OS hostname + # node_name = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -25,13 +27,13 @@ Telegraf minimum version: Telegraf 1.16.0 response_timeout = "5s" ``` -#### Permissions +### Permissions -The plugin will need to have access to the Proxmox API. An API token -must be provided with the corresponding user being assigned at least the PVEAuditor -role on /. +The plugin will need to have access to the Proxmox API. An API token must be +provided with the corresponding user being assigned at least the PVEAuditor role +on /. -### Measurements & Fields: +## Metrics - proxmox - status @@ -50,16 +52,16 @@ role on /. 
- disk_free - disk_used_percentage -### Tags: +### Tags - - node_fqdn - FQDN of the node telegraf is running on - - vm_name - Name of the VM/container - - vm_fqdn - FQDN of the VM/container - - vm_type - Type of the VM/container (lxc, qemu) +- node_fqdn - FQDN of the node telegraf is running on +- vm_name - Name of the VM/container +- vm_fqdn - FQDN of the VM/container +- vm_type - Type of the VM/container (lxc, qemu) -### Example Output: +## Example Output -``` +```text $ ./telegraf --config telegraf.conf --input-filter proxmox --test > proxmox,host=pxnode,node_fqdn=pxnode.example.com,vm_fqdn=vm1.example.com,vm_name=vm1,vm_type=lxc cpuload=0.147998116735236,disk_free=4461129728i,disk_total=5217320960i,disk_used=756191232i,disk_used_percentage=14,mem_free=1046827008i,mem_total=1073741824i,mem_used=26914816i,mem_used_percentage=2,status="running",swap_free=536698880i,swap_total=536870912i,swap_used=172032i,swap_used_percentage=0,uptime=1643793i 1595457277000000000 > ... diff --git a/plugins/inputs/proxmox/proxmox.go b/plugins/inputs/proxmox/proxmox.go index 7c14356849d6b..369d9d1f83543 100644 --- a/plugins/inputs/proxmox/proxmox.go +++ b/plugins/inputs/proxmox/proxmox.go @@ -1,41 +1,29 @@ +//go:generate ../../../tools/readme_config_includer/generator package proxmox import ( + _ "embed" "encoding/json" "errors" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" - "io/ioutil" + "io" "net/http" "net/url" "os" "strings" -) - -var sampleConfig = ` - ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. - base_url = "https://localhost:8006/api2/json" - api_token = "USER@REALM!TOKENID=UUID" + "time" - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - insecure_skip_verify = false + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) - # HTTP response timeout (default: 5s) - response_timeout = "5s" -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string -func (px *Proxmox) SampleConfig() string { +func (*Proxmox) SampleConfig() string { return sampleConfig } -func (px *Proxmox) Description() string { - return "Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2)." 
-} - func (px *Proxmox) Gather(acc telegraf.Accumulator) error { err := getNodeSearchDomain(px) if err != nil { @@ -49,9 +37,10 @@ func (px *Proxmox) Gather(acc telegraf.Accumulator) error { } func (px *Proxmox) Init() error { - + // Set hostname as default node name for backwards compatibility if px.NodeName == "" { - return errors.New("node_name must be configured") + hostname, _ := os.Hostname() + px.NodeName = hostname } tlsCfg, err := px.ClientConfig.TLSConfig() @@ -62,47 +51,43 @@ func (px *Proxmox) Init() error { Transport: &http.Transport{ TLSClientConfig: tlsCfg, }, - Timeout: px.ResponseTimeout.Duration, + Timeout: time.Duration(px.ResponseTimeout), } return nil } func init() { - px := Proxmox{ - requestFunction: performRequest, - } - - // Set hostname as default node name for backwards compatibility - hostname, _ := os.Hostname() - px.NodeName = hostname - - inputs.Add("proxmox", func() telegraf.Input { return &px }) + inputs.Add("proxmox", func() telegraf.Input { + return &Proxmox{ + requestFunction: performRequest, + } + }) } func getNodeSearchDomain(px *Proxmox) error { - apiUrl := "/nodes/" + px.NodeName + "/dns" - jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) + apiURL := "/nodes/" + px.NodeName + "/dns" + jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { return err } - var nodeDns NodeDns - err = json.Unmarshal(jsonData, &nodeDns) + var nodeDNS NodeDNS + err = json.Unmarshal(jsonData, &nodeDNS) if err != nil { return err } - if nodeDns.Data.Searchdomain == "" { - return errors.New("node_name not found") + if nodeDNS.Data.Searchdomain == "" { + return errors.New("search domain is not set") } - px.nodeSearchDomain = nodeDns.Data.Searchdomain + px.nodeSearchDomain = nodeDNS.Data.Searchdomain return nil } -func performRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) { - request, err := http.NewRequest(method, px.BaseURL+apiUrl, strings.NewReader(data.Encode())) +func performRequest(px *Proxmox, apiURL string, method string, data url.Values) ([]byte, error) { + request, err := http.NewRequest(method, px.BaseURL+apiURL, strings.NewReader(data.Encode())) if err != nil { return nil, err } @@ -114,7 +99,7 @@ func performRequest(px *Proxmox, apiUrl string, method string, data url.Values) } defer resp.Body.Close() - responseBody, err := ioutil.ReadAll(resp.Body) + responseBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -123,15 +108,15 @@ func performRequest(px *Proxmox, apiUrl string, method string, data url.Values) } func gatherLxcData(px *Proxmox, acc telegraf.Accumulator) { - gatherVmData(px, acc, LXC) + gatherVMData(px, acc, LXC) } func gatherQemuData(px *Proxmox, acc telegraf.Accumulator) { - gatherVmData(px, acc, QEMU) + gatherVMData(px, acc, QEMU) } -func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { - vmStats, err := getVmStats(px, rt) +func gatherVMData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { + vmStats, err := getVMStats(px, rt) if err != nil { px.Log.Error("Error getting VM stats: %v", err) return @@ -139,100 +124,103 @@ func gatherVmData(px *Proxmox, acc telegraf.Accumulator, rt ResourceType) { // For each VM add metrics to Accumulator for _, vmStat := range vmStats.Data { - vmConfig, err := getVmConfig(px, vmStat.ID, rt) + vmConfig, err := getVMConfig(px, vmStat.ID, rt) if err != nil { - px.Log.Error("Error getting VM config: %v", err) + px.Log.Errorf("Error getting VM config: %v", err) return } + + if 
vmConfig.Data.Template == 1 { + px.Log.Debugf("Ignoring template VM %s (%s)", vmStat.ID, vmStat.Name) + continue + } + tags := getTags(px, vmStat.Name, vmConfig, rt) currentVMStatus, err := getCurrentVMStatus(px, rt, vmStat.ID) if err != nil { - px.Log.Error("Error getting VM curent VM status: %v", err) - return - } - fields, err := getFields(currentVMStatus) - if err != nil { - px.Log.Error("Error getting VM measurements: %v", err) + px.Log.Errorf("Error getting current VM status: %v", err) return } + + fields := getFields(currentVMStatus) acc.AddFields("proxmox", fields, tags) } } -func getCurrentVMStatus(px *Proxmox, rt ResourceType, id string) (VmStat, error) { - apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + id + "/status/current" +func getCurrentVMStatus(px *Proxmox, rt ResourceType, id json.Number) (VMStat, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(id) + "/status/current" - jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) + jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { - return VmStat{}, err + return VMStat{}, err } - var currentVmStatus VmCurrentStats - err = json.Unmarshal(jsonData, &currentVmStatus) + var currentVMStatus VMCurrentStats + err = json.Unmarshal(jsonData, &currentVMStatus) if err != nil { - return VmStat{}, err + return VMStat{}, err } - return currentVmStatus.Data, nil + return currentVMStatus.Data, nil } -func getVmStats(px *Proxmox, rt ResourceType) (VmStats, error) { - apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) - jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) +func getVMStats(px *Proxmox, rt ResourceType) (VMStats, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { - return VmStats{}, err + return VMStats{}, err } - var vmStats VmStats + var vmStats VMStats err = json.Unmarshal(jsonData, &vmStats) if err != nil { - return VmStats{}, err + return VMStats{}, err } return vmStats, nil } -func getVmConfig(px *Proxmox, vmId string, rt ResourceType) (VmConfig, error) { - apiUrl := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + vmId + "/config" - jsonData, err := px.requestFunction(px, apiUrl, http.MethodGet, nil) +func getVMConfig(px *Proxmox, vmID json.Number, rt ResourceType) (VMConfig, error) { + apiURL := "/nodes/" + px.NodeName + "/" + string(rt) + "/" + string(vmID) + "/config" + jsonData, err := px.requestFunction(px, apiURL, http.MethodGet, nil) if err != nil { - return VmConfig{}, err + return VMConfig{}, err } - var vmConfig VmConfig + var vmConfig VMConfig err = json.Unmarshal(jsonData, &vmConfig) if err != nil { - return VmConfig{}, err + return VMConfig{}, err } return vmConfig, nil } -func getFields(vmStat VmStat) (map[string]interface{}, error) { - mem_total, mem_used, mem_free, mem_used_percentage := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) - swap_total, swap_used, swap_free, swap_used_percentage := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) - disk_total, disk_used, disk_free, disk_used_percentage := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) +func getFields(vmStat VMStat) map[string]interface{} { + memMetrics := getByteMetrics(vmStat.TotalMem, vmStat.UsedMem) + swapMetrics := getByteMetrics(vmStat.TotalSwap, vmStat.UsedSwap) + diskMetrics := getByteMetrics(vmStat.TotalDisk, vmStat.UsedDisk) return map[string]interface{}{ "status": vmStat.Status, "uptime": jsonNumberToInt64(vmStat.Uptime), - 
"cpuload": jsonNumberToFloat64(vmStat.CpuLoad), - "mem_used": mem_used, - "mem_total": mem_total, - "mem_free": mem_free, - "mem_used_percentage": mem_used_percentage, - "swap_used": swap_used, - "swap_total": swap_total, - "swap_free": swap_free, - "swap_used_percentage": swap_used_percentage, - "disk_used": disk_used, - "disk_total": disk_total, - "disk_free": disk_free, - "disk_used_percentage": disk_used_percentage, - }, nil + "cpuload": jsonNumberToFloat64(vmStat.CPULoad), + "mem_used": memMetrics.used, + "mem_total": memMetrics.total, + "mem_free": memMetrics.free, + "mem_used_percentage": memMetrics.usedPercentage, + "swap_used": swapMetrics.used, + "swap_total": swapMetrics.total, + "swap_free": swapMetrics.free, + "swap_used_percentage": swapMetrics.usedPercentage, + "disk_used": diskMetrics.used, + "disk_total": diskMetrics.total, + "disk_free": diskMetrics.free, + "disk_used_percentage": diskMetrics.usedPercentage, + } } -func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, float64) { +func getByteMetrics(total json.Number, used json.Number) metrics { int64Total := jsonNumberToInt64(total) int64Used := jsonNumberToInt64(used) int64Free := int64Total - int64Used @@ -241,7 +229,12 @@ func getByteMetrics(total json.Number, used json.Number) (int64, int64, int64, f usedPercentage = float64(int64Used) * 100 / float64(int64Total) } - return int64Total, int64Used, int64Free, usedPercentage + return metrics{ + total: int64Total, + used: int64Used, + free: int64Free, + usedPercentage: usedPercentage, + } } func jsonNumberToInt64(value json.Number) int64 { @@ -262,7 +255,7 @@ func jsonNumberToFloat64(value json.Number) float64 { return float64Value } -func getTags(px *Proxmox, name string, vmConfig VmConfig, rt ResourceType) map[string]string { +func getTags(px *Proxmox, name string, vmConfig VMConfig, rt ResourceType) map[string]string { domain := vmConfig.Data.Searchdomain if len(domain) == 0 { domain = px.nodeSearchDomain diff --git a/plugins/inputs/proxmox/proxmox_test.go b/plugins/inputs/proxmox/proxmox_test.go index 524a105e7b1ab..b0916a5f3dd8e 100644 --- a/plugins/inputs/proxmox/proxmox_test.go +++ b/plugins/inputs/proxmox/proxmox_test.go @@ -1,38 +1,39 @@ package proxmox import ( - "github.com/bmizerany/assert" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" "net/url" "strings" "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) var nodeSearchDomainTestData = `{"data":{"search":"test.example.com","dns1":"1.0.0.1"}}` var qemuTestData = `{"data":[{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}]}` var qemuConfigTestData = `{"data":{"hostname":"qemu1","searchdomain":"test.example.com"}}` -var lxcTestData = `{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}]}` +var lxcTestData = 
`{"data":[{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"},{"vmid":112,"type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container2"}]}` var lxcConfigTestData = `{"data":{"hostname":"container1","searchdomain":"test.example.com"}}` var lxcCurrentStatusTestData = `{"data":{"vmid":"111","type":"lxc","uptime":2078164,"swap":9412608,"disk":"744189952","maxmem":536870912,"mem":98500608,"maxswap":536870912,"cpu":0.00371567669193613,"status":"running","maxdisk":"5217320960","name":"container1"}}` var qemuCurrentStatusTestData = `{"data":{"name":"qemu1","status":"running","maxdisk":10737418240,"cpu":0.029336643550795,"vmid":"113","uptime":2159739,"disk":0,"maxmem":2147483648,"mem":1722451796}}` -func performTestRequest(px *Proxmox, apiUrl string, method string, data url.Values) ([]byte, error) { +func performTestRequest(_ *Proxmox, apiURL string, _ string, _ url.Values) ([]byte, error) { var bytedata = []byte("") - if strings.HasSuffix(apiUrl, "dns") { + if strings.HasSuffix(apiURL, "dns") { bytedata = []byte(nodeSearchDomainTestData) - } else if strings.HasSuffix(apiUrl, "qemu") { + } else if strings.HasSuffix(apiURL, "qemu") { bytedata = []byte(qemuTestData) - } else if strings.HasSuffix(apiUrl, "113/config") { + } else if strings.HasSuffix(apiURL, "113/config") { bytedata = []byte(qemuConfigTestData) - } else if strings.HasSuffix(apiUrl, "lxc") { + } else if strings.HasSuffix(apiURL, "lxc") { bytedata = []byte(lxcTestData) - } else if strings.HasSuffix(apiUrl, "111/config") { + } else if strings.HasSuffix(apiURL, "111/config") { bytedata = []byte(lxcConfigTestData) - } else if strings.HasSuffix(apiUrl, "111/status/current") { + } else if strings.HasSuffix(apiURL, "111/status/current") { bytedata = []byte(lxcCurrentStatusTestData) - } else if strings.HasSuffix(apiUrl, "113/status/current") { + } else if strings.HasSuffix(apiURL, "113/status/current") { bytedata = []byte(qemuCurrentStatusTestData) } @@ -58,7 +59,7 @@ func TestGetNodeSearchDomain(t *testing.T) { err := getNodeSearchDomain(px) require.NoError(t, err) - assert.Equal(t, px.nodeSearchDomain, "test.example.com") + require.Equal(t, px.nodeSearchDomain, "test.example.com") } func TestGatherLxcData(t *testing.T) { @@ -68,7 +69,7 @@ func TestGatherLxcData(t *testing.T) { acc := &testutil.Accumulator{} gatherLxcData(px, acc) - assert.Equal(t, acc.NFields(), 15) + require.Equal(t, acc.NFields(), 15) testFields := map[string]interface{}{ "status": "running", "uptime": int64(2078164), @@ -102,7 +103,7 @@ func TestGatherQemuData(t *testing.T) { acc := &testutil.Accumulator{} gatherQemuData(px, acc) - assert.Equal(t, acc.NFields(), 15) + require.Equal(t, acc.NFields(), 15) testFields := map[string]interface{}{ "status": "running", "uptime": int64(2159739), @@ -138,5 +139,5 @@ func TestGather(t *testing.T) { require.NoError(t, err) // Results from both tests above - assert.Equal(t, acc.NFields(), 30) + require.Equal(t, acc.NFields(), 30) } diff --git a/plugins/inputs/proxmox/sample.conf b/plugins/inputs/proxmox/sample.conf new file mode 100644 index 0000000000000..630eef1a926d8 --- /dev/null +++ b/plugins/inputs/proxmox/sample.conf @@ -0,0 +1,17 @@ +# Provides metrics from Proxmox nodes (Proxmox Virtual Environment > 6.2). 
+[[inputs.proxmox]] + ## API connection configuration. The API token was introduced in Proxmox v6.2. Required permissions for user and token: PVEAuditor role on /. + base_url = "https://localhost:8006/api2/json" + api_token = "USER@REALM!TOKENID=UUID" + ## Node name, defaults to OS hostname + # node_name = "" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + insecure_skip_verify = false + + # HTTP response timeout (default: 5s) + response_timeout = "5s" diff --git a/plugins/inputs/proxmox/structs.go b/plugins/inputs/proxmox/structs.go index 461e71d767d6a..78d0010b501eb 100644 --- a/plugins/inputs/proxmox/structs.go +++ b/plugins/inputs/proxmox/structs.go @@ -2,18 +2,19 @@ package proxmox import ( "encoding/json" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/common/tls" "net/http" "net/url" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/common/tls" ) type Proxmox struct { - BaseURL string `toml:"base_url"` - APIToken string `toml:"api_token"` - ResponseTimeout internal.Duration `toml:"response_timeout"` - NodeName string `toml:"node_name"` + BaseURL string `toml:"base_url"` + APIToken string `toml:"api_token"` + ResponseTimeout config.Duration `toml:"response_timeout"` + NodeName string `toml:"node_name"` tls.ClientConfig @@ -31,16 +32,16 @@ var ( LXC ResourceType = "lxc" ) -type VmStats struct { - Data []VmStat `json:"data"` +type VMStats struct { + Data []VMStat `json:"data"` } -type VmCurrentStats struct { - Data VmStat `json:"data"` +type VMCurrentStats struct { + Data VMStat `json:"data"` } -type VmStat struct { - ID string `json:"vmid"` +type VMStat struct { + ID json.Number `json:"vmid"` Name string `json:"name"` Status string `json:"status"` UsedMem json.Number `json:"mem"` @@ -50,18 +51,26 @@ type VmStat struct { UsedSwap json.Number `json:"swap"` TotalSwap json.Number `json:"maxswap"` Uptime json.Number `json:"uptime"` - CpuLoad json.Number `json:"cpu"` + CPULoad json.Number `json:"cpu"` } -type VmConfig struct { +type VMConfig struct { Data struct { Searchdomain string `json:"searchdomain"` Hostname string `json:"hostname"` + Template int `json:"template"` } `json:"data"` } -type NodeDns struct { +type NodeDNS struct { Data struct { Searchdomain string `json:"search"` } `json:"data"` } + +type metrics struct { + total int64 + used int64 + free int64 + usedPercentage float64 +} diff --git a/plugins/inputs/puppetagent/README.md b/plugins/inputs/puppetagent/README.md index 687005b98cc11..9a8f2352b296b 100644 --- a/plugins/inputs/puppetagent/README.md +++ b/plugins/inputs/puppetagent/README.md @@ -1,12 +1,10 @@ # PuppetAgent Input Plugin -#### Description +The puppetagent plugin collects variables output from the +'last_run_summary.yaml' file, usually located in `/var/lib/puppet/state/`; see +[PuppetAgent Runs][1]. -The puppetagent plugin collects variables outputted from the 'last_run_summary.yaml' file -usually located in `/var/lib/puppet/state/` -[PuppetAgent Runs](https://puppet.com/blog/puppet-monitoring-how-to-monitor-success-or-failure-of-puppet-runs/).
- -``` +```sh cat /var/lib/puppet/state/last_run_summary.yaml --- @@ -45,7 +43,7 @@ cat /var/lib/puppet/state/last_run_summary.yaml puppet: "3.7.5" ``` -``` +```sh jcross@pit-devops-02 ~ >sudo ./telegraf_linux_amd64 --input-filter puppetagent --config tele.conf --test * Plugin: puppetagent, Collection 1 > [] puppetagent_events_failure value=0 @@ -77,56 +75,83 @@ jcross@pit-devops-02 ~ >sudo ./telegraf_linux_amd64 --input-filter puppetagent - > [] puppetagent_version_puppet value=3.7.5 ``` -## Measurements: -#### PuppetAgent int64 measurements: +[1]: https://puppet.com/blog/puppet-monitoring-how-to-monitor-success-or-failure-of-puppet-runs/ + +## Configuration + +```toml @sample.conf +# Reads last_run_summary.yaml file and converts to measurements +[[inputs.puppetagent]] + ## Location of puppet last run summary file + location = "/var/lib/puppet/state/last_run_summary.yaml" +``` + +## Measurements + +### PuppetAgent int64 measurements Meta: + - units: int64 - tags: `` Measurement names: - - puppetagent_events_failure - - puppetagent_events_total - - puppetagent_events_success - - puppetagent_resources_failed - - puppetagent_resources_scheduled - - puppetagent_resources_changed - - puppetagent_resources_skipped - - puppetagent_resources_total - - puppetagent_resources_failedtorestart - - puppetagent_resources_restarted - - puppetagent_resources_outofsync - - puppetagent_changes_total - - puppetagent_time_service - - puppetagent_time_lastrun - - puppetagent_version_config - -#### PuppetAgent float64 measurements: + +- puppetagent_changes_total +- puppetagent_events_failure +- puppetagent_events_total +- puppetagent_events_success +- puppetagent_resources_changed +- puppetagent_resources_corrective_change +- puppetagent_resources_failed +- puppetagent_resources_failedtorestart +- puppetagent_resources_outofsync +- puppetagent_resources_restarted +- puppetagent_resources_scheduled +- puppetagent_resources_skipped +- puppetagent_resources_total +- puppetagent_time_service +- puppetagent_time_lastrun +- puppetagent_version_config + +### PuppetAgent float64 measurements Meta: + - units: float64 - tags: `` Measurement names: - - puppetagent_time_user - - puppetagent_time_schedule - - puppetagent_time_filebucket - - puppetagent_time_file - - puppetagent_time_exec - - puppetagent_time_anchor - - puppetagent_time_sshauthorizedkey - - puppetagent_time_package - - puppetagent_time_total - - puppetagent_time_configretrieval - - puppetagent_time_lastrun - - puppetagent_time_cron - - puppetagent_version_config - -#### PuppetAgent string measurements: + +- puppetagent_time_anchor +- puppetagent_time_catalogapplication +- puppetagent_time_configretrieval +- puppetagent_time_convertcatalog +- puppetagent_time_cron +- puppetagent_time_exec +- puppetagent_time_factgeneration +- puppetagent_time_file +- puppetagent_time_filebucket +- puppetagent_time_group +- puppetagent_time_lastrun +- puppetagent_time_noderetrieval +- puppetagent_time_notify +- puppetagent_time_package +- puppetagent_time_pluginsync +- puppetagent_time_schedule +- puppetagent_time_sshauthorizedkey +- puppetagent_time_total +- puppetagent_time_transactionevaluation +- puppetagent_time_user +- puppetagent_version_config + +### PuppetAgent string measurements Meta: + - units: string - tags: `` Measurement names: - - puppetagent_version_puppet + +- puppetagent_version_puppet diff --git a/plugins/inputs/puppetagent/last_run_summary.yaml b/plugins/inputs/puppetagent/last_run_summary.yaml index be2f017465fad..c1aa1ce276216 100644 --- 
a/plugins/inputs/puppetagent/last_run_summary.yaml +++ b/plugins/inputs/puppetagent/last_run_summary.yaml @@ -1,34 +1,43 @@ --- events: failure: 0 + noop: 0 total: 0 success: 0 resources: + changed: 0 + corrective_change: 0 failed: 0 + failed_to_restart: 0 + out_of_sync: 0 + restarted: 0 scheduled: 0 - changed: 0 skipped: 0 total: 109 - failed_to_restart: 0 - restarted: 0 - out_of_sync: 0 changes: total: 0 time: - user: 0.004331 - schedule: 0.001123 - filebucket: 0.000353 - file: 0.441472 - exec: 0.508123 anchor: 0.000555 - yumrepo: 0.006989 - ssh_authorized_key: 0.000764 - service: 1.807795 - package: 1.325788 - total: 8.85354707064819 + catalog_application: 0.010555 config_retrieval: 4.75567007064819 - last_run: 1444936531 + convert_catalog: 1.3 cron: 0.000584 + exec: 0.508123 + fact_generation: 0.34 + file: 0.441472 + filebucket: 0.000353 + last_run: 1444936531 + node_retrieval: 1.235 + notify: 0.00035 + package: 1.325788 + plugin_sync: 0.325788 + schedule: 0.001123 + service: 1.807795 + ssh_authorized_key: 0.000764 + total: 8.85354707064819 + transaction_evaluation: 4.69765 + user: 0.004331 + yumrepo: 0.006989 version: config: "environment:d6018ce" puppet: "3.7.5" diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index 1d0e30aa88ed5..61a03298a7c13 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -1,27 +1,28 @@ +//go:generate ../../../tools/readme_config_includer/generator package puppetagent import ( + _ "embed" "fmt" - "gopkg.in/yaml.v2" - "io/ioutil" "os" "reflect" "strings" + "gopkg.in/yaml.v2" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + // PuppetAgent is a PuppetAgent plugin type PuppetAgent struct { Location string } -var sampleConfig = ` - ## Location of puppet last run summary file - location = "/var/lib/puppet/state/last_run_summary.yaml" -` - type State struct { Events event Resources resource @@ -32,19 +33,21 @@ type State struct { type event struct { Failure int64 `yaml:"failure"` + Noop int64 `yaml:"noop"` Total int64 `yaml:"total"` Success int64 `yaml:"success"` } type resource struct { - Failed int64 `yaml:"failed"` - Scheduled int64 `yaml:"scheduled"` - Changed int64 `yaml:"changed"` - Skipped int64 `yaml:"skipped"` - Total int64 `yaml:"total"` - FailedToRestart int64 `yaml:"failed_to_restart"` - Restarted int64 `yaml:"restarted"` - OutOfSync int64 `yaml:"out_of_sync"` + Changed int64 `yaml:"changed"` + CorrectiveChange int64 `yaml:"corrective_change"` + Failed int64 `yaml:"failed"` + FailedToRestart int64 `yaml:"failed_to_restart"` + OutOfSync int64 `yaml:"out_of_sync"` + Restarted int64 `yaml:"restarted"` + Scheduled int64 `yaml:"scheduled"` + Skipped int64 `yaml:"skipped"` + Total int64 `yaml:"total"` } type change struct { @@ -52,19 +55,27 @@ type change struct { } type time struct { - User float64 `yaml:"user"` - Schedule float64 `yaml:"schedule"` - FileBucket float64 `yaml:"filebucket"` - File float64 `yaml:"file"` - Exec float64 `yaml:"exec"` - Anchor float64 `yaml:"anchor"` - SSHAuthorizedKey float64 `yaml:"ssh_authorized_key"` - Service float64 `yaml:"service"` - Package float64 `yaml:"package"` - Total float64 `yaml:"total"` - ConfigRetrieval float64 `yaml:"config_retrieval"` - LastRun int64 `yaml:"last_run"` - Cron float64 `yaml:"cron"` + Anchor float64 `yaml:"anchor"` + CataLogApplication float64 `yaml:"catalog_application"` + ConfigRetrieval float64 `yaml:"config_retrieval"` + ConvertCatalog float64 `yaml:"convert_catalog"` + Cron float64 `yaml:"cron"` + Exec float64 `yaml:"exec"` + FactGeneration float64 `yaml:"fact_generation"` + File float64 `yaml:"file"` + FileBucket float64 `yaml:"filebucket"` + Group float64 `yaml:"group"` + LastRun int64 `yaml:"last_run"` + NodeRetrieval float64 `yaml:"node_retrieval"` + Notify float64 `yaml:"notify"` + Package float64 `yaml:"package"` + PluginSync float64 `yaml:"plugin_sync"` + Schedule float64 `yaml:"schedule"` + Service float64 `yaml:"service"` + SSHAuthorizedKey float64 `yaml:"ssh_authorized_key"` + Total float64 `yaml:"total"` + TransactionEvaluation float64 `yaml:"transaction_evaluation"` + User float64 `yaml:"user"` } type version struct { @@ -72,19 +83,12 @@ type version struct { Puppet string `yaml:"puppet"` } -// SampleConfig returns sample configuration message -func (pa *PuppetAgent) SampleConfig() string { +func (*PuppetAgent) SampleConfig() string { return sampleConfig } -// Description returns description of PuppetAgent plugin -func (pa *PuppetAgent) Description() string { - return `Reads last_run_summary.yaml file and converts to measurements` -} - // Gather reads stats from all configured servers accumulates stats func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { - if len(pa.Location) == 0 { pa.Location = "/var/lib/puppet/state/last_run_summary.yaml" } @@ -93,7 +97,7 @@ func (pa *PuppetAgent) Gather(acc telegraf.Accumulator) error { return fmt.Errorf("%s", err) } - fh, err := ioutil.ReadFile(pa.Location) + fh, err := os.ReadFile(pa.Location) if err != nil { return fmt.Errorf("%s", err) } diff --git a/plugins/inputs/puppetagent/puppetagent_test.go 
b/plugins/inputs/puppetagent/puppetagent_test.go index b1c447887f23c..754fb39783a2a 100644 --- a/plugins/inputs/puppetagent/puppetagent_test.go +++ b/plugins/inputs/puppetagent/puppetagent_test.go @@ -1,8 +1,10 @@ package puppetagent import ( - "github.com/influxdata/telegraf/testutil" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestGather(t *testing.T) { @@ -11,37 +13,47 @@ func TestGather(t *testing.T) { pa := PuppetAgent{ Location: "last_run_summary.yaml", } - pa.Gather(&acc) + require.NoError(t, pa.Gather(&acc)) tags := map[string]string{"location": "last_run_summary.yaml"} fields := map[string]interface{}{ - "events_failure": int64(0), - "events_total": int64(0), - "events_success": int64(0), - "resources_failed": int64(0), - "resources_scheduled": int64(0), - "resources_changed": int64(0), - "resources_skipped": int64(0), - "resources_total": int64(109), - "resources_failedtorestart": int64(0), - "resources_restarted": int64(0), - "resources_outofsync": int64(0), - "changes_total": int64(0), - "time_lastrun": int64(1444936531), - "version_configstring": "environment:d6018ce", - "time_user": float64(0.004331), - "time_schedule": float64(0.001123), - "time_filebucket": float64(0.000353), - "time_file": float64(0.441472), - "time_exec": float64(0.508123), - "time_anchor": float64(0.000555), - "time_sshauthorizedkey": float64(0.000764), - "time_service": float64(1.807795), - "time_package": float64(1.325788), - "time_total": float64(8.85354707064819), - "time_configretrieval": float64(4.75567007064819), - "time_cron": float64(0.000584), - "version_puppet": "3.7.5", + "events_failure": int64(0), + "events_noop": int64(0), + "events_success": int64(0), + "events_total": int64(0), + "resources_changed": int64(0), + "resources_correctivechange": int64(0), + "resources_failed": int64(0), + "resources_failedtorestart": int64(0), + "resources_outofsync": int64(0), + "resources_restarted": int64(0), + "resources_scheduled": int64(0), + "resources_skipped": int64(0), + "resources_total": int64(109), + "changes_total": int64(0), + "time_anchor": float64(0.000555), + "time_catalogapplication": float64(0.010555), + "time_configretrieval": float64(4.75567007064819), + "time_convertcatalog": float64(1.3), + "time_cron": float64(0.000584), + "time_exec": float64(0.508123), + "time_factgeneration": float64(0.34), + "time_file": float64(0.441472), + "time_filebucket": float64(0.000353), + "time_group": float64(0), + "time_lastrun": int64(1444936531), + "time_noderetrieval": float64(1.235), + "time_notify": float64(0.00035), + "time_package": float64(1.325788), + "time_pluginsync": float64(0.325788), + "time_schedule": float64(0.001123), + "time_service": float64(1.807795), + "time_sshauthorizedkey": float64(0.000764), + "time_total": float64(8.85354707064819), + "time_transactionevaluation": float64(4.69765), + "time_user": float64(0.004331), + "version_configstring": "environment:d6018ce", + "version_puppet": "3.7.5", } acc.AssertContainsTaggedFields(t, "puppetagent", fields, tags) diff --git a/plugins/inputs/puppetagent/sample.conf b/plugins/inputs/puppetagent/sample.conf new file mode 100644 index 0000000000000..dbb1ad2fa32f6 --- /dev/null +++ b/plugins/inputs/puppetagent/sample.conf @@ -0,0 +1,4 @@ +# Reads last_run_summary.yaml file and converts to measurements +[[inputs.puppetagent]] + ## Location of puppet last run summary file + location = "/var/lib/puppet/state/last_run_summary.yaml" diff --git a/plugins/inputs/rabbitmq/README.md 
b/plugins/inputs/rabbitmq/README.md index 1274b4ee230f8..45f40142ba2ad 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -2,14 +2,16 @@ Reads metrics from RabbitMQ servers via the [Management Plugin][management]. -For additional details reference the [RabbitMQ Management HTTP Stats][management-reference]. +For additional details reference the [RabbitMQ Management HTTP +Stats][management-reference]. [management]: https://www.rabbitmq.com/management.html [management-reference]: https://raw.githack.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_9/priv/www/api/index.html -### Configuration +## Configuration -```toml +```toml @sample.conf +# Reads metrics from RabbitMQ servers via the Management Plugin [[inputs.rabbitmq]] ## Management Plugin url. (default: http://localhost:15672) # url = "http://localhost:15672" @@ -42,12 +44,19 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management ## A list of queues to gather as the rabbitmq_queue measurement. If not ## specified, metrics for all queues are gathered. + ## Deprecated in 1.6: Use queue_name_include instead. # queues = ["telegraf"] ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not ## specified, metrics for all exchanges are gathered. # exchanges = ["telegraf"] + ## Metrics to include and exclude. Globs accepted. + ## Note that an empty array for both will include all metrics + ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" + # metric_include = [] + # metric_exclude = [] + ## Queues to include and exclude. Globs accepted. ## Note that an empty array for both will include all queues # queue_name_include = [] @@ -60,7 +69,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management # federation_upstream_exclude = [] ``` -### Metrics +## Metrics - rabbitmq_overview - tags: @@ -84,7 +93,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - return_unroutable (int, number of unroutable messages) - return_unroutable_rate (float, number of unroutable messages per second) -+ rabbitmq_node +- rabbitmq_node - tags: - url - node @@ -176,7 +185,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - slave_nodes (int, count) - synchronised_slave_nodes (int, count) -+ rabbitmq_exchange +- rabbitmq_exchange - tags: - url - exchange @@ -211,17 +220,19 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management - messages_publish (int, count) - messages_return_unroutable (int, count) -### Sample Queries +## Sample Queries -Message rates for the entire node can be calculated from total message counts. For instance, to get the rate of messages published per minute, use this query: +Message rates for the entire node can be calculated from total message +counts. 
For instance, to get the rate of messages published per minute, use this +query: -``` +```sql SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m) ``` -### Example Output +## Example Output -``` +```text rabbitmq_queue,url=http://amqp.example.org:15672,queue=telegraf,vhost=influxdb,node=rabbit@amqp.example.org,durable=true,auto_delete=false,host=amqp.example.org messages_deliver_get=0i,messages_publish=329i,messages_publish_rate=0.2,messages_redeliver_rate=0,message_bytes_ready=0i,message_bytes_unacked=0i,messages_deliver=329i,messages_unack=0i,consumers=1i,idle_since="",messages=0i,messages_deliver_rate=0.2,messages_deliver_get_rate=0.2,messages_redeliver=0i,memory=43032i,message_bytes_ram=0i,messages_ack=329i,messages_ready=0i,messages_ack_rate=0.2,consumer_utilisation=1,message_bytes=0i,message_bytes_persist=0i 1493684035000000000 rabbitmq_overview,url=http://amqp.example.org:15672,host=amqp.example.org channels=2i,consumers=1i,exchanges=17i,messages_acked=329i,messages=0i,messages_ready=0i,messages_unacked=0i,connections=2i,queues=1i,messages_delivered=329i,messages_published=329i,clustering_listeners=2i,amqp_listeners=1i 1493684035000000000 rabbitmq_node,url=http://amqp.example.org:15672,node=rabbit@amqp.example.org,host=amqp.example.org fd_total=1024i,fd_used=32i,mem_limit=8363329126i,sockets_total=829i,disk_free=8175935488i,disk_free_limit=50000000i,mem_used=58771080i,proc_total=1048576i,proc_used=267i,run_queue=0i,sockets_used=2i,running=1i 149368403500000000 diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 4d8050c33fbca..2742fd9d93d88 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -1,20 +1,27 @@ +//go:generate ../../../tools/readme_config_includer/generator package rabbitmq import ( + _ "embed" "encoding/json" "fmt" + "io" "net/http" "strconv" "sync" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/filter" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + // DefaultUsername will set a default value that corresponds to the default // value used by Rabbitmq const DefaultUsername = "guest" @@ -35,27 +42,30 @@ const DefaultClientTimeout = 4 // see the sample config for further details type RabbitMQ struct { URL string `toml:"url"` - Name string `toml:"name"` + Name string `toml:"name" deprecated:"1.3.0;use 'tags' instead"` Username string `toml:"username"` Password string `toml:"password"` tls.ClientConfig - ResponseHeaderTimeout internal.Duration `toml:"header_timeout"` - ClientTimeout internal.Duration `toml:"client_timeout"` + ResponseHeaderTimeout config.Duration `toml:"header_timeout"` + ClientTimeout config.Duration `toml:"client_timeout"` Nodes []string `toml:"nodes"` - Queues []string `toml:"queues"` + Queues []string `toml:"queues" deprecated:"1.6.0;use 'queue_name_include' instead"` Exchanges []string `toml:"exchanges"` + MetricInclude []string `toml:"metric_include"` + MetricExclude []string `toml:"metric_exclude"` QueueInclude []string `toml:"queue_name_include"` QueueExclude []string `toml:"queue_name_exclude"` FederationUpstreamInclude []string `toml:"federation_upstream_include"` FederationUpstreamExclude []string `toml:"federation_upstream_exclude"` - Client *http.Client `toml:"-"` + Log telegraf.Logger `toml:"-"` - filterCreated bool + client *http.Client excludeEveryQueue bool + metricFilter filter.Filter queueFilter filter.Filter upstreamFilter filter.Filter } @@ -157,17 +167,17 @@ type Node struct { Uptime int64 `json:"uptime"` MnesiaDiskTxCount int64 `json:"mnesia_disk_tx_count"` MnesiaDiskTxCountDetails Details `json:"mnesia_disk_tx_count_details"` - MnesiaRamTxCount int64 `json:"mnesia_ram_tx_count"` - MnesiaRamTxCountDetails Details `json:"mnesia_ram_tx_count_details"` + MnesiaRAMTxCount int64 `json:"mnesia_ram_tx_count"` + MnesiaRAMTxCountDetails Details `json:"mnesia_ram_tx_count_details"` GcNum int64 `json:"gc_num"` GcNumDetails Details `json:"gc_num_details"` GcBytesReclaimed int64 `json:"gc_bytes_reclaimed"` GcBytesReclaimedDetails Details `json:"gc_bytes_reclaimed_details"` - IoReadAvgTime int64 `json:"io_read_avg_time"` + IoReadAvgTime float64 `json:"io_read_avg_time"` IoReadAvgTimeDetails Details `json:"io_read_avg_time_details"` IoReadBytes int64 `json:"io_read_bytes"` IoReadBytesDetails Details `json:"io_read_bytes_details"` - IoWriteAvgTime int64 `json:"io_write_avg_time"` + IoWriteAvgTime float64 `json:"io_write_avg_time"` IoWriteAvgTimeDetails Details `json:"io_write_avg_time_details"` IoWriteBytes int64 `json:"io_write_bytes"` IoWriteBytesDetails Details `json:"io_write_bytes_details"` @@ -226,85 +236,44 @@ type MemoryResponse struct { // Memory details type Memory struct { - ConnectionReaders int64 `json:"connection_readers"` - ConnectionWriters int64 `json:"connection_writers"` - ConnectionChannels int64 `json:"connection_channels"` - ConnectionOther int64 `json:"connection_other"` - QueueProcs int64 `json:"queue_procs"` - QueueSlaveProcs int64 `json:"queue_slave_procs"` - Plugins int64 `json:"plugins"` - OtherProc int64 `json:"other_proc"` - Metrics int64 `json:"metrics"` - MgmtDb int64 `json:"mgmt_db"` - Mnesia int64 `json:"mnesia"` - OtherEts int64 `json:"other_ets"` - Binary int64 `json:"binary"` - MsgIndex int64 `json:"msg_index"` - Code int64 `json:"code"` - Atom int64 `json:"atom"` - OtherSystem int64 `json:"other_system"` - AllocatedUnused int64 `json:"allocated_unused"` - ReservedUnallocated int64 `json:"reserved_unallocated"` - Total int64 
`json:"total"` + ConnectionReaders int64 `json:"connection_readers"` + ConnectionWriters int64 `json:"connection_writers"` + ConnectionChannels int64 `json:"connection_channels"` + ConnectionOther int64 `json:"connection_other"` + QueueProcs int64 `json:"queue_procs"` + QueueSlaveProcs int64 `json:"queue_slave_procs"` + Plugins int64 `json:"plugins"` + OtherProc int64 `json:"other_proc"` + Metrics int64 `json:"metrics"` + MgmtDb int64 `json:"mgmt_db"` + Mnesia int64 `json:"mnesia"` + OtherEts int64 `json:"other_ets"` + Binary int64 `json:"binary"` + MsgIndex int64 `json:"msg_index"` + Code int64 `json:"code"` + Atom int64 `json:"atom"` + OtherSystem int64 `json:"other_system"` + AllocatedUnused int64 `json:"allocated_unused"` + ReservedUnallocated int64 `json:"reserved_unallocated"` + Total interface{} `json:"total"` +} + +// Error response +type ErrorResponse struct { + Error string `json:"error"` + Reason string `json:"reason"` } // gatherFunc ... type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator) -var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges, gatherFederationLinks} - -var sampleConfig = ` - ## Management Plugin url. (default: http://localhost:15672) - # url = "http://localhost:15672" - ## Tag added to rabbitmq_overview series; deprecated: use tags - # name = "rmq-server-1" - ## Credentials - # username = "guest" - # password = "guest" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false - - ## Optional request timeouts - ## - ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait - ## for a server's response headers after fully writing the request. - # header_timeout = "3s" - ## - ## client_timeout specifies a time limit for requests made by this client. - ## Includes connection time, any redirects, and reading the response body. - # client_timeout = "4s" - - ## A list of nodes to gather as the rabbitmq_node measurement. If not - ## specified, metrics for all nodes are gathered. - # nodes = ["rabbit@node1", "rabbit@node2"] - - ## A list of queues to gather as the rabbitmq_queue measurement. If not - ## specified, metrics for all queues are gathered. - # queues = ["telegraf"] - - ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not - ## specified, metrics for all exchanges are gathered. - # exchanges = ["telegraf"] - - ## Queues to include and exclude. Globs accepted. - ## Note that an empty array for both will include all queues - queue_name_include = [] - queue_name_exclude = [] - - ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. - ## If neither are specified, metrics for all federation upstreams are gathered. - ## Federation link metrics will only be gathered for queues and exchanges - ## whose non-federation metrics will be collected (e.g a queue excluded - ## by the 'queue_name_exclude' option will also be excluded from federation). - ## Globs accepted. - # federation_upstream_include = ["dataCentre-*"] - # federation_upstream_exclude = [] -` +var gatherFunctions = map[string]gatherFunc{ + "exchange": gatherExchanges, + "federation": gatherFederationLinks, + "node": gatherNodes, + "overview": gatherOverview, + "queue": gatherQueues, +} func boolToInt(b bool) int64 { if b { @@ -313,49 +282,51 @@ func boolToInt(b bool) int64 { return 0 } -// SampleConfig ... 
-func (r *RabbitMQ) SampleConfig() string { +func (*RabbitMQ) SampleConfig() string { return sampleConfig } -// Description ... -func (r *RabbitMQ) Description() string { - return "Reads metrics from RabbitMQ servers via the Management Plugin" -} +func (r *RabbitMQ) Init() error { + var err error -// Gather ... -func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { - if r.Client == nil { - tlsCfg, err := r.ClientConfig.TLSConfig() - if err != nil { - return err - } - tr := &http.Transport{ - ResponseHeaderTimeout: r.ResponseHeaderTimeout.Duration, - TLSClientConfig: tlsCfg, - } - r.Client = &http.Client{ - Transport: tr, - Timeout: r.ClientTimeout.Duration, - } + // Create gather filters + if err := r.createQueueFilter(); err != nil { + return err + } + if err := r.createUpstreamFilter(); err != nil { + return err } - // Create gather filters if not already created - if !r.filterCreated { - err := r.createQueueFilter() - if err != nil { - return err - } - err = r.createUpstreamFilter() - if err != nil { - return err - } - r.filterCreated = true + // Create a filter for the metrics + if r.metricFilter, err = filter.NewIncludeExcludeFilter(r.MetricInclude, r.MetricExclude); err != nil { + return err } + tlsCfg, err := r.ClientConfig.TLSConfig() + if err != nil { + return err + } + tr := &http.Transport{ + ResponseHeaderTimeout: time.Duration(r.ResponseHeaderTimeout), + TLSClientConfig: tlsCfg, + } + r.client = &http.Client{ + Transport: tr, + Timeout: time.Duration(r.ClientTimeout), + } + + return nil +} + +// Gather ... +func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup - wg.Add(len(gatherFunctions)) - for _, f := range gatherFunctions { + for name, f := range gatherFunctions { + // Query only metrics that are supported + if !r.metricFilter.Match(name) { + continue + } + wg.Add(1) go func(gf gatherFunc) { defer wg.Done() gf(r, acc) @@ -366,15 +337,16 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { return nil } -func (r *RabbitMQ) requestJSON(u string, target interface{}) error { +func (r *RabbitMQ) requestEndpoint(u string) ([]byte, error) { if r.URL == "" { r.URL = DefaultURL } - u = fmt.Sprintf("%s%s", r.URL, u) + endpoint := r.URL + u + r.Log.Debugf("Requesting %q...", endpoint) - req, err := http.NewRequest("GET", u, nil) + req, err := http.NewRequest("GET", endpoint, nil) if err != nil { - return err + return nil, err } username := r.Username @@ -389,14 +361,37 @@ func (r *RabbitMQ) requestJSON(u string, target interface{}) error { req.SetBasicAuth(username, password) - resp, err := r.Client.Do(req) + resp, err := r.client.Do(req) if err != nil { - return err + return nil, err } - defer resp.Body.Close() - json.NewDecoder(resp.Body).Decode(target) + r.Log.Debugf("HTTP status code: %v %v", resp.StatusCode, http.StatusText(resp.StatusCode)) + if resp.StatusCode < 200 || resp.StatusCode > 299 { + return nil, fmt.Errorf("getting %q failed: %v %v", u, resp.StatusCode, http.StatusText(resp.StatusCode)) + } + + return io.ReadAll(resp.Body) +} + +func (r *RabbitMQ) requestJSON(u string, target interface{}) error { + buf, err := r.requestEndpoint(u) + if err != nil { + return err + } + if err := json.Unmarshal(buf, target); err != nil { + if _, ok := err.(*json.UnmarshalTypeError); ok { + // Try to get the error reason from the response + var errResponse ErrorResponse + if json.Unmarshal(buf, &errResponse) == nil && errResponse.Error != "" { + // Return the error reason in the response + return fmt.Errorf("error response trying to get %q: %q 
(reason: %q)", u, errResponse.Error, errResponse.Reason) + } + } + + return fmt.Errorf("decoding answer from %q failed: %v", u, err) + } return nil } @@ -410,7 +405,7 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) { return } - if overview.QueueTotals == nil || overview.ObjectTotals == nil || overview.MessageStats == nil || overview.Listeners == nil { + if overview.QueueTotals == nil || overview.ObjectTotals == nil || overview.MessageStats == nil { acc.AddError(fmt.Errorf("Wrong answer from rabbitmq. Probably auth issue")) return } @@ -491,8 +486,8 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { "uptime": node.Uptime, "mnesia_disk_tx_count": node.MnesiaDiskTxCount, "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate, - "mnesia_ram_tx_count": node.MnesiaRamTxCount, - "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate, + "mnesia_ram_tx_count": node.MnesiaRAMTxCount, + "mnesia_ram_tx_count_rate": node.MnesiaRAMTxCountDetails.Rate, "gc_num": node.GcNum, "gc_num_rate": node.GcNumDetails.Rate, "gc_bytes_reclaimed": node.GcBytesReclaimed, @@ -535,7 +530,27 @@ func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { fields["mem_other_system"] = memory.Memory.OtherSystem fields["mem_allocated_unused"] = memory.Memory.AllocatedUnused fields["mem_reserved_unallocated"] = memory.Memory.ReservedUnallocated - fields["mem_total"] = memory.Memory.Total + switch v := memory.Memory.Total.(type) { + case float64: + fields["mem_total"] = int64(v) + case map[string]interface{}: + var foundEstimator bool + for _, estimator := range []string{"rss", "allocated", "erlang"} { + if x, found := v[estimator]; found { + if total, ok := x.(float64); ok { + fields["mem_total"] = int64(total) + foundEstimator = true + break + } + acc.AddError(fmt.Errorf("unknown type %T for %q total memory", x, estimator)) + } + } + if !foundEstimator { + acc.AddError(fmt.Errorf("no known memory estimation in %v", v)) + } + default: + acc.AddError(fmt.Errorf("unknown type %T for total memory", memory.Memory.Total)) + } } acc.AddFields("rabbitmq_node", fields, tags) @@ -764,8 +779,8 @@ func (r *RabbitMQ) shouldGatherFederationLink(link FederationLink) bool { func init() { inputs.Add("rabbitmq", func() telegraf.Input { return &RabbitMQ{ - ResponseHeaderTimeout: internal.Duration{Duration: DefaultResponseHeaderTimeout * time.Second}, - ClientTimeout: internal.Duration{Duration: DefaultClientTimeout * time.Second}, + ResponseHeaderTimeout: config.Duration(DefaultResponseHeaderTimeout * time.Second), + ClientTimeout: config.Duration(DefaultClientTimeout * time.Second), } }) } diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 869e8036d157d..e867b1e2dcb61 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -4,195 +4,668 @@ import ( "fmt" "net/http" "net/http/httptest" - "testing" + "os" + "time" - "io/ioutil" + "testing" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestRabbitMQGeneratesMetrics(t *testing.T) { +func TestRabbitMQGeneratesMetricsSet1(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var jsonFilePath string switch r.URL.Path { case "/api/overview": - jsonFilePath = "testdata/overview.json" + jsonFilePath = "testdata/set1/overview.json" case "/api/nodes": - jsonFilePath = "testdata/nodes.json" + jsonFilePath = 
"testdata/set1/nodes.json" case "/api/queues": - jsonFilePath = "testdata/queues.json" + jsonFilePath = "testdata/set1/queues.json" case "/api/exchanges": - jsonFilePath = "testdata/exchanges.json" + jsonFilePath = "testdata/set1/exchanges.json" case "/api/federation-links": - jsonFilePath = "testdata/federation-links.json" + jsonFilePath = "testdata/set1/federation-links.json" case "/api/nodes/rabbit@vagrant-ubuntu-trusty-64/memory": - jsonFilePath = "testdata/memory.json" + jsonFilePath = "testdata/set1/memory.json" default: - panic("Cannot handle request") + http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound) + return } - data, err := ioutil.ReadFile(jsonFilePath) + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) - if err != nil { - panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) - } - - w.Write(data) + _, err = w.Write(data) + require.NoError(t, err) })) defer ts.Close() - r := &RabbitMQ{ + // Define test cases + expected := []telegraf.Metric{ + testutil.MustMetric("rabbitmq_overview", + map[string]string{ + "url": ts.URL, + }, + map[string]interface{}{ + "messages": int64(5), + "messages_ready": int64(32), + "messages_unacked": int64(27), + "messages_acked": int64(5246), + "messages_delivered": int64(5234), + "messages_delivered_get": int64(3333), + "messages_published": int64(5258), + "channels": int64(44), + "connections": int64(44), + "consumers": int64(65), + "exchanges": int64(43), + "queues": int64(62), + "clustering_listeners": int64(2), + "amqp_listeners": int64(2), + "return_unroutable": int64(10), + "return_unroutable_rate": float64(3.3), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqlocal-0.rmqlocal.ankorabbitstatefulset3.svc.cluster.local", + "queue": "reply_a716f0523cd44941ad2ea6ce4a3869c3", + "url": ts.URL, + "vhost": "sorandomsorandom", + }, + map[string]interface{}{ + "consumers": int64(3), + "consumer_utilisation": float64(1.0), + "memory": int64(143776), + "message_bytes": int64(3), + "message_bytes_ready": int64(4), + "message_bytes_unacked": int64(5), + "message_bytes_ram": int64(6), + "message_bytes_persist": int64(7), + "messages": int64(44), + "messages_ready": int64(32), + "messages_unack": int64(44), + "messages_ack": int64(3457), + "messages_ack_rate": float64(9.9), + "messages_deliver": int64(22222), + "messages_deliver_rate": float64(333.4), + "messages_deliver_get": int64(3457), + "messages_deliver_get_rate": float64(0.2), + "messages_publish": int64(3457), + "messages_publish_rate": float64(11.2), + "messages_redeliver": int64(33), + "messages_redeliver_rate": float64(2.5), + "idle_since": "2015-11-01 8:22:14", + "slave_nodes": int64(1), + "synchronised_slave_nodes": int64(1), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_node", + map[string]string{ + "node": "rabbit@vagrant-ubuntu-trusty-64", + "url": ts.URL, + }, + map[string]interface{}{ + "disk_free": int64(3776), + "disk_free_limit": int64(50000000), + "disk_free_alarm": int64(0), + "fd_total": int64(1024), + "fd_used": int64(63), + "mem_limit": int64(2503), + "mem_used": int64(159707080), + "mem_alarm": int64(1), + "proc_total": int64(1048576), + "proc_used": int64(783), + "run_queue": int64(0), + "sockets_total": int64(829), + "sockets_used": int64(45), + "uptime": int64(7464827), + "running": int64(1), + "mnesia_disk_tx_count": int64(16), + 
"mnesia_ram_tx_count": int64(296), + "mnesia_disk_tx_count_rate": float64(1.1), + "mnesia_ram_tx_count_rate": float64(2.2), + "gc_num": int64(57280132), + "gc_bytes_reclaimed": int64(2533), + "gc_num_rate": float64(274.2), + "gc_bytes_reclaimed_rate": float64(16490856.3), + "io_read_avg_time": float64(983.0), + "io_read_avg_time_rate": float64(88.77), + "io_read_bytes": int64(1111), + "io_read_bytes_rate": float64(99.99), + "io_write_avg_time": float64(134.0), + "io_write_avg_time_rate": float64(4.32), + "io_write_bytes": int64(823), + "io_write_bytes_rate": float64(32.8), + "mem_connection_readers": int64(1234), + "mem_connection_writers": int64(5678), + "mem_connection_channels": int64(1133), + "mem_connection_other": int64(2840), + "mem_queue_procs": int64(2840), + "mem_queue_slave_procs": int64(0), + "mem_plugins": int64(1755976), + "mem_other_proc": int64(23056584), + "mem_metrics": int64(196536), + "mem_mgmt_db": int64(491272), + "mem_mnesia": int64(115600), + "mem_other_ets": int64(2121872), + "mem_binary": int64(418848), + "mem_msg_index": int64(42848), + "mem_code": int64(25179322), + "mem_atom": int64(1041593), + "mem_other_system": int64(14741981), + "mem_allocated_unused": int64(38208528), + "mem_reserved_unallocated": int64(0), + "mem_total": int64(83025920), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "true", + "durable": "false", + "exchange": "reply_a716f0523cd44941ad2ea6ce4a3869c3", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "sorandomsorandom", + }, + map[string]interface{}{ + "messages_publish_in": int64(3678), + "messages_publish_in_rate": float64(3.2), + "messages_publish_out": int64(3677), + "messages_publish_out_rate": float64(5.1), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_federation", + map[string]string{ + "queue": "exampleLocalQueue", + "type": "queue", + "upstream": "ExampleFederationUpstream", + "upstream_queue": "exampleUpstreamQueue", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "acks_uncommitted": int64(1), + "consumers": int64(2), + "messages_unacknowledged": int64(3), + "messages_uncommitted": int64(4), + "messages_unconfirmed": int64(5), + "messages_confirm": int64(67), + "messages_publish": int64(890), + "messages_return_unroutable": int64(1), + }, + time.Unix(0, 0), + ), + } + + // Run the test + plugin := &RabbitMQ{ URL: ts.URL, + Log: testutil.Logger{}, } + require.NoError(t, plugin.Init()) acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) - err := acc.GatherError(r.Gather) - require.NoError(t, err) - - overviewMetrics := map[string]interface{}{ - "messages": 5, - "messages_ready": 32, - "messages_unacked": 27, - "messages_acked": 5246, - "messages_delivered": 5234, - "messages_delivered_get": 3333, - "messages_published": 5258, - "channels": 44, - "connections": 44, - "consumers": 65, - "exchanges": 43, - "queues": 62, - "clustering_listeners": 2, - "amqp_listeners": 2, - "return_unroutable": 10, - "return_unroutable_rate": 3.3, - } - compareMetrics(t, overviewMetrics, acc, "rabbitmq_overview") - - queuesMetrics := map[string]interface{}{ - "consumers": 3, - "consumer_utilisation": 1.0, - "memory": 143776, - "message_bytes": 3, - "message_bytes_ready": 4, - "message_bytes_unacked": 5, - "message_bytes_ram": 6, - "message_bytes_persist": 7, - "messages": 44, - "messages_ready": 32, - "messages_unack": 44, - "messages_ack": 3457, - "messages_ack_rate": 9.9, - "messages_deliver": 22222, - 
"messages_deliver_rate": 333.4, - "messages_deliver_get": 3457, - "messages_deliver_get_rate": 0.2, - "messages_publish": 3457, - "messages_publish_rate": 11.2, - "messages_redeliver": 33, - "messages_redeliver_rate": 2.5, - "idle_since": "2015-11-01 8:22:14", - "slave_nodes": 1, - "synchronised_slave_nodes": 1, + acc.Wait(len(expected)) + require.Len(t, acc.Errors, 0) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestRabbitMQGeneratesMetricsSet2(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + switch r.URL.Path { + case "/api/overview": + jsonFilePath = "testdata/set2/overview.json" + case "/api/nodes": + jsonFilePath = "testdata/set2/nodes.json" + case "/api/queues": + jsonFilePath = "testdata/set2/queues.json" + case "/api/exchanges": + jsonFilePath = "testdata/set2/exchanges.json" + case "/api/federation-links": + jsonFilePath = "testdata/set2/federation-links.json" + case "/api/nodes/rabbit@rmqserver/memory": + jsonFilePath = "testdata/set2/memory.json" + default: + http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound) + return + } + + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + + _, err = w.Write(data) + require.NoError(t, err) + })) + defer ts.Close() + + // Define test cases + expected := []telegraf.Metric{ + testutil.MustMetric("rabbitmq_overview", + map[string]string{ + "url": ts.URL, + }, + map[string]interface{}{ + "messages": int64(30), + "messages_ready": int64(30), + "messages_unacked": int64(0), + "messages_acked": int64(3736443), + "messages_delivered": int64(3736446), + "messages_delivered_get": int64(3736446), + "messages_published": int64(770025), + "channels": int64(43), + "connections": int64(43), + "consumers": int64(37), + "exchanges": int64(8), + "queues": int64(34), + "clustering_listeners": int64(1), + "amqp_listeners": int64(2), + "return_unroutable": int64(0), + "return_unroutable_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2caf-63e5-41e3-c15a-ba8fa11434b2", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + "consumer_utilisation": float64(1.0), + "memory": int64(15840), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(180), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(180), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(180), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(180), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + "messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:14", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2cb4-aa2d-c08b-457a-62d0893523a1", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + 
"consumer_utilisation": float64(1.0), + "memory": int64(15600), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(177), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(177), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(177), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(177), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + "messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:14", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_queue", + map[string]string{ + "auto_delete": "false", + "durable": "false", + "node": "rabbit@rmqserver", + "queue": "39fd2cb5-3820-e01b-6e20-ba29d5553fc3", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "consumers": int64(1), + "consumer_utilisation": float64(1.0), + "memory": int64(15584), + "message_bytes": int64(0), + "message_bytes_ready": int64(0), + "message_bytes_unacked": int64(0), + "message_bytes_ram": int64(0), + "message_bytes_persist": int64(0), + "messages": int64(0), + "messages_ready": int64(0), + "messages_unack": int64(0), + "messages_ack": int64(175), + "messages_ack_rate": float64(0.0), + "messages_deliver": int64(175), + "messages_deliver_rate": float64(0.0), + "messages_deliver_get": int64(175), + "messages_deliver_get_rate": float64(0.0), + "messages_publish": int64(175), + "messages_publish_rate": float64(0.0), + "messages_redeliver": int64(0), + "messages_redeliver_rate": float64(0.0), + "idle_since": "2021-06-28 15:54:15", + "slave_nodes": int64(0), + "synchronised_slave_nodes": int64(0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_node", + map[string]string{ + "node": "rabbit@rmqserver", + "url": ts.URL, + }, + map[string]interface{}{ + "disk_free": int64(25086496768), + "disk_free_limit": int64(50000000), + "disk_free_alarm": int64(0), + "fd_total": int64(65536), + "fd_used": int64(78), + "mem_limit": int64(1717546188), + "mem_used": int64(387645440), + "mem_alarm": int64(0), + "proc_total": int64(1048576), + "proc_used": int64(1128), + "run_queue": int64(1), + "sockets_total": int64(58893), + "sockets_used": int64(43), + "uptime": int64(4150152129), + "running": int64(1), + "mnesia_disk_tx_count": int64(103), + "mnesia_ram_tx_count": int64(2257), + "mnesia_disk_tx_count_rate": float64(0.0), + "mnesia_ram_tx_count_rate": float64(0.0), + "gc_num": int64(329526389), + "gc_bytes_reclaimed": int64(13660012170840), + "gc_num_rate": float64(125.2), + "gc_bytes_reclaimed_rate": float64(6583379.2), + "io_read_avg_time": float64(0.0), + "io_read_avg_time_rate": float64(0.0), + "io_read_bytes": int64(1), + "io_read_bytes_rate": float64(0.0), + "io_write_avg_time": float64(0.0), + "io_write_avg_time_rate": float64(0.0), + "io_write_bytes": int64(193066), + "io_write_bytes_rate": float64(0.0), + "mem_connection_readers": int64(1246768), + "mem_connection_writers": int64(72108), + "mem_connection_channels": int64(308588), + "mem_connection_other": int64(4883596), + "mem_queue_procs": int64(780996), + "mem_queue_slave_procs": int64(0), + "mem_plugins": int64(11932828), + "mem_other_proc": int64(39203520), + "mem_metrics": int64(626932), + "mem_mgmt_db": int64(3341264), + "mem_mnesia": 
int64(396016), + "mem_other_ets": int64(3771384), + "mem_binary": int64(209324208), + "mem_msg_index": int64(32648), + "mem_code": int64(32810827), + "mem_atom": int64(1458513), + "mem_other_system": int64(14284124), + "mem_allocated_unused": int64(61026048), + "mem_reserved_unallocated": int64(0), + "mem_total": int64(385548288), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(284725), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(284572), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.direct", + "internal": "false", + "type": "direct", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.fanout", + "internal": "false", + "type": "fanout", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.headers", + "internal": "false", + "type": "headers", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.match", + "internal": "false", + "type": "headers", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.rabbitmq.trace", + "internal": "true", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "false", + "durable": "true", + "exchange": "amq.topic", + "internal": "false", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(0), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(0), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), + testutil.MustMetric("rabbitmq_exchange", + map[string]string{ + "auto_delete": "true", + "durable": "false", + "exchange": "Exchange", + 
"internal": "false", + "type": "topic", + "url": ts.URL, + "vhost": "/", + }, + map[string]interface{}{ + "messages_publish_in": int64(18006), + "messages_publish_in_rate": float64(0.0), + "messages_publish_out": int64(60798), + "messages_publish_out_rate": float64(0.0), + }, + time.Unix(0, 0), + ), } - compareMetrics(t, queuesMetrics, acc, "rabbitmq_queue") - - nodeMetrics := map[string]interface{}{ - "disk_free": 3776, - "disk_free_limit": 50000000, - "disk_free_alarm": 0, - "fd_total": 1024, - "fd_used": 63, - "mem_limit": 2503, - "mem_used": 159707080, - "mem_alarm": 1, - "proc_total": 1048576, - "proc_used": 783, - "run_queue": 0, - "sockets_total": 829, - "sockets_used": 45, - "uptime": 7464827, - "running": 1, - "mnesia_disk_tx_count": 16, - "mnesia_ram_tx_count": 296, - "mnesia_disk_tx_count_rate": 1.1, - "mnesia_ram_tx_count_rate": 2.2, - "gc_num": 57280132, - "gc_bytes_reclaimed": 2533, - "gc_num_rate": 274.2, - "gc_bytes_reclaimed_rate": 16490856.3, - "io_read_avg_time": 983, - "io_read_avg_time_rate": 88.77, - "io_read_bytes": 1111, - "io_read_bytes_rate": 99.99, - "io_write_avg_time": 134, - "io_write_avg_time_rate": 4.32, - "io_write_bytes": 823, - "io_write_bytes_rate": 32.8, - "mem_connection_readers": 1234, - "mem_connection_writers": 5678, - "mem_connection_channels": 1133, - "mem_connection_other": 2840, - "mem_queue_procs": 2840, - "mem_queue_slave_procs": 0, - "mem_plugins": 1755976, - "mem_other_proc": 23056584, - "mem_metrics": 196536, - "mem_mgmt_db": 491272, - "mem_mnesia": 115600, - "mem_other_ets": 2121872, - "mem_binary": 418848, - "mem_msg_index": 42848, - "mem_code": 25179322, - "mem_atom": 1041593, - "mem_other_system": 14741981, - "mem_allocated_unused": 38208528, - "mem_reserved_unallocated": 0, - "mem_total": 83025920, + expectedErrors := []error{ + fmt.Errorf("error response trying to get \"/api/federation-links\": \"Object Not Found\" (reason: \"Not Found\")"), } - compareMetrics(t, nodeMetrics, acc, "rabbitmq_node") - exchangeMetrics := map[string]interface{}{ - "messages_publish_in": 3678, - "messages_publish_in_rate": 3.2, - "messages_publish_out": 3677, - "messages_publish_out_rate": 5.1, - } - compareMetrics(t, exchangeMetrics, acc, "rabbitmq_exchange") - - federationLinkMetrics := map[string]interface{}{ - "acks_uncommitted": 1, - "consumers": 2, - "messages_unacknowledged": 3, - "messages_uncommitted": 4, - "messages_unconfirmed": 5, - "messages_confirm": 67, - "messages_publish": 890, - "messages_return_unroutable": 1, + // Run the test + plugin := &RabbitMQ{ + URL: ts.URL, + Log: testutil.Logger{}, } - compareMetrics(t, federationLinkMetrics, acc, "rabbitmq_federation") + require.NoError(t, plugin.Init()) + + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) + + acc.Wait(len(expected)) + require.Len(t, acc.Errors, len(expectedErrors)) + require.ElementsMatch(t, expectedErrors, acc.Errors) + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime(), testutil.SortMetrics()) } -func compareMetrics(t *testing.T, expectedMetrics map[string]interface{}, - accumulator *testutil.Accumulator, measurementKey string) { - measurement, exist := accumulator.Get(measurementKey) +func TestRabbitMQMetricFilerts(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, fmt.Sprintf("unknown path %q", r.URL.Path), http.StatusNotFound) + })) + defer ts.Close() + + metricErrors := map[string]error{ + "exchange": fmt.Errorf("getting 
\"/api/exchanges\" failed: 404 Not Found"), + "federation": fmt.Errorf("getting \"/api/federation-links\" failed: 404 Not Found"), + "node": fmt.Errorf("getting \"/api/nodes\" failed: 404 Not Found"), + "overview": fmt.Errorf("getting \"/api/overview\" failed: 404 Not Found"), + "queue": fmt.Errorf("getting \"/api/queues\" failed: 404 Not Found"), + } - assert.True(t, exist, "There is measurement %s", measurementKey) - assert.Equal(t, len(expectedMetrics), len(measurement.Fields)) + // Include test + for name, expected := range metricErrors { + plugin := &RabbitMQ{ + URL: ts.URL, + Log: testutil.Logger{}, + MetricInclude: []string{name}, + } + require.NoError(t, plugin.Init()) - for metricName, metricValue := range expectedMetrics { - actualMetricValue := measurement.Fields[metricName] + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) + require.Len(t, acc.Errors, 1) + require.ElementsMatch(t, []error{expected}, acc.Errors) + } - if accumulator.HasStringField(measurementKey, metricName) { - assert.Equal(t, metricValue, actualMetricValue, - "Metric name: %s", metricName) - } else { - assert.InDelta(t, metricValue, actualMetricValue, 0e5, - "Metric name: %s", metricName) + // Exclude test + for name := range metricErrors { + // Exclude the current metric error from the list of expected errors + var expected []error + for n, e := range metricErrors { + if n != name { + expected = append(expected, e) + } } + plugin := &RabbitMQ{ + URL: ts.URL, + Log: testutil.Logger{}, + MetricExclude: []string{name}, + } + require.NoError(t, plugin.Init()) + + acc := &testutil.Accumulator{} + require.NoError(t, plugin.Gather(acc)) + require.Len(t, acc.Errors, len(expected)) + require.ElementsMatch(t, expected, acc.Errors) } } diff --git a/plugins/inputs/rabbitmq/sample.conf b/plugins/inputs/rabbitmq/sample.conf new file mode 100644 index 0000000000000..04c4b4abd725b --- /dev/null +++ b/plugins/inputs/rabbitmq/sample.conf @@ -0,0 +1,56 @@ +# Reads metrics from RabbitMQ servers via the Management Plugin +[[inputs.rabbitmq]] + ## Management Plugin url. (default: http://localhost:15672) + # url = "http://localhost:15672" + ## Tag added to rabbitmq_overview series; deprecated: use tags + # name = "rmq-server-1" + ## Credentials + # username = "guest" + # password = "guest" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Optional request timeouts + ## + ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait + ## for a server's response headers after fully writing the request. + # header_timeout = "3s" + ## + ## client_timeout specifies a time limit for requests made by this client. + ## Includes connection time, any redirects, and reading the response body. + # client_timeout = "4s" + + ## A list of nodes to gather as the rabbitmq_node measurement. If not + ## specified, metrics for all nodes are gathered. + # nodes = ["rabbit@node1", "rabbit@node2"] + + ## A list of queues to gather as the rabbitmq_queue measurement. If not + ## specified, metrics for all queues are gathered. + ## Deprecated in 1.6: Use queue_name_include instead. + # queues = ["telegraf"] + + ## A list of exchanges to gather as the rabbitmq_exchange measurement. If not + ## specified, metrics for all exchanges are gathered. + # exchanges = ["telegraf"] + + ## Metrics to include and exclude. Globs accepted. 
+ ## Note that an empty array for both will include all metrics + ## Currently the following metrics are supported: "exchange", "federation", "node", "overview", "queue" + # metric_include = [] + # metric_exclude = [] + + ## Queues to include and exclude. Globs accepted. + ## Note that an empty array for both will include all queues + # queue_name_include = [] + # queue_name_exclude = [] + + ## Federation upstreams to include and exclude specified as an array of glob + ## pattern strings. Federation links can also be limited by the queue and + ## exchange filters. + # federation_upstream_include = [] + # federation_upstream_exclude = [] diff --git a/plugins/inputs/rabbitmq/testdata/exchanges.json b/plugins/inputs/rabbitmq/testdata/set1/exchanges.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/exchanges.json rename to plugins/inputs/rabbitmq/testdata/set1/exchanges.json diff --git a/plugins/inputs/rabbitmq/testdata/federation-links.json b/plugins/inputs/rabbitmq/testdata/set1/federation-links.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/federation-links.json rename to plugins/inputs/rabbitmq/testdata/set1/federation-links.json diff --git a/plugins/inputs/rabbitmq/testdata/memory.json b/plugins/inputs/rabbitmq/testdata/set1/memory.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/memory.json rename to plugins/inputs/rabbitmq/testdata/set1/memory.json diff --git a/plugins/inputs/rabbitmq/testdata/nodes.json b/plugins/inputs/rabbitmq/testdata/set1/nodes.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/nodes.json rename to plugins/inputs/rabbitmq/testdata/set1/nodes.json diff --git a/plugins/inputs/rabbitmq/testdata/overview.json b/plugins/inputs/rabbitmq/testdata/set1/overview.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/overview.json rename to plugins/inputs/rabbitmq/testdata/set1/overview.json diff --git a/plugins/inputs/rabbitmq/testdata/queues.json b/plugins/inputs/rabbitmq/testdata/set1/queues.json similarity index 100% rename from plugins/inputs/rabbitmq/testdata/queues.json rename to plugins/inputs/rabbitmq/testdata/set1/queues.json diff --git a/plugins/inputs/rabbitmq/testdata/set2/exchanges.json b/plugins/inputs/rabbitmq/testdata/set2/exchanges.json new file mode 100644 index 0000000000000..df47fe44bbd7f --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/exchanges.json @@ -0,0 +1,104 @@ +[ + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "message_stats": { + "publish_in": 284725, + "publish_in_details": { + "rate": 0 + }, + "publish_out": 284572, + "publish_out_details": { + "rate": 0 + } + }, + "name": "", + "type": "direct", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": true, + "durable": false, + "internal": false, + "message_stats": { + "publish_in": 18006, + "publish_in_details": { + "rate": 0 + }, + "publish_out": 60798, + "publish_out_details": { + "rate": 0 + } + }, + "name": "Exchange", + "type": "topic", + "user_who_performed_action": "user", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.direct", + "type": "direct", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.fanout", + "type": "fanout", + "user_who_performed_action": 
"rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.headers", + "type": "headers", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.match", + "type": "headers", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": true, + "name": "amq.rabbitmq.trace", + "type": "topic", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + }, + { + "arguments": {}, + "auto_delete": false, + "durable": true, + "internal": false, + "name": "amq.topic", + "type": "topic", + "user_who_performed_action": "rmq-internal", + "vhost": "/" + } +] diff --git a/plugins/inputs/rabbitmq/testdata/set2/federation-links.json b/plugins/inputs/rabbitmq/testdata/set2/federation-links.json new file mode 100644 index 0000000000000..0d121cb2f3e64 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/federation-links.json @@ -0,0 +1 @@ +{"error":"Object Not Found","reason":"Not Found"} diff --git a/plugins/inputs/rabbitmq/testdata/set2/memory.json b/plugins/inputs/rabbitmq/testdata/set2/memory.json new file mode 100644 index 0000000000000..d18558ae21e5a --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/memory.json @@ -0,0 +1,31 @@ +{ + "memory": { + "connection_readers": 1246768, + "connection_writers": 72108, + "connection_channels": 308588, + "connection_other": 4883596, + "queue_procs": 780996, + "queue_slave_procs": 0, + "quorum_queue_procs": 0, + "plugins": 11932828, + "other_proc": 39203520, + "metrics": 626932, + "mgmt_db": 3341264, + "mnesia": 396016, + "quorum_ets": 47920, + "other_ets": 3771384, + "binary": 209324208, + "msg_index": 32648, + "code": 32810827, + "atom": 1458513, + "other_system": 14284124, + "allocated_unused": 61026048, + "reserved_unallocated": 0, + "strategy": "rss", + "total": { + "erlang": 324522240, + "rss": 385548288, + "allocated": 385548288 + } + } +} diff --git a/plugins/inputs/rabbitmq/testdata/set2/nodes.json b/plugins/inputs/rabbitmq/testdata/set2/nodes.json new file mode 100644 index 0000000000000..6dcfb0d514efd --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/nodes.json @@ -0,0 +1,417 @@ +[ + { + "partitions": [], + "os_pid": "8268", + "fd_total": 65536, + "sockets_total": 58893, + "mem_limit": 1717546188, + "mem_alarm": false, + "disk_free_limit": 50000000, + "disk_free_alarm": false, + "proc_total": 1048576, + "rates_mode": "basic", + "uptime": 4150152129, + "run_queue": 1, + "processors": 4, + "exchange_types": [ + { + "name": "topic", + "description": "AMQP topic exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "fanout", + "description": "AMQP fanout exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "direct", + "description": "AMQP direct exchange, as per the AMQP specification", + "enabled": true + }, + { + "name": "headers", + "description": "AMQP headers exchange, as per the AMQP specification", + "enabled": true + } + ], + "auth_mechanisms": [ + { + "name": "PLAIN", + "description": "SASL PLAIN authentication mechanism", + "enabled": true + }, + { + "name": "AMQPLAIN", + "description": "QPid AMQPLAIN mechanism", + "enabled": true + }, + { + "name": "RABBIT-CR-DEMO", + "description": "RabbitMQ Demo challenge-response authentication mechanism", + "enabled": false + } + ], + "applications": [ + { + 
"name": "amqp_client", + "description": "RabbitMQ AMQP Client", + "version": "3.8.14" + }, + { + "name": "asn1", + "description": "The Erlang ASN1 compiler version 5.0.14", + "version": "5.0.14" + }, + { + "name": "aten", + "description": "Erlang node failure detector", + "version": "0.5.5" + }, + { + "name": "compiler", + "description": "ERTS CXC 138 10", + "version": "7.6.6" + }, + { + "name": "cowboy", + "description": "Small, fast, modern HTTP server.", + "version": "2.8.0" + }, + { + "name": "cowlib", + "description": "Support library for manipulating Web protocols.", + "version": "2.9.1" + }, + { + "name": "credentials_obfuscation", + "description": "Helper library that obfuscates sensitive values in process state", + "version": "2.4.0" + }, + { + "name": "crypto", + "description": "CRYPTO", + "version": "4.8.3" + }, + { + "name": "cuttlefish", + "description": "cuttlefish configuration abstraction", + "version": "2.6.0" + }, + { + "name": "gen_batch_server", + "description": "Generic batching server", + "version": "0.8.4" + }, + { + "name": "goldrush", + "description": "Erlang event stream processor", + "version": "0.1.9" + }, + { + "name": "inets", + "description": "INETS CXC 138 49", + "version": "7.3.2" + }, + { + "name": "jsx", + "description": "a streaming, evented json parsing toolkit", + "version": "2.11.0" + }, + { + "name": "kernel", + "description": "ERTS CXC 138 10", + "version": "7.2.1" + }, + { + "name": "lager", + "description": "Erlang logging framework", + "version": "3.8.2" + }, + { + "name": "mnesia", + "description": "MNESIA CXC 138 12", + "version": "4.18.1" + }, + { + "name": "observer_cli", + "description": "Visualize Erlang Nodes On The Command Line", + "version": "1.6.1" + }, + { + "name": "os_mon", + "description": "CPO CXC 138 46", + "version": "2.6.1" + }, + { + "name": "public_key", + "description": "Public key infrastructure", + "version": "1.9.2" + }, + { + "name": "ra", + "description": "Raft library", + "version": "1.1.8" + }, + { + "name": "rabbit", + "description": "RabbitMQ", + "version": "3.8.14" + }, + { + "name": "rabbit_common", + "description": "Modules shared by rabbitmq-server and rabbitmq-erlang-client", + "version": "3.8.14" + }, + { + "name": "rabbitmq_management", + "description": "RabbitMQ Management Console", + "version": "3.8.14" + }, + { + "name": "rabbitmq_management_agent", + "description": "RabbitMQ Management Agent", + "version": "3.8.14" + }, + { + "name": "rabbitmq_prelaunch", + "description": "RabbitMQ prelaunch setup", + "version": "3.8.14" + }, + { + "name": "rabbitmq_web_dispatch", + "description": "RabbitMQ Web Dispatcher", + "version": "3.8.14" + }, + { + "name": "ranch", + "description": "Socket acceptor pool for TCP protocols.", + "version": "1.7.1" + }, + { + "name": "recon", + "description": "Diagnostic tools for production use", + "version": "2.5.1" + }, + { + "name": "sasl", + "description": "SASL CXC 138 11", + "version": "4.0.1" + }, + { + "name": "ssl", + "description": "Erlang/OTP SSL application", + "version": "10.2.4" + }, + { + "name": "stdlib", + "description": "ERTS CXC 138 10", + "version": "3.14" + }, + { + "name": "stdout_formatter", + "description": "Tools to format paragraphs, lists and tables as plain text", + "version": "0.2.4" + }, + { + "name": "syntax_tools", + "description": "Syntax tools", + "version": "2.4" + }, + { + "name": "sysmon_handler", + "description": "Rate-limiting system_monitor event handler", + "version": "1.3.0" + }, + { + "name": "tools", + "description": "DEVTOOLS CXC 138 16", + 
"version": "3.4.3" + }, + { + "name": "xmerl", + "description": "XML parser", + "version": "1.3.26" + } + ], + "contexts": [ + { + "description": "RabbitMQ Management", + "path": "/", + "cowboy_opts": "[{sendfile,false}]", + "port": "15672" + } + ], + "log_files": [ + "c:/Users/user/AppData/Roaming/RabbitMQ/log/rabbit@rmqserver.log", + "c:/Users/user/AppData/Roaming/RabbitMQ/log/rabbit@rmqserver_upgrade.log" + ], + "db_dir": "c:/Users/user/AppData/Roaming/RabbitMQ/db/rabbit@rmqserver-mnesia", + "config_files": [ + "c:/Users/user/AppData/Roaming/RabbitMQ/advanced.config" + ], + "net_ticktime": 60, + "enabled_plugins": [ + "rabbitmq_management" + ], + "mem_calculation_strategy": "rss", + "ra_open_file_metrics": { + "ra_log_wal": 1, + "ra_log_segment_writer": 0 + }, + "name": "rabbit@rmqserver", + "type": "disc", + "running": true, + "mem_used": 387645440, + "mem_used_details": { + "rate": 419430.4 + }, + "fd_used": 78, + "fd_used_details": { + "rate": 0 + }, + "sockets_used": 43, + "sockets_used_details": { + "rate": 0 + }, + "proc_used": 1128, + "proc_used_details": { + "rate": 0 + }, + "disk_free": 25086496768, + "disk_free_details": { + "rate": -118784 + }, + "gc_num": 329526389, + "gc_num_details": { + "rate": 125.2 + }, + "gc_bytes_reclaimed": 13660012170840, + "gc_bytes_reclaimed_details": { + "rate": 6583379.2 + }, + "context_switches": 974149754, + "context_switches_details": { + "rate": 270 + }, + "io_read_count": 1, + "io_read_count_details": { + "rate": 0 + }, + "io_read_bytes": 1, + "io_read_bytes_details": { + "rate": 0 + }, + "io_read_avg_time": 0, + "io_read_avg_time_details": { + "rate": 0 + }, + "io_write_count": 45, + "io_write_count_details": { + "rate": 0 + }, + "io_write_bytes": 193066, + "io_write_bytes_details": { + "rate": 0 + }, + "io_write_avg_time": 0, + "io_write_avg_time_details": { + "rate": 0 + }, + "io_sync_count": 45, + "io_sync_count_details": { + "rate": 0 + }, + "io_sync_avg_time": 0, + "io_sync_avg_time_details": { + "rate": 0 + }, + "io_seek_count": 31, + "io_seek_count_details": { + "rate": 0 + }, + "io_seek_avg_time": 0, + "io_seek_avg_time_details": { + "rate": 0 + }, + "io_reopen_count": 0, + "io_reopen_count_details": { + "rate": 0 + }, + "mnesia_ram_tx_count": 2257, + "mnesia_ram_tx_count_details": { + "rate": 0 + }, + "mnesia_disk_tx_count": 103, + "mnesia_disk_tx_count_details": { + "rate": 0 + }, + "msg_store_read_count": 0, + "msg_store_read_count_details": { + "rate": 0 + }, + "msg_store_write_count": 1, + "msg_store_write_count_details": { + "rate": 0 + }, + "queue_index_journal_write_count": 165, + "queue_index_journal_write_count_details": { + "rate": 0 + }, + "queue_index_write_count": 0, + "queue_index_write_count_details": { + "rate": 0 + }, + "queue_index_read_count": 0, + "queue_index_read_count_details": { + "rate": 0 + }, + "io_file_handle_open_attempt_count": 882, + "io_file_handle_open_attempt_count_details": { + "rate": 0 + }, + "io_file_handle_open_attempt_avg_time": 0.05442176870748299, + "io_file_handle_open_attempt_avg_time_details": { + "rate": 0 + }, + "connection_created": 2310, + "connection_created_details": { + "rate": 0 + }, + "connection_closed": 2268, + "connection_closed_details": { + "rate": 0 + }, + "channel_created": 2310, + "channel_created_details": { + "rate": 0 + }, + "channel_closed": 2267, + "channel_closed_details": { + "rate": 0 + }, + "queue_declared": 144281, + "queue_declared_details": { + "rate": 0 + }, + "queue_created": 663, + "queue_created_details": { + "rate": 0 + }, + "queue_deleted": 629, + 
"queue_deleted_details": { + "rate": 0 + }, + "cluster_links": [], + "metrics_gc_queue_length": { + "connection_closed": 0, + "channel_closed": 0, + "consumer_deleted": 0, + "exchange_deleted": 0, + "queue_deleted": 0, + "vhost_deleted": 0, + "node_node_deleted": 0, + "channel_consumer_deleted": 0 + } + } +] diff --git a/plugins/inputs/rabbitmq/testdata/set2/overview.json b/plugins/inputs/rabbitmq/testdata/set2/overview.json new file mode 100644 index 0000000000000..51977d61cbcae --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/overview.json @@ -0,0 +1 @@ +{"management_version":"3.8.14","rates_mode":"basic","sample_retention_policies":{"global":[600,3600,28800,86400],"basic":[600,3600],"detailed":[600]},"exchange_types":[{"name":"direct","description":"AMQP direct exchange, as per the AMQP specification","enabled":true},{"name":"fanout","description":"AMQP fanout exchange, as per the AMQP specification","enabled":true},{"name":"headers","description":"AMQP headers exchange, as per the AMQP specification","enabled":true},{"name":"topic","description":"AMQP topic exchange, as per the AMQP specification","enabled":true}],"product_version":"3.8.14","product_name":"RabbitMQ","rabbitmq_version":"3.8.14","cluster_name":"rabbit@rmqserver","erlang_version":"23.2.7","erlang_full_version":"Erlang/OTP 23 [erts-11.1.8] [source] [64-bit] [smp:4:4] [ds:4:4:10] [async-threads:1]","disable_stats":false,"enable_queue_totals":false,"message_stats":{"ack":3736443,"ack_details":{"rate":0.0},"confirm":0,"confirm_details":{"rate":0.0},"deliver":3736446,"deliver_details":{"rate":0.0},"deliver_get":3736446,"deliver_get_details":{"rate":0.0},"deliver_no_ack":0,"deliver_no_ack_details":{"rate":0.0},"disk_reads":0,"disk_reads_details":{"rate":0.0},"disk_writes":55,"disk_writes_details":{"rate":0.0},"drop_unroutable":0,"drop_unroutable_details":{"rate":0.0},"get":0,"get_details":{"rate":0.0},"get_empty":0,"get_empty_details":{"rate":0.0},"get_no_ack":0,"get_no_ack_details":{"rate":0.0},"publish":770025,"publish_details":{"rate":0.0},"redeliver":1,"redeliver_details":{"rate":0.0},"return_unroutable":0,"return_unroutable_details":{"rate":0.0}},"churn_rates":{"channel_closed":2267,"channel_closed_details":{"rate":0.0},"channel_created":2310,"channel_created_details":{"rate":0.0},"connection_closed":2268,"connection_closed_details":{"rate":0.0},"connection_created":2310,"connection_created_details":{"rate":0.0},"queue_created":663,"queue_created_details":{"rate":0.0},"queue_declared":144281,"queue_declared_details":{"rate":0.0},"queue_deleted":629,"queue_deleted_details":{"rate":0.0}},"queue_totals":{"messages":30,"messages_details":{"rate":0.0},"messages_ready":30,"messages_ready_details":{"rate":0.0},"messages_unacknowledged":0,"messages_unacknowledged_details":{"rate":0.0}},"object_totals":{"channels":43,"connections":43,"consumers":37,"exchanges":8,"queues":34},"statistics_db_event_queue":0,"node":"rabbit@rmqserver","listeners":[{"node":"rabbit@rmqserver","protocol":"amqp","ip_address":"0.0.0.0","port":5672,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false}},{"node":"rabbit@rmqserver","protocol":"amqp","ip_address":"::","port":5672,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false}},{"node":"rabbit@rmqserver","protocol":"amqp/ssl","ip_address":"0.0.0.0","port":5671,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false,"versions":["tlsv1.3","tlsv1.2","tlsv1.1","tlsv1"],"cacertfile":"C:\\ProgramData\\Chain.pem","cer
tfile":"C:\\ProgramData\\server.crt","keyfile":"C:\\ProgramData\\server.key","verify":"verify_peer","depth":3,"fail_if_no_peer_cert":false}},{"node":"rabbit@rmqserver","protocol":"amqp/ssl","ip_address":"::","port":5671,"socket_opts":{"backlog":128,"nodelay":true,"linger":[true,0],"exit_on_close":false,"versions":["tlsv1.3","tlsv1.2","tlsv1.1","tlsv1"],"cacertfile":"C:\\ProgramData\\Chain.pem","certfile":"C:\\ProgramData\\server.crt","keyfile":"C:\\ProgramData\\server.key","verify":"verify_peer","depth":3,"fail_if_no_peer_cert":false}},{"node":"rabbit@rmqserver","protocol":"clustering","ip_address":"::","port":25672,"socket_opts":[]},{"node":"rabbit@rmqserver","protocol":"http","ip_address":"0.0.0.0","port":15672,"socket_opts":{"cowboy_opts":{"sendfile":false},"port":15672}},{"node":"rabbit@rmqserver","protocol":"http","ip_address":"::","port":15672,"socket_opts":{"cowboy_opts":{"sendfile":false},"port":15672}}],"contexts":[{"ssl_opts":[],"node":"rabbit@rmqserver","description":"RabbitMQ Management","path":"/","cowboy_opts":"[{sendfile,false}]","port":"15672"}]} diff --git a/plugins/inputs/rabbitmq/testdata/set2/queues.json b/plugins/inputs/rabbitmq/testdata/set2/queues.json new file mode 100644 index 0000000000000..6d8c2a831158a --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/set2/queues.json @@ -0,0 +1,356 @@ +[ + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 180, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16174 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:14", + "memory": 15840, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 180, + "ack_details": { + "rate": 0 + }, + "deliver": 180, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 180, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, + "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 180, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2caf-63e5-41e3-c15a-ba8fa11434b2", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11766294, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": 
"running", + "type": "classic", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 177, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16205 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:14", + "memory": 15600, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 177, + "ack_details": { + "rate": 0 + }, + "deliver": 177, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 177, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, + "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 177, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2cb4-aa2d-c08b-457a-62d0893523a1", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11706656, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + }, + { + "arguments": { + "x-expires": 300000 + }, + "auto_delete": false, + "backing_queue_status": { + "avg_ack_egress_rate": 0, + "avg_ack_ingress_rate": 0, + "avg_egress_rate": 0, + "avg_ingress_rate": 0, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 175, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": "infinity" + }, + "consumer_capacity": 1, + "consumer_utilisation": 1, + "consumers": 1, + "durable": false, + "effective_policy_definition": {}, + "exclusive": false, + "exclusive_consumer_tag": null, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 16183 + }, + "head_message_timestamp": null, + "idle_since": "2021-06-28 15:54:15", + "memory": 15584, + "message_bytes": 0, + "message_bytes_paged_out": 0, + "message_bytes_persistent": 0, + "message_bytes_ram": 0, + "message_bytes_ready": 0, + "message_bytes_unacknowledged": 0, + "message_stats": { + "ack": 175, + "ack_details": { + "rate": 0 + }, + "deliver": 175, + "deliver_details": { + "rate": 0 + }, + "deliver_get": 175, + "deliver_get_details": { + "rate": 0 + }, + "deliver_no_ack": 0, 
+ "deliver_no_ack_details": { + "rate": 0 + }, + "get": 0, + "get_details": { + "rate": 0 + }, + "get_empty": 0, + "get_empty_details": { + "rate": 0 + }, + "get_no_ack": 0, + "get_no_ack_details": { + "rate": 0 + }, + "publish": 175, + "publish_details": { + "rate": 0 + }, + "redeliver": 0, + "redeliver_details": { + "rate": 0 + } + }, + "messages": 0, + "messages_details": { + "rate": 0 + }, + "messages_paged_out": 0, + "messages_persistent": 0, + "messages_ram": 0, + "messages_ready": 0, + "messages_ready_details": { + "rate": 0 + }, + "messages_ready_ram": 0, + "messages_unacknowledged": 0, + "messages_unacknowledged_details": { + "rate": 0 + }, + "messages_unacknowledged_ram": 0, + "name": "39fd2cb5-3820-e01b-6e20-ba29d5553fc3", + "node": "rabbit@rmqserver", + "operator_policy": null, + "policy": null, + "recoverable_slaves": null, + "reductions": 11649471, + "reductions_details": { + "rate": 0 + }, + "single_active_consumer_tag": null, + "state": "running", + "type": "classic", + "vhost": "/" + } +] diff --git a/plugins/inputs/raindrops/README.md b/plugins/inputs/raindrops/README.md index cdc13eec2d9a6..18cc2afad841f 100644 --- a/plugins/inputs/raindrops/README.md +++ b/plugins/inputs/raindrops/README.md @@ -1,41 +1,43 @@ # Raindrops Input Plugin -The [raindrops](http://raindrops.bogomips.org/) plugin reads from -specified raindops [middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html) URI and adds stats to InfluxDB. +The [raindrops](http://raindrops.bogomips.org/) plugin reads from specified +raindops [middleware](http://raindrops.bogomips.org/Raindrops/Middleware.html) +URI and adds stats to InfluxDB. -### Configuration: +## Configuration -```toml -# Read raindrops stats +```toml @sample.conf +# Read raindrops stats (raindrops - real-time stats for preforking Rack servers) [[inputs.raindrops]] + ## An array of raindrops middleware URI to gather stats. urls = ["http://localhost:8080/_raindrops"] ``` -### Measurements & Fields: +## Metrics - raindrops - - calling (integer, count) - - writing (integer, count) + - calling (integer, count) + - writing (integer, count) - raindrops_listen - - active (integer, bytes) - - queued (integer, bytes) + - active (integer, bytes) + - queued (integer, bytes) -### Tags: +### Tags - Raindops calling/writing of all the workers: - - server - - port + - server + - port - raindrops_listen (ip:port): - - ip - - port + - ip + - port - raindrops_listen (Unix Socket): - - socket + - socket -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter raindrops --test * Plugin: raindrops, Collection 1 > raindrops,port=8080,server=localhost calling=0i,writing=0i 1455479896806238204 diff --git a/plugins/inputs/raindrops/raindrops.go b/plugins/inputs/raindrops/raindrops.go index bcbf773689f33..6f6a95eb212fe 100644 --- a/plugins/inputs/raindrops/raindrops.go +++ b/plugins/inputs/raindrops/raindrops.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package raindrops import ( "bufio" + _ "embed" "fmt" "net" "net/http" @@ -15,38 +17,33 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type Raindrops struct { - Urls []string - http_client *http.Client + Urls []string + httpClient *http.Client } -var sampleConfig = ` - ## An array of raindrops middleware URI to gather stats. 
- urls = ["http://localhost:8080/_raindrops"] -` - -func (r *Raindrops) SampleConfig() string { +func (*Raindrops) SampleConfig() string { return sampleConfig } -func (r *Raindrops) Description() string { - return "Read raindrops stats (raindrops - real-time stats for preforking Rack servers)" -} - func (r *Raindrops) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, u := range r.Urls { addr, err := url.Parse(u) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + acc.AddError(fmt.Errorf("unable to parse address '%s': %s", u, err)) continue } wg.Add(1) go func(addr *url.URL) { defer wg.Done() - acc.AddError(r.gatherUrl(addr, acc)) + acc.AddError(r.gatherURL(addr, acc)) }(addr) } @@ -55,8 +52,8 @@ func (r *Raindrops) Gather(acc telegraf.Accumulator) error { return nil } -func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { - resp, err := r.http_client.Get(addr.String()) +func (r *Raindrops) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { + resp, err := r.httpClient.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) } @@ -101,10 +98,10 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { acc.AddFields("raindrops", fields, tags) iterate := true - var queued_line_str string - var active_line_str string - var active_err error - var queued_err error + var queuedLineStr string + var activeLineStr string + var activeErr error + var queuedErr error for iterate { // Listen @@ -114,48 +111,46 @@ func (r *Raindrops) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { "active": 0, "queued": 0, } - active_line_str, active_err = buf.ReadString('\n') - if active_err != nil { - iterate = false + activeLineStr, activeErr = buf.ReadString('\n') + if activeErr != nil { break } - if strings.Compare(active_line_str, "\n") == 0 { + if strings.Compare(activeLineStr, "\n") == 0 { break } - queued_line_str, queued_err = buf.ReadString('\n') - if queued_err != nil { + queuedLineStr, queuedErr = buf.ReadString('\n') + if queuedErr != nil { iterate = false } - active_line := strings.Split(active_line_str, " ") - listen_name := active_line[0] + activeLine := strings.Split(activeLineStr, " ") + listenName := activeLine[0] - active, err := strconv.ParseUint(strings.TrimSpace(active_line[2]), 10, 64) + active, err := strconv.ParseUint(strings.TrimSpace(activeLine[2]), 10, 64) if err != nil { active = 0 } lis["active"] = active - queued_line := strings.Split(queued_line_str, " ") - queued, err := strconv.ParseUint(strings.TrimSpace(queued_line[2]), 10, 64) + queuedLine := strings.Split(queuedLineStr, " ") + queued, err := strconv.ParseUint(strings.TrimSpace(queuedLine[2]), 10, 64) if err != nil { queued = 0 } lis["queued"] = queued - if strings.Contains(listen_name, ":") { - listener := strings.Split(listen_name, ":") + if strings.Contains(listenName, ":") { + listener := strings.Split(listenName, ":") tags = map[string]string{ "ip": listener[0], "port": listener[1], } - } else { tags = map[string]string{ - "socket": listen_name, + "socket": listenName, } } acc.AddFields("raindrops_listen", lis, tags) } - return nil + return nil //nolint:nilerr // nil returned on purpose } // Get tag(s) for the raindrops calling/writing plugin @@ -177,11 +172,11 @@ func (r *Raindrops) getTags(addr *url.URL) map[string]string { func init() { inputs.Add("raindrops", func() telegraf.Input { - return &Raindrops{http_client: &http.Client{ + return 
&Raindrops{httpClient: &http.Client{ Transport: &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + ResponseHeaderTimeout: 3 * time.Second, }, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, }} }) } diff --git a/plugins/inputs/raindrops/raindrops_test.go b/plugins/inputs/raindrops/raindrops_test.go index b0b601cec49cc..6da64dbb4d207 100644 --- a/plugins/inputs/raindrops/raindrops_test.go +++ b/plugins/inputs/raindrops/raindrops_test.go @@ -7,11 +7,11 @@ import ( "net/http/httptest" "net/url" "testing" + "time" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "time" + + "github.com/influxdata/telegraf/testutil" ) const sampleResponse = ` @@ -41,7 +41,7 @@ func TestRaindropsTags(t *testing.T) { for _, url1 := range urls { addr, _ = url.Parse(url1) tagMap := r.getTags(addr) - assert.Contains(t, tagMap["server"], "localhost") + require.Contains(t, tagMap["server"], "localhost") } } @@ -49,20 +49,18 @@ func TestRaindropsGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var rsp string - if r.URL.Path == "/_raindrops" { - rsp = sampleResponse - } else { - panic("Cannot handle request") - } + require.Equal(t, r.URL.Path, "/_raindrops", "Cannot handle request") + rsp = sampleResponse - fmt.Fprintln(w, rsp) + _, err := fmt.Fprintln(w, rsp) + require.NoError(t, err) })) defer ts.Close() n := &Raindrops{ Urls: []string{fmt.Sprintf("%s/_raindrops", ts.URL)}, - http_client: &http.Client{Transport: &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), + httpClient: &http.Client{Transport: &http.Transport{ + ResponseHeaderTimeout: 3 * time.Second, }}, } diff --git a/plugins/inputs/raindrops/sample.conf b/plugins/inputs/raindrops/sample.conf new file mode 100644 index 0000000000000..ff889310b3547 --- /dev/null +++ b/plugins/inputs/raindrops/sample.conf @@ -0,0 +1,4 @@ +# Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +[[inputs.raindrops]] + ## An array of raindrops middleware URI to gather stats. + urls = ["http://localhost:8080/_raindrops"] diff --git a/plugins/inputs/ras/README.md b/plugins/inputs/ras/README.md index 9c1cda75bff10..840a3388d2f64 100644 --- a/plugins/inputs/ras/README.md +++ b/plugins/inputs/ras/README.md @@ -1,21 +1,25 @@ # RAS Daemon Input Plugin -This plugin is only available on Linux (only for `386`, `amd64`, `arm` and `arm64` architectures). +This plugin is only available on Linux (only for `386`, `amd64`, `arm` and +`arm64` architectures). -The `RAS` plugin gathers and counts errors provided by [RASDaemon](https://github.com/mchehab/rasdaemon). +The `RAS` plugin gathers and counts errors provided by +[RASDaemon](https://github.com/mchehab/rasdaemon). -### Configuration +## Configuration -```toml +```toml @sample.conf +# RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). [[inputs.ras]] ## Optional path to RASDaemon sqlite3 database. ## Default: /var/lib/rasdaemon/ras-mc_event.db # db_path = "" ``` -In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case of problems with SQLite3 database please verify this is still a default option. +In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case +of problems with SQLite3 database please verify this is still a default option. 
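+
+To confirm the database exists and is being populated before pointing the
+plugin at it, you can query it directly with the `sqlite3` CLI. This is only
+a sketch: it assumes the default database location (adjust the path if you
+set `db_path`) and uses the `mce_record` table this plugin reads Machine
+Check Error events from.
+
+```shell
+# Assumes the default RASDaemon database path; a non-zero count means
+# MCE events have been recorded and the plugin has data to gather.
+sqlite3 /var/lib/rasdaemon/ras-mc_event.db "SELECT count(*) FROM mce_record;"
+```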
-### Metrics +## Metrics - ras - tags: @@ -39,7 +43,9 @@ In addition `RASDaemon` runs, by default, with `--enable-sqlite3` flag. In case - microcode_rom_parity_errors - unclassified_mce_errors -Please note that `processor_base_errors` is aggregate counter measuring the following MCE events: +Please note that `processor_base_errors` is aggregate counter measuring the +following MCE events: + - internal_timer_errors - smm_handler_code_access_violation_errors - internal_parity_errors @@ -48,13 +54,14 @@ Please note that `processor_base_errors` is aggregate counter measuring the foll - microcode_rom_parity_errors - unclassified_mce_errors -### Permissions +## Permissions -This plugin requires access to SQLite3 database from `RASDaemon`. Please make sure that user has required permissions to this database. +This plugin requires access to SQLite3 database from `RASDaemon`. Please make +sure that user has required permissions to this database. -### Example Output +## Example Output -``` +```shell ras,host=ubuntu,socket_id=0 external_mce_base_errors=1i,frc_errors=1i,instruction_tlb_errors=5i,internal_parity_errors=1i,internal_timer_errors=1i,l0_and_l1_cache_errors=7i,memory_read_corrected_errors=25i,memory_read_uncorrectable_errors=0i,memory_write_corrected_errors=5i,memory_write_uncorrectable_errors=0i,microcode_rom_parity_errors=1i,processor_base_errors=7i,processor_bus_errors=1i,smm_handler_code_access_violation_errors=1i,unclassified_mce_base_errors=1i 1598867393000000000 ras,host=ubuntu level_2_cache_errors=0i,upi_errors=0i 1598867393000000000 ``` diff --git a/plugins/inputs/ras/ras.go b/plugins/inputs/ras/ras.go index a8599c4a78d0f..b8812fbc533a1 100644 --- a/plugins/inputs/ras/ras.go +++ b/plugins/inputs/ras/ras.go @@ -1,3 +1,5 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux && (386 || amd64 || arm || arm64) // +build linux // +build 386 amd64 arm arm64 @@ -5,28 +7,34 @@ package ras import ( "database/sql" + _ "embed" "fmt" "os" "strconv" "strings" "time" - _ "modernc.org/sqlite" //to register SQLite driver + // Required for SQL framework driver + _ "modernc.org/sqlite" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Ras plugin gathers and counts errors provided by RASDaemon type Ras struct { DBPath string `toml:"db_path"` Log telegraf.Logger `toml:"-"` - db *sql.DB `toml:"-"` - latestTimestamp time.Time `toml:"-"` - cpuSocketCounters map[int]metricCounters `toml:"-"` - serverCounters metricCounters `toml:"-"` + db *sql.DB + latestTimestamp time.Time + cpuSocketCounters map[int]metricCounters + serverCounters metricCounters } type machineCheckError struct { @@ -41,7 +49,7 @@ type metricCounters map[string]int64 const ( mceQuery = ` - SELECT + SELECT id, timestamp, error_msg, mcistatus_msg, socketid FROM mce_record WHERE timestamp > ? @@ -67,18 +75,8 @@ const ( unclassifiedMCEBase = "unclassified_mce_errors" ) -// SampleConfig returns sample configuration for this plugin. -func (r *Ras) SampleConfig() string { - return ` - ## Optional path to RASDaemon sqlite3 database. - ## Default: /var/lib/rasdaemon/ras-mc_event.db - # db_path = "" -` -} - -// Description returns the plugin description. -func (r *Ras) Description() string { - return "RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required)." 
+func (*Ras) SampleConfig() string { + return sampleConfig } // Start initializes connection to DB, metrics are gathered in Gather diff --git a/plugins/inputs/ras/ras_notlinux.go b/plugins/inputs/ras/ras_notlinux.go index 74f0aaf9fc59f..b0795fd794f6f 100644 --- a/plugins/inputs/ras/ras_notlinux.go +++ b/plugins/inputs/ras/ras_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux || (linux && !386 && !amd64 && !arm && !arm64) // +build !linux linux,!386,!amd64,!arm,!arm64 package ras diff --git a/plugins/inputs/ras/ras_test.go b/plugins/inputs/ras/ras_test.go index a90258bb4423b..d4e87dfe5f12c 100644 --- a/plugins/inputs/ras/ras_test.go +++ b/plugins/inputs/ras/ras_test.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64 || arm || arm64) // +build linux // +build 386 amd64 arm arm64 @@ -7,9 +8,9 @@ import ( "fmt" "testing" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" ) func TestUpdateCounters(t *testing.T) { @@ -18,20 +19,20 @@ func TestUpdateCounters(t *testing.T) { ras.updateCounters(&mce) } - assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain counters only for single socket") + require.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain counters only for single socket") for metric, value := range ras.cpuSocketCounters[0] { if metric == processorBase { // processor_base_errors is sum of other seven errors: internal_timer_errors, smm_handler_code_access_violation_errors, // internal_parity_errors, frc_errors, external_mce_errors, microcode_rom_parity_errors and unclassified_mce_errors - assert.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase)) + require.Equal(t, int64(7), value, fmt.Sprintf("%s should have value of 7", processorBase)) } else { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) } } for metric, value := range ras.serverCounters { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", metric)) } } @@ -60,9 +61,9 @@ func TestUpdateLatestTimestamp(t *testing.T) { }...) 
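The assert-to-require conversions in these tests are not cosmetic: testify's `require` aborts the test on the first failed check, while `assert` records the failure and keeps executing, which can cascade into confusing follow-on failures or nil dereferences. A small self-contained sketch of the difference, under a hypothetical test:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// With require, a failed check stops the test immediately, so the later
// statements never run against bad state; assert would log the failure
// and continue into the next check regardless.
func TestRequireStopsEarly(t *testing.T) {
	counters := map[string]int64{"internal_timer_errors": 1}

	v, ok := counters["internal_timer_errors"]
	require.True(t, ok, "counter should exist")       // test ends here if missing
	require.Equal(t, int64(1), v, "unexpected count") // only reached when the key exists
}
```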
for _, mce := range testData { err := ras.updateLatestTimestamp(mce.Timestamp) - assert.NoError(t, err) + require.NoError(t, err) } - assert.Equal(t, ts, ras.latestTimestamp.Format(dateLayout)) + require.Equal(t, ts, ras.latestTimestamp.Format(dateLayout)) } func TestMultipleSockets(t *testing.T) { @@ -98,14 +99,14 @@ func TestMultipleSockets(t *testing.T) { for _, mce := range testData { ras.updateCounters(&mce) } - assert.Equal(t, 4, len(ras.cpuSocketCounters), "Should contain counters for four sockets") + require.Equal(t, 4, len(ras.cpuSocketCounters), "Should contain counters for four sockets") for _, metricData := range ras.cpuSocketCounters { for metric, value := range metricData { if metric == levelTwoCache { - assert.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache)) + require.Equal(t, int64(1), value, fmt.Sprintf("%s should have value of 1", levelTwoCache)) } else { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } } } @@ -116,21 +117,21 @@ func TestMissingDatabase(t *testing.T) { ras := newRas() ras.DBPath = "/nonexistent/ras.db" err := ras.Start(&acc) - assert.Error(t, err) + require.Error(t, err) } func TestEmptyDatabase(t *testing.T) { ras := newRas() - assert.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain default counters for one socket") - assert.Equal(t, 2, len(ras.serverCounters), "Should contain default counters for server") + require.Equal(t, 1, len(ras.cpuSocketCounters), "Should contain default counters for one socket") + require.Equal(t, 2, len(ras.serverCounters), "Should contain default counters for server") for metric, value := range ras.cpuSocketCounters[0] { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } for metric, value := range ras.serverCounters { - assert.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) + require.Equal(t, int64(0), value, fmt.Sprintf("%s should have value of 0", metric)) } } diff --git a/plugins/inputs/ras/sample.conf b/plugins/inputs/ras/sample.conf new file mode 100644 index 0000000000000..7e0c649d24c1a --- /dev/null +++ b/plugins/inputs/ras/sample.conf @@ -0,0 +1,5 @@ +# RAS plugin exposes counter metrics for Machine Check Errors provided by RASDaemon (sqlite3 output is required). +[[inputs.ras]] + ## Optional path to RASDaemon sqlite3 database. + ## Default: /var/lib/rasdaemon/ras-mc_event.db + # db_path = "" diff --git a/plugins/inputs/ravendb/README.md b/plugins/inputs/ravendb/README.md new file mode 100644 index 0000000000000..60bb3b956bb43 --- /dev/null +++ b/plugins/inputs/ravendb/README.md @@ -0,0 +1,221 @@ +# RavenDB Input Plugin + +Reads metrics from RavenDB servers via monitoring endpoints APIs. + +Requires RavenDB Server 5.2+. + +## Configuration + +```toml @sample.conf +# Reads metrics from RavenDB servers via the Monitoring Endpoints +[[inputs.ravendb]] + ## Node URL and port that RavenDB is listening on. 
By default, + ## attempts to connect securely over HTTPS, however, if the user + ## is running a local unsecure development cluster users can use + ## HTTP via a URL like "http://localhost:8080" + url = "https://localhost:4433" + + ## RavenDB X509 client certificate setup + # tls_cert = "/etc/telegraf/raven.crt" + # tls_key = "/etc/telegraf/raven.key" + + ## Optional request timeout + ## + ## Timeout, specifies the amount of time to wait + ## for a server's response headers after fully writing the request and + ## time limit for requests made by this client + # timeout = "5s" + + ## List of statistics which are collected + # At least one is required + # Allowed values: server, databases, indexes, collections + # + # stats_include = ["server", "databases", "indexes", "collections"] + + ## List of db where database stats are collected + ## If empty, all db are concerned + # db_stats_dbs = [] + + ## List of db where index status are collected + ## If empty, all indexes from all db are concerned + # index_stats_dbs = [] + + ## List of db where collection status are collected + ## If empty, all collections from all db are concerned + # collection_stats_dbs = [] +``` + +**Note:** The client certificate used should have `Operator` permissions on the +cluster. + +## Metrics + +- ravendb_server + - tags: + - url + - node_tag + - cluster_id + - public_server_url (optional) + - fields: + - backup_current_number_of_running_backups + - backup_max_number_of_concurrent_backups + - certificate_server_certificate_expiration_left_in_sec (optional) + - certificate_well_known_admin_certificates (optional, separated by ';') + - cluster_current_term + - cluster_index + - cluster_node_state + - 0 -> Passive + - 1 -> Candidate + - 2 -> Follower + - 3 -> LeaderElect + - 4 -> Leader + - config_public_tcp_server_urls (optional, separated by ';') + - config_server_urls + - config_tcp_server_urls (optional, separated by ';') + - cpu_assigned_processor_count + - cpu_machine_usage + - cpu_machine_io_wait (optional) + - cpu_process_usage + - cpu_processor_count + - cpu_thread_pool_available_worker_threads + - cpu_thread_pool_available_completion_port_threads + - databases_loaded_count + - databases_total_count + - disk_remaining_storage_space_percentage + - disk_system_store_used_data_file_size_in_mb + - disk_system_store_total_data_file_size_in_mb + - disk_total_free_space_in_mb + - license_expiration_left_in_sec (optional) + - license_max_cores + - license_type + - license_utilized_cpu_cores + - memory_allocated_in_mb + - memory_installed_in_mb + - memory_low_memory_severity + - 0 -> None + - 1 -> Low + - 2 -> Extremely Low + - memory_physical_in_mb + - memory_total_dirty_in_mb + - memory_total_swap_size_in_mb + - memory_total_swap_usage_in_mb + - memory_working_set_swap_usage_in_mb + - network_concurrent_requests_count + - network_last_authorized_non_cluster_admin_request_time_in_sec (optional) + - network_last_request_time_in_sec (optional) + - network_requests_per_sec + - network_tcp_active_connections + - network_total_requests + - server_full_version + - server_process_id + - server_version + - uptime_in_sec + +- ravendb_databases + - tags: + - url + - database_name + - database_id + - node_tag + - public_server_url (optional) + - fields: + - counts_alerts + - counts_attachments + - counts_documents + - counts_performance_hints + - counts_rehabs + - counts_replication_factor + - counts_revisions + - counts_unique_attachments + - statistics_doc_puts_per_sec + - statistics_map_index_indexes_per_sec + - 
statistics_map_reduce_index_mapped_per_sec + - statistics_map_reduce_index_reduced_per_sec + - statistics_request_average_duration_in_ms + - statistics_requests_count + - statistics_requests_per_sec + - indexes_auto_count + - indexes_count + - indexes_disabled_count + - indexes_errors_count + - indexes_errored_count + - indexes_idle_count + - indexes_stale_count + - indexes_static_count + - storage_documents_allocated_data_file_in_mb + - storage_documents_used_data_file_in_mb + - storage_indexes_allocated_data_file_in_mb + - storage_indexes_used_data_file_in_mb + - storage_total_allocated_storage_file_in_mb + - storage_total_free_space_in_mb + - time_since_last_backup_in_sec (optional) + - uptime_in_sec + +- ravendb_indexes + - tags: + - database_name + - index_name + - node_tag + - public_server_url (optional) + - url + - fields + - errors + - is_invalid + - lock_mode + - Unlock + - LockedIgnore + - LockedError + - mapped_per_sec + - priority + - Low + - Normal + - High + - reduced_per_sec + - state + - Normal + - Disabled + - Idle + - Error + - status + - Running + - Paused + - Disabled + - time_since_last_indexing_in_sec (optional) + - time_since_last_query_in_sec (optional) + - type + - None + - AutoMap + - AutoMapReduce + - Map + - MapReduce + - Faulty + - JavaScriptMap + - JavaScriptMapReduce + +- ravendb_collections + - tags: + - collection_name + - database_name + - node_tag + - public_server_url (optional) + - url + - fields + - documents_count + - documents_size_in_bytes + - revisions_size_in_bytes + - tombstones_size_in_bytes + - total_size_in_bytes + +## Example Output + +```text +> ravendb_server,cluster_id=07aecc42-9194-4181-999c-1c42450692c9,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 backup_current_number_of_running_backups=0i,backup_max_number_of_concurrent_backups=4i,certificate_server_certificate_expiration_left_in_sec=-1,cluster_current_term=2i,cluster_index=10i,cluster_node_state=4i,config_server_urls="http://127.0.0.1:8080",cpu_assigned_processor_count=8i,cpu_machine_usage=19.09944089456869,cpu_process_usage=0.16977205323024872,cpu_processor_count=8i,cpu_thread_pool_available_completion_port_threads=1000i,cpu_thread_pool_available_worker_threads=32763i,databases_loaded_count=1i,databases_total_count=1i,disk_remaining_storage_space_percentage=18i,disk_system_store_total_data_file_size_in_mb=35184372088832i,disk_system_store_used_data_file_size_in_mb=31379031064576i,disk_total_free_space_in_mb=42931i,license_expiration_left_in_sec=24079222.8772186,license_max_cores=256i,license_type="Enterprise",license_utilized_cpu_cores=8i,memory_allocated_in_mb=205i,memory_installed_in_mb=16384i,memory_low_memory_severity=0i,memory_physical_in_mb=16250i,memory_total_dirty_in_mb=0i,memory_total_swap_size_in_mb=0i,memory_total_swap_usage_in_mb=0i,memory_working_set_swap_usage_in_mb=0i,network_concurrent_requests_count=1i,network_last_request_time_in_sec=0.0058717,network_requests_per_sec=0.09916543455308825,network_tcp_active_connections=128i,network_total_requests=10i,server_full_version="5.2.0-custom-52",server_process_id=31044i,server_version="5.2",uptime_in_sec=56i 1613027977000000000 +> ravendb_databases,database_id=ced0edba-8f80-48b8-8e81-c3d2c6748ec3,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 
counts_alerts=0i,counts_attachments=17i,counts_documents=1059i,counts_performance_hints=0i,counts_rehabs=0i,counts_replication_factor=1i,counts_revisions=5475i,counts_unique_attachments=17i,indexes_auto_count=0i,indexes_count=7i,indexes_disabled_count=0i,indexes_errored_count=0i,indexes_errors_count=0i,indexes_idle_count=0i,indexes_stale_count=0i,indexes_static_count=7i,statistics_doc_puts_per_sec=0,statistics_map_index_indexes_per_sec=0,statistics_map_reduce_index_mapped_per_sec=0,statistics_map_reduce_index_reduced_per_sec=0,statistics_request_average_duration_in_ms=0,statistics_requests_count=0i,statistics_requests_per_sec=0,storage_documents_allocated_data_file_in_mb=140737488355328i,storage_documents_used_data_file_in_mb=74741020884992i,storage_indexes_allocated_data_file_in_mb=175921860444160i,storage_indexes_used_data_file_in_mb=120722940755968i,storage_total_allocated_storage_file_in_mb=325455441821696i,storage_total_free_space_in_mb=42931i,uptime_in_sec=54 1613027977000000000 +> ravendb_indexes,database_name=db1,host=DESKTOP-2OISR6D,index_name=Orders/Totals,node_tag=A,url=http://localhost:8080 errors=0i,is_invalid=false,lock_mode="Unlock",mapped_per_sec=0,priority="Normal",reduced_per_sec=0,state="Normal",status="Running",time_since_last_indexing_in_sec=45.4256655,time_since_last_query_in_sec=45.4304202,type="Map" 1613027977000000000 +> ravendb_collections,collection_name=@hilo,database_name=db1,host=DESKTOP-2OISR6D,node_tag=A,url=http://localhost:8080 documents_count=8i,documents_size_in_bytes=122880i,revisions_size_in_bytes=0i,tombstones_size_in_bytes=122880i,total_size_in_bytes=245760i 1613027977000000000 +``` + +## Contributors + +- Marcin Lewandowski () +- Casey Barton () diff --git a/plugins/inputs/ravendb/ravendb.go b/plugins/inputs/ravendb/ravendb.go new file mode 100644 index 0000000000000..e96c336d4b4f0 --- /dev/null +++ b/plugins/inputs/ravendb/ravendb.go @@ -0,0 +1,393 @@ +//go:generate ../../../tools/readme_config_includer/generator +package ravendb + +import ( + _ "embed" + "encoding/json" + "fmt" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +// defaultURL will set a default value that corresponds to the default value +// used by RavenDB +const defaultURL = "http://localhost:8080" + +const defaultTimeout = 5 + +// RavenDB defines the configuration necessary for gathering metrics, +// see the sample config for further details +type RavenDB struct { + URL string `toml:"url"` + Name string `toml:"name"` + + Timeout config.Duration `toml:"timeout"` + + StatsInclude []string `toml:"stats_include"` + DbStatsDbs []string `toml:"db_stats_dbs"` + IndexStatsDbs []string `toml:"index_stats_dbs"` + CollectionStatsDbs []string `toml:"collection_stats_dbs"` + + tls.ClientConfig + + Log telegraf.Logger `toml:"-"` + + client *http.Client + requestURLServer string + requestURLDatabases string + requestURLIndexes string + requestURLCollection string +} + +func (*RavenDB) SampleConfig() string { + return sampleConfig +} + +func (r *RavenDB) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + for _, statToCollect := range r.StatsInclude { + wg.Add(1) + + switch statToCollect { + case "server": + go func() { + defer wg.Done() + r.gatherServer(acc) + }() + case "databases": + go func() { + defer wg.Done() + r.gatherDatabases(acc) + }() + case "indexes": + go func() { + defer wg.Done() + r.gatherIndexes(acc) + }() + case "collections": + go func() { + defer wg.Done() + r.gatherCollections(acc) + }() + } + } + + wg.Wait() + + return nil +} + +func (r *RavenDB) ensureClient() error { + if r.client != nil { + return nil + } + + tlsCfg, err := r.ClientConfig.TLSConfig() + if err != nil { + return err + } + tr := &http.Transport{ + ResponseHeaderTimeout: time.Duration(r.Timeout), + TLSClientConfig: tlsCfg, + } + r.client = &http.Client{ + Transport: tr, + Timeout: time.Duration(r.Timeout), + } + + return nil +} + +func (r *RavenDB) requestJSON(u string, target interface{}) error { + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return err + } + + resp, err := r.client.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + r.Log.Debugf("%s: %s", u, resp.Status) + if resp.StatusCode >= 400 { + return fmt.Errorf("invalid response code to request '%s': %d - %s", r.URL, resp.StatusCode, resp.Status) + } + + return json.NewDecoder(resp.Body).Decode(target) +} + +func (r *RavenDB) gatherServer(acc telegraf.Accumulator) { + serverResponse := &serverMetricsResponse{} + + err := r.requestJSON(r.requestURLServer, &serverResponse) + if err != nil { + acc.AddError(err) + return + } + + tags := map[string]string{ + "cluster_id": serverResponse.Cluster.ID, + "node_tag": serverResponse.Cluster.NodeTag, + "url": r.URL, + } + + if serverResponse.Config.PublicServerURL != nil { + tags["public_server_url"] = *serverResponse.Config.PublicServerURL + } + + fields := map[string]interface{}{ + "backup_current_number_of_running_backups": serverResponse.Backup.CurrentNumberOfRunningBackups, + "backup_max_number_of_concurrent_backups": serverResponse.Backup.MaxNumberOfConcurrentBackups, + "certificate_server_certificate_expiration_left_in_sec": serverResponse.Certificate.ServerCertificateExpirationLeftInSec, + "cluster_current_term": serverResponse.Cluster.CurrentTerm, + "cluster_index": serverResponse.Cluster.Index, + "cluster_node_state": serverResponse.Cluster.NodeState, + "config_server_urls": strings.Join(serverResponse.Config.ServerUrls, ";"), + "cpu_assigned_processor_count": serverResponse.CPU.AssignedProcessorCount, + "cpu_machine_io_wait": 
serverResponse.CPU.MachineIoWait, + "cpu_machine_usage": serverResponse.CPU.MachineUsage, + "cpu_process_usage": serverResponse.CPU.ProcessUsage, + "cpu_processor_count": serverResponse.CPU.ProcessorCount, + "cpu_thread_pool_available_worker_threads": serverResponse.CPU.ThreadPoolAvailableWorkerThreads, + "cpu_thread_pool_available_completion_port_threads": serverResponse.CPU.ThreadPoolAvailableCompletionPortThreads, + "databases_loaded_count": serverResponse.Databases.LoadedCount, + "databases_total_count": serverResponse.Databases.TotalCount, + "disk_remaining_storage_space_percentage": serverResponse.Disk.RemainingStorageSpacePercentage, + "disk_system_store_used_data_file_size_in_mb": serverResponse.Disk.SystemStoreUsedDataFileSizeInMb, + "disk_system_store_total_data_file_size_in_mb": serverResponse.Disk.SystemStoreTotalDataFileSizeInMb, + "disk_total_free_space_in_mb": serverResponse.Disk.TotalFreeSpaceInMb, + "license_expiration_left_in_sec": serverResponse.License.ExpirationLeftInSec, + "license_max_cores": serverResponse.License.MaxCores, + "license_type": serverResponse.License.Type, + "license_utilized_cpu_cores": serverResponse.License.UtilizedCPUCores, + "memory_allocated_in_mb": serverResponse.Memory.AllocatedMemoryInMb, + "memory_installed_in_mb": serverResponse.Memory.InstalledMemoryInMb, + "memory_low_memory_severity": serverResponse.Memory.LowMemorySeverity, + "memory_physical_in_mb": serverResponse.Memory.PhysicalMemoryInMb, + "memory_total_dirty_in_mb": serverResponse.Memory.TotalDirtyInMb, + "memory_total_swap_size_in_mb": serverResponse.Memory.TotalSwapSizeInMb, + "memory_total_swap_usage_in_mb": serverResponse.Memory.TotalSwapUsageInMb, + "memory_working_set_swap_usage_in_mb": serverResponse.Memory.WorkingSetSwapUsageInMb, + "network_concurrent_requests_count": serverResponse.Network.ConcurrentRequestsCount, + "network_last_authorized_non_cluster_admin_request_time_in_sec": serverResponse.Network.LastAuthorizedNonClusterAdminRequestTimeInSec, + "network_last_request_time_in_sec": serverResponse.Network.LastRequestTimeInSec, + "network_requests_per_sec": serverResponse.Network.RequestsPerSec, + "network_tcp_active_connections": serverResponse.Network.TCPActiveConnections, + "network_total_requests": serverResponse.Network.TotalRequests, + "server_full_version": serverResponse.ServerFullVersion, + "server_process_id": serverResponse.ServerProcessID, + "server_version": serverResponse.ServerVersion, + "uptime_in_sec": serverResponse.UpTimeInSec, + } + + if serverResponse.Config.TCPServerURLs != nil { + fields["config_tcp_server_urls"] = strings.Join(serverResponse.Config.TCPServerURLs, ";") + } + + if serverResponse.Config.PublicTCPServerURLs != nil { + fields["config_public_tcp_server_urls"] = strings.Join(serverResponse.Config.PublicTCPServerURLs, ";") + } + + if serverResponse.Certificate.WellKnownAdminCertificates != nil { + fields["certificate_well_known_admin_certificates"] = strings.Join(serverResponse.Certificate.WellKnownAdminCertificates, ";") + } + + acc.AddFields("ravendb_server", fields, tags) +} + +func (r *RavenDB) gatherDatabases(acc telegraf.Accumulator) { + databasesResponse := &databasesMetricResponse{} + + err := r.requestJSON(r.requestURLDatabases, &databasesResponse) + if err != nil { + acc.AddError(err) + return + } + + for _, dbResponse := range databasesResponse.Results { + tags := map[string]string{ + "database_id": dbResponse.DatabaseID, + "database_name": dbResponse.DatabaseName, + "node_tag": databasesResponse.NodeTag, + "url": r.URL, + } + 
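Throughout these gather functions, optional values from the monitoring API are modeled as pointers and only turned into tags or fields when non-nil, as in the `PublicServerURL` check that follows. A hypothetical helper (not part of the plugin) showing the same guard in isolation:

```go
package example

// addOptionalTag is a hypothetical helper: a nil pointer means the
// server omitted the value, so no tag is written at all rather than an
// empty string.
func addOptionalTag(tags map[string]string, key string, value *string) {
	if value != nil {
		tags[key] = *value
	}
}
```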
+ if databasesResponse.PublicServerURL != nil { + tags["public_server_url"] = *databasesResponse.PublicServerURL + } + + fields := map[string]interface{}{ + "counts_alerts": dbResponse.Counts.Alerts, + "counts_attachments": dbResponse.Counts.Attachments, + "counts_documents": dbResponse.Counts.Documents, + "counts_performance_hints": dbResponse.Counts.PerformanceHints, + "counts_rehabs": dbResponse.Counts.Rehabs, + "counts_replication_factor": dbResponse.Counts.ReplicationFactor, + "counts_revisions": dbResponse.Counts.Revisions, + "counts_unique_attachments": dbResponse.Counts.UniqueAttachments, + "indexes_auto_count": dbResponse.Indexes.AutoCount, + "indexes_count": dbResponse.Indexes.Count, + "indexes_errored_count": dbResponse.Indexes.ErroredCount, + "indexes_errors_count": dbResponse.Indexes.ErrorsCount, + "indexes_disabled_count": dbResponse.Indexes.DisabledCount, + "indexes_idle_count": dbResponse.Indexes.IdleCount, + "indexes_stale_count": dbResponse.Indexes.StaleCount, + "indexes_static_count": dbResponse.Indexes.StaticCount, + "statistics_doc_puts_per_sec": dbResponse.Statistics.DocPutsPerSec, + "statistics_map_index_indexes_per_sec": dbResponse.Statistics.MapIndexIndexesPerSec, + "statistics_map_reduce_index_mapped_per_sec": dbResponse.Statistics.MapReduceIndexMappedPerSec, + "statistics_map_reduce_index_reduced_per_sec": dbResponse.Statistics.MapReduceIndexReducedPerSec, + "statistics_request_average_duration_in_ms": dbResponse.Statistics.RequestAverageDurationInMs, + "statistics_requests_count": dbResponse.Statistics.RequestsCount, + "statistics_requests_per_sec": dbResponse.Statistics.RequestsPerSec, + "storage_documents_allocated_data_file_in_mb": dbResponse.Storage.DocumentsAllocatedDataFileInMb, + "storage_documents_used_data_file_in_mb": dbResponse.Storage.DocumentsUsedDataFileInMb, + "storage_indexes_allocated_data_file_in_mb": dbResponse.Storage.IndexesAllocatedDataFileInMb, + "storage_indexes_used_data_file_in_mb": dbResponse.Storage.IndexesUsedDataFileInMb, + "storage_total_allocated_storage_file_in_mb": dbResponse.Storage.TotalAllocatedStorageFileInMb, + "storage_total_free_space_in_mb": dbResponse.Storage.TotalFreeSpaceInMb, + "time_since_last_backup_in_sec": dbResponse.TimeSinceLastBackupInSec, + "uptime_in_sec": dbResponse.UptimeInSec, + } + + acc.AddFields("ravendb_databases", fields, tags) + } +} + +func (r *RavenDB) gatherIndexes(acc telegraf.Accumulator) { + indexesResponse := &indexesMetricResponse{} + + err := r.requestJSON(r.requestURLIndexes, &indexesResponse) + if err != nil { + acc.AddError(err) + return + } + + for _, perDbIndexResponse := range indexesResponse.Results { + for _, indexResponse := range perDbIndexResponse.Indexes { + tags := map[string]string{ + "database_name": perDbIndexResponse.DatabaseName, + "index_name": indexResponse.IndexName, + "node_tag": indexesResponse.NodeTag, + "url": r.URL, + } + + if indexesResponse.PublicServerURL != nil { + tags["public_server_url"] = *indexesResponse.PublicServerURL + } + + fields := map[string]interface{}{ + "errors": indexResponse.Errors, + "is_invalid": indexResponse.IsInvalid, + "lock_mode": indexResponse.LockMode, + "mapped_per_sec": indexResponse.MappedPerSec, + "priority": indexResponse.Priority, + "reduced_per_sec": indexResponse.ReducedPerSec, + "state": indexResponse.State, + "status": indexResponse.Status, + "time_since_last_indexing_in_sec": indexResponse.TimeSinceLastIndexingInSec, + "time_since_last_query_in_sec": indexResponse.TimeSinceLastQueryInSec, + "type": indexResponse.Type, + } + 
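All of the per-database monitoring endpoints wrap their payload in the same envelope (`Results`, `NodeTag`, optional `PublicServerUrl`), matching the DTOs defined in `ravendb_dto.go` below. A self-contained sketch of decoding that envelope with `encoding/json`, using `json.RawMessage` as a stand-in for the per-endpoint result type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// envelope mirrors the common wrapper the monitoring endpoints return;
// PublicServerUrl is a pointer because the server may send null.
type envelope struct {
	Results         []json.RawMessage `json:"Results"`
	PublicServerURL *string           `json:"PublicServerUrl"`
	NodeTag         string            `json:"NodeTag"`
}

func main() {
	payload := []byte(`{"PublicServerUrl":null,"NodeTag":"A","Results":[{"DatabaseName":"db1"}]}`)
	var e envelope
	if err := json.Unmarshal(payload, &e); err != nil {
		panic(err)
	}
	// Prints: A 1 true
	fmt.Println(e.NodeTag, len(e.Results), e.PublicServerURL == nil)
}
```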
+ acc.AddFields("ravendb_indexes", fields, tags) + } + } +} + +func (r *RavenDB) gatherCollections(acc telegraf.Accumulator) { + collectionsResponse := &collectionsMetricResponse{} + + err := r.requestJSON(r.requestURLCollection, &collectionsResponse) + if err != nil { + acc.AddError(err) + return + } + + for _, perDbCollectionMetrics := range collectionsResponse.Results { + for _, collectionMetrics := range perDbCollectionMetrics.Collections { + tags := map[string]string{ + "collection_name": collectionMetrics.CollectionName, + "database_name": perDbCollectionMetrics.DatabaseName, + "node_tag": collectionsResponse.NodeTag, + "url": r.URL, + } + + if collectionsResponse.PublicServerURL != nil { + tags["public_server_url"] = *collectionsResponse.PublicServerURL + } + + fields := map[string]interface{}{ + "documents_count": collectionMetrics.DocumentsCount, + "documents_size_in_bytes": collectionMetrics.DocumentsSizeInBytes, + "revisions_size_in_bytes": collectionMetrics.RevisionsSizeInBytes, + "tombstones_size_in_bytes": collectionMetrics.TombstonesSizeInBytes, + "total_size_in_bytes": collectionMetrics.TotalSizeInBytes, + } + + acc.AddFields("ravendb_collections", fields, tags) + } + } +} + +func prepareDBNamesURLPart(dbNames []string) string { + if len(dbNames) == 0 { + return "" + } + result := "?" + dbNames[0] + for _, db := range dbNames[1:] { + result += "&name=" + url.QueryEscape(db) + } + + return result +} + +func (r *RavenDB) Init() error { + if r.URL == "" { + r.URL = defaultURL + } + + r.requestURLServer = r.URL + "/admin/monitoring/v1/server" + r.requestURLDatabases = r.URL + "/admin/monitoring/v1/databases" + prepareDBNamesURLPart(r.DbStatsDbs) + r.requestURLIndexes = r.URL + "/admin/monitoring/v1/indexes" + prepareDBNamesURLPart(r.IndexStatsDbs) + r.requestURLCollection = r.URL + "/admin/monitoring/v1/collections" + prepareDBNamesURLPart(r.IndexStatsDbs) + + err := choice.CheckSlice(r.StatsInclude, []string{"server", "databases", "indexes", "collections"}) + if err != nil { + return err + } + + err = r.ensureClient() + if nil != err { + r.Log.Errorf("Error with Client %s", err) + return err + } + + return nil +} + +func init() { + inputs.Add("ravendb", func() telegraf.Input { + return &RavenDB{ + Timeout: config.Duration(defaultTimeout * time.Second), + StatsInclude: []string{"server", "databases", "indexes", "collections"}, + } + }) +} diff --git a/plugins/inputs/ravendb/ravendb_dto.go b/plugins/inputs/ravendb/ravendb_dto.go new file mode 100644 index 0000000000000..87ae34dccc541 --- /dev/null +++ b/plugins/inputs/ravendb/ravendb_dto.go @@ -0,0 +1,199 @@ +package ravendb + +type serverMetricsResponse struct { + ServerVersion string `json:"ServerVersion"` + ServerFullVersion string `json:"ServerFullVersion"` + UpTimeInSec int32 `json:"UpTimeInSec"` + ServerProcessID int32 `json:"ServerProcessId"` + Backup backupMetrics `json:"Backup"` + Config configurationMetrics `json:"Config"` + CPU cpuMetrics `json:"Cpu"` + Memory memoryMetrics `json:"Memory"` + Disk diskMetrics `json:"Disk"` + License licenseMetrics `json:"License"` + Network networkMetrics `json:"Network"` + Certificate certificateMetrics `json:"Certificate"` + Cluster clusterMetrics `json:"Cluster"` + Databases allDatabasesMetrics `json:"Databases"` +} + +type backupMetrics struct { + CurrentNumberOfRunningBackups int32 `json:"CurrentNumberOfRunningBackups"` + MaxNumberOfConcurrentBackups int32 `json:"MaxNumberOfConcurrentBackups"` +} + +type configurationMetrics struct { + ServerUrls []string `json:"ServerUrls"` + 
PublicServerURL *string `json:"PublicServerUrl"` + TCPServerURLs []string `json:"TcpServerUrls"` + PublicTCPServerURLs []string `json:"PublicTcpServerUrls"` +} + +type cpuMetrics struct { + ProcessUsage float64 `json:"ProcessUsage"` + MachineUsage float64 `json:"MachineUsage"` + MachineIoWait *float64 `json:"MachineIoWait"` + ProcessorCount int32 `json:"ProcessorCount"` + AssignedProcessorCount int32 `json:"AssignedProcessorCount"` + ThreadPoolAvailableWorkerThreads int32 `json:"ThreadPoolAvailableWorkerThreads"` + ThreadPoolAvailableCompletionPortThreads int32 `json:"ThreadPoolAvailableCompletionPortThreads"` +} + +type memoryMetrics struct { + AllocatedMemoryInMb int64 `json:"AllocatedMemoryInMb"` + PhysicalMemoryInMb int64 `json:"PhysicalMemoryInMb"` + InstalledMemoryInMb int64 `json:"InstalledMemoryInMb"` + LowMemorySeverity string `json:"LowMemorySeverity"` + TotalSwapSizeInMb int64 `json:"TotalSwapSizeInMb"` + TotalSwapUsageInMb int64 `json:"TotalSwapUsageInMb"` + WorkingSetSwapUsageInMb int64 `json:"WorkingSetSwapUsageInMb"` + TotalDirtyInMb int64 `json:"TotalDirtyInMb"` +} + +type diskMetrics struct { + SystemStoreUsedDataFileSizeInMb int64 `json:"SystemStoreUsedDataFileSizeInMb"` + SystemStoreTotalDataFileSizeInMb int64 `json:"SystemStoreTotalDataFileSizeInMb"` + TotalFreeSpaceInMb int64 `json:"TotalFreeSpaceInMb"` + RemainingStorageSpacePercentage int64 `json:"RemainingStorageSpacePercentage"` +} + +type licenseMetrics struct { + Type string `json:"Type"` + ExpirationLeftInSec *float64 `json:"ExpirationLeftInSec"` + UtilizedCPUCores int32 `json:"UtilizedCpuCores"` + MaxCores int32 `json:"MaxCores"` +} + +type networkMetrics struct { + TCPActiveConnections int64 `json:"TcpActiveConnections"` + ConcurrentRequestsCount int64 `json:"ConcurrentRequestsCount"` + TotalRequests int64 `json:"TotalRequests"` + RequestsPerSec float64 `json:"RequestsPerSec"` + LastRequestTimeInSec *float64 `json:"LastRequestTimeInSec"` + LastAuthorizedNonClusterAdminRequestTimeInSec *float64 `json:"LastAuthorizedNonClusterAdminRequestTimeInSec"` +} + +type certificateMetrics struct { + ServerCertificateExpirationLeftInSec *float64 `json:"ServerCertificateExpirationLeftInSec"` + WellKnownAdminCertificates []string `json:"WellKnownAdminCertificates"` +} + +type clusterMetrics struct { + NodeTag string `json:"NodeTag"` + NodeState string `json:"NodeState"` + CurrentTerm int64 `json:"CurrentTerm"` + Index int64 `json:"Index"` + ID string `json:"Id"` +} + +type allDatabasesMetrics struct { + TotalCount int32 `json:"TotalCount"` + LoadedCount int32 `json:"LoadedCount"` +} + +type databasesMetricResponse struct { + Results []*databaseMetrics `json:"Results"` + PublicServerURL *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type databaseMetrics struct { + DatabaseName string `json:"DatabaseName"` + DatabaseID string `json:"DatabaseId"` + UptimeInSec float64 `json:"UptimeInSec"` + TimeSinceLastBackupInSec *float64 `json:"TimeSinceLastBackupInSec"` + + Counts databaseCounts `json:"Counts"` + Statistics databaseStatistics `json:"Statistics"` + + Indexes databaseIndexesMetrics `json:"Indexes"` + Storage databaseStorageMetrics `json:"Storage"` +} + +type databaseCounts struct { + Documents int64 `json:"Documents"` + Revisions int64 `json:"Revisions"` + Attachments int64 `json:"Attachments"` + UniqueAttachments int64 `json:"UniqueAttachments"` + Alerts int64 `json:"Alerts"` + Rehabs int32 `json:"Rehabs"` + PerformanceHints int64 `json:"PerformanceHints"` + ReplicationFactor int32 
`json:"ReplicationFactor"` +} + +type databaseStatistics struct { + DocPutsPerSec float64 `json:"DocPutsPerSec"` + MapIndexIndexesPerSec float64 `json:"MapIndexIndexesPerSec"` + MapReduceIndexMappedPerSec float64 `json:"MapReduceIndexMappedPerSec"` + MapReduceIndexReducedPerSec float64 `json:"MapReduceIndexReducedPerSec"` + RequestsPerSec float64 `json:"RequestsPerSec"` + RequestsCount int32 `json:"RequestsCount"` + RequestAverageDurationInMs float64 `json:"RequestAverageDurationInMs"` +} + +type databaseIndexesMetrics struct { + Count int64 `json:"Count"` + StaleCount int32 `json:"StaleCount"` + ErrorsCount int64 `json:"ErrorsCount"` + StaticCount int32 `json:"StaticCount"` + AutoCount int32 `json:"AutoCount"` + IdleCount int32 `json:"IdleCount"` + DisabledCount int32 `json:"DisabledCount"` + ErroredCount int32 `json:"ErroredCount"` +} + +type databaseStorageMetrics struct { + DocumentsAllocatedDataFileInMb int64 `json:"DocumentsAllocatedDataFileInMb"` + DocumentsUsedDataFileInMb int64 `json:"DocumentsUsedDataFileInMb"` + IndexesAllocatedDataFileInMb int64 `json:"IndexesAllocatedDataFileInMb"` + IndexesUsedDataFileInMb int64 `json:"IndexesUsedDataFileInMb"` + TotalAllocatedStorageFileInMb int64 `json:"TotalAllocatedStorageFileInMb"` + TotalFreeSpaceInMb int64 `json:"TotalFreeSpaceInMb"` +} + +type indexesMetricResponse struct { + Results []*perDatabaseIndexMetrics `json:"Results"` + PublicServerURL *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type perDatabaseIndexMetrics struct { + DatabaseName string `json:"DatabaseName"` + Indexes []*indexMetrics `json:"Indexes"` +} + +type indexMetrics struct { + IndexName string `json:"IndexName"` + Priority string `json:"Priority"` + State string `json:"State"` + Errors int32 `json:"Errors"` + TimeSinceLastQueryInSec *float64 `json:"TimeSinceLastQueryInSec"` + TimeSinceLastIndexingInSec *float64 `json:"TimeSinceLastIndexingInSec"` + LockMode string `json:"LockMode"` + IsInvalid bool `json:"IsInvalid"` + Status string `json:"Status"` + MappedPerSec float64 `json:"MappedPerSec"` + ReducedPerSec float64 `json:"ReducedPerSec"` + Type string `json:"Type"` + EntriesCount int32 `json:"EntriesCount"` +} + +type collectionsMetricResponse struct { + Results []*perDatabaseCollectionMetrics `json:"Results"` + PublicServerURL *string `json:"PublicServerUrl"` + NodeTag string `json:"NodeTag"` +} + +type perDatabaseCollectionMetrics struct { + DatabaseName string `json:"DatabaseName"` + Collections []*collectionMetrics `json:"Collections"` +} + +type collectionMetrics struct { + CollectionName string `json:"CollectionName"` + DocumentsCount int64 `json:"DocumentsCount"` + TotalSizeInBytes int64 `json:"TotalSizeInBytes"` + DocumentsSizeInBytes int64 `json:"DocumentsSizeInBytes"` + TombstonesSizeInBytes int64 `json:"TombstonesSizeInBytes"` + RevisionsSizeInBytes int64 `json:"RevisionsSizeInBytes"` +} diff --git a/plugins/inputs/ravendb/ravendb_test.go b/plugins/inputs/ravendb/ravendb_test.go new file mode 100644 index 0000000000000..3da1d0190a055 --- /dev/null +++ b/plugins/inputs/ravendb/ravendb_test.go @@ -0,0 +1,388 @@ +package ravendb + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +// Test against fully filled data +func TestRavenDBGeneratesMetricsFull(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + switch r.URL.Path { + case 
"/admin/monitoring/v1/databases": + jsonFilePath = "testdata/databases_full.json" + case "/admin/monitoring/v1/server": + jsonFilePath = "testdata/server_full.json" + case "/admin/monitoring/v1/indexes": + jsonFilePath = "testdata/indexes_full.json" + case "/admin/monitoring/v1/collections": + jsonFilePath = "testdata/collections_full.json" + + default: + require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) + } + + data, err := os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + + _, err = w.Write(data) + require.NoError(t, err) + })) + defer ts.Close() + + r := &RavenDB{ + URL: ts.URL, + StatsInclude: []string{"server", "databases", "indexes", "collections"}, + Log: testutil.Logger{}, + } + + require.NoError(t, r.Init()) + + acc := &testutil.Accumulator{} + + err := acc.GatherError(r.Gather) + require.NoError(t, err) + + serverFields := map[string]interface{}{ + "server_version": "5.1", + "server_full_version": "5.1.1-custom-51", + "uptime_in_sec": int64(30), + "server_process_id": 26360, + "config_server_urls": "http://127.0.0.1:8080;http://192.168.0.1:8080", + "config_tcp_server_urls": "tcp://127.0.0.1:3888;tcp://192.168.0.1:3888", + "config_public_tcp_server_urls": "tcp://2.3.4.5:3888;tcp://6.7.8.9:3888", + "backup_max_number_of_concurrent_backups": 4, + "backup_current_number_of_running_backups": 2, + "cpu_process_usage": 6.28, + "cpu_machine_usage": 41.05, + "cpu_machine_io_wait": 2.55, + "cpu_processor_count": 8, + "cpu_assigned_processor_count": 7, + "cpu_thread_pool_available_worker_threads": 32766, + "cpu_thread_pool_available_completion_port_threads": 1000, + "memory_allocated_in_mb": 235, + "memory_installed_in_mb": 16384, + "memory_physical_in_mb": 16250, + "memory_low_memory_severity": "None", + "memory_total_swap_size_in_mb": 1024, + "memory_total_swap_usage_in_mb": 456, + "memory_working_set_swap_usage_in_mb": 89, + "memory_total_dirty_in_mb": 1, + "disk_system_store_used_data_file_size_in_mb": 28, + "disk_system_store_total_data_file_size_in_mb": 32, + "disk_total_free_space_in_mb": 52078, + "disk_remaining_storage_space_percentage": 22, + "license_type": "Enterprise", + "license_expiration_left_in_sec": 25466947.5, + "license_utilized_cpu_cores": 8, + "license_max_cores": 256, + "network_tcp_active_connections": 84, + "network_concurrent_requests_count": 1, + "network_total_requests": 3, + "network_requests_per_sec": 0.03322, + "network_last_request_time_in_sec": 0.0264977, + "network_last_authorized_non_cluster_admin_request_time_in_sec": 0.04, + "certificate_server_certificate_expiration_left_in_sec": float64(104), + "certificate_well_known_admin_certificates": "a909502dd82ae41433e6f83886b00d4277a32a7b;4444444444444444444444444444444444444444", + "cluster_node_state": "Leader", + "cluster_current_term": 28, + "cluster_index": 104, + "databases_total_count": 25, + "databases_loaded_count": 2, + } + + serverTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "cluster_id": "6b535a18-558f-4e53-a479-a514efc16aab", + "public_server_url": "http://raven1:8080", + } + + defaultTime := time.Unix(0, 0) + + dbFields := map[string]interface{}{ + "uptime_in_sec": float64(1396), + "time_since_last_backup_in_sec": 104.3, + "counts_documents": 425189, + "counts_revisions": 429605, + "counts_attachments": 17, + "counts_unique_attachments": 16, + "counts_alerts": 2, + "counts_rehabs": 3, + "counts_performance_hints": 5, + "counts_replication_factor": 2, + "statistics_doc_puts_per_sec": 23.4, + 
"statistics_map_index_indexes_per_sec": 82.5, + "statistics_map_reduce_index_mapped_per_sec": 50.3, + "statistics_map_reduce_index_reduced_per_sec": 85.2, + "statistics_requests_per_sec": 22.5, + "statistics_requests_count": 809, + "statistics_request_average_duration_in_ms": 0.55, + "indexes_count": 7, + "indexes_stale_count": 1, + "indexes_errors_count": 2, + "indexes_static_count": 7, + "indexes_auto_count": 3, + "indexes_idle_count": 4, + "indexes_disabled_count": 5, + "indexes_errored_count": 6, + "storage_documents_allocated_data_file_in_mb": 1024, + "storage_documents_used_data_file_in_mb": 942, + "storage_indexes_allocated_data_file_in_mb": 464, + "storage_indexes_used_data_file_in_mb": 278, + "storage_total_allocated_storage_file_in_mb": 1496, + "storage_total_free_space_in_mb": 52074, + } + + dbTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db2", + "database_id": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "public_server_url": "http://myhost:8080", + } + + indexFields := map[string]interface{}{ + "priority": "Normal", + "state": "Normal", + "errors": 0, + "time_since_last_query_in_sec": 3.4712567, + "time_since_last_indexing_in_sec": 3.4642612, + "lock_mode": "Unlock", + "is_invalid": true, + "status": "Running", + "mapped_per_sec": 102.34, + "reduced_per_sec": 593.23, + "type": "MapReduce", + } + + indexTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "public_server_url": "http://localhost:8080", + "database_name": "db1", + "index_name": "Product/Rating", + } + + collectionFields := map[string]interface{}{ + "documents_count": 830, + "total_size_in_bytes": 2744320, + "documents_size_in_bytes": 868352, + "tombstones_size_in_bytes": 122880, + "revisions_size_in_bytes": 1753088, + } + + collectionTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db1", + "collection_name": "Orders", + "public_server_url": "http://localhost:8080", + } + + serverExpected := testutil.MustMetric("ravendb_server", serverTags, serverFields, defaultTime) + dbExpected := testutil.MustMetric("ravendb_databases", dbTags, dbFields, defaultTime) + indexExpected := testutil.MustMetric("ravendb_indexes", indexTags, indexFields, defaultTime) + collectionsExpected := testutil.MustMetric("ravendb_collections", collectionTags, collectionFields, defaultTime) + + for _, metric := range acc.GetTelegrafMetrics() { + switch metric.Name() { + case "ravendb_server": + testutil.RequireMetricEqual(t, serverExpected, metric, testutil.IgnoreTime()) + case "ravendb_databases": + testutil.RequireMetricEqual(t, dbExpected, metric, testutil.IgnoreTime()) + case "ravendb_indexes": + testutil.RequireMetricEqual(t, indexExpected, metric, testutil.IgnoreTime()) + case "ravendb_collections": + testutil.RequireMetricEqual(t, collectionsExpected, metric, testutil.IgnoreTime()) + } + } +} + +// Test against minimum filled data +func TestRavenDBGeneratesMetricsMin(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var jsonFilePath string + + switch r.URL.Path { + case "/admin/monitoring/v1/databases": + jsonFilePath = "testdata/databases_min.json" + case "/admin/monitoring/v1/server": + jsonFilePath = "testdata/server_min.json" + case "/admin/monitoring/v1/indexes": + jsonFilePath = "testdata/indexes_min.json" + case "/admin/monitoring/v1/collections": + jsonFilePath = "testdata/collections_min.json" + default: + require.Failf(t, "Cannot handle request for uri %s", r.URL.Path) + } + + data, err := 
os.ReadFile(jsonFilePath) + require.NoErrorf(t, err, "could not read from data file %s", jsonFilePath) + + _, err = w.Write(data) + require.NoError(t, err) + })) + defer ts.Close() + + r := &RavenDB{ + URL: ts.URL, + StatsInclude: []string{"server", "databases", "indexes", "collections"}, + Log: testutil.Logger{}, + } + + require.NoError(t, r.Init()) + + acc := &testutil.Accumulator{} + + err := acc.GatherError(r.Gather) + require.NoError(t, err) + + serverFields := map[string]interface{}{ + "server_version": "5.1", + "server_full_version": "5.1.1-custom-51", + "uptime_in_sec": 30, + "server_process_id": 26360, + "config_server_urls": "http://127.0.0.1:8080", + "backup_max_number_of_concurrent_backups": 4, + "backup_current_number_of_running_backups": 2, + "cpu_process_usage": 6.28, + "cpu_machine_usage": 41.07, + "cpu_processor_count": 8, + "cpu_assigned_processor_count": 7, + "cpu_thread_pool_available_worker_threads": 32766, + "cpu_thread_pool_available_completion_port_threads": 1000, + "memory_allocated_in_mb": 235, + "memory_installed_in_mb": 16384, + "memory_physical_in_mb": 16250, + "memory_low_memory_severity": "Low", + "memory_total_swap_size_in_mb": 1024, + "memory_total_swap_usage_in_mb": 456, + "memory_working_set_swap_usage_in_mb": 89, + "memory_total_dirty_in_mb": 1, + "disk_system_store_used_data_file_size_in_mb": 28, + "disk_system_store_total_data_file_size_in_mb": 32, + "disk_total_free_space_in_mb": 52078, + "disk_remaining_storage_space_percentage": 22, + "license_type": "Enterprise", + "license_utilized_cpu_cores": 8, + "license_max_cores": 256, + "network_tcp_active_connections": 84, + "network_concurrent_requests_count": 1, + "network_total_requests": 3, + "network_requests_per_sec": 0.03322, + "cluster_node_state": "Leader", + "cluster_current_term": 28, + "cluster_index": 104, + "databases_total_count": 25, + "databases_loaded_count": 2, + } + + serverTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "cluster_id": "6b535a18-558f-4e53-a479-a514efc16aab", + } + + dbFields := map[string]interface{}{ + "uptime_in_sec": float64(1396), + "counts_documents": 425189, + "counts_revisions": 429605, + "counts_attachments": 17, + "counts_unique_attachments": 16, + "counts_alerts": 2, + "counts_rehabs": 3, + "counts_performance_hints": 5, + "counts_replication_factor": 2, + "statistics_doc_puts_per_sec": 23.4, + "statistics_map_index_indexes_per_sec": 82.5, + "statistics_map_reduce_index_mapped_per_sec": 50.3, + "statistics_map_reduce_index_reduced_per_sec": 85.2, + "statistics_requests_per_sec": 22.5, + "statistics_requests_count": 809, + "statistics_request_average_duration_in_ms": 0.55, + "indexes_count": 7, + "indexes_stale_count": 1, + "indexes_errors_count": 2, + "indexes_static_count": 7, + "indexes_auto_count": 3, + "indexes_idle_count": 4, + "indexes_disabled_count": 5, + "indexes_errored_count": 6, + "storage_documents_allocated_data_file_in_mb": 1024, + "storage_documents_used_data_file_in_mb": 942, + "storage_indexes_allocated_data_file_in_mb": 464, + "storage_indexes_used_data_file_in_mb": 278, + "storage_total_allocated_storage_file_in_mb": 1496, + "storage_total_free_space_in_mb": 52074, + } + + dbTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db2", + "database_id": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + } + + indexFields := map[string]interface{}{ + "priority": "Normal", + "state": "Normal", + "errors": 0, + "lock_mode": "Unlock", + "is_invalid": false, + "status": "Running", + "mapped_per_sec": 102.34, + 
"reduced_per_sec": 593.23, + "type": "MapReduce", + } + + indexTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db1", + "index_name": "Product/Rating", + } + + collectionFields := map[string]interface{}{ + "documents_count": 830, + "total_size_in_bytes": 2744320, + "documents_size_in_bytes": 868352, + "tombstones_size_in_bytes": 122880, + "revisions_size_in_bytes": 1753088, + } + + collectionTags := map[string]string{ + "url": ts.URL, + "node_tag": "A", + "database_name": "db1", + "collection_name": "Orders", + } + + defaultTime := time.Unix(0, 0) + + serverExpected := testutil.MustMetric("ravendb_server", serverTags, serverFields, defaultTime) + dbExpected := testutil.MustMetric("ravendb_databases", dbTags, dbFields, defaultTime) + indexExpected := testutil.MustMetric("ravendb_indexes", indexTags, indexFields, defaultTime) + collectionsExpected := testutil.MustMetric("ravendb_collections", collectionTags, collectionFields, defaultTime) + + for _, metric := range acc.GetTelegrafMetrics() { + switch metric.Name() { + case "ravendb_server": + testutil.RequireMetricEqual(t, serverExpected, metric, testutil.IgnoreTime()) + case "ravendb_databases": + testutil.RequireMetricEqual(t, dbExpected, metric, testutil.IgnoreTime()) + case "ravendb_indexes": + testutil.RequireMetricEqual(t, indexExpected, metric, testutil.IgnoreTime()) + case "ravendb_collections": + testutil.RequireMetricEqual(t, collectionsExpected, metric, testutil.IgnoreTime()) + } + } +} diff --git a/plugins/inputs/ravendb/sample.conf b/plugins/inputs/ravendb/sample.conf new file mode 100644 index 0000000000000..f662b7eceab17 --- /dev/null +++ b/plugins/inputs/ravendb/sample.conf @@ -0,0 +1,36 @@ +# Reads metrics from RavenDB servers via the Monitoring Endpoints +[[inputs.ravendb]] + ## Node URL and port that RavenDB is listening on. 
By default, + ## attempts to connect securely over HTTPS, however, if the user + ## is running a local unsecure development cluster users can use + ## HTTP via a URL like "http://localhost:8080" + url = "https://localhost:4433" + + ## RavenDB X509 client certificate setup + # tls_cert = "/etc/telegraf/raven.crt" + # tls_key = "/etc/telegraf/raven.key" + + ## Optional request timeout + ## + ## Timeout, specifies the amount of time to wait + ## for a server's response headers after fully writing the request and + ## time limit for requests made by this client + # timeout = "5s" + + ## List of statistics which are collected + # At least one is required + # Allowed values: server, databases, indexes, collections + # + # stats_include = ["server", "databases", "indexes", "collections"] + + ## List of db where database stats are collected + ## If empty, all db are concerned + # db_stats_dbs = [] + + ## List of db where index status are collected + ## If empty, all indexes from all db are concerned + # index_stats_dbs = [] + + ## List of db where collection status are collected + ## If empty, all collections from all db are concerned + # collection_stats_dbs = [] diff --git a/plugins/inputs/ravendb/testdata/collections_full.json b/plugins/inputs/ravendb/testdata/collections_full.json new file mode 100644 index 0000000000000..db91e90868d9b --- /dev/null +++ b/plugins/inputs/ravendb/testdata/collections_full.json @@ -0,0 +1,19 @@ +{ + "PublicServerUrl": "http://localhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Collections": [ + { + "CollectionName": "Orders", + "DocumentsCount": 830, + "TotalSizeInBytes": 2744320, + "DocumentsSizeInBytes": 868352, + "TombstonesSizeInBytes": 122880, + "RevisionsSizeInBytes": 1753088 + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/collections_min.json b/plugins/inputs/ravendb/testdata/collections_min.json new file mode 100644 index 0000000000000..edd636d21e202 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/collections_min.json @@ -0,0 +1,19 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Collections": [ + { + "CollectionName": "Orders", + "DocumentsCount": 830, + "TotalSizeInBytes": 2744320, + "DocumentsSizeInBytes": 868352, + "TombstonesSizeInBytes": 122880, + "RevisionsSizeInBytes": 1753088 + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/databases_full.json b/plugins/inputs/ravendb/testdata/databases_full.json new file mode 100644 index 0000000000000..1c74568812575 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/databases_full.json @@ -0,0 +1,49 @@ +{ + "PublicServerUrl": "http://myhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db2", + "DatabaseId": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "UptimeInSec": 1396, + "TimeSinceLastBackupInSec": 104.3, + "Counts": { + "Documents": 425189, + "Revisions": 429605, + "Attachments": 17, + "UniqueAttachments": 16, + "Alerts": 2, + "Rehabs": 3, + "PerformanceHints": 5, + "ReplicationFactor": 2 + }, + "Statistics": { + "DocPutsPerSec": 23.4, + "MapIndexIndexesPerSec": 82.5, + "MapReduceIndexMappedPerSec": 50.3, + "MapReduceIndexReducedPerSec": 85.2, + "RequestsPerSec": 22.5, + "RequestsCount": 809, + "RequestAverageDurationInMs": 0.55 + }, + "Indexes": { + "Count": 7, + "StaleCount": 1, + "ErrorsCount": 2, + "StaticCount": 7, + "AutoCount": 3, + "IdleCount": 4, + "DisabledCount": 5, + "ErroredCount": 6 + }, + "Storage": { + "DocumentsAllocatedDataFileInMb": 1024, + 
"DocumentsUsedDataFileInMb": 942, + "IndexesAllocatedDataFileInMb": 464, + "IndexesUsedDataFileInMb": 278, + "TotalAllocatedStorageFileInMb": 1496, + "TotalFreeSpaceInMb": 52074 + } + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/databases_min.json b/plugins/inputs/ravendb/testdata/databases_min.json new file mode 100644 index 0000000000000..48a1ccbb6b7ad --- /dev/null +++ b/plugins/inputs/ravendb/testdata/databases_min.json @@ -0,0 +1,49 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db2", + "DatabaseId": "06eefe8b-d720-4a8d-a809-2c5af9a4abb5", + "UptimeInSec": 1396, + "TimeSinceLastBackupInSec": null, + "Counts": { + "Documents": 425189, + "Revisions": 429605, + "Attachments": 17, + "UniqueAttachments": 16, + "Alerts": 2, + "Rehabs": 3, + "PerformanceHints": 5, + "ReplicationFactor": 2 + }, + "Statistics": { + "DocPutsPerSec": 23.4, + "MapIndexIndexesPerSec": 82.5, + "MapReduceIndexMappedPerSec": 50.3, + "MapReduceIndexReducedPerSec": 85.2, + "RequestsPerSec": 22.5, + "RequestsCount": 809, + "RequestAverageDurationInMs": 0.55 + }, + "Indexes": { + "Count": 7, + "StaleCount": 1, + "ErrorsCount": 2, + "StaticCount": 7, + "AutoCount": 3, + "IdleCount": 4, + "DisabledCount": 5, + "ErroredCount": 6 + }, + "Storage": { + "DocumentsAllocatedDataFileInMb": 1024, + "DocumentsUsedDataFileInMb": 942, + "IndexesAllocatedDataFileInMb": 464, + "IndexesUsedDataFileInMb": 278, + "TotalAllocatedStorageFileInMb": 1496, + "TotalFreeSpaceInMb": 52074 + } + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/indexes_full.json b/plugins/inputs/ravendb/testdata/indexes_full.json new file mode 100644 index 0000000000000..d67ded7d18800 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/indexes_full.json @@ -0,0 +1,25 @@ +{ + "PublicServerUrl": "http://localhost:8080", + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Indexes": [ + { + "IndexName": "Product/Rating", + "Priority": "Normal", + "State": "Normal", + "Errors": 0, + "TimeSinceLastQueryInSec": 3.4712567, + "TimeSinceLastIndexingInSec": 3.4642612, + "LockMode": "Unlock", + "IsInvalid": true, + "Status": "Running", + "MappedPerSec": 102.34, + "ReducedPerSec": 593.23, + "Type": "MapReduce" + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/indexes_min.json b/plugins/inputs/ravendb/testdata/indexes_min.json new file mode 100644 index 0000000000000..493bda8b7e799 --- /dev/null +++ b/plugins/inputs/ravendb/testdata/indexes_min.json @@ -0,0 +1,25 @@ +{ + "PublicServerUrl": null, + "NodeTag": "A", + "Results": [ + { + "DatabaseName": "db1", + "Indexes": [ + { + "IndexName": "Product/Rating", + "Priority": "Normal", + "State": "Normal", + "Errors": 0, + "TimeSinceLastQueryInSec": null, + "TimeSinceLastIndexingInSec": null, + "LockMode": "Unlock", + "IsInvalid": false, + "Status": "Running", + "MappedPerSec": 102.34, + "ReducedPerSec": 593.23, + "Type": "MapReduce" + } + ] + } + ] +} diff --git a/plugins/inputs/ravendb/testdata/server_full.json b/plugins/inputs/ravendb/testdata/server_full.json new file mode 100644 index 0000000000000..edfbbbf7940dc --- /dev/null +++ b/plugins/inputs/ravendb/testdata/server_full.json @@ -0,0 +1,73 @@ +{ + "ServerVersion": "5.1", + "ServerFullVersion": "5.1.1-custom-51", + "UpTimeInSec": 30, + "ServerProcessId": 26360, + "Config": { + "ServerUrls": [ + "http://127.0.0.1:8080", + "http://192.168.0.1:8080" + ], + "PublicServerUrl": "http://raven1:8080", + "TcpServerUrls": ["tcp://127.0.0.1:3888", "tcp://192.168.0.1:3888"], + "PublicTcpServerUrls": 
["tcp://2.3.4.5:3888", "tcp://6.7.8.9:3888"] + }, + "Backup": { + "CurrentNumberOfRunningBackups": 2, + "MaxNumberOfConcurrentBackups": 4 + }, + "Cpu": { + "ProcessUsage": 6.28, + "MachineUsage": 41.05, + "MachineIoWait": 2.55, + "ProcessorCount": 8, + "AssignedProcessorCount": 7, + "ThreadPoolAvailableWorkerThreads": 32766, + "ThreadPoolAvailableCompletionPortThreads": 1000 + }, + "Memory": { + "AllocatedMemoryInMb": 235, + "PhysicalMemoryInMb": 16250, + "InstalledMemoryInMb": 16384, + "LowMemorySeverity": "None", + "TotalSwapSizeInMb": 1024, + "TotalSwapUsageInMb": 456, + "WorkingSetSwapUsageInMb": 89, + "TotalDirtyInMb": 1 + }, + "Disk": { + "SystemStoreUsedDataFileSizeInMb": 28, + "SystemStoreTotalDataFileSizeInMb": 32, + "TotalFreeSpaceInMb": 52078, + "RemainingStorageSpacePercentage": 22 + }, + "License": { + "Type": "Enterprise", + "ExpirationLeftInSec": 25466947.5, + "UtilizedCpuCores": 8, + "MaxCores": 256 + }, + "Network": { + "TcpActiveConnections": 84, + "ConcurrentRequestsCount": 1, + "TotalRequests": 3, + "RequestsPerSec": 0.03322, + "LastRequestTimeInSec": 0.0264977, + "LastAuthorizedNonClusterAdminRequestTimeInSec": 0.04 + }, + "Certificate": { + "ServerCertificateExpirationLeftInSec": 104, + "WellKnownAdminCertificates": ["a909502dd82ae41433e6f83886b00d4277a32a7b", "4444444444444444444444444444444444444444"] + }, + "Cluster": { + "NodeTag": "A", + "NodeState": "Leader", + "CurrentTerm": 28, + "Index": 104, + "Id": "6b535a18-558f-4e53-a479-a514efc16aab" + }, + "Databases": { + "TotalCount": 25, + "LoadedCount": 2 + } +} diff --git a/plugins/inputs/ravendb/testdata/server_min.json b/plugins/inputs/ravendb/testdata/server_min.json new file mode 100644 index 0000000000000..e22bd03d4460d --- /dev/null +++ b/plugins/inputs/ravendb/testdata/server_min.json @@ -0,0 +1,72 @@ +{ + "ServerVersion": "5.1", + "ServerFullVersion": "5.1.1-custom-51", + "UpTimeInSec": 30, + "ServerProcessId": 26360, + "Config": { + "ServerUrls": [ + "http://127.0.0.1:8080" + ], + "PublicServerUrl": null, + "TcpServerUrls": null, + "PublicTcpServerUrls": null + }, + "Backup": { + "CurrentNumberOfRunningBackups": 2, + "MaxNumberOfConcurrentBackups": 4 + }, + "Cpu": { + "ProcessUsage": 6.28, + "MachineUsage": 41.07, + "MachineIoWait": null, + "ProcessorCount": 8, + "AssignedProcessorCount": 7, + "ThreadPoolAvailableWorkerThreads": 32766, + "ThreadPoolAvailableCompletionPortThreads": 1000 + }, + "Memory": { + "AllocatedMemoryInMb": 235, + "PhysicalMemoryInMb": 16250, + "InstalledMemoryInMb": 16384, + "LowMemorySeverity": "Low", + "TotalSwapSizeInMb": 1024, + "TotalSwapUsageInMb": 456, + "WorkingSetSwapUsageInMb": 89, + "TotalDirtyInMb": 1 + }, + "Disk": { + "SystemStoreUsedDataFileSizeInMb": 28, + "SystemStoreTotalDataFileSizeInMb": 32, + "TotalFreeSpaceInMb": 52078, + "RemainingStorageSpacePercentage": 22 + }, + "License": { + "Type": "Enterprise", + "ExpirationLeftInSec": null, + "UtilizedCpuCores": 8, + "MaxCores": 256 + }, + "Network": { + "TcpActiveConnections": 84, + "ConcurrentRequestsCount": 1, + "TotalRequests": 3, + "RequestsPerSec": 0.03322, + "LastRequestTimeInSec": null, + "LastAuthorizedNonClusterAdminRequestTimeInSec": null + }, + "Certificate": { + "ServerCertificateExpirationLeftInSec": null, + "WellKnownAdminCertificates": null + }, + "Cluster": { + "NodeTag": "A", + "NodeState": "Leader", + "CurrentTerm": 28, + "Index": 104, + "Id": "6b535a18-558f-4e53-a479-a514efc16aab" + }, + "Databases": { + "TotalCount": 25, + "LoadedCount": 2 + } +} diff --git a/plugins/inputs/redfish/README.md 
b/plugins/inputs/redfish/README.md index a22b9d3141741..e42ffecbfb07b 100644 --- a/plugins/inputs/redfish/README.md +++ b/plugins/inputs/redfish/README.md @@ -1,12 +1,16 @@ # Redfish Input Plugin -The `redfish` plugin gathers metrics and status information about CPU temperature, fanspeed, Powersupply, voltage, hostname and Location details (datacenter, placement, rack and room) of hardware servers for which [DMTF's Redfish](https://redfish.dmtf.org/) is enabled. +The `redfish` plugin gathers metrics and status information about CPU +temperature, fanspeed, Powersupply, voltage, hostname and Location details +(datacenter, placement, rack and room) of hardware servers for which [DMTF's +Redfish](https://redfish.dmtf.org/) is enabled. Telegraf minimum version: Telegraf 1.15.0 -### Configuration +## Configuration -```toml +```toml @sample.conf +# Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs [[inputs.redfish]] ## Redfish API Base URL. address = "https://127.0.0.1:5000" @@ -29,11 +33,12 @@ Telegraf minimum version: Telegraf 1.15.0 # insecure_skip_verify = false ``` -### Metrics +## Metrics - redfish_thermal_temperatures - tags: - source + - member_id - address - name - datacenter (available only if location data is found) @@ -49,10 +54,10 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal - -+ redfish_thermal_fans +- redfish_thermal_fans - tags: - source + - member_id - address - name - datacenter (available only if location data is found) @@ -68,11 +73,11 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal - - redfish_power_powersupplies - tags: - source - address + - member_id - name - datacenter (available only if location data is found) - rack (available only if location data is found) @@ -87,11 +92,11 @@ Telegraf minimum version: Telegraf 1.15.0 - power_input_watts - power_output_watts - - redfish_power_voltages (available only if voltage data is found) - tags: - source - address + - member_id - name - datacenter (available only if location data is found) - rack (available only if location data is found) @@ -106,22 +111,21 @@ Telegraf minimum version: Telegraf 1.15.0 - lower_threshold_critical - lower_threshold_fatal - -### Example Output - -``` -redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_temperatures,source=test-hostname,name=SystemBoardExhaust,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=33,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1A,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" 
reading_rpm=17720,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1B,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17760,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_thermal_fans,source=test-hostname,name=SystemBoardFan2A,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17880,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_powersupplies,source=test-hostname,name=PS1Status,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=208,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 -redfish_power_powersupplies,source=test-hostname,name=PS2Status,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=194,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=CPU1MEM347,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 -redfish_power_voltages,source=test-hostname,name=PS1voltage1,address=http://190.0.0.1,datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=208,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +## Example Output + +```text +redfish_thermal_temperatures,source=test-hostname,name=CPU1,address=http://190.0.0.1,member_id="0",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=41,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=CPU2,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=51,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=SystemBoardInlet,address=http://190.0.0.1,member_id="2",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=23,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_temperatures,source=test-hostname,name=SystemBoardExhaust,address=http://190.0.0.1,member_id="3",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_celsius=33,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1A,address=http://190.0.0.1,member_id="0",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled"
reading_rpm=17720,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan1B,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17760,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_thermal_fans,source=test-hostname,name=SystemBoardFan2A,address=http://190.0.0.1,member_id="2",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_rpm=17880,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_powersupplies,source=test-hostname,name=PS1Status,address=http://190.0.0.1,member_id="0",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=208,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 +redfish_power_powersupplies,source=test-hostname,name=PS2Status,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" power_capacity_watts=750,power_input_watts=900,power_output_watts=194,last_power_output_watts=98,line_input_reading_volts=204 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,member_id="0",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM345,address=http://190.0.0.1,member_id="1",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=CPU1MEM347,address=http://190.0.0.1,member_id="2",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=1,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 +redfish_power_voltages,source=test-hostname,name=PS1voltage1,address=http://190.0.0.1,member_id="12",datacenter="Tampa",health="OK",rack="12",room="tbc",row="3",state="Enabled" reading_volts=208,upper_threshold_critical=59,upper_threshold_fatal=64 1582114112000000000 ``` diff --git a/plugins/inputs/redfish/redfish.go b/plugins/inputs/redfish/redfish.go index 54d1d15b8c097..55dea5e17de34 100644 --- a/plugins/inputs/redfish/redfish.go +++ b/plugins/inputs/redfish/redfish.go @@ -1,9 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator package redfish import ( + _ "embed" "encoding/json" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -16,34 +18,15 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -const description = "Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs" -const sampleConfig = ` - ## Server url - address = "https://127.0.0.1:5000" - - ## Username, Password for hardware server - username = "root" - password = "password123456" - - ## ComputerSystemId - computer_system_id="2M220100SL" - - ## Amount of time allowed to complete the HTTP request - # timeout = "5s" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
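For reference on the sample-config migration this hunk starts (the redis plugin below gets the same treatment): the inline `sampleConfig` Go constant is replaced by a standalone `sample.conf` that is compiled into the binary. A minimal sketch of the `go:embed` mechanism, assuming a `sample.conf` file sits beside the source file:

```go
package main

import (
	_ "embed" // blank import enables //go:embed for plain string variables
	"fmt"
)

// The directive must sit immediately above the variable it populates; the
// compiler copies the contents of sample.conf into sampleConfig at build time.
//go:embed sample.conf
var sampleConfig string

func main() {
	fmt.Print(sampleConfig)
}
```

Keeping the sample in a single file lets the `readme_config_includer` generator and `SampleConfig()` serve identical text, which is what the `@sample.conf` marker in the README above refers to.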
+//go:embed sample.conf +var sampleConfig string type Redfish struct { Address string `toml:"address"` Username string `toml:"username"` Password string `toml:"password"` - ComputerSystemId string `toml:"computer_system_id"` + ComputerSystemID string `toml:"computer_system_id"` Timeout config.Duration `toml:"timeout"` client http.Client @@ -73,6 +56,7 @@ type Chassis struct { type Power struct { PowerSupplies []struct { Name string + MemberID string PowerInputWatts *float64 PowerCapacityWatts *float64 PowerOutputWatts *float64 @@ -82,6 +66,7 @@ type Power struct { } Voltages []struct { Name string + MemberID string ReadingVolts *float64 UpperThresholdCritical *float64 UpperThresholdFatal *float64 @@ -94,6 +79,7 @@ type Power struct { type Thermal struct { Fans []struct { Name string + MemberID string Reading *int64 ReadingUnits *string UpperThresholdCritical *int64 @@ -104,6 +90,7 @@ type Thermal struct { } Temperatures []struct { Name string + MemberID string ReadingCelsius *float64 UpperThresholdCritical *float64 UpperThresholdFatal *float64 @@ -129,11 +116,7 @@ type Status struct { Health string } -func (r *Redfish) Description() string { - return description -} - -func (r *Redfish) SampleConfig() string { +func (*Redfish) SampleConfig() string { return sampleConfig } @@ -146,7 +129,7 @@ func (r *Redfish) Init() error { return fmt.Errorf("did not provide username and password") } - if r.ComputerSystemId == "" { + if r.ComputerSystemID == "" { return fmt.Errorf("did not provide the computer system ID of the resource") } @@ -172,8 +155,8 @@ func (r *Redfish) Init() error { return nil } -func (r *Redfish) getData(url string, payload interface{}) error { - req, err := http.NewRequest("GET", url, nil) +func (r *Redfish) getData(address string, payload interface{}) error { + req, err := http.NewRequest("GET", address, nil) if err != nil { return err } @@ -181,6 +164,7 @@ func (r *Redfish) getData(url string, payload interface{}) error { req.SetBasicAuth(r.Username, r.Password) req.Header.Set("Accept", "application/json") req.Header.Set("Content-Type", "application/json") + req.Header.Set("OData-Version", "4.0") resp, err := r.client.Do(req) if err != nil { return err @@ -188,12 +172,13 @@ func (r *Redfish) getData(url string, payload interface{}) error { defer resp.Body.Close() if resp.StatusCode != 200 { - return fmt.Errorf("received status code %d (%s), expected 200", + return fmt.Errorf("received status code %d (%s) for address %s, expected 200", resp.StatusCode, - http.StatusText(resp.StatusCode)) + http.StatusText(resp.StatusCode), + r.Address) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return err } @@ -252,7 +237,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { address = r.baseURL.Host } - system, err := r.getComputerSystem(r.ComputerSystemId) + system, err := r.getComputerSystem(r.ComputerSystemID) if err != nil { return err } @@ -270,6 +255,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range thermal.Temperatures { tags := map[string]string{} + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -294,6 +280,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range thermal.Fans { tags := map[string]string{} fields := make(map[string]interface{}) + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -325,6 +312,7 @@ func (r *Redfish) Gather(acc 
telegraf.Accumulator) error { for _, j := range power.PowerSupplies { tags := map[string]string{} + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname @@ -348,6 +336,7 @@ func (r *Redfish) Gather(acc telegraf.Accumulator) error { for _, j := range power.Voltages { tags := map[string]string{} + tags["member_id"] = j.MemberID tags["address"] = address tags["name"] = j.Name tags["source"] = system.Hostname diff --git a/plugins/inputs/redfish/redfish_test.go b/plugins/inputs/redfish/redfish_test.go index 8821b3d97557f..04a102014490f 100644 --- a/plugins/inputs/redfish/redfish_test.go +++ b/plugins/inputs/redfish/redfish_test.go @@ -8,15 +8,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestDellApis(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -43,11 +42,12 @@ func TestDellApis(t *testing.T) { address, _, err := net.SplitHostPort(u.Host) require.NoError(t, err) - expected_metrics := []telegraf.Metric{ + expectedMetrics := []telegraf.Metric{ testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ "name": "CPU1 Temp", + "member_id": "iDRAC.Embedded.1#CPU1Temp", "source": "tpa-hostname", "address": address, "datacenter": "", @@ -71,6 +71,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan1A", + "member_id": "0x17||Fan.Embedded.1A", "address": address, "datacenter": "", "health": "OK", @@ -91,6 +92,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan1B", + "member_id": "0x17||Fan.Embedded.1B", "address": address, "datacenter": "", "health": "OK", @@ -111,6 +113,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan2A", + "member_id": "0x17||Fan.Embedded.2A", "address": address, "datacenter": "", "health": "OK", @@ -131,6 +134,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan2B", + "member_id": "0x17||Fan.Embedded.2B", "address": address, "datacenter": "", "health": "OK", @@ -151,6 +155,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan3A", + "member_id": "0x17||Fan.Embedded.3A", "address": address, "datacenter": "", "health": "OK", @@ -171,6 +176,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan3B", + "member_id": "0x17||Fan.Embedded.3B", "address": address, "datacenter": "", "health": "OK", @@ -191,6 +197,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan4A", + "member_id": "0x17||Fan.Embedded.4A", "address": address, "datacenter": "", "health": "OK", @@ -211,6 +218,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan4B", + "member_id": "0x17||Fan.Embedded.4B", "address": address, "datacenter": "", "health": "OK", @@ -231,6 +239,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan5A", + "member_id": "0x17||Fan.Embedded.5A", "address": address, "datacenter": "", "health": "OK", @@ -251,6 +260,7 @@ func TestDellApis(t 
*testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan5B", + "member_id": "0x17||Fan.Embedded.5B", "address": address, "datacenter": "", "health": "OK", @@ -271,6 +281,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan6A", + "member_id": "0x17||Fan.Embedded.6A", "address": address, "datacenter": "", "health": "OK", @@ -291,6 +302,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan6B", + "member_id": "0x17||Fan.Embedded.6B", "address": address, "datacenter": "", "health": "OK", @@ -311,6 +323,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan7A", + "member_id": "0x17||Fan.Embedded.7A", "address": address, "datacenter": "", "health": "OK", @@ -331,6 +344,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan7B", + "member_id": "0x17||Fan.Embedded.7B", "address": address, "datacenter": "", "health": "OK", @@ -351,6 +365,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan8A", + "member_id": "0x17||Fan.Embedded.8A", "address": address, "datacenter": "", "health": "OK", @@ -371,6 +386,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board Fan8B", + "member_id": "0x17||Fan.Embedded.8B", "address": address, "datacenter": "", "health": "OK", @@ -391,6 +407,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "PS1 Status", + "member_id": "PSU.Slot.1", "address": address, "datacenter": "", "health": "OK", @@ -412,6 +429,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board DIMM PG", + "member_id": "iDRAC.Embedded.1#SystemBoardDIMMPG", "address": address, "datacenter": "", "health": "OK", @@ -430,6 +448,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board NDC PG", + "member_id": "iDRAC.Embedded.1#SystemBoardNDCPG", "address": address, "datacenter": "", "health": "OK", @@ -449,6 +468,7 @@ func TestDellApis(t *testing.T) { map[string]string{ "source": "tpa-hostname", "name": "System Board PS1 PG FAIL", + "member_id": "iDRAC.Embedded.1#SystemBoardPS1PGFAIL", "address": address, "datacenter": "", "health": "OK", @@ -467,22 +487,20 @@ func TestDellApis(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator err = plugin.Gather(&acc) require.NoError(t, err) require.True(t, acc.HasMeasurement("redfish_thermal_temperatures")) - testutil.RequireMetricsEqual(t, expected_metrics, acc.GetTelegrafMetrics(), + testutil.RequireMetricsEqual(t, expectedMetrics, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestHPApis(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -509,15 +527,16 @@ func TestHPApis(t *testing.T) { address, _, err := net.SplitHostPort(u.Host) require.NoError(t, err) - expected_metrics_hp := []telegraf.Metric{ + expectedMetricsHp := []telegraf.Metric{ testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ - "name": "01-Inlet Ambient", - "source": 
"tpa-hostname", - "address": address, - "health": "OK", - "state": "Enabled", + "name": "01-Inlet Ambient", + "member_id": "0", + "source": "tpa-hostname", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_celsius": 19.0, @@ -529,11 +548,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_temperatures", map[string]string{ - "name": "44-P/S 2 Zone", - "source": "tpa-hostname", - "address": address, - "health": "OK", - "state": "Enabled", + "name": "44-P/S 2 Zone", + "source": "tpa-hostname", + "member_id": "42", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_celsius": 34.0, @@ -545,11 +565,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 1", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 1", + "member_id": "0", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -559,11 +580,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 2", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 2", + "member_id": "1", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -573,11 +595,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_thermal_fans", map[string]string{ - "source": "tpa-hostname", - "name": "Fan 3", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "Fan 3", + "member_id": "2", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "reading_percent": 23, @@ -587,11 +610,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_power_powersupplies", map[string]string{ - "source": "tpa-hostname", - "name": "HpeServerPowerSupply", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "HpeServerPowerSupply", + "member_id": "0", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "power_capacity_watts": 800.0, @@ -603,11 +627,12 @@ func TestHPApis(t *testing.T) { testutil.MustMetric( "redfish_power_powersupplies", map[string]string{ - "source": "tpa-hostname", - "name": "HpeServerPowerSupply", - "address": address, - "health": "OK", - "state": "Enabled", + "source": "tpa-hostname", + "name": "HpeServerPowerSupply", + "member_id": "1", + "address": address, + "health": "OK", + "state": "Enabled", }, map[string]interface{}{ "power_capacity_watts": 800.0, @@ -618,19 +643,19 @@ func TestHPApis(t *testing.T) { ), } - hp_plugin := &Redfish{ + hpPlugin := &Redfish{ Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "1", + ComputerSystemID: "1", } - hp_plugin.Init() - var hp_acc testutil.Accumulator + require.NoError(t, hpPlugin.Init()) + var hpAcc testutil.Accumulator - err = hp_plugin.Gather(&hp_acc) + err = hpPlugin.Gather(&hpAcc) require.NoError(t, err) - require.True(t, hp_acc.HasMeasurement("redfish_thermal_temperatures")) - testutil.RequireMetricsEqual(t, expected_metrics_hp, hp_acc.GetTelegrafMetrics(), + require.True(t, hpAcc.HasMeasurement("redfish_thermal_temperatures")) + testutil.RequireMetricsEqual(t, expectedMetricsHp, 
hpAcc.GetTelegrafMetrics(), testutil.IgnoreTime()) } @@ -642,26 +667,8 @@ func checkAuth(r *http.Request, username, password string) bool { return user == username && pass == password } -func TestConnection(t *testing.T) { - - r := &Redfish{ - Address: "http://127.0.0.1", - Username: "test", - Password: "test", - ComputerSystemId: "System.Embedded.1", - } - - var acc testutil.Accumulator - r.Init() - err := r.Gather(&acc) - require.Error(t, err) - require.Contains(t, err.Error(), "connect: connection refused") -} - func TestInvalidUsernameorPassword(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return @@ -680,19 +687,18 @@ func TestInvalidUsernameorPassword(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } var acc testutil.Accumulator - r.Init() - err := r.Gather(&acc) - require.Error(t, err) - require.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") + require.NoError(t, r.Init()) + u, err := url.Parse(ts.URL) + require.NoError(t, err) + err = r.Gather(&acc) + require.EqualError(t, err, "received status code 401 (Unauthorized) for address http://"+u.Host+", expected 200") } func TestNoUsernameorPasswordConfiguration(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "testing", "testing") { http.Error(w, "Unauthorized.", 401) return @@ -709,7 +715,7 @@ func TestNoUsernameorPasswordConfiguration(t *testing.T) { r := &Redfish{ Address: ts.URL, - ComputerSystemId: "System.Embedded.1", + ComputerSystemID: "System.Embedded.1", } err := r.Init() @@ -718,7 +724,6 @@ func TestNoUsernameorPasswordConfiguration(t *testing.T) { } func TestInvalidDellJSON(t *testing.T) { - tests := []struct { name string thermalfilename string @@ -756,46 +761,46 @@ func TestInvalidDellJSON(t *testing.T) { }, } for _, tt := range tests { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !checkAuth(r, "test", "test") { + http.Error(w, "Unauthorized.", 401) + return + } - if !checkAuth(r, "test", "test") { - http.Error(w, "Unauthorized.", 401) - return - } + switch r.URL.Path { + case "/redfish/v1/Chassis/System.Embedded.1/Thermal": + http.ServeFile(w, r, tt.thermalfilename) + case "/redfish/v1/Chassis/System.Embedded.1/Power": + http.ServeFile(w, r, tt.powerfilename) + case "/redfish/v1/Chassis/System.Embedded.1": + http.ServeFile(w, r, tt.chassisfilename) + case "/redfish/v1/Systems/System.Embedded.1": + http.ServeFile(w, r, tt.hostnamefilename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() - switch r.URL.Path { - case "/redfish/v1/Chassis/System.Embedded.1/Thermal": - http.ServeFile(w, r, tt.thermalfilename) - case "/redfish/v1/Chassis/System.Embedded.1/Power": - http.ServeFile(w, r, tt.powerfilename) - case "/redfish/v1/Chassis/System.Embedded.1": - http.ServeFile(w, r, tt.chassisfilename) - case "/redfish/v1/Systems/System.Embedded.1": - http.ServeFile(w, r, tt.hostnamefilename) - default: - w.WriteHeader(http.StatusNotFound) + plugin := &Redfish{ + Address: ts.URL, + Username: "test", + Password: "test", + ComputerSystemID: "System.Embedded.1", } - })) - defer ts.Close() - - 
plugin := &Redfish{ - Address: ts.URL, - Username: "test", - Password: "test", - ComputerSystemId: "System.Embedded.1", - } - plugin.Init() + require.NoError(t, plugin.Init()) - var acc testutil.Accumulator - err := plugin.Gather(&acc) - require.Error(t, err) - require.Contains(t, err.Error(), "error parsing input:") + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.Error(t, err) + require.Contains(t, err.Error(), "error parsing input:") + }) } } func TestInvalidHPJSON(t *testing.T) { - tests := []struct { name string thermalfilename string @@ -828,7 +833,6 @@ func TestInvalidHPJSON(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !checkAuth(r, "test", "test") { http.Error(w, "Unauthorized.", 401) return @@ -853,10 +857,10 @@ func TestInvalidHPJSON(t *testing.T) { Address: ts.URL, Username: "test", Password: "test", - ComputerSystemId: "System.Embedded.2", + ComputerSystemID: "System.Embedded.2", } - plugin.Init() + require.NoError(t, plugin.Init()) var acc testutil.Accumulator err := plugin.Gather(&acc) diff --git a/plugins/inputs/redfish/sample.conf b/plugins/inputs/redfish/sample.conf new file mode 100644 index 0000000000000..1cced5c085cde --- /dev/null +++ b/plugins/inputs/redfish/sample.conf @@ -0,0 +1,21 @@ +# Read CPU, Fans, Powersupply and Voltage metrics of hardware server through redfish APIs +[[inputs.redfish]] + ## Redfish API Base URL. + address = "https://127.0.0.1:5000" + + ## Credentials for the Redfish API. + username = "root" + password = "password123456" + + ## System Id to collect data for in Redfish APIs. + computer_system_id="System.Embedded.1" + + ## Amount of time allowed to complete the HTTP request + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index c8f343b262aca..2512499c046be 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -1,28 +1,39 @@ # Redis Input Plugin -### Configuration: +The Redis input plugin gathers metrics from one or many Redis servers. -```toml -# Read Redis's basic status information +## Configuration + +```toml @sample.conf +# Read metrics from one or many redis servers [[inputs.redis]] ## specify servers via a url matching: ## [protocol://][:password]@address[:port] ## e.g. ## tcp://localhost:6379 ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis.sock ## ## If no servers are specified, then localhost is used as the host. ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## Optional. 
Specify redis commands to retrieve values # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" ## specify server password # password = "s#cr@t%" + ## specify username for ACL auth (Redis 6.0+) + # username = "default" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -31,105 +42,107 @@ # insecure_skip_verify = true ``` -### Measurements & Fields: +## Metrics -The plugin gathers the results of the [INFO](https://redis.io/commands/info) redis command. -There are two separate measurements: _redis_ and _redis\_keyspace_, the latter is used for gathering database related statistics. +The plugin gathers the results of the [INFO](https://redis.io/commands/info) +redis command. There are two separate measurements: _redis_ and +_redis\_keyspace_, the latter is used for gathering database related statistics. -Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) and the elapsed time since the last rdb save (rdb\_last\_save\_time\_elapsed). +Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) +and the elapsed time since the last rdb save (rdb\_last\_save\_time\_elapsed). - redis - - keyspace_hitrate(float, number) - - rdb_last_save_time_elapsed(int, seconds) + - keyspace_hitrate(float, number) + - rdb_last_save_time_elapsed(int, seconds) **Server** - - uptime(int, seconds) - - lru_clock(int, number) - - redis_version(string) + - uptime(int, seconds) + - lru_clock(int, number) + - redis_version(string) **Clients** - - clients(int, number) - - client_longest_output_list(int, number) - - client_biggest_input_buf(int, number) - - blocked_clients(int, number) + - clients(int, number) + - client_longest_output_list(int, number) + - client_biggest_input_buf(int, number) + - blocked_clients(int, number) **Memory** - - used_memory(int, bytes) - - used_memory_rss(int, bytes) - - used_memory_peak(int, bytes) - - total_system_memory(int, bytes) - - used_memory_lua(int, bytes) - - maxmemory(int, bytes) - - maxmemory_policy(string) - - mem_fragmentation_ratio(float, number) - - **Persistance** - - loading(int,flag) - - rdb_changes_since_last_save(int, number) - - rdb_bgsave_in_progress(int, flag) - - rdb_last_save_time(int, seconds) - - rdb_last_bgsave_status(string) - - rdb_last_bgsave_time_sec(int, seconds) - - rdb_current_bgsave_time_sec(int, seconds) - - aof_enabled(int, flag) - - aof_rewrite_in_progress(int, flag) - - aof_rewrite_scheduled(int, flag) - - aof_last_rewrite_time_sec(int, seconds) - - aof_current_rewrite_time_sec(int, seconds) - - aof_last_bgrewrite_status(string) - - aof_last_write_status(string) + - used_memory(int, bytes) + - used_memory_rss(int, bytes) + - used_memory_peak(int, bytes) + - total_system_memory(int, bytes) + - used_memory_lua(int, bytes) + - maxmemory(int, bytes) + - maxmemory_policy(string) + - mem_fragmentation_ratio(float, number) + + **Persistence** + - loading(int,flag) + - rdb_changes_since_last_save(int, number) + - rdb_bgsave_in_progress(int, flag) + - rdb_last_save_time(int, seconds) + - rdb_last_bgsave_status(string) + - rdb_last_bgsave_time_sec(int, seconds) + - rdb_current_bgsave_time_sec(int, seconds) + - aof_enabled(int, flag) + - 
aof_rewrite_in_progress(int, flag) + - aof_rewrite_scheduled(int, flag) + - aof_last_rewrite_time_sec(int, seconds) + - aof_current_rewrite_time_sec(int, seconds) + - aof_last_bgrewrite_status(string) + - aof_last_write_status(string) **Stats** - - total_connections_received(int, number) - - total_commands_processed(int, number) - - instantaneous_ops_per_sec(int, number) - - total_net_input_bytes(int, bytes) - - total_net_output_bytes(int, bytes) - - instantaneous_input_kbps(float, KB/sec) - - instantaneous_output_kbps(float, KB/sec) - - rejected_connections(int, number) - - sync_full(int, number) - - sync_partial_ok(int, number) - - sync_partial_err(int, number) - - expired_keys(int, number) - - evicted_keys(int, number) - - keyspace_hits(int, number) - - keyspace_misses(int, number) - - pubsub_channels(int, number) - - pubsub_patterns(int, number) - - latest_fork_usec(int, microseconds) - - migrate_cached_sockets(int, number) + - total_connections_received(int, number) + - total_commands_processed(int, number) + - instantaneous_ops_per_sec(int, number) + - total_net_input_bytes(int, bytes) + - total_net_output_bytes(int, bytes) + - instantaneous_input_kbps(float, KB/sec) + - instantaneous_output_kbps(float, KB/sec) + - rejected_connections(int, number) + - sync_full(int, number) + - sync_partial_ok(int, number) + - sync_partial_err(int, number) + - expired_keys(int, number) + - evicted_keys(int, number) + - keyspace_hits(int, number) + - keyspace_misses(int, number) + - pubsub_channels(int, number) + - pubsub_patterns(int, number) + - latest_fork_usec(int, microseconds) + - migrate_cached_sockets(int, number) **Replication** - - connected_slaves(int, number) - - master_link_down_since_seconds(int, number) - - master_link_status(string) - - master_repl_offset(int, number) - - second_repl_offset(int, number) - - repl_backlog_active(int, number) - - repl_backlog_size(int, bytes) - - repl_backlog_first_byte_offset(int, number) - - repl_backlog_histlen(int, bytes) + - connected_slaves(int, number) + - master_link_down_since_seconds(int, number) + - master_link_status(string) + - master_repl_offset(int, number) + - second_repl_offset(int, number) + - repl_backlog_active(int, number) + - repl_backlog_size(int, bytes) + - repl_backlog_first_byte_offset(int, number) + - repl_backlog_histlen(int, bytes) **CPU** - - used_cpu_sys(float, number) - - used_cpu_user(float, number) - - used_cpu_sys_children(float, number) - - used_cpu_user_children(float, number) + - used_cpu_sys(float, number) + - used_cpu_user(float, number) + - used_cpu_sys_children(float, number) + - used_cpu_user_children(float, number) **Cluster** - - cluster_enabled(int, flag) + - cluster_enabled(int, flag) - redis_keyspace - - keys(int, number) - - expires(int, number) - - avg_ttl(int, number) + - keys(int, number) + - expires(int, number) + - avg_ttl(int, number) - redis_cmdstat Every Redis used command will have 3 new fields: - - calls(int, number) - - usec(int, mircoseconds) - - usec_per_call(float, microseconds) + - calls(int, number) + - usec(int, microseconds) + - usec_per_call(float, microseconds) - redis_replication - tags: @@ -142,22 +155,23 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - lag(int, number) - offset(int, number) -### Tags: +### Tags - All measurements have the following tags: - - port - - server - - replication_role + - port + - server + - replication_role - The redis_keyspace measurement has an additional database tag: - - database + - database - The redis_cmdstat
measurement has an additional tag: - - command + - command -### Example Output: +## Example Output Using this configuration: + ```toml [[inputs.redis]] ## specify servers via a url matching: @@ -172,22 +186,26 @@ Using this configuration: ``` When run with: -``` + +```sh ./telegraf --config telegraf.conf --input-filter redis --test ``` It produces: -``` + +```shell * Plugin: redis, Collection 1 > redis,server=localhost,port=6379,replication_role=master,host=host keyspace_hitrate=1,clients=2i,blocked_clients=0i,instantaneous_input_kbps=0,sync_full=0i,pubsub_channels=0i,pubsub_patterns=0i,total_net_output_bytes=6659253i,used_memory=842448i,total_system_memory=8351916032i,aof_current_rewrite_time_sec=-1i,rdb_changes_since_last_save=0i,sync_partial_err=0i,latest_fork_usec=508i,instantaneous_output_kbps=0,expired_keys=0i,used_memory_peak=843416i,aof_rewrite_in_progress=0i,aof_last_bgrewrite_status="ok",migrate_cached_sockets=0i,connected_slaves=0i,maxmemory_policy="noeviction",aof_rewrite_scheduled=0i,total_net_input_bytes=3125i,used_memory_rss=9564160i,repl_backlog_histlen=0i,rdb_last_bgsave_status="ok",aof_last_rewrite_time_sec=-1i,keyspace_misses=0i,client_biggest_input_buf=5i,used_cpu_user=1.33,maxmemory=0i,rdb_current_bgsave_time_sec=-1i,total_commands_processed=271i,repl_backlog_size=1048576i,used_cpu_sys=3,uptime=2822i,lru_clock=16706281i,used_memory_lua=37888i,rejected_connections=0i,sync_partial_ok=0i,evicted_keys=0i,rdb_last_save_time_elapsed=1922i,rdb_last_save_time=1493099368i,instantaneous_ops_per_sec=0i,used_cpu_user_children=0,client_longest_output_list=0i,master_repl_offset=0i,repl_backlog_active=0i,keyspace_hits=2i,used_cpu_sys_children=0,cluster_enabled=0i,rdb_last_bgsave_time_sec=0i,aof_last_write_status="ok",total_connections_received=263i,aof_enabled=0i,repl_backlog_first_byte_offset=0i,mem_fragmentation_ratio=11.35,loading=0i,rdb_bgsave_in_progress=0i 1493101290000000000 ``` redis_keyspace: -``` + +```shell > redis_keyspace,database=db1,host=host,server=localhost,port=6379,replication_role=master keys=1i,expires=0i,avg_ttl=0i 1493101350000000000 ``` redis_command: -``` + +```shell > redis_cmdstat,command=publish,host=host,port=6379,replication_role=master,server=localhost calls=68113i,usec=325146i,usec_per_call=4.77 1559227136000000000 ``` diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 3a76a351c05de..70f815f317d67 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -1,22 +1,30 @@ +//go:generate ../../../tools/readme_config_includer/generator package redis import ( "bufio" + "context" + _ "embed" "fmt" "io" "net/url" + "reflect" "regexp" "strconv" "strings" "sync" "time" - "github.com/go-redis/redis" + "github.com/go-redis/redis/v8" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
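Before the code, one derived value worth calling out: the `keyspace_hitrate` field documented in the README above is computed by the plugin in `gatherInfoOutput` (further down this file), not reported by Redis itself. A small sketch of that derivation:

```go
package main

import "fmt"

// keyspaceHitrate mirrors the plugin's calculation:
// hits / (hits + misses), defined as 0 when both counters are zero.
func keyspaceHitrate(hits, misses int64) float64 {
	if hits == 0 && misses == 0 {
		return 0
	}
	return float64(hits) / float64(hits+misses)
}

func main() {
	fmt.Println(keyspaceHitrate(2, 0)) // 1
	fmt.Println(keyspaceHitrate(1, 1)) // 0.5
}
```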
+//go:embed sample.conf +var sampleConfig string + type RedisCommand struct { Command []interface{} Field string @@ -26,19 +34,21 @@ type RedisCommand struct { type Redis struct { Commands []*RedisCommand Servers []string + Username string Password string tls.ClientConfig - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` - clients []Client - initialized bool + clients []Client + connected bool } type Client interface { Do(returnType string, args ...interface{}) (interface{}, error) Info() *redis.StringCmd BaseTags() map[string]string + Close() error } type RedisClient struct { @@ -46,23 +56,134 @@ type RedisClient struct { tags map[string]string } +// RedisFieldTypes defines the types expected for each of the fields redis reports on +type RedisFieldTypes struct { + ActiveDefragHits int64 `json:"active_defrag_hits"` + ActiveDefragKeyHits int64 `json:"active_defrag_key_hits"` + ActiveDefragKeyMisses int64 `json:"active_defrag_key_misses"` + ActiveDefragMisses int64 `json:"active_defrag_misses"` + ActiveDefragRunning int64 `json:"active_defrag_running"` + AllocatorActive int64 `json:"allocator_active"` + AllocatorAllocated int64 `json:"allocator_allocated"` + AllocatorFragBytes float64 `json:"allocator_frag_bytes"` // for historical reasons this was left as float although redis reports it as an int + AllocatorFragRatio float64 `json:"allocator_frag_ratio"` + AllocatorResident int64 `json:"allocator_resident"` + AllocatorRssBytes int64 `json:"allocator_rss_bytes"` + AllocatorRssRatio float64 `json:"allocator_rss_ratio"` + AofCurrentRewriteTimeSec int64 `json:"aof_current_rewrite_time_sec"` + AofEnabled int64 `json:"aof_enabled"` + AofLastBgrewriteStatus string `json:"aof_last_bgrewrite_status"` + AofLastCowSize int64 `json:"aof_last_cow_size"` + AofLastRewriteTimeSec int64 `json:"aof_last_rewrite_time_sec"` + AofLastWriteStatus string `json:"aof_last_write_status"` + AofRewriteInProgress int64 `json:"aof_rewrite_in_progress"` + AofRewriteScheduled int64 `json:"aof_rewrite_scheduled"` + BlockedClients int64 `json:"blocked_clients"` + ClientRecentMaxInputBuffer int64 `json:"client_recent_max_input_buffer"` + ClientRecentMaxOutputBuffer int64 `json:"client_recent_max_output_buffer"` + Clients int64 `json:"clients"` + ClientsInTimeoutTable int64 `json:"clients_in_timeout_table"` + ClusterEnabled int64 `json:"cluster_enabled"` + ConnectedSlaves int64 `json:"connected_slaves"` + EvictedKeys int64 `json:"evicted_keys"` + ExpireCycleCPUMilliseconds int64 `json:"expire_cycle_cpu_milliseconds"` + ExpiredKeys int64 `json:"expired_keys"` + ExpiredStalePerc float64 `json:"expired_stale_perc"` + ExpiredTimeCapReachedCount int64 `json:"expired_time_cap_reached_count"` + InstantaneousInputKbps float64 `json:"instantaneous_input_kbps"` + InstantaneousOpsPerSec int64 `json:"instantaneous_ops_per_sec"` + InstantaneousOutputKbps float64 `json:"instantaneous_output_kbps"` + IoThreadedReadsProcessed int64 `json:"io_threaded_reads_processed"` + IoThreadedWritesProcessed int64 `json:"io_threaded_writes_processed"` + KeyspaceHits int64 `json:"keyspace_hits"` + KeyspaceMisses int64 `json:"keyspace_misses"` + LatestForkUsec int64 `json:"latest_fork_usec"` + LazyfreePendingObjects int64 `json:"lazyfree_pending_objects"` + Loading int64 `json:"loading"` + LruClock int64 `json:"lru_clock"` + MasterReplOffset int64 `json:"master_repl_offset"` + MaxMemory int64 `json:"maxmemory"` + MaxMemoryPolicy string `json:"maxmemory_policy"` + MemAofBuffer int64 `json:"mem_aof_buffer"` + MemClientsNormal int64 
`json:"mem_clients_normal"` + MemClientsSlaves int64 `json:"mem_clients_slaves"` + MemFragmentationBytes int64 `json:"mem_fragmentation_bytes"` + MemFragmentationRatio float64 `json:"mem_fragmentation_ratio"` + MemNotCountedForEvict int64 `json:"mem_not_counted_for_evict"` + MemReplicationBacklog int64 `json:"mem_replication_backlog"` + MigrateCachedSockets int64 `json:"migrate_cached_sockets"` + ModuleForkInProgress int64 `json:"module_fork_in_progress"` + ModuleForkLastCowSize int64 `json:"module_fork_last_cow_size"` + NumberOfCachedScripts int64 `json:"number_of_cached_scripts"` + PubsubChannels int64 `json:"pubsub_channels"` + PubsubPatterns int64 `json:"pubsub_patterns"` + RdbBgsaveInProgress int64 `json:"rdb_bgsave_in_progress"` + RdbChangesSinceLastSave int64 `json:"rdb_changes_since_last_save"` + RdbCurrentBgsaveTimeSec int64 `json:"rdb_current_bgsave_time_sec"` + RdbLastBgsaveStatus string `json:"rdb_last_bgsave_status"` + RdbLastBgsaveTimeSec int64 `json:"rdb_last_bgsave_time_sec"` + RdbLastCowSize int64 `json:"rdb_last_cow_size"` + RdbLastSaveTime int64 `json:"rdb_last_save_time"` + RdbLastSaveTimeElapsed int64 `json:"rdb_last_save_time_elapsed"` + RedisVersion string `json:"redis_version"` + RejectedConnections int64 `json:"rejected_connections"` + ReplBacklogActive int64 `json:"repl_backlog_active"` + ReplBacklogFirstByteOffset int64 `json:"repl_backlog_first_byte_offset"` + ReplBacklogHistlen int64 `json:"repl_backlog_histlen"` + ReplBacklogSize int64 `json:"repl_backlog_size"` + RssOverheadBytes int64 `json:"rss_overhead_bytes"` + RssOverheadRatio float64 `json:"rss_overhead_ratio"` + SecondReplOffset int64 `json:"second_repl_offset"` + SlaveExpiresTrackedKeys int64 `json:"slave_expires_tracked_keys"` + SyncFull int64 `json:"sync_full"` + SyncPartialErr int64 `json:"sync_partial_err"` + SyncPartialOk int64 `json:"sync_partial_ok"` + TotalCommandsProcessed int64 `json:"total_commands_processed"` + TotalConnectionsReceived int64 `json:"total_connections_received"` + TotalNetInputBytes int64 `json:"total_net_input_bytes"` + TotalNetOutputBytes int64 `json:"total_net_output_bytes"` + TotalReadsProcessed int64 `json:"total_reads_processed"` + TotalSystemMemory int64 `json:"total_system_memory"` + TotalWritesProcessed int64 `json:"total_writes_processed"` + TrackingClients int64 `json:"tracking_clients"` + TrackingTotalItems int64 `json:"tracking_total_items"` + TrackingTotalKeys int64 `json:"tracking_total_keys"` + TrackingTotalPrefixes int64 `json:"tracking_total_prefixes"` + UnexpectedErrorReplies int64 `json:"unexpected_error_replies"` + Uptime int64 `json:"uptime"` + UsedCPUSys float64 `json:"used_cpu_sys"` + UsedCPUSysChildren float64 `json:"used_cpu_sys_children"` + UsedCPUUser float64 `json:"used_cpu_user"` + UsedCPUUserChildren float64 `json:"used_cpu_user_children"` + UsedMemory int64 `json:"used_memory"` + UsedMemoryDataset int64 `json:"used_memory_dataset"` + UsedMemoryDatasetPerc float64 `json:"used_memory_dataset_perc"` + UsedMemoryLua int64 `json:"used_memory_lua"` + UsedMemoryOverhead int64 `json:"used_memory_overhead"` + UsedMemoryPeak int64 `json:"used_memory_peak"` + UsedMemoryPeakPerc float64 `json:"used_memory_peak_perc"` + UsedMemoryRss int64 `json:"used_memory_rss"` + UsedMemoryScripts int64 `json:"used_memory_scripts"` + UsedMemoryStartup int64 `json:"used_memory_startup"` +} + func (r *RedisClient) Do(returnType string, args ...interface{}) (interface{}, error) { - rawVal := r.client.Do(args...) + rawVal := r.client.Do(context.Background(), args...) 
switch returnType { case "integer": return rawVal.Int64() case "string": - return rawVal.String() + return rawVal.Text() case "float": return rawVal.Float64() default: - return rawVal.String() + return rawVal.Text() } } func (r *RedisClient) Info() *redis.StringCmd { - return r.client.Info("ALL") + return r.client.Info(context.Background(), "ALL") } func (r *RedisClient) BaseTags() map[string]string { @@ -73,44 +194,11 @@ func (r *RedisClient) BaseTags() map[string]string { return tags } -var replicationSlaveMetricPrefix = regexp.MustCompile(`^slave\d+`) - -var sampleConfig = ` - ## specify servers via a url matching: - ## [protocol://][:password]@address[:port] - ## e.g. - ## tcp://localhost:6379 - ## tcp://:password@192.168.99.100 - ## unix:///var/run/redis.sock - ## - ## If no servers are specified, then localhost is used as the host. - ## If no port is specified, 6379 is used - servers = ["tcp://localhost:6379"] - - ## Optional. Specify redis commands to retrieve values - # [[inputs.redis.commands]] - # command = ["get", "sample-key"] - # field = "sample-key-value" - # type = "string" - - ## specify server password - # password = "s#cr@t%" - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = true -` - -func (r *Redis) SampleConfig() string { - return sampleConfig +func (r *RedisClient) Close() error { + return r.client.Close() } -func (r *Redis) Description() string { - return "Read metrics from one or many redis servers" -} +var replicationSlaveMetricPrefix = regexp.MustCompile(`^slave\d+`) var Tracking = map[string]string{ "uptime_in_seconds": "uptime", @@ -118,8 +206,22 @@ var Tracking = map[string]string{ "role": "replication_role", } -func (r *Redis) init(acc telegraf.Accumulator) error { - if r.initialized { +func (*Redis) SampleConfig() string { + return sampleConfig +} + +func (r *Redis) Init() error { + for _, command := range r.Commands { + if command.Type != "string" && command.Type != "integer" && command.Type != "float" { + return fmt.Errorf(`unknown result type: expected one of "string", "integer", "float"; got %q`, command.Type) + } + } + + return nil +} + +func (r *Redis) connect() error { + if r.connected { return nil } @@ -140,13 +242,18 @@ func (r *Redis) init(acc telegraf.Accumulator) error { return fmt.Errorf("unable to parse to address %q: %s", serv, err.Error()) } + username := "" password := "" if u.User != nil { + username = u.User.Username() pw, ok := u.User.Password() if ok { password = pw } } + if len(r.Username) > 0 { + username = r.Username + } if len(r.Password) > 0 { password = r.Password } @@ -166,6 +273,7 @@ func (r *Redis) init(acc telegraf.Accumulator) error { client := redis.NewClient( &redis.Options{ Addr: address, + Username: username, Password: password, Network: u.Scheme, PoolSize: 1, @@ -187,15 +295,15 @@ func (r *Redis) init(acc telegraf.Accumulator) error { } } - r.initialized = true + r.connected = true return nil } // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). 
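The `connect()` hunk above resolves credentials in two layers: userinfo parsed from each server URL comes first, and the plugin-level `username`/`password` options override whichever parts are set. A hedged sketch of that precedence, using a hypothetical `resolveCredentials` helper:

```go
package main

import (
	"fmt"
	"net/url"
)

// resolveCredentials is a hypothetical stand-in for the logic in connect():
// start from the URL's userinfo, then let explicit plugin options win.
func resolveCredentials(u *url.URL, cfgUser, cfgPass string) (username, password string) {
	if u.User != nil {
		username = u.User.Username()
		if pw, ok := u.User.Password(); ok {
			password = pw
		}
	}
	if cfgUser != "" {
		username = cfgUser
	}
	if cfgPass != "" {
		password = cfgPass
	}
	return username, password
}

func main() {
	u, err := url.Parse("tcp://default:urlsecret@192.168.99.100:6379")
	if err != nil {
		panic(err)
	}
	fmt.Println(resolveCredentials(u, "", "s#cr@t%")) // default s#cr@t%
}
```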
func (r *Redis) Gather(acc telegraf.Accumulator) error { - if !r.initialized { - err := r.init(acc) + if !r.connected { + err := r.connect() if err != nil { return err } @@ -221,6 +329,10 @@ func (r *Redis) gatherCommandValues(client Client, acc telegraf.Accumulator) err for _, command := range r.Commands { val, err := client.Do(command.Type, command.Command...) if err != nil { + if strings.Contains(err.Error(), "unexpected type=") { + return fmt.Errorf("could not get command result: %s", err) + } + return err } @@ -249,7 +361,7 @@ func gatherInfoOutput( tags map[string]string, ) error { var section string - var keyspace_hits, keyspace_misses int64 + var keyspaceHits, keyspaceMisses int64 scanner := bufio.NewScanner(rdr) fields := make(map[string]interface{}) @@ -271,7 +383,7 @@ func gatherInfoOutput( if len(parts) < 2 { continue } - name := string(parts[0]) + name := parts[0] if section == "Server" { if name != "lru_clock" && name != "uptime_in_seconds" && name != "redis_version" { @@ -294,7 +406,7 @@ func gatherInfoOutput( metric, ok := Tracking[name] if !ok { if section == "Keyspace" { - kline := strings.TrimSpace(string(parts[1])) + kline := strings.TrimSpace(parts[1]) gatherKeyspaceLine(name, kline, acc, tags) continue } @@ -321,9 +433,9 @@ func gatherInfoOutput( if ival, err := strconv.ParseInt(val, 10, 64); err == nil { switch name { case "keyspace_hits": - keyspace_hits = ival + keyspaceHits = ival case "keyspace_misses": - keyspace_misses = ival + keyspaceMisses = ival case "rdb_last_save_time": // influxdb can't calculate this, so we have to do it fields["rdb_last_save_time_elapsed"] = time.Now().Unix() - ival @@ -347,11 +459,17 @@ func gatherInfoOutput( fields[metric] = val } - var keyspace_hitrate float64 = 0.0 - if keyspace_hits != 0 || keyspace_misses != 0 { - keyspace_hitrate = float64(keyspace_hits) / float64(keyspace_hits+keyspace_misses) + var keyspaceHitrate float64 + if keyspaceHits != 0 || keyspaceMisses != 0 { + keyspaceHitrate = float64(keyspaceHits) / float64(keyspaceHits+keyspaceMisses) } - fields["keyspace_hitrate"] = keyspace_hitrate + fields["keyspace_hitrate"] = keyspaceHitrate + + o := RedisFieldTypes{} + + setStructFieldsFromObject(fields, &o) + setExistingFieldsFromStruct(fields, &o) + acc.AddFields("redis", fields, tags) return nil } @@ -364,12 +482,12 @@ func gatherKeyspaceLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { if strings.Contains(line, "keys=") { fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } tags["database"] = name @@ -393,7 +511,7 @@ func gatherCommandstateLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { if !strings.HasPrefix(name, "cmdstat") { return @@ -401,7 +519,7 @@ func gatherCommandstateLine( fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } tags["command"] = strings.TrimPrefix(name, "cmdstat_") @@ -438,11 +556,11 @@ func gatherReplicationLine( name string, line string, acc telegraf.Accumulator, - global_tags map[string]string, + globalTags map[string]string, ) { fields := make(map[string]interface{}) tags := make(map[string]string) - for k, v := range global_tags { + for k, v := range globalTags { tags[k] = v } @@ -479,3 +597,129 @@ func init() { return &Redis{} }) } + +func 
setExistingFieldsFromStruct(fields map[string]interface{}, o *RedisFieldTypes) { + val := reflect.ValueOf(o).Elem() + typ := val.Type() + + for key := range fields { + if _, exists := fields[key]; exists { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + jsonFieldName := f.Tag.Get("json") + if jsonFieldName == key { + fields[key] = val.Field(i).Interface() + break + } + } + } + } +} + +func setStructFieldsFromObject(fields map[string]interface{}, o *RedisFieldTypes) { + val := reflect.ValueOf(o).Elem() + typ := val.Type() + + for key, value := range fields { + if _, exists := fields[key]; exists { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + jsonFieldName := f.Tag.Get("json") + if jsonFieldName == key { + structFieldValue := val.Field(i) + structFieldValue.Set(coerceType(value, structFieldValue.Type())) + break + } + } + } + } +} + +func coerceType(value interface{}, typ reflect.Type) reflect.Value { + switch sourceType := value.(type) { + case bool: + switch typ.Kind() { + case reflect.String: + if sourceType { + value = "true" + } else { + value = "false" + } + case reflect.Int64: + if sourceType { + value = int64(1) + } else { + value = int64(0) + } + case reflect.Float64: + if sourceType { + value = float64(1) + } else { + value = float64(0) + } + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case int, int8, int16, int32, int64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%d", value) + case reflect.Int64: + // types match + case reflect.Float64: + value = float64(reflect.ValueOf(sourceType).Int()) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case uint, uint8, uint16, uint32, uint64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%d", value) + case reflect.Int64: + // types match + case reflect.Float64: + value = float64(reflect.ValueOf(sourceType).Uint()) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case float32, float64: + switch typ.Kind() { + case reflect.String: + value = fmt.Sprintf("%f", value) + case reflect.Int64: + value = int64(reflect.ValueOf(sourceType).Float()) + case reflect.Float64: + // types match + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + case string: + switch typ.Kind() { + case reflect.String: + // types match + case reflect.Int64: + value, _ = strconv.ParseInt(value.(string), 10, 64) + case reflect.Float64: + value, _ = strconv.ParseFloat(value.(string), 64) + default: + panic(fmt.Sprintf("unhandled destination type %s", typ.Kind().String())) + } + default: + panic(fmt.Sprintf("unhandled source type %T", sourceType)) + } + return reflect.ValueOf(value) +} + +func (r *Redis) Start(telegraf.Accumulator) error { + return nil +} +
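The helpers above use reflection and the `json` struct tags on `RedisFieldTypes` to coerce the loosely typed values coming out of INFO parsing into one canonical type per field. A compressed sketch of the same idea, with a hypothetical two-field struct and only the string-to-int64 and float64 pass-through cases:

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// redisFieldTypes is a toy stand-in for RedisFieldTypes above; the json
// tags name the keys produced by INFO parsing.
type redisFieldTypes struct {
	Clients         int64   `json:"clients"`
	KeyspaceHitrate float64 `json:"keyspace_hitrate"`
}

func main() {
	// Parsed INFO output can leave numbers as strings; normalize each
	// field to the type its struct tag declares.
	fields := map[string]interface{}{"clients": "12", "keyspace_hitrate": 0.5}

	o := redisFieldTypes{}
	val := reflect.ValueOf(&o).Elem()
	typ := val.Type()
	for i := 0; i < typ.NumField(); i++ {
		key := typ.Field(i).Tag.Get("json")
		raw, ok := fields[key]
		if !ok {
			continue
		}
		target := val.Field(i)
		switch target.Kind() {
		case reflect.Int64:
			if s, isString := raw.(string); isString {
				n, _ := strconv.ParseInt(s, 10, 64)
				target.SetInt(n)
			}
		case reflect.Float64:
			if f, isFloat := raw.(float64); isFloat {
				target.SetFloat(f)
			}
		}
		fields[key] = target.Interface() // write the normalized value back
	}
	fmt.Println(fields) // map[clients:12 keyspace_hitrate:0.5]
}
```

+// Stop closes the client connections; Start and Stop implement the telegraf.ServiceInput interface.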
+func (r *Redis) Stop() { + for _, c := range r.clients { + err := c.Close() + if err != nil { + r.Log.Errorf("error closing client: %v", err) + } + } +} diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index d5aaa7a7bfa38..7606829fc5c73 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -7,10 +7,11 @@ import ( "testing" "time" - "github.com/go-redis/redis" + "github.com/docker/go-connections/nat" + "github.com/go-redis/redis/v8" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" ) type testClient struct { @@ -24,16 +25,32 @@ func (t *testClient) Info() *redis.StringCmd { return nil } -func (t *testClient) Do(returnType string, args ...interface{}) (interface{}, error) { +func (t *testClient) Do(_ string, _ ...interface{}) (interface{}, error) { return 2, nil } -func TestRedisConnect(t *testing.T) { +func (t *testClient) Close() error { + return nil +} + +func TestRedisConnectIntegration(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - addr := fmt.Sprintf(testutil.GetLocalHost() + ":6379") + servicePort := "6379" + container := testutil.Container{ + Image: "redis:alpine", + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForListeningPort(nat.Port(servicePort)), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + addr := fmt.Sprintf("%s:%s", container.Address, container.Ports[servicePort]) r := &Redis{ Log: testutil.Logger{}, @@ -42,7 +59,7 @@ func TestRedisConnect(t *testing.T) { var acc testutil.Accumulator - err := acc.GatherError(r.Gather) + err = acc.GatherError(r.Gather) require.NoError(t, err) } @@ -83,62 +100,115 @@ func TestRedis_ParseMetrics(t *testing.T) { tags = map[string]string{"host": "redis.net", "replication_role": "master"} fields := map[string]interface{}{ - "uptime": int64(238), - "lru_clock": int64(2364819), - "clients": int64(1), - "client_longest_output_list": int64(0), - "client_biggest_input_buf": int64(0), - "blocked_clients": int64(0), - "used_memory": int64(1003936), - "used_memory_rss": int64(811008), - "used_memory_peak": int64(1003936), - "used_memory_lua": int64(33792), - "used_memory_peak_perc": float64(93.58), - "used_memory_dataset_perc": float64(20.27), - "mem_fragmentation_ratio": float64(0.81), - "loading": int64(0), - "rdb_changes_since_last_save": int64(0), - "rdb_bgsave_in_progress": int64(0), - "rdb_last_save_time": int64(1428427941), - "rdb_last_bgsave_status": "ok", - "rdb_last_bgsave_time_sec": int64(-1), - "rdb_current_bgsave_time_sec": int64(-1), - "aof_enabled": int64(0), - "aof_rewrite_in_progress": int64(0), - "aof_rewrite_scheduled": int64(0), - "aof_last_rewrite_time_sec": int64(-1), - "aof_current_rewrite_time_sec": int64(-1), - "aof_last_bgrewrite_status": "ok", - "aof_last_write_status": "ok", - "total_connections_received": int64(2), - "total_commands_processed": int64(1), - "instantaneous_ops_per_sec": int64(0), - "instantaneous_input_kbps": float64(876.16), - "instantaneous_output_kbps": float64(3010.23), - "rejected_connections": int64(0), - "sync_full": int64(0), - "sync_partial_ok": int64(0), - "sync_partial_err": int64(0), - "expired_keys": int64(0), - "evicted_keys": int64(0), - "keyspace_hits": int64(1), - "keyspace_misses": int64(1), - 
"pubsub_channels": int64(0), - "pubsub_patterns": int64(0), - "latest_fork_usec": int64(0), - "connected_slaves": int64(2), - "master_repl_offset": int64(0), - "repl_backlog_active": int64(0), - "repl_backlog_size": int64(1048576), - "repl_backlog_first_byte_offset": int64(0), - "repl_backlog_histlen": int64(0), - "second_repl_offset": int64(-1), - "used_cpu_sys": float64(0.14), - "used_cpu_user": float64(0.05), - "used_cpu_sys_children": float64(0.00), - "used_cpu_user_children": float64(0.00), - "keyspace_hitrate": float64(0.50), - "redis_version": "2.8.9", + "uptime": int64(238), + "lru_clock": int64(2364819), + "clients": int64(1), + "client_longest_output_list": int64(0), + "client_biggest_input_buf": int64(0), + "blocked_clients": int64(0), + "used_memory": int64(1003936), + "used_memory_rss": int64(811008), + "used_memory_peak": int64(1003936), + "used_memory_lua": int64(33792), + "used_memory_peak_perc": float64(93.58), + "used_memory_dataset_perc": float64(20.27), + "mem_fragmentation_ratio": float64(0.81), + "loading": int64(0), + "rdb_changes_since_last_save": int64(0), + "rdb_bgsave_in_progress": int64(0), + "rdb_last_save_time": int64(1428427941), + "rdb_last_bgsave_status": "ok", + "rdb_last_bgsave_time_sec": int64(-1), + "rdb_current_bgsave_time_sec": int64(-1), + "aof_enabled": int64(0), + "aof_rewrite_in_progress": int64(0), + "aof_rewrite_scheduled": int64(0), + "aof_last_rewrite_time_sec": int64(-1), + "aof_current_rewrite_time_sec": int64(-1), + "aof_last_bgrewrite_status": "ok", + "aof_last_write_status": "ok", + "total_connections_received": int64(2), + "total_commands_processed": int64(1), + "instantaneous_ops_per_sec": int64(0), + "instantaneous_input_kbps": float64(876.16), + "instantaneous_output_kbps": float64(3010.23), + "rejected_connections": int64(0), + "sync_full": int64(0), + "sync_partial_ok": int64(0), + "sync_partial_err": int64(0), + "expired_keys": int64(0), + "evicted_keys": int64(0), + "keyspace_hits": int64(1), + "keyspace_misses": int64(1), + "pubsub_channels": int64(0), + "pubsub_patterns": int64(0), + "latest_fork_usec": int64(0), + "connected_slaves": int64(2), + "master_repl_offset": int64(0), + "repl_backlog_active": int64(0), + "repl_backlog_size": int64(1048576), + "repl_backlog_first_byte_offset": int64(0), + "repl_backlog_histlen": int64(0), + "second_repl_offset": int64(-1), + "used_cpu_sys": float64(0.14), + "used_cpu_user": float64(0.05), + "used_cpu_sys_children": float64(0.00), + "used_cpu_user_children": float64(0.00), + "keyspace_hitrate": float64(0.50), + "redis_version": "6.0.9", + "active_defrag_hits": int64(0), + "active_defrag_key_hits": int64(0), + "active_defrag_key_misses": int64(0), + "active_defrag_misses": int64(0), + "active_defrag_running": int64(0), + "allocator_active": int64(1022976), + "allocator_allocated": int64(1019632), + "allocator_frag_bytes": float64(3344), + "allocator_frag_ratio": float64(1.00), + "allocator_resident": int64(1022976), + "allocator_rss_bytes": int64(0), + "allocator_rss_ratio": float64(1.00), + "aof_last_cow_size": int64(0), + "client_recent_max_input_buffer": int64(16), + "client_recent_max_output_buffer": int64(0), + "clients_in_timeout_table": int64(0), + "cluster_enabled": int64(0), + "expire_cycle_cpu_milliseconds": int64(669), + "expired_stale_perc": float64(0.00), + "expired_time_cap_reached_count": int64(0), + "io_threaded_reads_processed": int64(0), + "io_threaded_writes_processed": int64(0), + "total_reads_processed": int64(31), + "total_writes_processed": int64(17), + 
"lazyfree_pending_objects": int64(0), + "maxmemory": int64(0), + "maxmemory_policy": "noeviction", + "mem_aof_buffer": int64(0), + "mem_clients_normal": int64(17440), + "mem_clients_slaves": int64(0), + "mem_fragmentation_bytes": int64(41232), + "mem_not_counted_for_evict": int64(0), + "mem_replication_backlog": int64(0), + "rss_overhead_bytes": int64(37888), + "rss_overhead_ratio": float64(1.04), + "total_system_memory": int64(17179869184), + "used_memory_dataset": int64(47088), + "used_memory_overhead": int64(1019152), + "used_memory_scripts": int64(0), + "used_memory_startup": int64(1001712), + "migrate_cached_sockets": int64(0), + "module_fork_in_progress": int64(0), + "module_fork_last_cow_size": int64(0), + "number_of_cached_scripts": int64(0), + "rdb_last_cow_size": int64(0), + "slave_expires_tracked_keys": int64(0), + "unexpected_error_replies": int64(0), + "total_net_input_bytes": int64(381), + "total_net_output_bytes": int64(71521), + "tracking_clients": int64(0), + "tracking_total_items": int64(0), + "tracking_total_keys": int64(0), + "tracking_total_prefixes": int64(0), } // We have to test rdb_last_save_time_offset manually because the value is based on the time when gathered @@ -149,7 +219,7 @@ func TestRedis_ParseMetrics(t *testing.T) { } } } - assert.InDelta(t, + require.InDelta(t, time.Now().Unix()-fields["rdb_last_save_time"].(int64), fields["rdb_last_save_time_elapsed"].(int64), 2) // allow for 2 seconds worth of offset @@ -210,26 +280,110 @@ func TestRedis_ParseMetrics(t *testing.T) { acc.AssertContainsTaggedFields(t, "redis_replication", replicationFields, replicationTags) } +func TestRedis_ParseFloatOnInts(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "mem_fragmentation_ratio:0.81", "mem_fragmentation_ratio:1", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["mem_fragmentation_ratio"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + fragRatio, ok := m.Fields["mem_fragmentation_ratio"] + require.True(t, ok) + require.IsType(t, float64(0.0), fragRatio) +} + +func TestRedis_ParseIntOnFloats(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "clients_in_timeout_table:0", "clients_in_timeout_table:0.0", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["clients_in_timeout_table"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + clientsInTimeout, ok := m.Fields["clients_in_timeout_table"] + require.True(t, ok) + require.IsType(t, int64(0), clientsInTimeout) +} + +func TestRedis_ParseStringOnInts(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "maxmemory_policy:no-eviction", "maxmemory_policy:1", 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["maxmemory_policy"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + maxmemoryPolicy, ok := m.Fields["maxmemory_policy"] + require.True(t, ok) + require.IsType(t, string(""), 
maxmemoryPolicy) +} + +func TestRedis_ParseIntOnString(t *testing.T) { + var acc testutil.Accumulator + tags := map[string]string{"host": "redis.net"} + rdr := bufio.NewReader(strings.NewReader(strings.Replace(testOutput, "clients_in_timeout_table:0", `clients_in_timeout_table:""`, 1))) + err := gatherInfoOutput(rdr, &acc, tags) + require.NoError(t, err) + var m *testutil.Metric + for i := range acc.Metrics { + if _, ok := acc.Metrics[i].Fields["clients_in_timeout_table"]; ok { + m = acc.Metrics[i] + break + } + } + require.NotNil(t, m) + clientsInTimeout, ok := m.Fields["clients_in_timeout_table"] + require.True(t, ok) + require.IsType(t, int64(0), clientsInTimeout) +} + const testOutput = `# Server -redis_version:2.8.9 +redis_version:6.0.9 redis_git_sha1:00000000 redis_git_dirty:0 -redis_build_id:9ccc8119ea98f6e1 +redis_build_id:26c3229b35eb3beb redis_mode:standalone -os:Darwin 14.1.0 x86_64 +os:Darwin 19.6.0 x86_64 arch_bits:64 multiplexing_api:kqueue +atomicvar_api:atomic-builtin gcc_version:4.2.1 -process_id:40235 -run_id:37d020620aadf0627282c0f3401405d774a82664 +process_id:46677 +run_id:5d6bf38087b23e48f1a59b7aca52e2b55438b02f tcp_port:6379 uptime_in_seconds:238 uptime_in_days:0 hz:10 +configured_hz:10 lru_clock:2364819 +executable:/usr/local/opt/redis/bin/redis-server config_file:/usr/local/etc/redis.conf +io_threads_active:0 # Clients +client_recent_max_input_buffer:16 +client_recent_max_output_buffer:0 +tracking_clients:0 +clients_in_timeout_table:0 connected_clients:1 client_longest_output_list:0 client_biggest_input_buf:0 @@ -239,13 +393,43 @@ blocked_clients:0 used_memory:1003936 used_memory_human:980.41K used_memory_rss:811008 +used_memory_rss_human:1.01M used_memory_peak:1003936 used_memory_peak_human:980.41K +used_memory_peak_perc:93.58% +used_memory_overhead:1019152 +used_memory_startup:1001712 +used_memory_dataset:47088 +used_memory_dataset_perc:20.27% +allocator_allocated:1019632 +allocator_active:1022976 +allocator_resident:1022976 +total_system_memory:17179869184 +total_system_memory_human:16.00G used_memory_lua:33792 +used_memory_lua_human:37.00K +used_memory_scripts:0 +used_memory_scripts_human:0B +number_of_cached_scripts:0 +maxmemory:0 +maxmemory_human:0B +maxmemory_policy:noeviction +allocator_frag_ratio:1.00 +allocator_frag_bytes:3344 +allocator_rss_ratio:1.00 +allocator_rss_bytes:0 +rss_overhead_ratio:1.04 +rss_overhead_bytes:37888 mem_fragmentation_ratio:0.81 +mem_fragmentation_bytes:41232 +mem_not_counted_for_evict:0 +mem_replication_backlog:0 +mem_clients_slaves:0 +mem_clients_normal:17440 +mem_aof_buffer:0 mem_allocator:libc -used_memory_peak_perc:93.58% -used_memory_dataset_perc:20.27% +active_defrag_running:0 +lazyfree_pending_objects:0 # Persistence loading:0 @@ -255,6 +439,7 @@ rdb_last_save_time:1428427941 rdb_last_bgsave_status:ok rdb_last_bgsave_time_sec:-1 rdb_current_bgsave_time_sec:-1 +rdb_last_cow_size:0 aof_enabled:0 aof_rewrite_in_progress:0 aof_rewrite_scheduled:0 @@ -262,11 +447,16 @@ aof_last_rewrite_time_sec:-1 aof_current_rewrite_time_sec:-1 aof_last_bgrewrite_status:ok aof_last_write_status:ok +aof_last_cow_size:0 +module_fork_in_progress:0 +module_fork_last_cow_size:0 # Stats total_connections_received:2 total_commands_processed:1 instantaneous_ops_per_sec:0 +total_net_input_bytes:381 +total_net_output_bytes:71521 instantaneous_input_kbps:876.16 instantaneous_output_kbps:3010.23 rejected_connections:0 @@ -274,12 +464,29 @@ sync_full:0 sync_partial_ok:0 sync_partial_err:0 expired_keys:0 +expired_stale_perc:0.00 
+expired_time_cap_reached_count:0 +expire_cycle_cpu_milliseconds:669 evicted_keys:0 keyspace_hits:1 keyspace_misses:1 pubsub_channels:0 pubsub_patterns:0 latest_fork_usec:0 +migrate_cached_sockets:0 +slave_expires_tracked_keys:0 +active_defrag_hits:0 +active_defrag_misses:0 +active_defrag_key_hits:0 +active_defrag_key_misses:0 +tracking_total_keys:0 +tracking_total_items:0 +tracking_total_prefixes:0 +unexpected_error_replies:0 +total_reads_processed:31 +total_writes_processed:17 +io_threaded_reads_processed:0 +io_threaded_writes_processed:0 # Replication role:master @@ -301,6 +508,9 @@ used_cpu_user:0.05 used_cpu_sys_children:0.00 used_cpu_user_children:0.00 +# Cluster +cluster_enabled:0 + # Commandstats cmdstat_set:calls=261265,usec=1634157,usec_per_call=6.25 cmdstat_command:calls=1,usec=990,usec_per_call=990.00 @@ -308,5 +518,4 @@ cmdstat_command:calls=1,usec=990,usec_per_call=990.00 # Keyspace db0:keys=2,expires=0,avg_ttl=0 -(error) ERR unknown command 'eof' -` +(error) ERR unknown command 'eof'` diff --git a/plugins/inputs/redis/sample.conf b/plugins/inputs/redis/sample.conf new file mode 100644 index 0000000000000..6f1db720d53a7 --- /dev/null +++ b/plugins/inputs/redis/sample.conf @@ -0,0 +1,35 @@ +# Read metrics from one or many redis servers +[[inputs.redis]] + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:6379 + ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis.sock + ## + ## If no servers are specified, then localhost is used as the host. + ## If no port is specified, 6379 is used + servers = ["tcp://localhost:6379"] + + ## Optional. Specify redis commands to retrieve values + # [[inputs.redis.commands]] + # # The command to run where each argument is a separate element + # command = ["get", "sample-key"] + # # The field to store the result in + # field = "sample-key-value" + # # The type of the result + # # Can be "string", "integer", or "float" + # type = "string" + + ## specify server password + # password = "s#cr@t%" + + ## specify username for ACL auth (Redis 6.0+) + # username = "default" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true diff --git a/plugins/inputs/redis_sentinel/README.md b/plugins/inputs/redis_sentinel/README.md new file mode 100644 index 0000000000000..e88d22d99f5b6 --- /dev/null +++ b/plugins/inputs/redis_sentinel/README.md @@ -0,0 +1,209 @@ +# Redis Sentinel Input Plugin + +A plugin for Redis Sentinel to monitor multiple Sentinel instances that are +monitoring multiple Redis servers and replicas. + +## Configuration + +```toml @sample.conf +# Read metrics from one or many redis-sentinel servers +[[inputs.redis_sentinel]] + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:26379 + ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis-sentinel.sock + ## + ## If no servers are specified, then localhost is used as the host. 
+ ## If no port is specified, 26379 is used + # servers = ["tcp://localhost:26379"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true +``` + +## Measurements & Fields + +The plugin gathers the results of these commands and measurements: + +* `sentinel masters` - `redis_sentinel_masters` +* `sentinel sentinels` - `redis_sentinels` +* `sentinel replicas` - `redis_replicas` +* `info all` - `redis_sentinel` + +The `has_quorum` field in `redis_sentinel_masters` is from calling the command +`sentinels ckquorum`. + +There are 5 remote network requests made for each server listed in the config. + +## Metrics + +* redis_sentinel_masters + * tags: + * host + * master + * port + * source + + * fields: + * config_epoch (int) + * down_after_milliseconds (int) + * failover_timeout (int) + * flags (string) + * has_quorum (bool) + * info_refresh (int) + * ip (string) + * last_ok_ping_reply (int) + * last_ping_reply (int) + * last_ping_sent (int) + * link_pending_commands (int) + * link_refcount (int) + * num_other_sentinels (int) + * num_slaves (int) + * parallel_syncs (int) + * port (int) + * quorum (int) + * role_reported (string) + * role_reported_time (int) + +* redis_sentinel_sentinels + * tags: + * host + * master + * port + * sentinel_ip + * sentinel_port + * source + + * fields: + * down_after_milliseconds (int) + * flags (string) + * last_hello_message (int) + * last_ok_ping_reply (int) + * last_ping_reply (int) + * last_ping_sent (int) + * link_pending_commands (int) + * link_refcount (int) + * name (string) + * voted_leader (string) + * voted_leader_epoch (int) + +* redis_sentinel_replicas + * tags: + * host + * master + * port + * replica_ip + * replica_port + * source + + * fields: + * down_after_milliseconds (int) + * flags (string) + * info_refresh (int) + * last_ok_ping_reply (int) + * last_ping_reply (int) + * last_ping_sent (int) + * link_pending_commands (int) + * link_refcount (int) + * master_host (string) + * master_link_down_time (int) + * master_link_status (string) + * master_port (int) + * name (string) + * role_reported (string) + * role_reported_time (int) + * slave_priority (int) + * slave_repl_offset (int) + +* redis_sentinel + * tags: + * host + * port + * source + + * fields: + * active_defrag_hits (int) + * active_defrag_key_hits (int) + * active_defrag_key_misses (int) + * active_defrag_misses (int) + * blocked_clients (int) + * client_recent_max_input_buffer (int) + * client_recent_max_output_buffer (int) + * clients (int) + * evicted_keys (int) + * expired_keys (int) + * expired_stale_perc (float) + * expired_time_cap_reached_count (int) + * instantaneous_input_kbps (float) + * instantaneous_ops_per_sec (int) + * instantaneous_output_kbps (float) + * keyspace_hits (int) + * keyspace_misses (int) + * latest_fork_usec (int) + * lru_clock (int) + * migrate_cached_sockets (int) + * pubsub_channels (int) + * pubsub_patterns (int) + * redis_version (string) + * rejected_connections (int) + * sentinel_masters (int) + * sentinel_running_scripts (int) + * sentinel_scripts_queue_length (int) + * sentinel_simulate_failure_flags (int) + * sentinel_tilt (int) + * slave_expires_tracked_keys (int) + * sync_full (int) + * sync_partial_err (int) + * sync_partial_ok (int) + * total_commands_processed (int) + * total_connections_received (int) + * total_net_input_bytes (int) + * total_net_output_bytes (int) + * uptime_ns 
(int, nanoseconds) + * used_cpu_sys (float) + * used_cpu_sys_children (float) + * used_cpu_user (float) + * used_cpu_user_children (float) + +## Example Output + +An example of 2 Redis Sentinel instances monitoring a single master and +replica. It produces: + +### redis_sentinel_masters + +```sh +redis_sentinel_masters,host=somehostname,master=mymaster,port=26380,source=localhost config_epoch=0i,down_after_milliseconds=30000i,failover_timeout=180000i,flags="master",has_quorum=1i,info_refresh=110i,ip="127.0.0.1",last_ok_ping_reply=819i,last_ping_reply=819i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,num_other_sentinels=1i,num_slaves=1i,parallel_syncs=1i,port=6379i,quorum=2i,role_reported="master",role_reported_time=311248i 1570207377000000000 + +redis_sentinel_masters,host=somehostname,master=mymaster,port=26379,source=localhost config_epoch=0i,down_after_milliseconds=30000i,failover_timeout=180000i,flags="master",has_quorum=1i,info_refresh=1650i,ip="127.0.0.1",last_ok_ping_reply=1003i,last_ping_reply=1003i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,num_other_sentinels=1i,num_slaves=1i,parallel_syncs=1i,port=6379i,quorum=2i,role_reported="master",role_reported_time=302990i 1570207377000000000 +``` + +### redis_sentinel_sentinels + +```sh +redis_sentinel_sentinels,host=somehostname,master=mymaster,port=26380,sentinel_ip=127.0.0.1,sentinel_port=26379,source=localhost down_after_milliseconds=30000i,flags="sentinel",last_hello_message=1337i,last_ok_ping_reply=566i,last_ping_reply=566i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,name="fd7444de58ecc00f2685cd89fc11ff96c72f0569",voted_leader="?",voted_leader_epoch=0i 1570207377000000000 + +redis_sentinel_sentinels,host=somehostname,master=mymaster,port=26379,sentinel_ip=127.0.0.1,sentinel_port=26380,source=localhost down_after_milliseconds=30000i,flags="sentinel",last_hello_message=1510i,last_ok_ping_reply=1004i,last_ping_reply=1004i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,name="d06519438fe1b35692cb2ea06d57833c959f9114",voted_leader="?",voted_leader_epoch=0i 1570207377000000000 +``` + +### redis_sentinel_replicas + +```sh +redis_sentinel_replicas,host=somehostname,master=mymaster,port=26379,replica_ip=127.0.0.1,replica_port=6380,source=localhost down_after_milliseconds=30000i,flags="slave",info_refresh=1651i,last_ok_ping_reply=1005i,last_ping_reply=1005i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,master_host="127.0.0.1",master_link_down_time=0i,master_link_status="ok",master_port=6379i,name="127.0.0.1:6380",role_reported="slave",role_reported_time=302983i,slave_priority=100i,slave_repl_offset=40175i 1570207377000000000 + +redis_sentinel_replicas,host=somehostname,master=mymaster,port=26380,replica_ip=127.0.0.1,replica_port=6380,source=localhost down_after_milliseconds=30000i,flags="slave",info_refresh=111i,last_ok_ping_reply=821i,last_ping_reply=821i,last_ping_sent=0i,link_pending_commands=0i,link_refcount=1i,master_host="127.0.0.1",master_link_down_time=0i,master_link_status="ok",master_port=6379i,name="127.0.0.1:6380",role_reported="slave",role_reported_time=311243i,slave_priority=100i,slave_repl_offset=40441i 1570207377000000000 +``` + +### redis_sentinel + +```sh +redis_sentinel,host=somehostname,port=26379,source=localhost 
active_defrag_hits=0i,active_defrag_key_hits=0i,active_defrag_key_misses=0i,active_defrag_misses=0i,blocked_clients=0i,client_recent_max_input_buffer=2i,client_recent_max_output_buffer=0i,clients=3i,evicted_keys=0i,expired_keys=0i,expired_stale_perc=0,expired_time_cap_reached_count=0i,instantaneous_input_kbps=0.01,instantaneous_ops_per_sec=0i,instantaneous_output_kbps=0,keyspace_hits=0i,keyspace_misses=0i,latest_fork_usec=0i,lru_clock=9926289i,migrate_cached_sockets=0i,pubsub_channels=0i,pubsub_patterns=0i,redis_version="5.0.5",rejected_connections=0i,sentinel_masters=1i,sentinel_running_scripts=0i,sentinel_scripts_queue_length=0i,sentinel_simulate_failure_flags=0i,sentinel_tilt=0i,slave_expires_tracked_keys=0i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=459i,total_connections_received=6i,total_net_input_bytes=24517i,total_net_output_bytes=14864i,uptime_ns=303000000000i,used_cpu_sys=0.404,used_cpu_sys_children=0,used_cpu_user=0.436,used_cpu_user_children=0 1570207377000000000 + +redis_sentinel,host=somehostname,port=26380,source=localhost active_defrag_hits=0i,active_defrag_key_hits=0i,active_defrag_key_misses=0i,active_defrag_misses=0i,blocked_clients=0i,client_recent_max_input_buffer=2i,client_recent_max_output_buffer=0i,clients=2i,evicted_keys=0i,expired_keys=0i,expired_stale_perc=0,expired_time_cap_reached_count=0i,instantaneous_input_kbps=0.01,instantaneous_ops_per_sec=0i,instantaneous_output_kbps=0,keyspace_hits=0i,keyspace_misses=0i,latest_fork_usec=0i,lru_clock=9926289i,migrate_cached_sockets=0i,pubsub_channels=0i,pubsub_patterns=0i,redis_version="5.0.5",rejected_connections=0i,sentinel_masters=1i,sentinel_running_scripts=0i,sentinel_scripts_queue_length=0i,sentinel_simulate_failure_flags=0i,sentinel_tilt=0i,slave_expires_tracked_keys=0i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=442i,total_connections_received=2i,total_net_input_bytes=23861i,total_net_output_bytes=4443i,uptime_ns=312000000000i,used_cpu_sys=0.46,used_cpu_sys_children=0,used_cpu_user=0.416,used_cpu_user_children=0 1570207377000000000 +``` diff --git a/plugins/inputs/redis_sentinel/redis_sentinel.go b/plugins/inputs/redis_sentinel/redis_sentinel.go new file mode 100644 index 0000000000000..0652cd472305d --- /dev/null +++ b/plugins/inputs/redis_sentinel/redis_sentinel.go @@ -0,0 +1,439 @@ +//go:generate ../../../tools/readme_config_includer/generator +package redis_sentinel + +import ( + "bufio" + _ "embed" + "fmt" + "io" + "net/url" + "strconv" + "strings" + "sync" + + "github.com/go-redis/redis" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +type RedisSentinel struct { + Servers []string `toml:"servers"` + tls.ClientConfig + + clients []*RedisSentinelClient +} + +type RedisSentinelClient struct { + sentinel *redis.SentinelClient + tags map[string]string +} + +const measurementMasters = "redis_sentinel_masters" +const measurementSentinel = "redis_sentinel" +const measurementSentinels = "redis_sentinel_sentinels" +const measurementReplicas = "redis_sentinel_replicas" + +func init() { + inputs.Add("redis_sentinel", func() telegraf.Input { + return &RedisSentinel{} + }) +} + +func (*RedisSentinel) SampleConfig() string { + return sampleConfig +} + +func (r *RedisSentinel) Init() error { + if len(r.Servers) == 0 { + r.Servers = []string{"tcp://localhost:26379"} + } + + r.clients = make([]*RedisSentinelClient, len(r.Servers)) + + tlsConfig, err := r.ClientConfig.TLSConfig() + if err != nil { + return err + } + + for i, serv := range r.Servers { + u, err := url.Parse(serv) + if err != nil { + return fmt.Errorf("unable to parse to address %q: %v", serv, err) + } + + password := "" + if u.User != nil { + password, _ = u.User.Password() + } + + var address string + tags := map[string]string{} + + switch u.Scheme { + case "tcp": + address = u.Host + tags["source"] = u.Hostname() + tags["port"] = u.Port() + case "unix": + address = u.Path + tags["socket"] = u.Path + default: + return fmt.Errorf("invalid scheme %q, expected tcp or unix", u.Scheme) + } + + sentinel := redis.NewSentinelClient( + &redis.Options{ + Addr: address, + Password: password, + Network: u.Scheme, + PoolSize: 1, + TLSConfig: tlsConfig, + }, + ) + + r.clients[i] = &RedisSentinelClient{ + sentinel: sentinel, + tags: tags, + } + } + + return nil +} + +// Redis list format has string key/values adjacent, so convert to a map for easier use +func toMap(vals []interface{}) map[string]string { + m := make(map[string]string) + + for idx := 0; idx < len(vals)-1; idx += 2 { + key, keyOk := vals[idx].(string) + value, valueOk := vals[idx+1].(string) + + if keyOk && valueOk { + m[key] = value + } + } + + return m +} + +func castFieldValue(value string, fieldType configFieldType) (interface{}, error) { + var castedValue interface{} + var err error + + switch fieldType { + case configFieldTypeFloat: + castedValue, err = strconv.ParseFloat(value, 64) + case configFieldTypeInteger: + castedValue, err = strconv.ParseInt(value, 10, 64) + case configFieldTypeString: + castedValue = value + default: + return nil, fmt.Errorf("unsupported field type %v", fieldType) + } + + if err != nil { + return nil, fmt.Errorf("casting value %v failed: %v", value, err) + } + + return castedValue, nil +} + +func prepareFieldValues(fields map[string]string, typeMap map[string]configFieldType) (map[string]interface{}, error) { + preparedFields := make(map[string]interface{}) + + for key, val := range fields { + key = strings.ReplaceAll(key, "-", "_") + + valType, ok := typeMap[key] + if !ok { + continue + } + + castedVal, err := castFieldValue(val, valType) + if err != nil { + return nil, err + } + + preparedFields[key] = castedVal + } + + return preparedFields, nil +} + +// Reads stats from all configured servers accumulates stats. +// Returns one of the errors encountered while gather stats (if any). 
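Before the `Gather` implementation below, a worked illustration of the helpers above (`toMap`, `castFieldValue`, `prepareFieldValues`): the sketch runs an invented `SENTINEL masters`-style reply through simplified versions of them. The reply literal and the type table are illustrative only, not taken from the plugin:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Field types, mirroring the configFieldType maps used by the plugin.
type fieldType int

const (
	typeInt fieldType = iota
	typeString
	typeFloat
)

// toMap pairs up the flat key/value reply Redis returns for
// SENTINEL MASTERS / SENTINELS / REPLICAS.
func toMap(vals []interface{}) map[string]string {
	m := make(map[string]string)
	for i := 0; i < len(vals)-1; i += 2 {
		k, kok := vals[i].(string)
		v, vok := vals[i+1].(string)
		if kok && vok {
			m[k] = v
		}
	}
	return m
}

// cast converts a raw string to its declared field type.
func cast(value string, t fieldType) (interface{}, error) {
	switch t {
	case typeFloat:
		return strconv.ParseFloat(value, 64)
	case typeInt:
		return strconv.ParseInt(value, 10, 64)
	default:
		return value, nil
	}
}

func main() {
	// A SENTINEL MASTERS reply interleaves keys and values.
	reply := []interface{}{"name", "mymaster", "num-slaves", "1", "quorum", "2"}
	types := map[string]fieldType{"num_slaves": typeInt, "quorum": typeInt}

	fields := make(map[string]interface{})
	for k, v := range toMap(reply) {
		k = strings.ReplaceAll(k, "-", "_") // Sentinel uses dashes; fields use underscores
		if t, ok := types[k]; ok {
			if cv, err := cast(v, t); err == nil {
				fields[k] = cv
			}
		}
	}
	fmt.Println(fields) // map[num_slaves:1 quorum:2]
}
```

Keys absent from the type table (like `name`, which becomes a tag) are simply skipped, which is the same filtering behavior `prepareFieldValues` uses.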
+func (r *RedisSentinel) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + for _, client := range r.clients { + wg.Add(1) + + go func(acc telegraf.Accumulator, client *RedisSentinelClient) { + defer wg.Done() + + masters, err := client.gatherMasterStats(acc) + acc.AddError(err) + + for _, master := range masters { + acc.AddError(client.gatherReplicaStats(acc, master)) + acc.AddError(client.gatherSentinelStats(acc, master)) + } + + acc.AddError(client.gatherInfoStats(acc)) + }(acc, client) + } + + wg.Wait() + + return nil +} + +func (client *RedisSentinelClient) gatherInfoStats(acc telegraf.Accumulator) error { + infoCmd := redis.NewStringCmd("info", "all") + if err := client.sentinel.Process(infoCmd); err != nil { + return err + } + + info, err := infoCmd.Result() + if err != nil { + return err + } + + rdr := strings.NewReader(info) + infoTags, infoFields, err := convertSentinelInfoOutput(client.tags, rdr) + if err != nil { + return err + } + + acc.AddFields(measurementSentinel, infoFields, infoTags) + + return nil +} + +func (client *RedisSentinelClient) gatherMasterStats(acc telegraf.Accumulator) ([]string, error) { + var masterNames []string + + mastersCmd := redis.NewSliceCmd("sentinel", "masters") + if err := client.sentinel.Process(mastersCmd); err != nil { + return masterNames, err + } + + masters, err := mastersCmd.Result() + if err != nil { + return masterNames, err + } + + // Break out of the loop if one of the items comes out malformed + // It's safe to assume that if we fail parsing one item that the rest will fail too + // This is because we are iterating over a single server response + for _, master := range masters { + master, ok := master.([]interface{}) + if !ok { + return masterNames, fmt.Errorf("unable to process master response") + } + + m := toMap(master) + + masterName, ok := m["name"] + if !ok { + return masterNames, fmt.Errorf("unable to resolve master name") + } + + // Record the name so Gather can fetch this master's replicas and sentinels + masterNames = append(masterNames, masterName) + + quorumCmd := redis.NewStringCmd("sentinel", "ckquorum", masterName) + quorumErr := client.sentinel.Process(quorumCmd) + + sentinelMastersTags, sentinelMastersFields, err := convertSentinelMastersOutput(client.tags, m, quorumErr) + if err != nil { + return masterNames, err + } + acc.AddFields(measurementMasters, sentinelMastersFields, sentinelMastersTags) + } + + return masterNames, nil +} + +func (client *RedisSentinelClient) gatherReplicaStats(acc telegraf.Accumulator, masterName string) error { + replicasCmd := redis.NewSliceCmd("sentinel", "replicas", masterName) + if err := client.sentinel.Process(replicasCmd); err != nil { + return err + } + + replicas, err := replicasCmd.Result() + if err != nil { + return err + } + + // Break out of the loop if one of the items comes out malformed + // It's safe to assume that if we fail parsing one item that the rest will fail too + // This is because we are iterating over a single server response + for _, replica := range replicas { + replica, ok := replica.([]interface{}) + if !ok { + return fmt.Errorf("unable to process replica response") + } + + rm := toMap(replica) + replicaTags, replicaFields, err := convertSentinelReplicaOutput(client.tags, masterName, rm) + if err != nil { + return err + } + + acc.AddFields(measurementReplicas, replicaFields, replicaTags) + } + + return nil +} + +func (client *RedisSentinelClient) gatherSentinelStats(acc telegraf.Accumulator, masterName string) error { + sentinelsCmd := redis.NewSliceCmd("sentinel", "sentinels", masterName) + if err := client.sentinel.Process(sentinelsCmd); err != nil { + return err +
} + + sentinels, err := sentinelsCmd.Result() + if err != nil { + return err + } + + // Break out of the loop if one of the items comes out malformed + // It's safe to assume that if we fail parsing one item that the rest will fail too + // This is because we are iterating over a single server response + for _, sentinel := range sentinels { + sentinel, ok := sentinel.([]interface{}) + if !ok { + return fmt.Errorf("unable to process sentinel response") + } + + sm := toMap(sentinel) + sentinelTags, sentinelFields, err := convertSentinelSentinelsOutput(client.tags, masterName, sm) + if err != nil { + return err + } + + acc.AddFields(measurementSentinels, sentinelFields, sentinelTags) + } + + return nil +} + +// converts `sentinel masters ` output to tags and fields +func convertSentinelMastersOutput( + globalTags map[string]string, + master map[string]string, + quorumErr error, +) (map[string]string, map[string]interface{}, error) { + tags := globalTags + + tags["master"] = master["name"] + + fields, err := prepareFieldValues(master, measurementMastersFields) + if err != nil { + return nil, nil, err + } + + fields["has_quorum"] = quorumErr == nil + + return tags, fields, nil +} + +// converts `sentinel sentinels ` output to tags and fields +func convertSentinelSentinelsOutput( + globalTags map[string]string, + masterName string, + sentinelMaster map[string]string, +) (map[string]string, map[string]interface{}, error) { + tags := globalTags + + tags["sentinel_ip"] = sentinelMaster["ip"] + tags["sentinel_port"] = sentinelMaster["port"] + tags["master"] = masterName + + fields, err := prepareFieldValues(sentinelMaster, measurementSentinelsFields) + if err != nil { + return nil, nil, err + } + + return tags, fields, nil +} + +// converts `sentinel replicas ` output to tags and fields +func convertSentinelReplicaOutput( + globalTags map[string]string, + masterName string, + replica map[string]string, +) (map[string]string, map[string]interface{}, error) { + tags := globalTags + + tags["replica_ip"] = replica["ip"] + tags["replica_port"] = replica["port"] + tags["master"] = masterName + + fields, err := prepareFieldValues(replica, measurementReplicasFields) + if err != nil { + return nil, nil, err + } + + return tags, fields, nil +} + +// convertSentinelInfoOutput parses `INFO` command output +// Largely copied from the Redis input plugin's gatherInfoOutput() +func convertSentinelInfoOutput( + globalTags map[string]string, + rdr io.Reader, +) (map[string]string, map[string]interface{}, error) { + scanner := bufio.NewScanner(rdr) + rawFields := make(map[string]string) + + tags := globalTags + + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 { + continue + } + + // Redis denotes configuration sections with a hashtag + // Example of the section header: # Clients + if line[0] == '#' { + // Nothing interesting here + continue + } + + parts := strings.SplitN(line, ":", 2) + if len(parts) < 2 { + // Not a valid configuration option + continue + } + + key := strings.TrimSpace(parts[0]) + val := strings.TrimSpace(parts[1]) + + rawFields[key] = val + } + + fields, err := prepareFieldValues(rawFields, measurementSentinelFields) + if err != nil { + return nil, nil, err + } + + // Rename the field and convert it to nanoseconds + secs, ok := fields["uptime_in_seconds"].(int64) + if !ok { + return nil, nil, fmt.Errorf("uptime type %T is not int64", fields["uptime_in_seconds"]) + } + fields["uptime_ns"] = secs * 1000_000_000 + delete(fields, "uptime_in_seconds") + + // Rename in order to match 
the "redis" input plugin + fields["clients"] = fields["connected_clients"] + delete(fields, "connected_clients") + + return tags, fields, nil +} diff --git a/plugins/inputs/redis_sentinel/redis_sentinel_test.go b/plugins/inputs/redis_sentinel/redis_sentinel_test.go new file mode 100644 index 0000000000000..506d17778cb98 --- /dev/null +++ b/plugins/inputs/redis_sentinel/redis_sentinel_test.go @@ -0,0 +1,325 @@ +package redis_sentinel + +import ( + "bufio" + "bytes" + "fmt" + "os" + "testing" + "time" + + "github.com/docker/go-connections/nat" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/stretchr/testify/require" +) + +const masterName = "mymaster" + +func TestRedisSentinelConnectIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + servicePort := "6379" + container := testutil.Container{ + Image: "redis:alpine", + ExposedPorts: []string{servicePort}, + WaitingFor: wait.ForListeningPort(nat.Port(servicePort)), + } + err := container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + addr := fmt.Sprintf("tcp://%s:%s", container.Address, container.Ports[servicePort]) + + r := &RedisSentinel{ + Servers: []string{addr}, + } + + var acc testutil.Accumulator + + err = acc.GatherError(r.Gather) + require.NoError(t, err) +} + +func TestRedisSentinelMasters(t *testing.T) { + now := time.Now() + + globalTags := map[string]string{ + "port": "6379", + "source": "redis.io", + } + + expectedTags := map[string]string{ + "port": "6379", + "source": "redis.io", + "master": masterName, + } + + // has_quorum is a custom field + expectedFields := map[string]interface{}{ + "config_epoch": 0, + "down_after_milliseconds": 30000, + "failover_timeout": 180000, + "flags": "master", + "info_refresh": 8819, + "ip": "127.0.0.1", + "last_ok_ping_reply": 174, + "last_ping_reply": 174, + "last_ping_sent": 0, + "link_pending_commands": 0, + "link_refcount": 1, + "num_other_sentinels": 1, + "num_slaves": 0, + "parallel_syncs": 1, + "port": 6379, + "quorum": 2, + "role_reported": "master", + "role_reported_time": 83138826, + "has_quorum": true, + } + + expectedMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementMasters, expectedTags, expectedFields, now), + } + + sentinelMastersOutput := map[string]string{ + "config_epoch": "0", + "down_after_milliseconds": "30000", + "failover_timeout": "180000", + "flags": "master", + "info_refresh": "8819", + "ip": "127.0.0.1", + "last_ok_ping_reply": "174", + "last_ping_reply": "174", + "last_ping_sent": "0", + "link_pending_commands": "0", + "link_refcount": "1", + "name": "mymaster", + "num_other_sentinels": "1", + "num_slaves": "0", + "parallel_syncs": "1", + "port": "6379", + "quorum": "2", + "role_reported": "master", + "role_reported_time": "83138826", + "runid": "ff3dadd1cfea3043de4d25711d93f01a564562f7", + } + + sentinelTags, sentinelFields, sentinalErr := convertSentinelMastersOutput(globalTags, sentinelMastersOutput, nil) + require.NoErrorf(t, sentinalErr, "failed converting output: %v", sentinalErr) + + actualMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementMasters, sentinelTags, sentinelFields, now), + } + + testutil.RequireMetricsEqual(t, expectedMetrics, actualMetrics, testutil.IgnoreTime()) +} + +func TestRedisSentinels(t *testing.T) { + now := time.Now() + + globalTags := 
make(map[string]string) + + expectedTags := map[string]string{ + "sentinel_ip": "127.0.0.1", + "sentinel_port": "26380", + "master": masterName, + } + expectedFields := map[string]interface{}{ + "name": "adfd343f6b6ecc77e2b9636de6d9f28d4b827521", + "flags": "sentinel", + "link_pending_commands": 0, + "link_refcount": 1, + "last_ping_sent": 0, + "last_ok_ping_reply": 516, + "last_ping_reply": 516, + "down_after_milliseconds": 30000, + "last_hello_message": 1905, + "voted_leader": "?", + "voted_leader_epoch": 0, + } + + expectedMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementSentinels, expectedTags, expectedFields, now), + } + + sentinelsOutput := map[string]string{ + "name": "adfd343f6b6ecc77e2b9636de6d9f28d4b827521", + "ip": "127.0.0.1", + "port": "26380", + "runid": "adfd343f6b6ecc77e2b9636de6d9f28d4b827521", + "flags": "sentinel", + "link_pending_commands": "0", + "link_refcount": "1", + "last_ping_sent": "0", + "last_ok_ping_reply": "516", + "last_ping_reply": "516", + "down_after_milliseconds": "30000", + "last_hello_message": "1905", + "voted_leader": "?", + "voted_leader_epoch": "0", + } + + sentinelTags, sentinelFields, sentinelErr := convertSentinelSentinelsOutput(globalTags, masterName, sentinelsOutput) + require.NoErrorf(t, sentinelErr, "failed converting output: %v", sentinelErr) + + actualMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementSentinels, sentinelTags, sentinelFields, now), + } + + testutil.RequireMetricsEqual(t, expectedMetrics, actualMetrics) +} + +func TestRedisSentinelReplicas(t *testing.T) { + now := time.Now() + + globalTags := make(map[string]string) + + expectedTags := map[string]string{ + "replica_ip": "127.0.0.1", + "replica_port": "6380", + "master": masterName, + } + expectedFields := map[string]interface{}{ + "down_after_milliseconds": 30000, + "flags": "slave", + "info_refresh": 8476, + "last_ok_ping_reply": 987, + "last_ping_reply": 987, + "last_ping_sent": 0, + "link_pending_commands": 0, + "link_refcount": 1, + "master_host": "127.0.0.1", + "master_link_down_time": 0, + "master_link_status": "ok", + "master_port": 6379, + "name": "127.0.0.1:6380", + "role_reported": "slave", + "role_reported_time": 10267432, + "slave_priority": 100, + "slave_repl_offset": 1392400, + } + + expectedMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementReplicas, expectedTags, expectedFields, now), + } + + replicasOutput := map[string]string{ + "down_after_milliseconds": "30000", + "flags": "slave", + "info_refresh": "8476", + "ip": "127.0.0.1", + "last_ok_ping_reply": "987", + "last_ping_reply": "987", + "last_ping_sent": "0", + "link_pending_commands": "0", + "link_refcount": "1", + "master_host": "127.0.0.1", + "master_link_down_time": "0", + "master_link_status": "ok", + "master_port": "6379", + "name": "127.0.0.1:6380", + "port": "6380", + "role_reported": "slave", + "role_reported_time": "10267432", + "runid": "70e07dad9e450e2d35f1b75338e0a5341b59d710", + "slave_priority": "100", + "slave_repl_offset": "1392400", + } + + sentinelTags, sentinelFields, sentinalErr := convertSentinelReplicaOutput(globalTags, masterName, replicasOutput) + require.NoErrorf(t, sentinalErr, "failed converting output: %v", sentinalErr) + + actualMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementReplicas, sentinelTags, sentinelFields, now), + } + + testutil.RequireMetricsEqual(t, expectedMetrics, actualMetrics) +} + +func TestRedisSentinelInfoAll(t *testing.T) { + now := time.Now() + + globalTags := map[string]string{ + "port": "6379", + 
"source": "redis.io", + } + + expectedTags := map[string]string{ + "port": "6379", + "source": "redis.io", + } + + expectedFields := map[string]interface{}{ + "lru_clock": int64(15585808), + "uptime_ns": int64(901000000000), + "redis_version": "5.0.5", + + "clients": int64(2), + "client_recent_max_input_buffer": int64(2), + "client_recent_max_output_buffer": int64(0), + "blocked_clients": int64(0), + + "used_cpu_sys": float64(0.786872), + "used_cpu_user": float64(0.939455), + "used_cpu_sys_children": float64(0.000000), + "used_cpu_user_children": float64(0.000000), + + "total_connections_received": int64(2), + "total_commands_processed": int64(6), + "instantaneous_ops_per_sec": int64(0), + "total_net_input_bytes": int64(124), + "total_net_output_bytes": int64(10148), + "instantaneous_input_kbps": float64(0.00), + "instantaneous_output_kbps": float64(0.00), + "rejected_connections": int64(0), + "sync_full": int64(0), + "sync_partial_ok": int64(0), + "sync_partial_err": int64(0), + "expired_keys": int64(0), + "expired_stale_perc": float64(0.00), + "expired_time_cap_reached_count": int64(0), + "evicted_keys": int64(0), + "keyspace_hits": int64(0), + "keyspace_misses": int64(0), + "pubsub_channels": int64(0), + "pubsub_patterns": int64(0), + "latest_fork_usec": int64(0), + "migrate_cached_sockets": int64(0), + "slave_expires_tracked_keys": int64(0), + "active_defrag_hits": int64(0), + "active_defrag_misses": int64(0), + "active_defrag_key_hits": int64(0), + "active_defrag_key_misses": int64(0), + + "sentinel_masters": int64(2), + "sentinel_running_scripts": int64(0), + "sentinel_scripts_queue_length": int64(0), + "sentinel_simulate_failure_flags": int64(0), + "sentinel_tilt": int64(0), + } + + expectedMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementSentinel, expectedTags, expectedFields, now), + } + + sentinelInfoResponse, err := os.ReadFile("testdata/sentinel.info.response") + require.NoErrorf(t, err, "could not init fixture: %v", err) + + rdr := bufio.NewReader(bytes.NewReader(sentinelInfoResponse)) + + sentinelTags, sentinelFields, sentinalErr := convertSentinelInfoOutput(globalTags, rdr) + require.NoErrorf(t, sentinalErr, "failed converting output: %v", sentinalErr) + + actualMetrics := []telegraf.Metric{ + testutil.MustMetric(measurementSentinel, sentinelTags, sentinelFields, now), + } + + testutil.RequireMetricsEqual(t, expectedMetrics, actualMetrics) +} diff --git a/plugins/inputs/redis_sentinel/redis_sentinel_types.go b/plugins/inputs/redis_sentinel/redis_sentinel_types.go new file mode 100644 index 0000000000000..1f626c712bbbe --- /dev/null +++ b/plugins/inputs/redis_sentinel/redis_sentinel_types.go @@ -0,0 +1,113 @@ +package redis_sentinel + +type configFieldType int32 + +const ( + configFieldTypeInteger configFieldType = iota + configFieldTypeString + configFieldTypeFloat +) + +// Supported fields for "redis_sentinel_masters" +var measurementMastersFields = map[string]configFieldType{ + "config_epoch": configFieldTypeInteger, + "down_after_milliseconds": configFieldTypeInteger, + "failover_timeout": configFieldTypeInteger, + "flags": configFieldTypeString, + "info_refresh": configFieldTypeInteger, + "ip": configFieldTypeString, + "last_ok_ping_reply": configFieldTypeInteger, + "last_ping_reply": configFieldTypeInteger, + "last_ping_sent": configFieldTypeInteger, + "link_pending_commands": configFieldTypeInteger, + "link_refcount": configFieldTypeInteger, + "num_other_sentinels": configFieldTypeInteger, + "num_slaves": configFieldTypeInteger, + "parallel_syncs": 
configFieldTypeInteger, + "port": configFieldTypeInteger, + "quorum": configFieldTypeInteger, + "role_reported": configFieldTypeString, + "role_reported_time": configFieldTypeInteger, +} + +// Supported fields for "redis_sentinel" +var measurementSentinelFields = map[string]configFieldType{ + "active_defrag_hits": configFieldTypeInteger, + "active_defrag_key_hits": configFieldTypeInteger, + "active_defrag_key_misses": configFieldTypeInteger, + "active_defrag_misses": configFieldTypeInteger, + "blocked_clients": configFieldTypeInteger, + "client_recent_max_input_buffer": configFieldTypeInteger, + "client_recent_max_output_buffer": configFieldTypeInteger, + "connected_clients": configFieldTypeInteger, // Renamed to "clients" + "evicted_keys": configFieldTypeInteger, + "expired_keys": configFieldTypeInteger, + "expired_stale_perc": configFieldTypeFloat, + "expired_time_cap_reached_count": configFieldTypeInteger, + "instantaneous_input_kbps": configFieldTypeFloat, + "instantaneous_ops_per_sec": configFieldTypeInteger, + "instantaneous_output_kbps": configFieldTypeFloat, + "keyspace_hits": configFieldTypeInteger, + "keyspace_misses": configFieldTypeInteger, + "latest_fork_usec": configFieldTypeInteger, + "lru_clock": configFieldTypeInteger, + "migrate_cached_sockets": configFieldTypeInteger, + "pubsub_channels": configFieldTypeInteger, + "pubsub_patterns": configFieldTypeInteger, + "redis_version": configFieldTypeString, + "rejected_connections": configFieldTypeInteger, + "sentinel_masters": configFieldTypeInteger, + "sentinel_running_scripts": configFieldTypeInteger, + "sentinel_scripts_queue_length": configFieldTypeInteger, + "sentinel_simulate_failure_flags": configFieldTypeInteger, + "sentinel_tilt": configFieldTypeInteger, + "slave_expires_tracked_keys": configFieldTypeInteger, + "sync_full": configFieldTypeInteger, + "sync_partial_err": configFieldTypeInteger, + "sync_partial_ok": configFieldTypeInteger, + "total_commands_processed": configFieldTypeInteger, + "total_connections_received": configFieldTypeInteger, + "total_net_input_bytes": configFieldTypeInteger, + "total_net_output_bytes": configFieldTypeInteger, + "uptime_in_seconds": configFieldTypeInteger, // Renamed to "uptime_ns" + "used_cpu_sys": configFieldTypeFloat, + "used_cpu_sys_children": configFieldTypeFloat, + "used_cpu_user": configFieldTypeFloat, + "used_cpu_user_children": configFieldTypeFloat, +} + +// Supported fields for "redis_sentinel_sentinels" +var measurementSentinelsFields = map[string]configFieldType{ + "down_after_milliseconds": configFieldTypeInteger, + "flags": configFieldTypeString, + "last_hello_message": configFieldTypeInteger, + "last_ok_ping_reply": configFieldTypeInteger, + "last_ping_reply": configFieldTypeInteger, + "last_ping_sent": configFieldTypeInteger, + "link_pending_commands": configFieldTypeInteger, + "link_refcount": configFieldTypeInteger, + "name": configFieldTypeString, + "voted_leader": configFieldTypeString, + "voted_leader_epoch": configFieldTypeInteger, +} + +// Supported fields for "redis_sentinel_replicas" +var measurementReplicasFields = map[string]configFieldType{ + "down_after_milliseconds": configFieldTypeInteger, + "flags": configFieldTypeString, + "info_refresh": configFieldTypeInteger, + "last_ok_ping_reply": configFieldTypeInteger, + "last_ping_reply": configFieldTypeInteger, + "last_ping_sent": configFieldTypeInteger, + "link_pending_commands": configFieldTypeInteger, + "link_refcount": configFieldTypeInteger, + "master_host": configFieldTypeString, + 
"master_link_down_time": configFieldTypeInteger, + "master_link_status": configFieldTypeString, + "master_port": configFieldTypeInteger, + "name": configFieldTypeString, + "role_reported": configFieldTypeString, + "role_reported_time": configFieldTypeInteger, + "slave_priority": configFieldTypeInteger, + "slave_repl_offset": configFieldTypeInteger, +} diff --git a/plugins/inputs/redis_sentinel/sample.conf b/plugins/inputs/redis_sentinel/sample.conf new file mode 100644 index 0000000000000..ae8f89aedaf41 --- /dev/null +++ b/plugins/inputs/redis_sentinel/sample.conf @@ -0,0 +1,19 @@ +# Read metrics from one or many redis-sentinel servers +[[inputs.redis_sentinel]] + ## specify servers via a url matching: + ## [protocol://][:password]@address[:port] + ## e.g. + ## tcp://localhost:26379 + ## tcp://:password@192.168.99.100 + ## unix:///var/run/redis-sentinel.sock + ## + ## If no servers are specified, then localhost is used as the host. + ## If no port is specified, 26379 is used + # servers = ["tcp://localhost:26379"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true diff --git a/plugins/inputs/redis_sentinel/testdata/sentinel.info.response b/plugins/inputs/redis_sentinel/testdata/sentinel.info.response new file mode 100644 index 0000000000000..6915d01dae1f3 --- /dev/null +++ b/plugins/inputs/redis_sentinel/testdata/sentinel.info.response @@ -0,0 +1,71 @@ +# Server +redis_version:5.0.5 +redis_git_sha1:00000000 +redis_git_dirty:0 +redis_build_id:78473e0efb96880a +redis_mode:sentinel +os:Linux 5.1.3-arch1-1-ARCH x86_64 +arch_bits:64 +multiplexing_api:epoll +atomicvar_api:atomic-builtin +gcc_version:8.3.0 +process_id:2837 +run_id:ecbbb2ca0035a532b03748fbec9f3f8ca1967536 +tcp_port:26379 +uptime_in_seconds:901 +uptime_in_days:0 +hz:10 +configured_hz:10 +lru_clock:15585808 +executable:/home/adam/redis-sentinel +config_file:/home/adam/rs1.conf + +# Clients +connected_clients:2 +client_recent_max_input_buffer:2 +client_recent_max_output_buffer:0 +blocked_clients:0 + +# CPU +used_cpu_sys:0.786872 +used_cpu_user:0.939455 +used_cpu_sys_children:0.000000 +used_cpu_user_children:0.000000 + +# Stats +total_connections_received:2 +total_commands_processed:6 +instantaneous_ops_per_sec:0 +total_net_input_bytes:124 +total_net_output_bytes:10148 +instantaneous_input_kbps:0.00 +instantaneous_output_kbps:0.00 +rejected_connections:0 +sync_full:0 +sync_partial_ok:0 +sync_partial_err:0 +expired_keys:0 +expired_stale_perc:0.00 +expired_time_cap_reached_count:0 +evicted_keys:0 +keyspace_hits:0 +keyspace_misses:0 +pubsub_channels:0 +pubsub_patterns:0 +latest_fork_usec:0 +migrate_cached_sockets:0 +slave_expires_tracked_keys:0 +active_defrag_hits:0 +active_defrag_misses:0 +active_defrag_key_hits:0 +active_defrag_key_misses:0 + +# Sentinel +sentinel_masters:2 +sentinel_tilt:0 +sentinel_running_scripts:0 +sentinel_scripts_queue_length:0 +sentinel_simulate_failure_flags:0 +master0:name=myothermaster,status=ok,address=127.0.0.1:6380,slaves=1,sentinels=2 +master0:name=myothermaster,status=ok,address=127.0.0.1:6381,slaves=1,sentinels=2 +master1:name=mymaster,status=ok,address=127.0.0.1:6379,slaves=1,sentinels=1 diff --git a/plugins/inputs/rethinkdb/README.md b/plugins/inputs/rethinkdb/README.md index b1946644ea13a..3a03344b377a6 100644 --- a/plugins/inputs/rethinkdb/README.md +++ b/plugins/inputs/rethinkdb/README.md @@ -2,12 +2,10 @@ Collect metrics from 
[RethinkDB](https://www.rethinkdb.com/). -### Configuration +## Configuration -This section contains the default TOML to configure the plugin. You can -generate it using `telegraf --usage rethinkdb`. - -```toml +```toml @sample.conf +# Read metrics from one or many RethinkDB servers [[inputs.rethinkdb]] ## An array of URI to gather stats about. Specify an ip or hostname ## with optional port add password. ie, @@ -25,7 +23,7 @@ generate it using `telegraf --usage rethinkdb`. # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] ``` -### Metrics +## Metrics - rethinkdb - tags: @@ -44,7 +42,7 @@ generate it using `telegraf --usage rethinkdb`. - disk_usage_metadata_bytes (integer, bytes) - disk_usage_preallocated_bytes (integer, bytes) -+ rethinkdb_engine +- rethinkdb_engine - tags: - type - ns diff --git a/plugins/inputs/rethinkdb/rethinkdb.go b/plugins/inputs/rethinkdb/rethinkdb.go index dc6b03620b153..6c881f99ce2bb 100644 --- a/plugins/inputs/rethinkdb/rethinkdb.go +++ b/plugins/inputs/rethinkdb/rethinkdb.go @@ -1,53 +1,37 @@ +//go:generate ../../../tools/readme_config_includer/generator package rethinkdb import ( + _ "embed" "fmt" "net/url" "sync" + "gopkg.in/gorethink/gorethink.v3" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - - "gopkg.in/gorethink/gorethink.v3" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type RethinkDB struct { Servers []string } -var sampleConfig = ` - ## An array of URI to gather stats about. Specify an ip or hostname - ## with optional port add password. ie, - ## rethinkdb://user:auth_key@10.10.3.30:28105, - ## rethinkdb://10.10.3.33:18832, - ## 10.0.0.1:10000, etc. - servers = ["127.0.0.1:28015"] - ## - ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, - ## protocol have to be named "rethinkdb2" - it will use 1_0 H. - # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] - ## - ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol - ## have to be named "rethinkdb". - # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] -` - -func (r *RethinkDB) SampleConfig() string { - return sampleConfig -} +var localhost = &Server{URL: &url.URL{Host: "127.0.0.1:28015"}} -func (r *RethinkDB) Description() string { - return "Read metrics from one or many RethinkDB servers" +func (*RethinkDB) SampleConfig() string { + return sampleConfig } -var localhost = &Server{Url: &url.URL{Host: "127.0.0.1:28015"}} - // Reads stats from all configured servers accumulates stats. // Returns one of the errors encountered while gather stats (if any). func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { if len(r.Servers) == 0 { - r.gatherServer(localhost, acc) - return nil + return r.gatherServer(localhost, acc) } var wg sync.WaitGroup @@ -55,17 +39,17 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { for _, serv := range r.Servers { u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse to address '%s': %s", serv, err)) + acc.AddError(fmt.Errorf("unable to parse to address '%s': %s", serv, err)) continue } else if u.Scheme == "" { // fallback to simple string based address (i.e. 
"10.0.0.1:10000") u.Host = serv } wg.Add(1) - go func(serv string) { + go func() { defer wg.Done() - acc.AddError(r.gatherServer(&Server{Url: u}, acc)) - }(serv) + acc.AddError(r.gatherServer(&Server{URL: u}, acc)) + }() } wg.Wait() @@ -76,20 +60,20 @@ func (r *RethinkDB) Gather(acc telegraf.Accumulator) error { func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error { var err error connectOpts := gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, DiscoverHosts: false, } - if server.Url.User != nil { - pwd, set := server.Url.User.Password() + if server.URL.User != nil { + pwd, set := server.URL.User.Password() if set && pwd != "" { connectOpts.AuthKey = pwd connectOpts.HandshakeVersion = gorethink.HandshakeV0_4 } } - if server.Url.Scheme == "rethinkdb2" && server.Url.User != nil { - pwd, set := server.Url.User.Password() + if server.URL.Scheme == "rethinkdb2" && server.URL.User != nil { + pwd, set := server.URL.User.Password() if set && pwd != "" { - connectOpts.Username = server.Url.User.Username() + connectOpts.Username = server.URL.User.Username() connectOpts.Password = pwd connectOpts.HandshakeVersion = gorethink.HandshakeV1_0 } @@ -97,7 +81,7 @@ func (r *RethinkDB) gatherServer(server *Server, acc telegraf.Accumulator) error server.session, err = gorethink.Connect(connectOpts) if err != nil { - return fmt.Errorf("Unable to connect to RethinkDB, %s\n", err.Error()) + return fmt.Errorf("unable to connect to RethinkDB, %s", err.Error()) } defer server.session.Close() diff --git a/plugins/inputs/rethinkdb/rethinkdb_data.go b/plugins/inputs/rethinkdb/rethinkdb_data.go index ca4ac75523455..159f6af9d992b 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data.go @@ -8,7 +8,7 @@ import ( ) type serverStatus struct { - Id string `gorethink:"id"` + ID string `gorethink:"id"` Network struct { Addresses []Address `gorethink:"canonical_addresses"` Hostname string `gorethink:"hostname"` @@ -41,7 +41,7 @@ type Engine struct { } type tableStatus struct { - Id string `gorethink:"id"` + ID string `gorethink:"id"` DB string `gorethink:"db"` Name string `gorethink:"name"` } diff --git a/plugins/inputs/rethinkdb/rethinkdb_data_test.go b/plugins/inputs/rethinkdb/rethinkdb_data_test.go index ce1d963b973fc..2f9c90f1e9e7c 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_data_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_data_test.go @@ -3,8 +3,9 @@ package rethinkdb import ( "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) var tags = make(map[string]string) @@ -36,7 +37,7 @@ func TestAddEngineStats(t *testing.T) { engine.AddEngineStats(keys, &acc, tags) for _, metric := range keys { - assert.True(t, acc.HasInt64Field("rethinkdb_engine", metric)) + require.True(t, acc.HasInt64Field("rethinkdb_engine", metric)) } } @@ -59,15 +60,15 @@ func TestAddEngineStatsPartial(t *testing.T) { "written_docs_per_sec", } - missing_keys := []string{ + missingKeys := []string{ "total_queries", "total_reads", "total_writes", } engine.AddEngineStats(keys, &acc, tags) - for _, metric := range missing_keys { - assert.False(t, acc.HasInt64Field("rethinkdb", metric)) + for _, metric := range missingKeys { + require.False(t, acc.HasInt64Field("rethinkdb", metric)) } } @@ -107,6 +108,6 @@ func TestAddStorageStats(t *testing.T) { storage.AddStats(&acc, tags) for _, metric := range keys { - assert.True(t, acc.HasInt64Field("rethinkdb", metric)) + 
require.True(t, acc.HasInt64Field("rethinkdb", metric)) } } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server.go b/plugins/inputs/rethinkdb/rethinkdb_server.go index c10605aa6d83e..553deddcb0219 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server.go @@ -9,37 +9,36 @@ import ( "strconv" "strings" - "github.com/influxdata/telegraf" - "gopkg.in/gorethink/gorethink.v3" + + "github.com/influxdata/telegraf" ) type Server struct { - Url *url.URL + URL *url.URL session *gorethink.Session serverStatus serverStatus } func (s *Server) gatherData(acc telegraf.Accumulator) error { if err := s.getServerStatus(); err != nil { - return fmt.Errorf("Failed to get server_status, %s\n", err) + return fmt.Errorf("failed to get server_status, %s", err) } if err := s.validateVersion(); err != nil { - return fmt.Errorf("Failed version validation, %s\n", err.Error()) + return fmt.Errorf("failed version validation, %s", err.Error()) } if err := s.addClusterStats(acc); err != nil { - fmt.Printf("error adding cluster stats, %s\n", err.Error()) - return fmt.Errorf("Error adding cluster stats, %s\n", err.Error()) + return fmt.Errorf("error adding cluster stats, %s", err.Error()) } if err := s.addMemberStats(acc); err != nil { - return fmt.Errorf("Error adding member stats, %s\n", err.Error()) + return fmt.Errorf("error adding member stats, %s", err.Error()) } - if err := s.addTableStats(acc); err != nil { - return fmt.Errorf("Error adding table stats, %s\n", err.Error()) + if err := s.addTablesStats(acc); err != nil { + return fmt.Errorf("error adding table stats, %s", err.Error()) } return nil @@ -50,7 +49,7 @@ func (s *Server) validateVersion() error { return errors.New("could not determine the RethinkDB server version: process.version key missing") } - versionRegexp := regexp.MustCompile("\\d.\\d.\\d") + versionRegexp := regexp.MustCompile(`\d.\d.\d`) versionString := versionRegexp.FindString(s.serverStatus.Process.Version) if versionString == "" { return fmt.Errorf("could not determine the RethinkDB server version: malformed version string (%v)", s.serverStatus.Process.Version) @@ -58,7 +57,7 @@ func (s *Server) validateVersion() error { majorVersion, err := strconv.Atoi(strings.Split(versionString, "")[0]) if err != nil || majorVersion < 2 { - return fmt.Errorf("unsupported major version %s\n", versionString) + return fmt.Errorf("unsupported major version %s", versionString) } return nil } @@ -78,9 +77,9 @@ func (s *Server) getServerStatus() error { if err != nil { return errors.New("could not parse server_status results") } - host, port, err := net.SplitHostPort(s.Url.Host) + host, port, err := net.SplitHostPort(s.URL.Host) if err != nil { - return fmt.Errorf("unable to determine provided hostname from %s\n", s.Url.Host) + return fmt.Errorf("unable to determine provided hostname from %s", s.URL.Host) } driverPort, _ := strconv.Atoi(port) for _, ss := range serverStatuses { @@ -92,12 +91,12 @@ func (s *Server) getServerStatus() error { } } - return fmt.Errorf("unable to determine host id from server_status with %s", s.Url.Host) + return fmt.Errorf("unable to determine host id from server_status with %s", s.URL.Host) } func (s *Server) getDefaultTags() map[string]string { tags := make(map[string]string) - tags["rethinkdb_host"] = s.Url.Host + tags["rethinkdb_host"] = s.URL.Host tags["rethinkdb_hostname"] = s.serverStatus.Network.Hostname return tags } @@ -113,12 +112,12 @@ var ClusterTracking = []string{ func (s *Server) addClusterStats(acc 
telegraf.Accumulator) error { cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"cluster"}).Run(s.session) if err != nil { - return fmt.Errorf("cluster stats query error, %s\n", err.Error()) + return fmt.Errorf("cluster stats query error, %s", err.Error()) } defer cursor.Close() var clusterStats stats if err := cursor.One(&clusterStats); err != nil { - return fmt.Errorf("failure to parse cluster stats, %s\n", err.Error()) + return fmt.Errorf("failure to parse cluster stats, %s", err.Error()) } tags := s.getDefaultTags() @@ -139,14 +138,14 @@ var MemberTracking = []string{ } func (s *Server) addMemberStats(acc telegraf.Accumulator) error { - cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.Id}).Run(s.session) + cursor, err := gorethink.DB("rethinkdb").Table("stats").Get([]string{"server", s.serverStatus.ID}).Run(s.session) if err != nil { - return fmt.Errorf("member stats query error, %s\n", err.Error()) + return fmt.Errorf("member stats query error, %s", err.Error()) } defer cursor.Close() var memberStats stats if err := cursor.One(&memberStats); err != nil { - return fmt.Errorf("failure to parse member stats, %s\n", err.Error()) + return fmt.Errorf("failure to parse member stats, %s", err.Error()) } tags := s.getDefaultTags() @@ -162,10 +161,10 @@ var TableTracking = []string{ "total_writes", } -func (s *Server) addTableStats(acc telegraf.Accumulator) error { +func (s *Server) addTablesStats(acc telegraf.Accumulator) error { tablesCursor, err := gorethink.DB("rethinkdb").Table("table_status").Run(s.session) if err != nil { - return fmt.Errorf("table stats query error, %s\n", err.Error()) + return fmt.Errorf("table stats query error, %s", err.Error()) } defer tablesCursor.Close() @@ -175,23 +174,33 @@ func (s *Server) addTableStats(acc telegraf.Accumulator) error { return errors.New("could not parse table_status results") } for _, table := range tables { - cursor, err := gorethink.DB("rethinkdb").Table("stats"). - Get([]string{"table_server", table.Id, s.serverStatus.Id}). - Run(s.session) + err = s.addTableStats(acc, table) if err != nil { - return fmt.Errorf("table stats query error, %s\n", err.Error()) - } - defer cursor.Close() - var ts tableStats - if err := cursor.One(&ts); err != nil { - return fmt.Errorf("failure to parse table stats, %s\n", err.Error()) + return err } + } + return nil +} - tags := s.getDefaultTags() - tags["type"] = "data" - tags["ns"] = fmt.Sprintf("%s.%s", table.DB, table.Name) - ts.Engine.AddEngineStats(TableTracking, acc, tags) - ts.Storage.AddStats(acc, tags) +func (s *Server) addTableStats(acc telegraf.Accumulator, table tableStatus) error { + cursor, err := gorethink.DB("rethinkdb").Table("stats"). + Get([]string{"table_server", table.ID, s.serverStatus.ID}). 
+ Run(s.session) + if err != nil { + return fmt.Errorf("table stats query error, %s", err.Error()) + } + defer cursor.Close() + + var ts tableStats + if err := cursor.One(&ts); err != nil { + return fmt.Errorf("failure to parse table stats, %s", err.Error()) } + + tags := s.getDefaultTags() + tags["type"] = "data" + tags["ns"] = fmt.Sprintf("%s.%s", table.DB, table.Name) + ts.Engine.AddEngineStats(TableTracking, acc, tags) + ts.Storage.AddStats(acc, tags) + return nil } diff --git a/plugins/inputs/rethinkdb/rethinkdb_server_test.go b/plugins/inputs/rethinkdb/rethinkdb_server_test.go index 82ff292804a8c..0584dcc90c33b 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_server_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_server_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb @@ -5,9 +6,9 @@ package rethinkdb import ( "testing" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" ) func TestValidateVersion(t *testing.T) { @@ -38,7 +39,7 @@ func TestAddClusterStats(t *testing.T) { require.NoError(t, err) for _, metric := range ClusterTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } @@ -49,7 +50,7 @@ func TestAddMemberStats(t *testing.T) { require.NoError(t, err) for _, metric := range MemberTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } @@ -60,7 +61,7 @@ func TestAddTableStats(t *testing.T) { require.NoError(t, err) for _, metric := range TableTracking { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } keys := []string{ @@ -76,6 +77,6 @@ func TestAddTableStats(t *testing.T) { } for _, metric := range keys { - assert.True(t, acc.HasIntValue(metric)) + require.True(t, acc.HasIntValue(metric)) } } diff --git a/plugins/inputs/rethinkdb/rethinkdb_test.go b/plugins/inputs/rethinkdb/rethinkdb_test.go index fa2cc92f2b06c..651042ab13783 100644 --- a/plugins/inputs/rethinkdb/rethinkdb_test.go +++ b/plugins/inputs/rethinkdb/rethinkdb_test.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package rethinkdb @@ -28,18 +29,18 @@ func init() { func testSetup(m *testing.M) { var err error - server = &Server{Url: &url.URL{Host: connect_url}} + server = &Server{URL: &url.URL{Host: connect_url}} if authKey { server.session, _ = gorethink.Connect(gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, AuthKey: authKey, HandshakeVersion: gorethink.HandshakeV0_4, DiscoverHosts: false, }) } else { server.session, _ = gorethink.Connect(gorethink.ConnectOpts{ - Address: server.Url.Host, + Address: server.URL.Host, Username: username, Password: password, HandshakeVersion: gorethink.HandshakeV1_0, diff --git a/plugins/inputs/rethinkdb/sample.conf b/plugins/inputs/rethinkdb/sample.conf new file mode 100644 index 0000000000000..3d6df1963a9ea --- /dev/null +++ b/plugins/inputs/rethinkdb/sample.conf @@ -0,0 +1,16 @@ +# Read metrics from one or many RethinkDB servers +[[inputs.rethinkdb]] + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port add password. ie, + ## rethinkdb://user:auth_key@10.10.3.30:28105, + ## rethinkdb://10.10.3.33:18832, + ## 10.0.0.1:10000, etc. + servers = ["127.0.0.1:28015"] + + ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, + ## protocol have to be named "rethinkdb2" - it will use 1_0 H. 
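 + ## ("1_0 H." refers to the gorethink HandshakeV1_0 handshake version used + ## for username/password auth; the plain "rethinkdb" protocol below uses the + ## older HandshakeV0_4 together with auth_key, as in the connection code.)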
+ # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] + + ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol + ## have to be named "rethinkdb". + # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] diff --git a/plugins/inputs/riak/README.md b/plugins/inputs/riak/README.md index a435eea4d7f63..9ee49496305e1 100644 --- a/plugins/inputs/riak/README.md +++ b/plugins/inputs/riak/README.md @@ -2,16 +2,16 @@ The Riak plugin gathers metrics from one or more riak instances. -### Configuration: +## Configuration -```toml -# Description +```toml @sample.conf +# Read metrics one or many Riak servers [[inputs.riak]] # Specify a list of one or more riak http servers servers = ["http://localhost:8098"] ``` -### Measurements & Fields: +## Metrics Riak provides one measurement named "riak", with the following fields: @@ -61,18 +61,19 @@ Riak provides one measurement named "riak", with the following fields: - read_repairs - read_repairs_total -Measurements of time (such as node_get_fsm_time_mean) are measured in nanoseconds. +Measurements of time (such as node_get_fsm_time_mean) are measured in +nanoseconds. -### Tags: +### Tags All measurements have the following tags: - server (the host:port of the given server address, ex. `127.0.0.1:8087`) - nodename (the internal node name received, ex. `riak@127.0.0.1`) -### Example Output: +## Example Output -``` +```shell $ ./telegraf --config telegraf.conf --input-filter riak --test > riak,nodename=riak@127.0.0.1,server=localhost:8098 cpu_avg1=31i,cpu_avg15=69i,cpu_avg5=51i,memory_code=11563738i,memory_ets=5925872i,memory_processes=30236069i,memory_system=93074971i,memory_total=123311040i,node_get_fsm_objsize_100=0i,node_get_fsm_objsize_95=0i,node_get_fsm_objsize_99=0i,node_get_fsm_objsize_mean=0i,node_get_fsm_objsize_median=0i,node_get_fsm_siblings_100=0i,node_get_fsm_siblings_95=0i,node_get_fsm_siblings_99=0i,node_get_fsm_siblings_mean=0i,node_get_fsm_siblings_median=0i,node_get_fsm_time_100=0i,node_get_fsm_time_95=0i,node_get_fsm_time_99=0i,node_get_fsm_time_mean=0i,node_get_fsm_time_median=0i,node_gets=0i,node_gets_total=19i,node_put_fsm_time_100=0i,node_put_fsm_time_95=0i,node_put_fsm_time_99=0i,node_put_fsm_time_mean=0i,node_put_fsm_time_median=0i,node_puts=0i,node_puts_total=0i,pbc_active=0i,pbc_connects=0i,pbc_connects_total=20i,vnode_gets=0i,vnode_gets_total=57i,vnode_index_reads=0i,vnode_index_reads_total=0i,vnode_index_writes=0i,vnode_index_writes_total=0i,vnode_puts=0i,vnode_puts_total=0i,read_repair=0i,read_repairs_total=0i 1455913392622482332 ``` diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go index 19f6222890360..8d9552057eb8b 100644 --- a/plugins/inputs/riak/riak.go +++ b/plugins/inputs/riak/riak.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package riak import ( + _ "embed" "encoding/json" "fmt" "net/http" @@ -11,6 +13,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Type Riak gathers statistics from one or more Riak instances type Riak struct { // Servers is a slice of servers as http addresses (ex. 
http://127.0.0.1:8098) @@ -21,19 +27,19 @@ type Riak struct { // NewRiak return a new instance of Riak with a default http client func NewRiak() *Riak { - tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)} + tr := &http.Transport{ResponseHeaderTimeout: 3 * time.Second} client := &http.Client{ Transport: tr, - Timeout: time.Duration(4 * time.Second), + Timeout: 4 * time.Second, } return &Riak{client: client} } // Type riakStats represents the data that is received from Riak type riakStats struct { - CpuAvg1 int64 `json:"cpu_avg1"` - CpuAvg15 int64 `json:"cpu_avg15"` - CpuAvg5 int64 `json:"cpu_avg5"` + CPUAvg1 int64 `json:"cpu_avg1"` + CPUAvg15 int64 `json:"cpu_avg15"` + CPUAvg5 int64 `json:"cpu_avg5"` MemoryCode int64 `json:"memory_code"` MemoryEts int64 `json:"memory_ets"` MemoryProcesses int64 `json:"memory_processes"` @@ -79,22 +85,10 @@ type riakStats struct { ReadRepairsTotal int64 `json:"read_repairs_total"` } -// A sample configuration to only gather stats from localhost, default port. -const sampleConfig = ` - # Specify a list of one or more riak http servers - servers = ["http://localhost:8098"] -` - -// Returns a sample configuration for the plugin -func (r *Riak) SampleConfig() string { +func (*Riak) SampleConfig() string { return sampleConfig } -// Returns a description of the plugin -func (r *Riak) Description() string { - return "Read metrics one or many Riak servers" -} - // Reads stats from all configured servers. func (r *Riak) Gather(acc telegraf.Accumulator) error { // Default to a single server at localhost (default port) if none specified @@ -144,9 +138,9 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error { // Build a map of field values fields := map[string]interface{}{ - "cpu_avg1": stats.CpuAvg1, - "cpu_avg15": stats.CpuAvg15, - "cpu_avg5": stats.CpuAvg5, + "cpu_avg1": stats.CPUAvg1, + "cpu_avg15": stats.CPUAvg15, + "cpu_avg5": stats.CPUAvg5, "memory_code": stats.MemoryCode, "memory_ets": stats.MemoryEts, "memory_processes": stats.MemoryProcesses, diff --git a/plugins/inputs/riak/riak_test.go b/plugins/inputs/riak/riak_test.go index 09f9a961f4d76..90688b17827b0 100644 --- a/plugins/inputs/riak/riak_test.go +++ b/plugins/inputs/riak/riak_test.go @@ -15,7 +15,8 @@ func TestRiak(t *testing.T) { // Create a test server with the const response JSON ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + _, err := fmt.Fprintln(w, response) + require.NoError(t, err) })) defer ts.Close() @@ -31,8 +32,7 @@ func TestRiak(t *testing.T) { acc := &testutil.Accumulator{} // Gather data from the test server - err = riak.Gather(acc) - require.NoError(t, err) + require.NoError(t, riak.Gather(acc)) // Expect the correct values for all known keys expectFields := map[string]interface{}{ diff --git a/plugins/inputs/riak/sample.conf b/plugins/inputs/riak/sample.conf new file mode 100644 index 0000000000000..9e847a7e16aa9 --- /dev/null +++ b/plugins/inputs/riak/sample.conf @@ -0,0 +1,4 @@ +# Read metrics one or many Riak servers +[[inputs.riak]] + # Specify a list of one or more riak http servers + servers = ["http://localhost:8098"] diff --git a/plugins/inputs/riemann_listener/README.md b/plugins/inputs/riemann_listener/README.md new file mode 100644 index 0000000000000..78b02c1689e53 --- /dev/null +++ b/plugins/inputs/riemann_listener/README.md @@ -0,0 +1,43 @@ +# Riemann Listener Input Plugin + +The Riemann Listener is a simple input plugin that 
listens for messages from +clients that use the Riemann protobuf format. + +## Configuration + +```toml @sample.conf +# Riemann protobuf listener +[[inputs.riemann_listener]] + ## URL to listen on + ## Default is "tcp://:5555" + # service_address = "tcp://:8094" + # service_address = "tcp://127.0.0.1:http" + # service_address = "tcp4://:8094" + # service_address = "tcp6://:8094" + # service_address = "tcp6://[2001:db8::1]:8094" + + ## Maximum number of concurrent connections. + ## 0 (default) is unlimited. + # max_connections = 1024 + ## Read timeout. + ## 0 (default) is unlimited. + # read_timeout = "30s" + ## Optional TLS configuration. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Enables client authentication if set. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Maximum socket buffer size (in bytes when no unit specified). + # read_buffer_size = "64KiB" + ## Period between keep alive probes. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" +``` + +Just like Riemann, the default port is 5555. This can be changed via the +configuration above. + +Riemann `Service` is mapped as `measurement`. `metric` and `TTL` are converted +into field values. As Riemann tags are simply an array, they are converted into +`influx_line` format key-value pairs, where both the key and the value are the tag. diff --git a/plugins/inputs/riemann_listener/riemann_listener.go b/plugins/inputs/riemann_listener/riemann_listener.go new file mode 100644 index 0000000000000..f99e0976fd904 --- /dev/null +++ b/plugins/inputs/riemann_listener/riemann_listener.go @@ -0,0 +1,361 @@ +//go:generate ../../../tools/readme_config_includer/generator +package riemann_listener + +import ( + "bytes" + "context" + "crypto/tls" + _ "embed" + "encoding/binary" + "fmt" + "io" + "net" + "os" + "os/signal" + "strings" + "sync" + "time" + + riemanngo "github.com/riemann/riemann-go-client" + riemangoProto "github.com/riemann/riemann-go-client/proto" + "google.golang.org/protobuf/proto" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/metric" + tlsint "github.com/influxdata/telegraf/plugins/common/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + +type RiemannSocketListener struct { + ServiceAddress string `toml:"service_address"` + MaxConnections int `toml:"max_connections"` + ReadBufferSize config.Size `toml:"read_buffer_size"` + ReadTimeout *config.Duration `toml:"read_timeout"` + KeepAlivePeriod *config.Duration `toml:"keep_alive_period"` + SocketMode string `toml:"socket_mode"` + tlsint.ServerConfig + + wg sync.WaitGroup + + Log telegraf.Logger `toml:"-"` + + telegraf.Accumulator +} +type setReadBufferer interface { + SetReadBuffer(sizeInBytes int) error +} + +type riemannListener struct { + net.Listener + *RiemannSocketListener + + sockType string + + connections map[string]net.Conn + connectionsMtx sync.Mutex +} + +func (rsl *riemannListener) listen(ctx context.Context) { + rsl.connections = map[string]net.Conn{} + + wg := sync.WaitGroup{} + + select { + case <-ctx.Done(): + rsl.closeAllConnections() + wg.Wait() + return + default: + for { + c, err := rsl.Accept() + if err != nil { + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + rsl.Log.Error(err.Error()) + } + break + } + + if rsl.ReadBufferSize > 0 { + if srb, ok := c.(setReadBufferer); ok { + if err := srb.SetReadBuffer(int(rsl.ReadBufferSize)); err != nil { + rsl.Log.Warnf("Setting read buffer failed: %v", err) + } + } else { + rsl.Log.Warnf("Unable to set read buffer on a %s socket", rsl.sockType) + } + } + + rsl.connectionsMtx.Lock() + if rsl.MaxConnections > 0 && len(rsl.connections) >= rsl.MaxConnections { + rsl.connectionsMtx.Unlock() + if err := c.Close(); err != nil { + rsl.Log.Warnf("Closing the connection failed: %v", err) + } + continue + } + rsl.connections[c.RemoteAddr().String()] = c + rsl.connectionsMtx.Unlock() + + if err := rsl.setKeepAlive(c); err != nil { + rsl.Log.Errorf("Unable to configure keep alive %q: %s", rsl.ServiceAddress, err.Error()) + } + + wg.Add(1) + go func() { + defer wg.Done() + rsl.read(c) + }() + } + rsl.closeAllConnections() + wg.Wait() + } +} + +func (rsl *riemannListener) closeAllConnections() { + rsl.connectionsMtx.Lock() + for _, c := range rsl.connections { + if err := c.Close(); err != nil { + rsl.Log.Warnf("Closing the connection failed: %v", err.Error()) + } + } + rsl.connectionsMtx.Unlock() +} + +func (rsl *riemannListener) setKeepAlive(c net.Conn) error { + if rsl.KeepAlivePeriod == nil { + return nil + } + tcpc, ok := c.(*net.TCPConn) + if !ok { + return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(rsl.ServiceAddress, "://", 2)[0]) + } + if *rsl.KeepAlivePeriod == 0 { + return tcpc.SetKeepAlive(false) + } + if err := tcpc.SetKeepAlive(true); err != nil { + return err + } + return tcpc.SetKeepAlivePeriod(time.Duration(*rsl.KeepAlivePeriod)) +} + +func (rsl *riemannListener) removeConnection(c net.Conn) { + rsl.connectionsMtx.Lock() + delete(rsl.connections, c.RemoteAddr().String()) + rsl.connectionsMtx.Unlock() +} + +//Utilities + +/* +readMessages will read Riemann messages in binary format +from the TCP connection. 
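+
+A minimal sketch of the framing, inferred from the read path below: the
+client first sends a 4-byte big-endian length header and then the
+protobuf-encoded riemangoProto.Msg payload, e.g.
+
+	_ = binary.Write(conn, binary.BigEndian, uint32(len(payload)))
+	_, _ = conn.Write(payload) // protobuf-encoded Msg
+
+and the listener replies with a length-prefixed Msg whose Ok flag is set.
+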
The size of byte array p will depend on the size +of the Riemann message as sent by the client +*/ +func readMessages(r io.Reader, p []byte) error { + for len(p) > 0 { + n, err := r.Read(p) + p = p[n:] + if err != nil { + return err + } + } + return nil +} + +func (rsl *riemannListener) read(conn net.Conn) { + defer rsl.removeConnection(conn) + defer conn.Close() + var err error + + for { + if rsl.ReadTimeout != nil && *rsl.ReadTimeout > 0 { + if err := conn.SetDeadline(time.Now().Add(time.Duration(*rsl.ReadTimeout))); err != nil { + rsl.Log.Warnf("Setting deadline failed: %v", err) + } + } + + messagePb := &riemangoProto.Msg{} + var header uint32 + // First obtain the size of the riemann event from client and acknowledge + if err = binary.Read(conn, binary.BigEndian, &header); err != nil { + if err.Error() != "EOF" { + rsl.Log.Debugf("Failed to read header") + rsl.riemannReturnErrorResponse(conn, err.Error()) + return + } + return + } + data := make([]byte, header) + + if err = readMessages(conn, data); err != nil { + rsl.Log.Debugf("Failed to read body: %s", err.Error()) + rsl.riemannReturnErrorResponse(conn, "Failed to read body") + return + } + if err = proto.Unmarshal(data, messagePb); err != nil { + rsl.Log.Debugf("Failed to unmarshal: %s", err.Error()) + rsl.riemannReturnErrorResponse(conn, "Failed to unmarshal") + return + } + riemannEvents := riemanngo.ProtocolBuffersToEvents(messagePb.Events) + + for _, m := range riemannEvents { + if m.Service == "" { + rsl.riemannReturnErrorResponse(conn, "No Service Name") + return + } + tags := make(map[string]string) + fieldValues := map[string]interface{}{} + for _, tag := range m.Tags { + tags[strings.ReplaceAll(tag, " ", "_")] = tag + } + tags["Host"] = m.Host + tags["Description"] = m.Description + tags["State"] = m.State + fieldValues["Metric"] = m.Metric + fieldValues["TTL"] = m.TTL.Seconds() + singleMetric := metric.New(m.Service, tags, fieldValues, m.Time, telegraf.Untyped) + rsl.AddMetric(singleMetric) + } + rsl.riemannReturnResponse(conn) + } +} + +func (rsl *riemannListener) riemannReturnResponse(conn net.Conn) { + t := true + message := new(riemangoProto.Msg) + message.Ok = &t + returnData, err := proto.Marshal(message) + if err != nil { + rsl.Log.Errorf("The error is: %v", err) + return + } + b := new(bytes.Buffer) + if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil { + rsl.Log.Errorf("The error is: %v", err) + } + // send the msg length + if _, err = conn.Write(b.Bytes()); err != nil { + rsl.Log.Errorf("The error is: %v", err) + } + if _, err = conn.Write(returnData); err != nil { + rsl.Log.Errorf("The error is: %v", err) + } +} + +func (rsl *riemannListener) riemannReturnErrorResponse(conn net.Conn, errorMessage string) { + t := false + message := new(riemangoProto.Msg) + message.Ok = &t + message.Error = &errorMessage + returnData, err := proto.Marshal(message) + if err != nil { + rsl.Log.Errorf("The error is: %v", err) + return + } + b := new(bytes.Buffer) + if err = binary.Write(b, binary.BigEndian, uint32(len(returnData))); err != nil { + rsl.Log.Errorf("The error is: %v", err) + } + // send the msg length + if _, err = conn.Write(b.Bytes()); err != nil { + rsl.Log.Errorf("The error is: %v", err) + } + if _, err = conn.Write(returnData); err != nil { + rsl.Log.Errorf("The error is: %v", err) + } +} + +func (*RiemannSocketListener) SampleConfig() string { + return sampleConfig +} + +func (rsl *RiemannSocketListener) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (rsl 
*RiemannSocketListener) Start(acc telegraf.Accumulator) error { + ctx, cancelFunc := context.WithCancel(context.Background()) + go rsl.processOsSignals(cancelFunc) + rsl.Accumulator = acc + if rsl.ServiceAddress == "" { + rsl.Log.Warnf("Using default service_address tcp://:5555") + rsl.ServiceAddress = "tcp://:5555" + } + spl := strings.SplitN(rsl.ServiceAddress, "://", 2) + if len(spl) != 2 { + return fmt.Errorf("invalid service address: %s", rsl.ServiceAddress) + } + + protocol := spl[0] + addr := spl[1] + + switch protocol { + case "tcp", "tcp4", "tcp6": + tlsCfg, err := rsl.ServerConfig.TLSConfig() + if err != nil { + return err + } + + var l net.Listener + if tlsCfg == nil { + l, err = net.Listen(protocol, addr) + } else { + l, err = tls.Listen(protocol, addr, tlsCfg) + } + if err != nil { + return err + } + + rsl.Log.Infof("Listening on %s://%s", protocol, l.Addr()) + + rsl := &riemannListener{ + Listener: l, + RiemannSocketListener: rsl, + sockType: spl[0], + } + + rsl.wg = sync.WaitGroup{} + rsl.wg.Add(1) + go func() { + defer rsl.wg.Done() + rsl.listen(ctx) + }() + default: + return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, rsl.ServiceAddress) + } + + return nil +} + +// Handle cancellations from the process +func (rsl *RiemannSocketListener) processOsSignals(cancelFunc context.CancelFunc) { + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + for { + sig := <-signalChan + if sig == os.Interrupt { + rsl.Log.Warn("Signal SIGINT is received, probably due to `Ctrl-C`, exiting...") + cancelFunc() + return + } + } +} + +func (rsl *RiemannSocketListener) Stop() { + rsl.wg.Done() + rsl.wg.Wait() +} + +func newRiemannSocketListener() *RiemannSocketListener { + return &RiemannSocketListener{} +} + +func init() { + inputs.Add("riemann_listener", func() telegraf.Input { return newRiemannSocketListener() }) +} diff --git a/plugins/inputs/riemann_listener/riemann_listener_test.go b/plugins/inputs/riemann_listener/riemann_listener_test.go new file mode 100644 index 0000000000000..3f87944610312 --- /dev/null +++ b/plugins/inputs/riemann_listener/riemann_listener_test.go @@ -0,0 +1,53 @@ +package riemann_listener + +import ( + "log" + "testing" + "time" + + riemanngo "github.com/riemann/riemann-go-client" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" +) + +func TestSocketListener_tcp(t *testing.T) { + log.Println("Entering") + + sl := newRiemannSocketListener() + sl.Log = testutil.Logger{} + sl.ServiceAddress = "tcp://127.0.0.1:5555" + sl.ReadBufferSize = config.Size(1024) + + acc := &testutil.Accumulator{} + err := sl.Start(acc) + require.NoError(t, err) + defer sl.Stop() + + testStats(t) + testMissingService(t) +} + +func testStats(t *testing.T) { + c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) + err := c.Connect() + require.NoError(t, err) + defer c.Close() + result, err := riemanngo.SendEvent(c, &riemanngo.Event{ + Service: "hello", + }) + require.NoError(t, err) + require.Equal(t, result.GetOk(), true) +} + +func testMissingService(t *testing.T) { + c := riemanngo.NewTCPClient("127.0.0.1:5555", 5*time.Second) + err := c.Connect() + require.NoError(t, err) + defer c.Close() + result, err := riemanngo.SendEvent(c, &riemanngo.Event{}) + require.Equal(t, false, result.GetOk()) + require.Equal(t, "No Service Name", result.GetError()) + require.NoError(t, err) +} diff --git a/plugins/inputs/riemann_listener/sample.conf 
b/plugins/inputs/riemann_listener/sample.conf new file mode 100644 index 0000000000000..8c10ac06c64a8 --- /dev/null +++ b/plugins/inputs/riemann_listener/sample.conf @@ -0,0 +1,27 @@ +# Riemann protobuf listener +[[inputs.riemann_listener]] + ## URL to listen on + ## Default is "tcp://:5555" + # service_address = "tcp://:8094" + # service_address = "tcp://127.0.0.1:http" + # service_address = "tcp4://:8094" + # service_address = "tcp6://:8094" + # service_address = "tcp6://[2001:db8::1]:8094" + + ## Maximum number of concurrent connections. + ## 0 (default) is unlimited. + # max_connections = 1024 + ## Read timeout. + ## 0 (default) is unlimited. + # read_timeout = "30s" + ## Optional TLS configuration. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Enables client authentication if set. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + ## Maximum socket buffer size (in bytes when no unit specified). + # read_buffer_size = "64KiB" + ## Period between keep alive probes. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" diff --git a/plugins/inputs/salesforce/README.md b/plugins/inputs/salesforce/README.md index 6883f3a90b85f..6f904b829575c 100644 --- a/plugins/inputs/salesforce/README.md +++ b/plugins/inputs/salesforce/README.md @@ -1,25 +1,35 @@ # Salesforce Input Plugin -The Salesforce plugin gathers metrics about the limits in your Salesforce organization and the remaining usage. -It fetches its data from the [limits endpoint](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_limits.htm) of Salesforce's REST API. +The Salesforce plugin gathers metrics about the limits in your Salesforce +organization and the remaining usage. It fetches its data from the [limits +endpoint][limits] of Salesforce's REST API. -### Configuration: +[limits]: https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_limits.htm -```toml -# Gather Metrics about Salesforce limits and remaining usage +## Configuration + +```toml @sample.conf +# Read API usage and limits for a Salesforce organisation [[inputs.salesforce]] + ## specify your credentials + ## username = "your_username" password = "your_password" - ## (Optional) security token - security_token = "your_security_token" - ## (Optional) environment type (sandbox or production) + ## + ## (optional) security token + # security_token = "your_security_token" + ## + ## (optional) environment type (sandbox or production) ## default is: production + ## # environment = "production" - ## (Optional) API version (default: "39.0") + ## + ## (optional) API version (default: "39.0") + ## # version = "39.0" ``` -### Measurements & Fields: +## Metrics Salesforce provide one measurement named "salesforce". Each entry is converted to snake\_case and 2 fields are created. @@ -28,20 +38,19 @@ Each entry is converted to snake\_case and 2 fields are created. - \_remaining represents the usage remaining before hitting the limit threshold - salesforce - - \_max (int) - - \_remaining (int) - - (...) + - \_max (int) + - \_remaining (int) + - (...) 
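+
+As an illustration (values taken from the example output below), assuming the
+limits endpoint returns an entry such as
+
+```json
+"DailyApiRequests": { "Max": 5000000, "Remaining": 4999998 }
+```
+
+the plugin emits the fields `daily_api_requests_max=5000000i` and
+`daily_api_requests_remaining=4999998i` on the `salesforce` measurement.
+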
-### Tags: +### Tags - All measurements have the following tags: - - host - - organization_id (t18 char organisation ID) - + - host + - organization_id (t18 char organisation ID) -### Example Output: +## Example Output -``` +```sh $./telegraf --config telegraf.conf --input-filter salesforce --test salesforce,organization_id=XXXXXXXXXXXXXXXXXX,host=xxxxx.salesforce.com daily_workflow_emails_max=546000i,hourly_time_based_workflow_max=50i,daily_async_apex_executions_remaining=250000i,daily_durable_streaming_api_events_remaining=1000000i,streaming_api_concurrent_clients_remaining=2000i,daily_bulk_api_requests_remaining=10000i,hourly_sync_report_runs_remaining=500i,daily_api_requests_max=5000000i,data_storage_mb_remaining=1073i,file_storage_mb_remaining=1069i,daily_generic_streaming_api_events_remaining=10000i,hourly_async_report_runs_remaining=1200i,hourly_time_based_workflow_remaining=50i,daily_streaming_api_events_remaining=1000000i,single_email_max=5000i,hourly_dashboard_refreshes_remaining=200i,streaming_api_concurrent_clients_max=2000i,daily_durable_generic_streaming_api_events_remaining=1000000i,daily_api_requests_remaining=4999998i,hourly_dashboard_results_max=5000i,hourly_async_report_runs_max=1200i,daily_durable_generic_streaming_api_events_max=1000000i,hourly_dashboard_results_remaining=5000i,concurrent_sync_report_runs_max=20i,durable_streaming_api_concurrent_clients_remaining=2000i,daily_workflow_emails_remaining=546000i,hourly_dashboard_refreshes_max=200i,daily_streaming_api_events_max=1000000i,hourly_sync_report_runs_max=500i,hourly_o_data_callout_max=10000i,mass_email_max=5000i,mass_email_remaining=5000i,single_email_remaining=5000i,hourly_dashboard_statuses_max=999999999i,concurrent_async_get_report_instances_max=200i,daily_durable_streaming_api_events_max=1000000i,daily_generic_streaming_api_events_max=10000i,hourly_o_data_callout_remaining=10000i,concurrent_sync_report_runs_remaining=20i,daily_bulk_api_requests_max=10000i,data_storage_mb_max=1073i,hourly_dashboard_statuses_remaining=999999999i,concurrent_async_get_report_instances_remaining=200i,daily_async_apex_executions_max=250000i,durable_streaming_api_concurrent_clients_max=2000i,file_storage_mb_max=1073i 1501565661000000000 diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index b66266d3f17d2..ec3d82c32f1c5 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -1,12 +1,13 @@ +//go:generate ../../../tools/readme_config_includer/generator package salesforce import ( + _ "embed" "encoding/json" "encoding/xml" "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -17,24 +18,9 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -var sampleConfig = ` - ## specify your credentials - ## - username = "your_username" - password = "your_password" - ## - ## (optional) security token - # security_token = "your_security_token" - ## - ## (optional) environment type (sandbox or production) - ## default is: production - ## - # environment = "production" - ## - ## (optional) API version (default: "39.0") - ## - # version = "39.0" -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string type limit struct { Max int @@ -62,11 +48,11 @@ const defaultEnvironment = "production" // returns a new Salesforce plugin instance func NewSalesforce() *Salesforce { tr := &http.Transport{ - ResponseHeaderTimeout: time.Duration(5 * time.Second), + ResponseHeaderTimeout: 5 * time.Second, } client := &http.Client{ Transport: tr, - Timeout: time.Duration(10 * time.Second), + Timeout: 10 * time.Second, } return &Salesforce{ client: client, @@ -74,14 +60,10 @@ func NewSalesforce() *Salesforce { Environment: defaultEnvironment} } -func (s *Salesforce) SampleConfig() string { +func (*Salesforce) SampleConfig() string { return sampleConfig } -func (s *Salesforce) Description() string { - return "Read API usage and limits for a Salesforce organisation" -} - // Reads limits values from Salesforce API func (s *Salesforce) Gather(acc telegraf.Accumulator) error { limits, err := s.fetchLimits() @@ -147,7 +129,7 @@ func (s *Salesforce) fetchLimits() (limits, error) { } if resp.StatusCode != http.StatusOK { - return l, fmt.Errorf("Salesforce responded with unexpected status code %d", resp.StatusCode) + return l, fmt.Errorf("salesforce responded with unexpected status code %d", resp.StatusCode) } l = limits{} @@ -203,11 +185,11 @@ func (s *Salesforce) login() error { defer resp.Body.Close() if resp.StatusCode != http.StatusOK { // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. - body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 200)) return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body) } - respBody, err := ioutil.ReadAll(resp.Body) + respBody, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/plugins/inputs/salesforce/salesforce_test.go b/plugins/inputs/salesforce/salesforce_test.go index 288cc0f40af79..3d26d87dda964 100644 --- a/plugins/inputs/salesforce/salesforce_test.go +++ b/plugins/inputs/salesforce/salesforce_test.go @@ -14,7 +14,7 @@ import ( func Test_Gather(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") - _, _ = w.Write([]byte(testJson)) + _, _ = w.Write([]byte(testJSON)) })) defer fakeServer.Close() @@ -35,7 +35,7 @@ func Test_Gather(t *testing.T) { require.Len(t, m.Tags, 2) } -var testJson = `{ +var testJSON = `{ "ConcurrentAsyncGetReportInstances" : { "Max" : 200, "Remaining" : 200 diff --git a/plugins/inputs/salesforce/sample.conf b/plugins/inputs/salesforce/sample.conf new file mode 100644 index 0000000000000..6722dac6d3fa4 --- /dev/null +++ b/plugins/inputs/salesforce/sample.conf @@ -0,0 +1,18 @@ +# Read API usage and limits for a Salesforce organisation +[[inputs.salesforce]] + ## specify your credentials + ## + username = "your_username" + password = "your_password" + ## + ## (optional) security token + # security_token = "your_security_token" + ## + ## (optional) environment type (sandbox or production) + ## default is: production + ## + # environment = "production" + ## + ## (optional) API version (default: "39.0") + ## + # version = "39.0" diff --git a/plugins/inputs/sensors/README.md b/plugins/inputs/sensors/README.md index d9bcfe2e4544d..d33516fa8fc48 100644 --- a/plugins/inputs/sensors/README.md +++ b/plugins/inputs/sensors/README.md @@ -1,12 +1,14 @@ # LM Sensors Input Plugin -Collect [lm-sensors](https://en.wikipedia.org/wiki/Lm_sensors) metrics - requires the 
lm-sensors -package installed. +Collect [lm-sensors](https://en.wikipedia.org/wiki/Lm_sensors) metrics - +requires the lm-sensors package installed. -This plugin collects sensor metrics with the `sensors` executable from the lm-sensor package. +This plugin collects sensor metrics with the `sensors` executable from the +lm-sensor package. -### Configuration: -```toml +## Configuration + +```toml @sample.conf # Monitor sensors, requires lm-sensors package [[inputs.sensors]] ## Remove numbers from field names. @@ -17,19 +19,21 @@ This plugin collects sensor metrics with the `sensors` executable from the lm-se # timeout = "5s" ``` -### Measurements & Fields: +## Metrics + Fields are created dynamically depending on the sensors. All fields are float. -### Tags: +### Tags - All measurements have the following tags: - - chip - - feature + - chip + - feature -### Example Output: +## Example Output -#### Default -``` +### Default + +```shell $ telegraf --config telegraf.conf --input-filter sensors --test * Plugin: sensors, Collection 1 > sensors,chip=power_meter-acpi-0,feature=power1 power_average=0,power_average_interval=300 1466751326000000000 @@ -39,8 +43,9 @@ $ telegraf --config telegraf.conf --input-filter sensors --test > sensors,chip=k10temp-pci-00db,feature=temp1 temp_crit=70,temp_crit_hyst=65,temp_input=29.5,temp_max=70 1466751326000000000 ``` -#### With remove_numbers=false -``` +### With remove_numbers=false + +```shell * Plugin: sensors, Collection 1 > sensors,chip=power_meter-acpi-0,feature=power1 power1_average=0,power1_average_interval=300 1466753424000000000 > sensors,chip=k10temp-pci-00c3,feature=temp1 temp1_crit=70,temp1_crit_hyst=65,temp1_input=29.125,temp1_max=70 1466753424000000000 diff --git a/plugins/inputs/sensors/sample.conf b/plugins/inputs/sensors/sample.conf new file mode 100644 index 0000000000000..7892452fdc692 --- /dev/null +++ b/plugins/inputs/sensors/sample.conf @@ -0,0 +1,8 @@ +# Monitor sensors, requires lm-sensors package +[[inputs.sensors]] + ## Remove numbers from field names. + ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. + # remove_numbers = true + + ## Timeout is the maximum amount of time that the sensors command can run. + # timeout = "5s" diff --git a/plugins/inputs/sensors/sensors.go b/plugins/inputs/sensors/sensors.go index 1df88466be2e9..aeddecaa071b3 100644 --- a/plugins/inputs/sensors/sensors.go +++ b/plugins/inputs/sensors/sensors.go @@ -1,8 +1,11 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build linux // +build linux package sensors import ( + _ "embed" "errors" "fmt" "os/exec" @@ -12,36 +15,49 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + var ( execCommand = exec.Command // execCommand is used to mock commands in tests. 
numberRegp = regexp.MustCompile("[0-9]+") - defaultTimeout = internal.Duration{Duration: 5 * time.Second} + defaultTimeout = config.Duration(5 * time.Second) ) type Sensors struct { - RemoveNumbers bool `toml:"remove_numbers"` - Timeout internal.Duration `toml:"timeout"` + RemoveNumbers bool `toml:"remove_numbers"` + Timeout config.Duration `toml:"timeout"` path string } -func (*Sensors) Description() string { - return "Monitor sensors, requires lm-sensors package" -} +const cmd = "sensors" func (*Sensors) SampleConfig() string { - return ` - ## Remove numbers from field names. - ## If true, a field name like 'temp1_input' will be changed to 'temp_input'. - # remove_numbers = true + return sampleConfig +} - ## Timeout is the maximum amount of time that the sensors command can run. - # timeout = "5s" -` +func (s *Sensors) Init() error { + // Set defaults + if s.path == "" { + path, err := exec.LookPath(cmd) + if err != nil { + return fmt.Errorf("looking up %q failed: %v", cmd, err) + } + s.path = path + } + // Check parameters + if s.path == "" { + return fmt.Errorf("no path specified for %q", cmd) + } + + return nil } func (s *Sensors) Gather(acc telegraf.Accumulator) error { @@ -60,7 +76,7 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error { fields := map[string]interface{}{} chip := "" cmd := execCommand(s.path, "-A", "-u") - out, err := internal.StdOutputTimeout(cmd, s.Timeout.Duration) + out, err := internal.StdOutputTimeout(cmd, time.Duration(s.Timeout)) if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } @@ -106,19 +122,14 @@ func (s *Sensors) parse(acc telegraf.Accumulator) error { // snake converts string to snake case func snake(input string) string { - return strings.ToLower(strings.Replace(strings.TrimSpace(input), " ", "_", -1)) + return strings.ToLower(strings.ReplaceAll(strings.TrimSpace(input), " ", "_")) } func init() { - s := Sensors{ - RemoveNumbers: true, - Timeout: defaultTimeout, - } - path, _ := exec.LookPath("sensors") - if len(path) > 0 { - s.path = path - } inputs.Add("sensors", func() telegraf.Input { - return &s + return &Sensors{ + RemoveNumbers: true, + Timeout: defaultTimeout, + } }) } diff --git a/plugins/inputs/sensors/sensors_notlinux.go b/plugins/inputs/sensors/sensors_notlinux.go index 62a6211598f4e..424e96181b46b 100644 --- a/plugins/inputs/sensors/sensors_notlinux.go +++ b/plugins/inputs/sensors/sensors_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package sensors diff --git a/plugins/inputs/sensors/sensors_test.go b/plugins/inputs/sensors/sensors_test.go index 2a24fa6f9212f..ac2bf498ed8e1 100644 --- a/plugins/inputs/sensors/sensors_test.go +++ b/plugins/inputs/sensors/sensors_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sensors @@ -8,6 +9,8 @@ import ( "os/exec" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" ) @@ -22,10 +25,8 @@ func TestGatherDefault(t *testing.T) { defer func() { execCommand = exec.Command }() var acc testutil.Accumulator - err := s.Gather(&acc) - if err != nil { - t.Fatal(err) - } + require.NoError(t, s.Init()) + require.NoError(t, s.Gather(&acc)) var tests = []struct { tags map[string]string @@ -163,10 +164,8 @@ func TestGatherNotRemoveNumbers(t *testing.T) { defer func() { execCommand = exec.Command }() var acc testutil.Accumulator - err := s.Gather(&acc) - if err != nil { - t.Fatal(err) - } + require.NoError(t, s.Init()) + require.NoError(t, s.Gather(&acc)) var tests = 
[]struct { tags map[string]string @@ -306,7 +305,7 @@ func fakeExecCommand(command string, args ...string) *exec.Cmd { // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- chrony tracking // it returns below mockData. -func TestHelperProcess(t *testing.T) { +func TestHelperProcess(_ *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } @@ -370,14 +369,17 @@ Vcore Voltage: // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, args := args[3], args[4:] + cmd, _ := args[3], args[4:] if cmd == "sensors" { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, mockData) } else { + //nolint:errcheck,revive fmt.Fprint(os.Stdout, "command not found") + //nolint:revive // error code is important for this "test" os.Exit(1) - } + //nolint:revive // error code is important for this "test" os.Exit(0) } diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md index 66d556e17c694..2371590fbbaca 100644 --- a/plugins/inputs/sflow/README.md +++ b/plugins/inputs/sflow/README.md @@ -6,7 +6,7 @@ accordance with the specification from [sflow.org](https://sflow.org/). Currently only Flow Samples of Ethernet / IPv4 & IPv4 TCP & UDP headers are turned into metrics. Counters and other header samples are ignored. -#### Series Cardinality Warning +## Series Cardinality Warning This plugin may produce a high number of series which, when not controlled for, will cause high load on your database. Use the following techniques to @@ -14,15 +14,14 @@ avoid cardinality issues: - Use [metric filtering][] options to exclude unneeded measurements and tags. - Write to a database with an appropriate [retention policy][]. -- Limit series cardinality in your database using the - [max-series-per-database][] and [max-values-per-tag][] settings. - Consider using the [Time Series Index][tsi]. - Monitor your databases [series cardinality][]. - Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. -### Configuration +## Configuration -```toml +```toml @sample.conf +# SFlow V5 Protocol Listener [[inputs.sflow]] ## Address to listen for sFlow packets. ## example: service_address = "udp://:6343" @@ -35,7 +34,7 @@ avoid cardinality issues: # read_buffer_size = "" ``` -### Metrics +## Metrics - sflow - tags: @@ -83,38 +82,38 @@ avoid cardinality issues: - ip_flags (integer, ip_ver field of IPv4 structures) - tcp_flags (integer, TCP flags of TCP IP header (IPv4 or IPv6)) -### Troubleshooting +## Troubleshooting The [sflowtool][] utility can be used to print sFlow packets, and compared against the metrics produced by Telegraf. -``` + +```sh sflowtool -p 6343 ``` If opening an issue, in addition to the output of sflowtool it will also be helpful to collect a packet capture. 
Adjust the interface, host and port as needed: -``` -$ sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 + +```sh +sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343 ``` [sflowtool]: https://github.com/sflow/sflowtool -### Example Output -``` +## Example Output + +```shell sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=40042,ether_type=IPv4,header_protocol=ETHERNET-ISO88023,input_ifindex=6,ip_dscp=27,ip_ecn=0,output_ifindex=1073741823,source_id_index=3,source_id_type=0,src_ip=10.0.0.1,src_mac=ff:ff:ff:ff:ff:ff,src_port=443 bytes=1570i,drops=0i,frame_length=157i,header_length=128i,ip_flags=2i,ip_fragment_offset=0i,ip_total_length=139i,ip_ttl=42i,sampling_rate=10i,tcp_header_length=0i,tcp_urgent_pointer=0i,tcp_window_size=14i 1584473704793580447 ``` -### Reference Documentation +## Reference Documentation -This sflow implementation was built from the reference document +This sflow implementation was built from the reference document [sflow.org/sflow_version_5.txt](sflow_version_5) - [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ -[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 -[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 [tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality [influx-docs]: https://docs.influxdata.com/influxdb/latest/ diff --git a/plugins/inputs/sflow/metricencoder.go b/plugins/inputs/sflow/metricencoder.go index ffc9d8e023849..2dc1fb122b096 100644 --- a/plugins/inputs/sflow/metricencoder.go +++ b/plugins/inputs/sflow/metricencoder.go @@ -34,10 +34,7 @@ func makeMetrics(p *V5Format) ([]telegraf.Metric, error) { for k, v := range fields { fields2[k] = v } - m, err := metric.New("sflow", tags2, fields2, now) - if err != nil { - return nil, err - } + m := metric.New("sflow", tags2, fields2, now) metrics = append(metrics, m) } } diff --git a/plugins/inputs/sflow/packetdecoder_test.go b/plugins/inputs/sflow/packetdecoder_test.go index f078eaf310e8b..bb318a86a1932 100644 --- a/plugins/inputs/sflow/packetdecoder_test.go +++ b/plugins/inputs/sflow/packetdecoder_test.go @@ -40,7 +40,8 @@ func BenchmarkUDPHeader(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - dc.decodeUDPHeader(octets) + _, err := dc.decodeUDPHeader(octets) + require.NoError(b, err) } } diff --git a/plugins/inputs/sflow/sample.conf b/plugins/inputs/sflow/sample.conf new file mode 100644 index 0000000000000..8935fa3f2966e --- /dev/null +++ b/plugins/inputs/sflow/sample.conf @@ -0,0 +1,11 @@ +# SFlow V5 Protocol Listener +[[inputs.sflow]] + ## Address to listen for sFlow packets. + ## example: service_address = "udp://:6343" + ## service_address = "udp4://:6343" + ## service_address = "udp6://:6343" + service_address = "udp://:6343" + + ## Set the size of the operating system's receive buffer. 
+ ## example: read_buffer_size = "64KiB" + # read_buffer_size = "" diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go index 2e3fbc0cf73f5..52043a1390c17 100644 --- a/plugins/inputs/sflow/sflow.go +++ b/plugins/inputs/sflow/sflow.go @@ -1,8 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package sflow import ( "bytes" - "context" + _ "embed" "fmt" "io" "net" @@ -11,46 +12,31 @@ import ( "sync" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" ) -const sampleConfig = ` - ## Address to listen for sFlow packets. - ## example: service_address = "udp://:6343" - ## service_address = "udp4://:6343" - ## service_address = "udp6://:6343" - service_address = "udp://:6343" - - ## Set the size of the operating system's receive buffer. - ## example: read_buffer_size = "64KiB" - # read_buffer_size = "" -` +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string const ( maxPacketSize = 64 * 1024 ) type SFlow struct { - ServiceAddress string `toml:"service_address"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` + ServiceAddress string `toml:"service_address"` + ReadBufferSize config.Size `toml:"read_buffer_size"` Log telegraf.Logger `toml:"-"` addr net.Addr decoder *PacketDecoder closer io.Closer - cancel context.CancelFunc wg sync.WaitGroup } -// Description answers a description of this input plugin -func (s *SFlow) Description() string { - return "SFlow V5 Protocol Listener" -} - -// SampleConfig answers a sample configuration -func (s *SFlow) SampleConfig() string { +func (*SFlow) SampleConfig() string { return sampleConfig } @@ -85,8 +71,10 @@ func (s *SFlow) Start(acc telegraf.Accumulator) error { s.closer = conn s.addr = conn.LocalAddr() - if s.ReadBufferSize.Size > 0 { - conn.SetReadBuffer(int(s.ReadBufferSize.Size)) + if s.ReadBufferSize > 0 { + if err := conn.SetReadBuffer(int(s.ReadBufferSize)); err != nil { + return err + } } s.Log.Infof("Listening on %s://%s", s.addr.Network(), s.addr.String()) @@ -107,6 +95,8 @@ func (s *SFlow) Gather(_ telegraf.Accumulator) error { func (s *SFlow) Stop() { if s.closer != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.closer.Close() } s.wg.Wait() @@ -131,7 +121,6 @@ func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) { } func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) { - if err := s.decoder.Decode(bytes.NewBuffer(buf)); err != nil { acc.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) } diff --git a/plugins/inputs/sflow/sflow_test.go b/plugins/inputs/sflow/sflow_test.go index 2df56c2ae97cd..6129c2d95c079 100644 --- a/plugins/inputs/sflow/sflow_test.go +++ b/plugins/inputs/sflow/sflow_test.go @@ -29,7 +29,8 @@ func TestSFlow(t *testing.T) { packetBytes, err := 
hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) acc.Wait(2) @@ -129,7 +130,8 @@ func BenchmarkSFlow(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - client.Write(packetBytes) + _, err := client.Write(packetBytes) + require.NoError(b, err) acc.Wait(2) } } diff --git a/plugins/inputs/sflow/types.go b/plugins/inputs/sflow/types.go index a48857803b40d..7efd59aff0c71 100644 --- a/plugins/inputs/sflow/types.go +++ b/plugins/inputs/sflow/types.go @@ -6,13 +6,8 @@ import ( ) const ( - AddressTypeIPv6 uint32 = 2 // sflow_version_5.txt line: 1384 - AddressTypeIPv4 uint32 = 1 // sflow_version_5.txt line: 1383 - IPProtocolTCP uint8 = 6 IPProtocolUDP uint8 = 17 - - metricName = "sflow" ) var ETypeMap = map[uint16]string{ @@ -20,11 +15,6 @@ var ETypeMap = map[uint16]string{ 0x86DD: "IPv6", } -var IPvMap = map[uint32]string{ - 1: "IPV4", // sflow_version_5.txt line: 1383 - 2: "IPV6", // sflow_version_5.txt line: 1384 -} - type ContainsMetricData interface { GetTags() map[string]string GetFields() map[string]interface{} @@ -118,12 +108,22 @@ type RawPacketHeaderFlowData struct { } func (h RawPacketHeaderFlowData) GetTags() map[string]string { - t := h.Header.GetTags() + var t map[string]string + if h.Header != nil { + t = h.Header.GetTags() + } else { + t = map[string]string{} + } t["header_protocol"] = HeaderProtocolMap[h.HeaderProtocol] return t } func (h RawPacketHeaderFlowData) GetFields() map[string]interface{} { - f := h.Header.GetFields() + var f map[string]interface{} + if h.Header != nil { + f = h.Header.GetFields() + } else { + f = map[string]interface{}{} + } f["bytes"] = h.Bytes f["frame_length"] = h.FrameLength f["header_length"] = h.HeaderLength @@ -143,14 +143,22 @@ type EthHeader struct { } func (h EthHeader) GetTags() map[string]string { - t := h.IPHeader.GetTags() + var t map[string]string + if h.IPHeader != nil { + t = h.IPHeader.GetTags() + } else { + t = map[string]string{} + } t["src_mac"] = net.HardwareAddr(h.SourceMAC[:]).String() t["dst_mac"] = net.HardwareAddr(h.DestinationMAC[:]).String() t["ether_type"] = h.EtherType return t } func (h EthHeader) GetFields() map[string]interface{} { - return h.IPHeader.GetFields() + if h.IPHeader != nil { + return h.IPHeader.GetFields() + } + return map[string]interface{}{} } type ProtocolHeader ContainsMetricData diff --git a/plugins/inputs/sflow/types_test.go b/plugins/inputs/sflow/types_test.go new file mode 100644 index 0000000000000..d59ac0ae23941 --- /dev/null +++ b/plugins/inputs/sflow/types_test.go @@ -0,0 +1,43 @@ +package sflow + 
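+// These tests exercise the nil-header code paths introduced in types.go:
+// GetTags and GetFields must return empty, non-nil maps when no encapsulated
+// Header or IPHeader was decoded, instead of dereferencing a nil pointer.
+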
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestRawPacketHeaderFlowData(t *testing.T) {
+	h := RawPacketHeaderFlowData{
+		HeaderProtocol: HeaderProtocolTypeEthernetISO88023,
+		FrameLength:    64,
+		Bytes:          64,
+		StrippedOctets: 0,
+		HeaderLength:   0,
+		Header:         nil,
+	}
+	tags := h.GetTags()
+	fields := h.GetFields()
+
+	require.NotNil(t, fields)
+	require.NotNil(t, tags)
+	require.Contains(t, tags, "header_protocol")
+	require.Equal(t, 1, len(tags))
+}
+
+// process a raw ethernet packet without any encapsulated protocol
+func TestEthHeader(t *testing.T) {
+	h := EthHeader{
+		DestinationMAC:        [6]byte{0xca, 0xff, 0xee, 0xff, 0xe, 0x0},
+		SourceMAC:             [6]byte{0xde, 0xad, 0xbe, 0xef, 0x0, 0x0},
+		TagProtocolIdentifier: 0x88B5, // IEEE Std 802 - Local Experimental Ethertype
+		TagControlInformation: 0,
+		EtherTypeCode:         0,
+		EtherType:             "",
+		IPHeader:              nil,
+	}
+	tags := h.GetTags()
+	fields := h.GetFields()
+
+	require.NotNil(t, fields)
+	require.NotNil(t, tags)
+}
diff --git a/plugins/inputs/sflow_a10/README.md b/plugins/inputs/sflow_a10/README.md
index 51db2e657df57..dca4fb2f4460c 100644
--- a/plugins/inputs/sflow_a10/README.md
+++ b/plugins/inputs/sflow_a10/README.md
@@ -1,17 +1,29 @@
-# SFlow_A10 Input Plugin
+# SFlowA10 Input Plugin

-The SFlow_A10 Input Plugin provides support for acting as an SFlow V5 collector for [A10](https://www.a10networks.com/) appliances,
- in accordance with the specification from [sflow.org](https://sflow.org/).
+The SFlow_A10 Input Plugin provides support for acting as an SFlow V5 collector
+for [A10](https://www.a10networks.com/) appliances, in accordance with the
+specification from [sflow.org](https://sflow.org/).

-It is heavily based (i.e. re-uses a lot of code and techniques) on the [SFlow](../sflow/README.md) plugin. The main difference is that SFlow_A10
- captures ony Counter Samples that are coming from A10 appliance and turns them into telegraf metrics. Flow samples and header samples are ignored.
+It is heavily based (i.e. re-uses a lot of code and techniques) on the
+[SFlow](../sflow/README.md) plugin. The main difference is that SFlow_A10
+captures only Counter Samples coming from the A10 appliance and turns them
+into telegraf metrics. Flow samples and header samples are ignored.

-### How this works
+## How this works

-Plugin starts by reading the XML file with the counter record definitions. Counter records which definition is not included in the XML file are ignored when they arrive at the plugin.
-The way that the plugin works is that it parses incoming counter records from A10. When it discovers counter records tagged 260 (port information) or 271/272 (IPv4/IPv6 information) it parses their sourceID and stores them in memory. When a counter record metric arrives, plugin checks if there is port and ip information for it (i.e. we have gotten 260 and 271 or 272 for the same sourceID). If there is, the metric is sent to telegraf output. If it is not, the metric is discarded.
+The plugin starts by reading the XML file with the counter record definitions.
+Counter records whose definition is not included in the XML file are ignored
+when they arrive at the plugin.

-### Configuration
+The plugin parses incoming counter records from
+A10. When it discovers counter records tagged 260 (port information) or 271/272
+(IPv4/IPv6 information) it parses their sourceID and stores them in memory.
+When a counter record metric arrives, the plugin checks whether there is port and IP
+information for it (i.e.
we have gotten 260 and 271 or 272 for the same
+sourceID). If there is, the metric is sent to the telegraf output.
+If not, the metric is discarded.
+
+## Configuration

```toml
[[inputs.sflow_a10]]
@@ -33,20 +45,21 @@ The way that the plugin works is that it parses incoming counter records from A1
  ignore_zero_values = true
```

-### Metrics
+## Metrics

- sflow_a10
-  - tags:
-  - agent_address
-  - ip_address
-  - port_number
-  - port_range_end
-  - port_type
-  - table_ type
-  - fields:
-  - all counters that are included in the XML file
-
-### Example output
-```
+  - tags:
+    - agent_address
+    - ip_address
+    - port_number
+    - port_range_end
+    - port_type
+    - table_type
+  - fields:
+    - all counters that are included in the XML file
+
+## Example Output
+
+```shell
sflow_a10,agent_address=10.1.0.6,ip_address=10.3.0.39,port_number=0,port_range_end=0,port_type=INVALID,table_type=Zone udp_total_bytes_forwarded_diff=0,src_dst_pair_entry_total_count_diff=0,inbound_packets_dropped_diff=25,tcp_total_bytes_dropped_diff=1932,udp_total_bytes_dropped_diff=0,tcp_connections_created_from_syn_diff=3,tcp_connections_closed_diff=10,outbound_bytes_forwarded_diff=1776,udp_dst_port_total_exceeded_diff=0,src_dst_pair_entry_tcp_count_diff=0,tcp_connections_created_from_ack_diff=6,tcp_total_bytes_received_diff=2418,sflow_external_samples_packed_diff=1,sflow_external_packets_sent_diff=3,inbound_bytes_dropped_diff=1932,udp_total_bytes_received_diff=0,tcp_total_bytes_forwarded_diff=2262 1605280935424500515
```
diff --git a/plugins/inputs/sflow_a10/metricencoder.go b/plugins/inputs/sflow_a10/metricencoder.go
index 3c3937f88755a..c2c3c7dccbdf8 100644
--- a/plugins/inputs/sflow_a10/metricencoder.go
+++ b/plugins/inputs/sflow_a10/metricencoder.go
@@ -26,7 +26,7 @@ func makeMetricsForCounters(p *V5Format, d *PacketDecoder) ([]telegraf.Metric, e
	// this is for packets tagged 293 and 294
	// as per A10, each packet that contains counter block tagged 293 or 294 is just a single sample
-	if !sample.SampleCounterData.NeedsIpAndPort() {
+	if !sample.SampleCounterData.NeedsIPAndPort() {
		if len(sample.SampleCounterData.CounterRecords) != 1 {
			d.Log.Error(" SampleCounterData.CounterRecords with false NeedsIpPort has length != 1")
			continue
@@ -52,11 +52,7 @@ func makeMetricsForCounters(p *V5Format, d *PacketDecoder) ([]telegraf.Metric, e
	}

	if len(counterFields) > 0 {
-		m, err := metric.New("sflow_a10", counterTags, counterFields, now)
-		if err != nil {
-			d.debug(fmt.Sprintf(" error sending new metric to telegraf %s", err))
-			return nil, err
-		}
+		m := metric.New("sflow_a10", counterTags, counterFields, now)
		d.debug(fmt.Sprintf(" sending 293 or 294 metric to telegraf %s", m))

		metrics = append(metrics, m)
@@ -101,11 +97,7 @@ func makeMetricsForCounters(p *V5Format, d *PacketDecoder) ([]telegraf.Metric, e
	}

	if len(counterFields) > 0 {
-		m, err := metric.New("sflow_a10", counterTags, counterFields, now)
-		if err != nil {
-			d.debug(fmt.Sprintf(" error sending new metric to telegraf %s", err))
-			return nil, err
-		}
+		m := metric.New("sflow_a10", counterTags, counterFields, now)

		metrics = append(metrics, m)
	}
diff --git a/plugins/inputs/sflow_a10/packetdecoder.go b/plugins/inputs/sflow_a10/packetdecoder.go
index 2ea0b196f841d..2aa406fca6846 100644
--- a/plugins/inputs/sflow_a10/packetdecoder.go
+++ b/plugins/inputs/sflow_a10/packetdecoder.go
@@ -229,21 +229,21 @@ func (d *PacketDecoder) decodeCounterRecords(r io.Reader, sourceID uint32, agent
		}

		// we're checking if the tag we got exists on our counter record definitions (that were loaded from
the A10 xml file)
-		if _, exists := d.CounterBlocks[uint32(tag)]; !exists {
+		if _, exists := d.CounterBlocks[tag]; !exists {
			d.debug(fmt.Sprintf(" tag %x for sourceID %x NOT found on xml file list. Ignoring counter record", tag, sourceID))
			continue
		}

		// as per A10, each packet of either counter block 293 or 294 is one sample of 293 or 294
		// plus, we are not getting any IP and PORT information
-		cr.NeedsIpAndPort = tag != 293 && tag != 294
+		cr.NeedsIPAndPort = tag != 293 && tag != 294
		cr.IsEthernetCounters = tag == 294

-		d.debug(fmt.Sprintf(" tag %x for sourceID %x needs ip and and port: %t", tag, sourceID, cr.NeedsIpAndPort))
+		d.debug(fmt.Sprintf(" tag %x for sourceID %x needs ip and port: %t", tag, sourceID, cr.NeedsIPAndPort))

		d.debug(fmt.Sprintf(" tag %x for sourceID %x found on xml file list. Gonna decode counter record", tag, sourceID))
-		err := d.decodeCounterRecord(mr, cr, uint32(tag), sourceID)
+		err := d.decodeCounterRecord(mr, cr, tag, sourceID)
		if err != nil {
			return recs, err
		}
@@ -259,8 +259,8 @@ func (d *PacketDecoder) decodeCounterRecord(r io.Reader, cr *CounterRecord, tag
	counterBlock := d.CounterBlocks[tag]

	// reading the header values, we assume they are always 4 uint16
-	var counterOffset uint16 = 0
-	var totalCounterNum uint16 = 0
+	var counterOffset uint16
+	var totalCounterNum uint16
	if len(counterBlock.OffsetHeaders) > 0 {
		if err := read(r, &counterOffset, "counterOffset"); err != nil {
			d.debug(" error reading counterOffset variable")
@@ -488,15 +488,6 @@ func read(r io.Reader, data interface{}, name string) error {
	return errors.Wrapf(err, "failed to read %s", name)
}

-func (d *PacketDecoder) skip(r io.Reader, recordsToSkip uint16) {
-	for i := uint16(0); i < recordsToSkip; i++ {
-		var temp uint64
-		if err := read(r, &temp, "temp"); err != nil {
-			d.debug(fmt.Sprintf("error skipping, at %d, error %s", i, err))
-		}
-	}
-}
-
func createMapKey(sourceID uint32, addr string) string {
	return fmt.Sprintf("%s_%x", addr, sourceID)
}
diff --git a/plugins/inputs/sflow_a10/packetdecoder_test.go b/plugins/inputs/sflow_a10/packetdecoder_test.go
index 52f07f9ade864..b3f866bfa4be9 100644
--- a/plugins/inputs/sflow_a10/packetdecoder_test.go
+++ b/plugins/inputs/sflow_a10/packetdecoder_test.go
@@ -100,7 +100,7 @@ func TestDecodeCounterSample(t *testing.T) {
					"testCounter2": uint64(29),
				},
			},
-			NeedsIpAndPort: true,
+			NeedsIPAndPort: true,
		},
	},
},
@@ -109,7 +109,6 @@
	actual, err := dc.decodeSample(octets, "10.0.1.2")
	require.NoError(t, err)
	require.Equal(t, expected, actual)
-
}

func TestCounterSampleSimple(t *testing.T) {
@@ -167,11 +166,11 @@ func TestDecode271(t *testing.T) {
	dc := NewDecoder()

	expected := []IPDimension{
-		IPDimension{
+		{
			IPAddress:  "10.0.1.3",
			SubnetMask: 30,
		},
-		IPDimension{
+		{
			IPAddress:  "192.168.5.6",
			SubnetMask: 15,
		},
@@ -188,31 +187,31 @@
// read some counters with sourceID X
// make sure metrics from sourceID X are emitted
func TestDecodeA10EndToEnd(t *testing.T) {
-	sflow := SFlow_A10{
+	sflow := SFlowA10{
		Log: tu.Logger{},
	}

	const sourceID = 269839
-	agent_ip := "10.0.9.1"
-	key := createMapKey(sourceID, agent_ip)
+	agentIP := "10.0.9.1"
+	key := createMapKey(sourceID, agentIP)

	// start by reading the XML file with the metric definitions
	c, err := sflow.readA10XMLData([]byte(testXMLStringEndToEnd))
	require.NoError(t, err)

	expected := map[uint32]CounterBlock{
-		217: CounterBlock{
+		217: {
			Tag: 217,
			OffsetHeaders: []HeaderDefinition{
-				HeaderDefinition{
+				{
					FieldName:
"Counter Offset", }, - HeaderDefinition{ + { FieldName: "Total Counter Num", }, - HeaderDefinition{ + { FieldName: "Reserved1", }, - HeaderDefinition{ + { FieldName: "Reserved2", }, }, @@ -298,7 +297,7 @@ func TestDecodeA10EndToEnd(t *testing.T) { 0x3E, 0xFB, // port range end uint16 }) - _, err = dc.decodeSample(octets, agent_ip) + _, err = dc.decodeSample(octets, agentIP) require.NoError(t, err) portValue, portExists := dc.PortMap.Get(key) @@ -336,15 +335,15 @@ func TestDecodeA10EndToEnd(t *testing.T) { 0x0F, // subnet 1 }) - _, err = dc.decodeSample(octets, agent_ip) + _, err = dc.decodeSample(octets, agentIP) require.NoError(t, err) expectedIPAddresses := []IPDimension{ - IPDimension{ + { IPAddress: "10.0.1.3", SubnetMask: 30, }, - IPDimension{ + { IPAddress: "192.168.5.6", SubnetMask: 15, }, @@ -418,7 +417,7 @@ func TestDecodeA10EndToEnd(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA1, // counter metric 1 uint64 }) - s, err := dc.decodeSample(octets, agent_ip) + s, err := dc.decodeSample(octets, agentIP) require.NoError(t, err) require.Equal(t, 2, len(s.SampleCounterData.CounterRecords)) diff --git a/plugins/inputs/sflow_a10/sflow_a10.go b/plugins/inputs/sflow_a10/sflow_a10.go index e6f818a5d28df..d3cce0942ad0e 100644 --- a/plugins/inputs/sflow_a10/sflow_a10.go +++ b/plugins/inputs/sflow_a10/sflow_a10.go @@ -2,7 +2,6 @@ package sflow_a10 import ( "bytes" - "context" "errors" "fmt" "io/ioutil" @@ -13,7 +12,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" ) @@ -42,14 +41,14 @@ const ( defaultAllowPendingMessage = 100000 // UDP_MAX_PACKET_SIZE is the UDP packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure - UDP_MAX_PACKET_SIZE int = 64 * 1024 + UDPMaxPacketSize int = 64 * 1024 ) -type SFlow_A10 struct { - ServiceAddress string `toml:"service_address"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` - A10DefinitionsFile string `toml:"a10_definitions_file"` - IgnoreZeroValues bool `toml:"ignore_zero_values"` +type SFlowA10 struct { + ServiceAddress string `toml:"service_address"` + ReadBufferSize config.Size `toml:"read_buffer_size"` + A10DefinitionsFile string `toml:"a10_definitions_file"` + IgnoreZeroValues bool `toml:"ignore_zero_values"` sync.Mutex @@ -57,7 +56,6 @@ type SFlow_A10 struct { addr net.Addr decoder *PacketDecoder - cancel context.CancelFunc wg sync.WaitGroup acc telegraf.Accumulator @@ -91,16 +89,16 @@ type input struct { } // Description answers a description of this input plugin -func (s *SFlow_A10) Description() string { +func (s *SFlowA10) Description() string { return "SFlow_A10 V5 Protocol Listener" } // SampleConfig answers a sample configuration -func (s *SFlow_A10) SampleConfig() string { +func (s *SFlowA10) SampleConfig() string { return sampleConfig } -func (s *SFlow_A10) Init() error { +func (s *SFlowA10) Init() error { if s.A10DefinitionsFile == "" { return errors.New("XML DefinitionsFile cannot be empty") } @@ -112,7 +110,7 @@ func (s *SFlow_A10) Init() error { return s.initInternal(data) } -func (s *SFlow_A10) initInternal(xmlData []byte) error { +func (s *SFlowA10) initInternal(xmlData []byte) error { s.decoder = NewDecoder() s.decoder.Log = s.Log counterBlocks, err := s.readA10XMLData(xmlData) @@ -126,7 +124,7 @@ func (s *SFlow_A10) initInternal(xmlData []byte) error { } // Start starts this sFlow_A10 listener 
listening on the configured network for sFlow packets -func (s *SFlow_A10) Start(acc telegraf.Accumulator) error { +func (s *SFlowA10) Start(acc telegraf.Accumulator) error { s.decoder.OnPacket(func(p *V5Format) { metrics, err := makeMetricsForCounters(p, s.decoder) if err != nil { @@ -172,8 +170,10 @@ func (s *SFlow_A10) Start(acc telegraf.Accumulator) error { s.addr = conn.LocalAddr() s.UDPlistener = conn - if s.ReadBufferSize.Size > 0 { - conn.SetReadBuffer(int(s.ReadBufferSize.Size)) + if s.ReadBufferSize > 0 { + if err := conn.SetReadBuffer(int(s.ReadBufferSize)); err != nil { + return err + } } s.Log.Infof("Listening on %s://%s", s.addr.Network(), s.addr.String()) @@ -199,16 +199,19 @@ func (s *SFlow_A10) Start(acc telegraf.Accumulator) error { } // Gather is a NOOP for sFlow as it receives, asynchronously, sFlow network packets -func (s *SFlow_A10) Gather(_ telegraf.Accumulator) error { +func (s *SFlowA10) Gather(_ telegraf.Accumulator) error { return nil } -func (s *SFlow_A10) Stop() { +func (s *SFlowA10) Stop() { s.Log.Infof("Stopping the sflow_a10 service") close(s.done) s.Lock() if s.UDPlistener != nil { - s.UDPlistener.Close() + err := s.UDPlistener.Close() + if err != nil { + s.acc.AddError(err) + } } s.Unlock() s.wg.Wait() @@ -219,12 +222,12 @@ func (s *SFlow_A10) Stop() { s.Unlock() } -func (s *SFlow_A10) Address() net.Addr { +func (s *SFlowA10) Address() net.Addr { return s.addr } -func (s *SFlow_A10) udpListen(conn *net.UDPConn) { - buf := make([]byte, UDP_MAX_PACKET_SIZE) +func (s *SFlowA10) udpListen(conn *net.UDPConn) { + buf := make([]byte, UDPMaxPacketSize) for { select { case <-s.done: @@ -242,7 +245,11 @@ func (s *SFlow_A10) udpListen(conn *net.UDPConn) { s.SflowUDPBytesRecv.Incr(int64(n)) b := s.bufPool.Get().(*bytes.Buffer) b.Reset() - b.Write(buf[:n]) + _, err = b.Write(buf[:n]) + if err != nil { + s.acc.AddError(err) + } + select { case s.in <- input{ Buffer: b, @@ -258,15 +265,14 @@ func (s *SFlow_A10) udpListen(conn *net.UDPConn) { } } } - } } -func (s *SFlow_A10) parse() error { +func (s *SFlowA10) parse() { for { select { case <-s.done: - return nil + return case in := <-s.in: start := time.Now() if err := s.decoder.Decode(in.Buffer); err != nil { @@ -294,7 +300,7 @@ func listenUDP(network string, address string) (*net.UDPConn, error) { // init registers this SFlow_A10 input plug in with the Telegraf framework func init() { inputs.Add("sflow_a10", func() telegraf.Input { - return &SFlow_A10{ + return &SFlowA10{ AllowedPendingMessages: defaultAllowPendingMessage, } }) diff --git a/plugins/inputs/sflow_a10/sflow_a10_test.go b/plugins/inputs/sflow_a10/sflow_a10_test.go index d5ed802588ce1..828fb27be5e5d 100644 --- a/plugins/inputs/sflow_a10/sflow_a10_test.go +++ b/plugins/inputs/sflow_a10/sflow_a10_test.go @@ -14,7 +14,7 @@ import ( ) func TestSFlow_A10(t *testing.T) { - sflow := &SFlow_A10{ + sflow := &SFlowA10{ ServiceAddress: "udp://127.0.0.1:0", Log: testutil.Logger{}, IgnoreZeroValues: true, @@ -23,7 +23,7 @@ func TestSFlow_A10(t *testing.T) { data, err := ioutil.ReadFile("sflow_3_2_t2.xml") require.NoError(t, err) - err = sflow.initInternal([]byte(data)) + err = sflow.initInternal(data) require.NoError(t, err) var acc testutil.Accumulator @@ -34,48 +34,52 @@ func TestSFlow_A10(t *testing.T) { client, err := net.Dial(sflow.Address().Network(), sflow.Address().String()) require.NoError(t, err) - // Create a rutine to wait for the packets - go acc.Wait(3) - // Add sleep times (100 ms) between the packets - time.Sleep(100 * time.Millisecond) + // 271 - hex 
10f packetBytes, err := hex.DecodeString("00000005000000010a41101f0000000000008a6766e234bc00000001000000020000005a00000000000210c70000000109f8a10F2468000000020004000000000A0001031EC0A805060F") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) time.Sleep(100 * time.Millisecond) // 260 - hex 104 packetBytes, err = hex.DecodeString("00000005000000010a41101f0000000000008a6766e234bc00000001000000020000005a00000000000210c70000000109f8a10400000046050c00567a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) time.Sleep(100 * time.Millisecond) // 219 - hex 0db packetBytes, err = hex.DecodeString("00000005000000010a41101f0000000000008a9066e235c800000002000000020000026c00000000000210c70000000109f8a0db000002580000004a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000007400000000000210c70000000109f8a0d000000060000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) time.Sleep(100 * time.Millisecond) // 207 - hex 0cf packetBytes, err = 
hex.DecodeString("00000005000000010a01000600000000002c6d611a33ba2f00000001000000020000027c000000000000a27e0000000109f8a0cf0000026800a6004c00000000000000000292a4dc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000059b9909a0000002ce72d028c00000000000000000000000000000000000000000000000000000000022e30fc00000000efc72290000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001392de4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011c8a000000000000948d000000000001b11500000000026350560000000106f457cc000000000000000000000000000000000000000002c7c2860000000013722fbc00000000000000000000000000000000000000000000004100000000000000400000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012550000000000003795000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) time.Sleep(100 * time.Millisecond) // 293 - hex 125 packetBytes, err = hex.DecodeString("00000005000000010a1d000800000000000007f70003ae8800000001000000020000002c00000008020001030000000109f8a12500000018000000000000001a00000000000000000000000000000000") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) time.Sleep(100 * time.Millisecond) // 294 - hex 126 packetBytes, err = hex.DecodeString("00000005000000010a1d000800000000000008ac0003d59800000001000000020000006c00000009040000010000000109f8a1260000005800000001000000060000000ba43b7400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000038000000000000000000000000000000150000000000000000") require.NoError(t, err) - client.Write(packetBytes) + _, err = client.Write(packetBytes) + require.NoError(t, err) time.Sleep(100 * time.Millisecond) + acc.Wait(3) expected := []telegraf.Metric{ testutil.MustMetric( @@ -121,7 +125,7 @@ func TestSFlow_A10(t *testing.T) { } func BenchmarkSFlow(b *testing.B) { - sflow := &SFlow_A10{ + sflow := &SFlowA10{ ServiceAddress: "udp://127.0.0.1:0", Log: testutil.Logger{}, } @@ -149,9 +153,15 @@ func BenchmarkSFlow(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - client.Write(packetBytes260) - client.Write(packetBytes271) - client.Write(packetBytesCounter) + _, err = client.Write(packetBytes260) + require.NoError(b, err) + + _, err = client.Write(packetBytes271) + require.NoError(b, err) + + _, err = client.Write(packetBytesCounter) + require.NoError(b, err) + acc.Wait(1) } } diff --git a/plugins/inputs/sflow_a10/test.go b/plugins/inputs/sflow_a10/test.go index 2b161cee3c7d9..db7e1c8686d69 100644 --- a/plugins/inputs/sflow_a10/test.go +++ b/plugins/inputs/sflow_a10/test.go @@ -9,7 +9,7 @@ import ( // TestXMLUnMarshalSimple tests for a simple unmarshaling func TestXMLUnMarshalSimple(t *testing.T) { - sflow := SFlow_A10{ + sflow := SFlowA10{ Log: tu.Logger{}, } @@ -28,12 +28,11 @@ func TestXMLUnMarshalSimple(t *testing.T) { require.Equal(t, 0, 
c[217].Counters[0].Offset)
	require.Equal(t, 1, c[217].Counters[1].Offset)
-
}

// TestXMLUnMarshalSameTagReturnsError makes sure that if we have the same tag for two different counter blocks we will get an error
func TestXMLUnMarshalSameTagReturnsError(t *testing.T) {
-	sflow := SFlow_A10{
+	sflow := SFlowA10{
		Log: tu.Logger{},
	}
@@ -43,7 +42,7 @@

// TestXMLUnMarshalWrongOrderReturnedInCorrectOrder checks that an XML with wrong order in the offset headers is returned in the correct order
func TestXMLUnMarshalWrongOrderReturnedInCorrectOrder(t *testing.T) {
-	sflow := SFlow_A10{
+	sflow := SFlowA10{
		Log: tu.Logger{},
	}
diff --git a/plugins/inputs/sflow_a10/types.go b/plugins/inputs/sflow_a10/types.go
index d4f3814b4d275..416fcb9ac6f5f 100644
--- a/plugins/inputs/sflow_a10/types.go
+++ b/plugins/inputs/sflow_a10/types.go
@@ -42,11 +42,11 @@ type CounterSample struct {
	CounterRecords []CounterRecord
}

-func (c *CounterSample) NeedsIpAndPort() bool {
+func (c *CounterSample) NeedsIPAndPort() bool {
	if c.CounterRecords == nil || len(c.CounterRecords) == 0 {
		return false
	}
-	return c.CounterRecords[0].NeedsIpAndPort
+	return c.CounterRecords[0].NeedsIPAndPort
}

type CounterFormatType uint32
@@ -54,7 +54,7 @@ type CounterRecord struct {
	CounterFormat      CounterFormatType
	CounterData        *CounterData
-	NeedsIpAndPort     bool
+	NeedsIPAndPort     bool
	IsEthernetCounters bool
}

@@ -224,9 +224,9 @@ func portTypeIntToString(portType uint8) string {

// readA10XMLData parses the A10 XML definitions file and returns a map with tag as key and counter information as value
// moreover, it does some processing on the FieldName strings so they are compatible with different timeseries storage backends
-func (s *SFlow_A10) readA10XMLData(data []byte) (map[uint32]CounterBlock, error) {
+func (s *SFlowA10) readA10XMLData(data []byte) (map[uint32]CounterBlock, error) {
	var allCounterBlocks Allctrblocks
-	if err := xml.Unmarshal([]byte(data), &allCounterBlocks); err != nil {
+	if err := xml.Unmarshal(data, &allCounterBlocks); err != nil {
		return nil, err
	}
diff --git a/plugins/inputs/sflow_a10/types_test.go b/plugins/inputs/sflow_a10/types_test.go
index 87e1a53accbbf..bb80e4156a0e4 100644
--- a/plugins/inputs/sflow_a10/types_test.go
+++ b/plugins/inputs/sflow_a10/types_test.go
@@ -8,7 +8,7 @@ )

func TestXMLFileNegative(t *testing.T) {
-	sflow := SFlow_A10{
+	sflow := SFlowA10{
		Log: tu.Logger{},
	}
	_, err := sflow.readA10XMLData([]byte(testXMLStringNegativeTag))
diff --git a/plugins/inputs/slab/README.md b/plugins/inputs/slab/README.md
new file mode 100644
index 0000000000000..0977d1f38dfaa
--- /dev/null
+++ b/plugins/inputs/slab/README.md
@@ -0,0 +1,60 @@
+# Slab Input Plugin
+
+This plugin collects details on how much memory each entry in the Slab cache is
+consuming. For example, it collects the consumption of `kmalloc-1024` and
+`xfs_inode`. Since this information is obtained by parsing the `/proc/slabinfo`
+file, only Linux is supported. The specification of `/proc/slabinfo` has not
+changed since [Linux v2.6.12 (April 2005)][slab-c], so it can be regarded as
+sufficiently stable. The memory usage is equivalent to the `CACHE_SIZE` column
+of the `slabtop` command. If the HOST_PROC environment variable is set, Telegraf
+will use its value instead of `/proc`.
+
+**Note: `/proc/slabinfo` is usually readable only by the root user. Make sure
+telegraf can execute `sudo` without a password.**
+
+[slab-c]: https://github.com/torvalds/linux/blob/1da177e4/mm/slab.c#L2848-L2861
+
+## Configuration
+
+```toml @sample.conf
+# Get slab statistics from procfs
+[[inputs.slab]]
+  # no configuration - please see the plugin's README for steps to configure
+  # sudo properly
+```
+
+## Sudo configuration
+
+Since the slabinfo file is only readable by root, the plugin runs `sudo
+/bin/cat` to read the file.
+
+Sudo can be configured to allow telegraf to run just the command needed to read
+the slabinfo file. For example, if telegraf is running as the user 'telegraf'
+and HOST_PROC is not used, add this to the sudoers file: `telegraf ALL = (root)
+NOPASSWD: /bin/cat /proc/slabinfo`
+
+## Metrics
+
+Metrics include generic ones such as `kmalloc_*` as well as those of kernel
+subsystems and drivers used by the system such as `xfs_inode`.
+Each field with the `_size` suffix indicates memory consumption in bytes.
+
+- mem
+  - fields:
+    - kmalloc_8_size (integer)
+    - kmalloc_16_size (integer)
+    - kmalloc_32_size (integer)
+    - kmalloc_64_size (integer)
+    - kmalloc_96_size (integer)
+    - kmalloc_128_size (integer)
+    - kmalloc_256_size (integer)
+    - kmalloc_512_size (integer)
+    - xfs_ili_size (integer)
+    - xfs_inode_size (integer)
+
+## Example Output
+
+```shell
+slab kmalloc_1024_size=239927296i,kmalloc_512_size=5582848i 1651049129000000000
+```
diff --git a/plugins/inputs/slab/sample.conf b/plugins/inputs/slab/sample.conf
new file mode 100644
index 0000000000000..d606f2dcd96bc
--- /dev/null
+++ b/plugins/inputs/slab/sample.conf
@@ -0,0 +1,4 @@
+# Get slab statistics from procfs
+[[inputs.slab]]
+  # no configuration - please see the plugin's README for steps to configure
+  # sudo properly
diff --git a/plugins/inputs/slab/slab.go b/plugins/inputs/slab/slab.go
new file mode 100644
index 0000000000000..c895c3b84bc8d
--- /dev/null
+++ b/plugins/inputs/slab/slab.go
@@ -0,0 +1,128 @@
+//go:generate ../../../tools/readme_config_includer/generator
+//go:build linux
+// +build linux
+
+package slab
+
+import (
+	"bufio"
+	"bytes"
+	_ "embed"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
+type SlabStats struct {
+	Log telegraf.Logger `toml:"-"`
+
+	statFile string
+	useSudo  bool
+}
+
+func (*SlabStats) SampleConfig() string {
+	return sampleConfig
+}
+
+func (ss *SlabStats) Init() error {
+	return nil
+}
+
+func (ss *SlabStats) Gather(acc telegraf.Accumulator) error {
+	fields, err := ss.getSlabStats()
+	if err != nil {
+		return err
+	}
+
+	acc.AddGauge("slab", fields, nil)
+	return nil
+}
+
+func (ss *SlabStats) getSlabStats() (map[string]interface{}, error) {
+	fields := map[string]interface{}{}
+
+	out, err := ss.runCmd("/bin/cat", []string{ss.statFile})
+	if err != nil {
+		return nil, err
+	}
+
+	bytesReader := bytes.NewReader(out)
+	scanner := bufio.NewScanner(bytesReader)
+
+	// Read header rows
+	scanner.Scan() // for "slabinfo - version: 2.1"
+	scanner.Scan() // for "# name ..."
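+	// Each remaining row has the form below (example taken from
+	// testdata/slabinfo in this patch):
+	//   kmalloc-1024 155845 234304 1024 32 8 : tunables 0 0 0 : slabdata 7329 7329 0
+	// Only cols[0] (cache name), cols[2] (num_objs) and cols[3] (objsize) are
+	// used; the <name>_size field computed below is num_objs*objsize in bytes,
+	// i.e. the CACHE_SIZE column of slabtop mentioned in the README.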
+ + // Read data rows + for scanner.Scan() { + line := scanner.Text() + cols := strings.Fields(line) + + if len(cols) < 4 { + return nil, errors.New("the content of /proc/slabinfo is invalid") + } + + var numObj, sizObj int + + numObj, err = strconv.Atoi(cols[2]) + if err != nil { + return nil, err + } + + sizObj, err = strconv.Atoi(cols[3]) + if err != nil { + return nil, err + } + + fields[normalizeName(cols[0])] = numObj * sizObj + } + return fields, nil +} + +func (ss *SlabStats) runCmd(cmd string, args []string) ([]byte, error) { + execCmd := exec.Command(cmd, args...) + if os.Geteuid() != 0 && ss.useSudo { + execCmd = exec.Command("sudo", append([]string{"-n", cmd}, args...)...) + } + + out, err := internal.StdOutputTimeout(execCmd, 5*time.Second) + if err != nil { + return nil, fmt.Errorf("failed to run command %s: %s - %v", execCmd.Args, err, out) + } + + return out, nil +} + +func getHostProc() string { + procPath := "/proc" + if os.Getenv("HOST_PROC") != "" { + procPath = os.Getenv("HOST_PROC") + } + return procPath +} + +func normalizeName(name string) string { + return strings.ReplaceAll(strings.ToLower(name), "-", "_") + "_size" +} + +func init() { + inputs.Add("slab", func() telegraf.Input { + return &SlabStats{ + statFile: path.Join(getHostProc(), "slabinfo"), + useSudo: true, + } + }) +} diff --git a/plugins/inputs/slab/slab_notlinux.go b/plugins/inputs/slab/slab_notlinux.go new file mode 100644 index 0000000000000..77ec3e6e6a170 --- /dev/null +++ b/plugins/inputs/slab/slab_notlinux.go @@ -0,0 +1,4 @@ +//go:build !linux +// +build !linux + +package slab diff --git a/plugins/inputs/slab/slab_test.go b/plugins/inputs/slab/slab_test.go new file mode 100644 index 0000000000000..a4b26e6e63162 --- /dev/null +++ b/plugins/inputs/slab/slab_test.go @@ -0,0 +1,49 @@ +//go:build linux +// +build linux + +package slab + +import ( + "path" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/testutil" +) + +func TestSlab(t *testing.T) { + slabStats := SlabStats{ + statFile: path.Join("testdata", "slabinfo"), + useSudo: false, + } + + var acc testutil.Accumulator + require.NoError(t, slabStats.Gather(&acc)) + + fields := map[string]interface{}{ + "ext4_allocation_context_size": int(16384), + "ext4_extent_status_size": int(8160), + "ext4_free_data_size": int(0), + "ext4_inode_cache_size": int(491520), + "ext4_io_end_size": int(4032), + "ext4_xattr_size": int(0), + "kmalloc_1024_size": int(239927296), + "kmalloc_128_size": int(5586944), + "kmalloc_16_size": int(17002496), + "kmalloc_192_size": int(4015872), + "kmalloc_2048_size": int(3309568), + "kmalloc_256_size": int(5423104), + "kmalloc_32_size": int(3657728), + "kmalloc_4096_size": int(2359296), + "kmalloc_512_size": int(41435136), + "kmalloc_64_size": int(8536064), + "kmalloc_8_size": int(229376), + "kmalloc_8192_size": int(1048576), + "kmalloc_96_size": int(12378240), + "kmem_cache_size": int(81920), + "kmem_cache_node_size": int(36864), + } + + acc.AssertContainsFields(t, "slab", fields) +} diff --git a/plugins/inputs/slab/testdata/slabinfo b/plugins/inputs/slab/testdata/slabinfo new file mode 100644 index 0000000000000..f86237ca2cf4d --- /dev/null +++ b/plugins/inputs/slab/testdata/slabinfo @@ -0,0 +1,23 @@ +slabinfo - version: 2.1 +# name : tunables : slabdata +ext4_inode_cache 480 480 1024 32 8 : tunables 0 0 0 : slabdata 15 15 0 +ext4_xattr 0 0 88 46 1 : tunables 0 0 0 : slabdata 0 0 0 +ext4_free_data 0 0 64 64 1 : tunables 0 0 0 : slabdata 0 0 0 +ext4_allocation_context 128 128 128 32 1 : 
tunables 0 0 0 : slabdata 4 4 0
+ext4_io_end 56 56 72 56 1 : tunables 0 0 0 : slabdata 1 1 0
+ext4_extent_status 204 204 40 102 1 : tunables 0 0 0 : slabdata 2 2 0
+kmalloc-8192 106 128 8192 4 8 : tunables 0 0 0 : slabdata 32 32 0
+kmalloc-4096 486 576 4096 8 8 : tunables 0 0 0 : slabdata 72 72 0
+kmalloc-2048 1338 1616 2048 16 8 : tunables 0 0 0 : slabdata 101 101 0
+kmalloc-1024 155845 234304 1024 32 8 : tunables 0 0 0 : slabdata 7329 7329 0
+kmalloc-512 18995 80928 512 32 4 : tunables 0 0 0 : slabdata 2529 2529 0
+kmalloc-256 16366 21184 256 32 2 : tunables 0 0 0 : slabdata 662 662 0
+kmalloc-192 18835 20916 192 21 1 : tunables 0 0 0 : slabdata 996 996 0
+kmalloc-128 23600 43648 128 32 1 : tunables 0 0 0 : slabdata 1364 1364 0
+kmalloc-96 95106 128940 96 42 1 : tunables 0 0 0 : slabdata 3070 3070 0
+kmalloc-64 82432 133376 64 64 1 : tunables 0 0 0 : slabdata 2084 2084 0
+kmalloc-32 78477 114304 32 128 1 : tunables 0 0 0 : slabdata 893 893 0
+kmalloc-16 885605 1062656 16 256 1 : tunables 0 0 0 : slabdata 4151 4151 0
+kmalloc-8 28672 28672 8 512 1 : tunables 0 0 0 : slabdata 56 56 0
+kmem_cache_node 576 576 64 64 1 : tunables 0 0 0 : slabdata 9 9 0
+kmem_cache 320 320 256 32 2 : tunables 0 0 0 : slabdata 10 10 0
\ No newline at end of file
diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md
index dec58e3f9afab..4a854e68ce62e 100644
--- a/plugins/inputs/smart/README.md
+++ b/plugins/inputs/smart/README.md
@@ -1,69 +1,88 @@
# S.M.A.R.T. Input Plugin

-Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs) that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures.
-See smartmontools (https://www.smartmontools.org/).
-
-SMART information is separated between different measurements: `smart_device` is used for general information, while `smart_attribute` stores the detailed attribute information if `attributes = true` is enabled in the plugin configuration.
-
-If no devices are specified, the plugin will scan for SMART devices via the following command:
-
-```
+Get metrics using the command line utility `smartctl` for
+S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage
+devices. SMART is a monitoring system included in computer hard disk drives
+(HDDs) and solid-state drives (SSDs) that detects and reports on various
+indicators of drive reliability, with the intent of enabling the anticipation of
+hardware failures. See smartmontools (<https://www.smartmontools.org/>).
+
+SMART information is separated between different measurements: `smart_device` is
+used for general information, while `smart_attribute` stores the detailed
+attribute information if `attributes = true` is enabled in the plugin
+configuration.
+
+If no devices are specified, the plugin will scan for SMART devices via the
+following command:
+
+```sh
smartctl --scan
```

Metrics will be reported from the following `smartctl` command:

-```
+```sh
smartctl --info --attributes --health -n --format=brief
```

-This plugin supports _smartmontools_ version 5.41 and above, but v. 5.41 and v. 5.42
-might require setting `nocheck`, see the comment in the sample configuration.
-Also, NVMe capabilities were introduced in version 6.5.
+This plugin supports _smartmontools_ version 5.41 and above, but v. 5.41 and
+v.
5.42 might require setting `nocheck`, see the comment in the sample
+configuration. Also, NVMe capabilities were introduced in version 6.5.

To enable SMART on a storage device run:

-```
+```sh
smartctl -s on
```
+
## NVMe vendor specific attributes

-For NVMe disk type, plugin can use command line utility `nvme-cli`. It has a feature
-to easy access a vendor specific attributes.
-This plugin supports nmve-cli version 1.5 and above (https://github.com/linux-nvme/nvme-cli).
-In case of `nvme-cli` absence NVMe vendor specific metrics will not be obtained.
+For the NVMe disk type, the plugin can use the command line utility `nvme-cli`.
+It has a feature for easy access to vendor specific attributes. This plugin
+supports nvme-cli version 1.5 and above (<https://github.com/linux-nvme/nvme-cli>).
+In case `nvme-cli` is absent, NVMe vendor specific metrics will not be obtained.

-Vendor specific SMART metrics for NVMe disks may be reported from the following `nvme` command:
+Vendor specific SMART metrics for NVMe disks may be reported from the following
+`nvme` command:

-```
+```sh
nvme smart-log-add
```

-Note that vendor plugins for `nvme-cli` could require different naming convention and report format.
+Note that vendor plugins for `nvme-cli` could require a different naming
+convention and report format.

-To see installed plugin extensions, depended on the nvme-cli version, look at the bottom of:
-```
+To see installed plugin extensions, depending on the nvme-cli version, look at
+the bottom of:
+
+```sh
nvme help
```

To gather disk vendor id (vid) `id-ctrl` could be used:
-```
+
+```sh
nvme id-ctrl
```
-Association between a vid and company can be found there: https://pcisig.com/membership/member-companies.
+
+The association between a vid and a company can be found here:
+<https://pcisig.com/membership/member-companies>.

Devices affiliation to being NVMe or non NVMe will be determined thanks to:
-```
+
+```sh
smartctl --scan
```
+
and:
-```
+
+```sh
smartctl --scan -d nvme
```

## Configuration

-```toml
+```toml @sample.conf
# Read metrics from storage devices supporting S.M.A.R.T.
[[inputs.smart]]
  ## Optionally specify the path to the smartctl executable
@@ -105,20 +124,32 @@ smartctl --scan -d nvme
  ## Timeout for the cli command to complete.
  # timeout = "30s"
+
+  ## Optionally call smartctl and nvme-cli with a specific concurrency policy.
+  ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes.
+  ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of
+  ## SMART data - one individual array drive at a time. In such cases please set this configuration option
+  ## to "sequential" to get readings for all drives.
+  ## valid options: concurrent, sequential
+  # read_method = "concurrent"
```

## Permissions

-It's important to note that this plugin references smartctl and nvme-cli, which may require additional permissions to execute successfully.
-Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo.
+It's important to note that this plugin references smartctl and nvme-cli, which
+may require additional permissions to execute successfully. Depending on the
+user/group permissions of the telegraf user executing this plugin, you may need
+to use sudo.
You will need the following in your telegraf config:
+
```toml
[[inputs.smart]]
  use_sudo = true
```

You will also need to update your sudoers file:
+
```bash
$ visudo
# For smartctl add the following lines:
@@ -131,8 +162,10 @@ Cmnd_Alias NVME = /path/to/nvme
telegraf ALL=(ALL) NOPASSWD: NVME
Defaults!NVME !logfile, !syslog, !pam_session
```
-To run smartctl or nvme with `sudo` wrapper script can be created. `path_smartctl` or
-`path_nvme` in the configuration should be set to execute this script.
+
+To run smartctl or nvme with `sudo`, a wrapper script can be
+created. `path_smartctl` or `path_nvme` in the configuration should be set to
+execute this script.

## Metrics

@@ -171,57 +204,88 @@ To run smartctl or nvme with `sudo` wrapper script can be created. `path_smartct
  - value
  - worst

-#### Flags
+### Flags

The interpretation of the tag `flags` is:
-  - `K` auto-keep
-  - `C` event count
-  - `R` error rate
-  - `S` speed/performance
-  - `O` updated online
-  - `P` prefailure warning
-#### Exit Status
+
+- `K` auto-keep
+- `C` event count
+- `R` error rate
+- `S` speed/performance
+- `O` updated online
+- `P` prefailure warning
+
+### Exit Status

-The `exit_status` field captures the exit status of the used cli utilities command which
-is defined by a bitmask. For the interpretation of the bitmask see the man page for
-smartctl or nvme-cli.
+The `exit_status` field captures the exit status of the used cli utilities
+command, which is defined by a bitmask. For the interpretation of the bitmask see
+the man page for smartctl or nvme-cli.

## Device Names
+
-Device names, e.g., `/dev/sda`, are *not persistent*, and may be
+Device names, e.g., `/dev/sda`, are _not persistent_, and may be
subject to change across reboots or system changes. Instead, you can use the
-*World Wide Name* (WWN) or serial number to identify devices. On Linux block
+_World Wide Name_ (WWN) or serial number to identify devices. On Linux block
devices can be referenced by the WWN in the following location:
`/dev/disk/by-id/`.
+
## Troubleshooting
+
-If you expect to see more SMART metrics than this plugin shows, be sure to use a proper version
-of smartctl or nvme-cli utility which has the functionality to gather desired data. Also, check
-your device capability because not every SMART metrics are mandatory.
-For example the number of temperature sensors depends on the device specification.
+If you expect to see more SMART metrics than this plugin shows, be sure to use a
+version of the smartctl or nvme-cli utility that can gather the desired data.
+Also, check your device capability because not all SMART metrics are mandatory.
+For example, the number of temperature sensors depends on
+the device specification.
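+
+When troubleshooting, it can help to decode the `exit_status` bitmask described
+in the Exit Status section above. The hedged Go sketch below shows one way to do
+this; the helper name is hypothetical (not part of this plugin) and the bit
+meanings are taken from the smartctl(8) man page, so verify them against your
+smartmontools version:
+
+```go
+package main
+
+import "fmt"
+
+// explainSmartctlExit decodes the low bits of a smartctl exit status.
+// Illustrative only; bit meanings per smartctl(8), check your version.
+func explainSmartctlExit(status int) []string {
+	var msgs []string
+	bits := []struct {
+		mask int
+		msg  string
+	}{
+		{1 << 0, "command line did not parse"},
+		{1 << 1, "device open failed"},
+		{1 << 2, "a SMART or ATA command to the disk failed"},
+		{1 << 3, "SMART status check returned 'DISK FAILING'"},
+	}
+	for _, b := range bits {
+		if status&b.mask != 0 {
+			msgs = append(msgs, b.msg)
+		}
+	}
+	return msgs
+}
+
+func main() {
+	// prints: [SMART status check returned 'DISK FAILING']
+	fmt.Println(explainSmartctlExit(8))
+}
+```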
If this plugin is not working as expected for your SMART enabled device, please
run these commands and include the output in a bug report:

-For non NVMe devices (from smartctl version >= 7.0 this will also return NVMe devices by default):
-```
+For non NVMe devices (from smartctl version >= 7.0 this will also return NVMe
+devices by default):
+
+```sh
smartctl --scan
```
+
For NVMe devices:
-```
+
+```sh
smartctl --scan -d nvme
```
+
Run the following command replacing your configuration setting for NOCHECK and
the DEVICE (name of the device could be taken from the previous command):
-```
+
+```sh
smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHECK --format=brief -d DEVICE
```
-If you try to gather vendor specific metrics, please provide this commad
+
+If you try to gather vendor specific metrics, please provide this command
and replace vendor and device to match your case:
-```
+
+```sh
nvme VENDOR smart-log-add DEVICE
```
-## Example SMART Plugin Outputs
+
+If you have specified a devices array in the configuration file and Telegraf only
+shows data from one device, you should change the plugin configuration to gather
+disk attributes sequentially instead of collecting them in separate threads
+(goroutines). To do this, find `read_method` in the plugin configuration and
+change it to "sequential":
+
+```toml
+  ## Optionally call smartctl and nvme-cli with a specific concurrency policy.
+  ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes.
+  ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of
+  ## SMART data - one individual array drive at a time. In such cases please set this configuration option
+  ## to "sequential" to get readings for all drives.
+  ## valid options: concurrent, sequential
+  read_method = "sequential"
```
+
+## Example Output
+
+```shell
smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000
smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O-RC-,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=UDMA_CRC_Error_Count,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=200i,worst=200i 1502536854000000000
smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O---K,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=Unknown_SSD_Attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=100i,worst=100i 1502536854000000000
diff --git a/plugins/inputs/smart/sample.conf b/plugins/inputs/smart/sample.conf
new file mode 100644
index 0000000000000..303e93561e30a
--- /dev/null
+++ b/plugins/inputs/smart/sample.conf
@@ -0,0 +1,49 @@
+# Read metrics from storage devices supporting S.M.A.R.T.
+[[inputs.smart]]
+  ## Optionally specify the path to the smartctl executable
+  # path_smartctl = "/usr/bin/smartctl"
+
+  ## Optionally specify the path to the nvme-cli executable
+  # path_nvme = "/usr/bin/nvme"
+
+  ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case
+  ## ["auto-on"] - automatically find and enable additional vendor specific disk info
+  ## ["vendor1", "vendor2", ...] - e.g.
"Intel" enable additional Intel specific disk info + # enable_extensions = ["auto-on"] + + ## On most platforms used cli utilities requires root access. + ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. + ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli + ## without a password. + # use_sudo = false + + ## Skip checking disks in this power mode. Defaults to + ## "standby" to not wake up disks that have stopped rotating. + ## See --nocheck in the man pages for smartctl. + ## smartctl version 5.41 and 5.42 have faulty detection of + ## power mode and might require changing this value to + ## "never" depending on your disks. + # nocheck = "standby" + + ## Gather all returned S.M.A.R.T. attribute metrics and the detailed + ## information from each drive into the 'smart_attribute' measurement. + # attributes = false + + ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. + # excludes = [ "/dev/pass6" ] + + ## Optionally specify devices and device type, if unset + ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done + ## and all found will be included except for the excluded in excludes. + # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] + + ## Timeout for the cli command to complete. + # timeout = "30s" + + ## Optionally call smartctl and nvme-cli with a specific concurrency policy. + ## By default, smartctl and nvme-cli are called in separate threads (goroutines) to gather disk attributes. + ## Some devices (e.g. disks in RAID arrays) may have access limitations that require sequential reading of + ## SMART data - one individual array drive at the time. In such case please set this configuration option + ## to "sequential" to get readings for all drives. + ## valid options: concurrent, sequential + # read_method = "concurrent" diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index adc23f0921e26..bc99330fa701f 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package smart import ( "bufio" + _ "embed" "fmt" "os" "os/exec" @@ -12,50 +14,70 @@ import ( "sync" "syscall" "time" - "unicode" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) -const IntelVID = "0x8086" +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf
+var sampleConfig string
+
+const intelVID = "0x8086"

var (
	// Device Model: APPLE SSD SM256E
	// Product: HUH721212AL5204
	// Model Number: TS128GMTE850
-	modelInfo = regexp.MustCompile("^(Device Model|Product|Model Number):\\s+(.*)$")
+	modelInfo = regexp.MustCompile(`^(Device Model|Product|Model Number):\s+(.*)$`)
	// Serial Number: S0X5NZBC422720
-	serialInfo = regexp.MustCompile("(?i)^Serial Number:\\s+(.*)$")
+	serialInfo = regexp.MustCompile(`(?i)^Serial Number:\s+(.*)$`)
	// LU WWN Device Id: 5 002538 655584d30
-	wwnInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$")
+	wwnInfo = regexp.MustCompile(`^LU WWN Device Id:\s+(.*)$`)
	// User Capacity: 251,000,193,024 bytes [251 GB]
-	userCapacityInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$")
+	userCapacityInfo = regexp.MustCompile(`^User Capacity:\s+([0-9,]+)\s+bytes.*$`)
	// SMART support is: Enabled
-	smartEnabledInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$")
+	smartEnabledInfo = regexp.MustCompile(`^SMART support is:\s+(\w+)$`)
+	// Power mode is: ACTIVE or IDLE or Power mode was: STANDBY
+	powermodeInfo = regexp.MustCompile(`^Power mode \w+:\s+(\w+)`)
+	// Device is in STANDBY mode
+	standbyInfo = regexp.MustCompile(`^Device is in\s+(\w+)`)
	// SMART overall-health self-assessment test result: PASSED
	// SMART Health Status: OK
	// PASSED, FAILED, UNKNOWN
-	smartOverallHealth = regexp.MustCompile("^(SMART overall-health self-assessment test result|SMART Health Status):\\s+(\\w+).*$")
+	smartOverallHealth = regexp.MustCompile(`^(SMART overall-health self-assessment test result|SMART Health Status):\s+(\w+).*$`)

-	// sasNvmeAttr is a SAS or NVME SMART attribute
-	sasNvmeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`)
+	// sasNVMeAttr is a SAS or NVMe SMART attribute
+	sasNVMeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`)

	// ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE
	// 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0
	// 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0
	// 192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716
-	attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9-]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$")
+	attribute = regexp.MustCompile(`^\s*([0-9]+)\s(\S+)\s+([-P][-O][-S][-R][-C][-K])\s+([0-9]+)\s+([0-9]+)\s+([0-9-]+)\s+([-\w]+)\s+([\w\+\.]+).*$`)

	// Additional Smart Log for NVME device:nvme0 namespace-id:ffffffff
+	// nvme version 1.14+ metrics:
+	// ID KEY Normalized Raw
+	// 0xab program_fail_count 100 0
+
+	// nvme deprecated metric format:
	// key normalized raw
	// program_fail_count : 100% 0
-	intelExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\w\s]+)%(.+)`)
+
+	// Regex pattern supports deprecated metrics (nvme-cli version below 1.14) and metrics from nvme-cli 1.14 (and above).
+	intelExpressionPattern = regexp.MustCompile(`^([A-Za-z0-9_\s]+)[:|\s]+(\d+)[%|\s]+(.+)`)

	// vid : 0x8086
	// sn : CFGT53260XSP8011P
-	nvmeIdCtrlExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\s\w]+)`)
+	nvmeIDCtrlExpressionPattern = regexp.MustCompile(`^([\w\s]+):([\s\w]+)`)
+
+	// Format from nvme-cli 1.14 (and above) gives ID and KEY, this regex is for separating id from key.
+ // ID KEY + // 0xab program_fail_count + nvmeIDSeparatePattern = regexp.MustCompile(`^([A-Za-z0-9_]+)(.+)`) deviceFieldIds = map[string]string{ "1": "read_error_rate", @@ -66,7 +88,7 @@ var ( } // to obtain metrics from smartctl - sasNvmeAttributes = map[string]struct { + sasNVMeAttributes = map[string]struct { ID string Name string Parse func(fields, deviceFields map[string]interface{}, str string) error @@ -209,12 +231,51 @@ var ( Parse: parseTemperatureSensor, }, } - - // to obtain Intel specific metrics from nvme-cli + // To obtain Intel specific metrics from nvme-cli version 1.14 and above. intelAttributes = map[string]struct { ID string Name string Parse func(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error + }{ + "program_fail_count": { + Name: "Program_Fail_Count", + }, + "erase_fail_count": { + Name: "Erase_Fail_Count", + }, + "wear_leveling_count": { // previously: "wear_leveling" + Name: "Wear_Leveling_Count", + }, + "e2e_error_detect_count": { // previously: "end_to_end_error_detection_count" + Name: "End_To_End_Error_Detection_Count", + }, + "crc_error_count": { + Name: "Crc_Error_Count", + }, + "media_wear_percentage": { // previously: "timed_workload_media_wear" + Name: "Media_Wear_Percentage", + }, + "host_reads": { + Name: "Host_Reads", + }, + "timed_work_load": { // previously: "timed_workload_timer" + Name: "Timed_Workload_Timer", + }, + "thermal_throttle_status": { + Name: "Thermal_Throttle_Status", + }, + "retry_buff_overflow_count": { // previously: "retry_buffer_overflow_count" + Name: "Retry_Buffer_Overflow_Count", + }, + "pll_lock_loss_counter": { // previously: "pll_lock_loss_count" + Name: "Pll_Lock_Loss_Count", + }, + } + // to obtain Intel specific metrics from nvme-cli + intelAttributesDeprecatedFormat = map[string]struct { + ID string + Name string + Parse func(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error }{ "program_fail_count": { Name: "Program_Fail_Count", @@ -265,85 +326,45 @@ var ( Parse: parseBytesWritten, }, } + + knownReadMethods = []string{"concurrent", "sequential"} ) -type NVMeDevice struct { +// Smart plugin reads metrics from storage devices supporting S.M.A.R.T. 
+type Smart struct { + Path string `toml:"path" deprecated:"1.16.0;use 'path_smartctl' instead"` + PathSmartctl string `toml:"path_smartctl"` + PathNVMe string `toml:"path_nvme"` + Nocheck string `toml:"nocheck"` + EnableExtensions []string `toml:"enable_extensions"` + Attributes bool `toml:"attributes"` + Excludes []string `toml:"excludes"` + Devices []string `toml:"devices"` + UseSudo bool `toml:"use_sudo"` + Timeout config.Duration `toml:"timeout"` + ReadMethod string `toml:"read_method"` + Log telegraf.Logger `toml:"-"` +} + +type nvmeDevice struct { name string vendorID string model string serialNumber string } -type Smart struct { - Path string `toml:"path"` //deprecated - to keep backward compatibility - PathSmartctl string `toml:"path_smartctl"` - PathNVMe string `toml:"path_nvme"` - Nocheck string `toml:"nocheck"` - EnableExtensions []string `toml:"enable_extensions"` - Attributes bool `toml:"attributes"` - Excludes []string `toml:"excludes"` - Devices []string `toml:"devices"` - UseSudo bool `toml:"use_sudo"` - Timeout internal.Duration `toml:"timeout"` - Log telegraf.Logger `toml:"-"` -} - -var sampleConfig = ` - ## Optionally specify the path to the smartctl executable - # path_smartctl = "/usr/bin/smartctl" - - ## Optionally specify the path to the nvme-cli executable - # path_nvme = "/usr/bin/nvme" - - ## Optionally specify if vendor specific attributes should be propagated for NVMe disk case - ## ["auto-on"] - automatically find and enable additional vendor specific disk info - ## ["vendor1", "vendor2", ...] - e.g. "Intel" enable additional Intel specific disk info - # enable_extensions = ["auto-on"] - - ## On most platforms used cli utilities requires root access. - ## Setting 'use_sudo' to true will make use of sudo to run smartctl or nvme-cli. - ## Sudo must be configured to allow the telegraf user to run smartctl or nvme-cli - ## without a password. - # use_sudo = false - - ## Skip checking disks in this power mode. Defaults to - ## "standby" to not wake up disks that have stopped rotating. - ## See --nocheck in the man pages for smartctl. - ## smartctl version 5.41 and 5.42 have faulty detection of - ## power mode and might require changing this value to - ## "never" depending on your disks. - # nocheck = "standby" - - ## Gather all returned S.M.A.R.T. attribute metrics and the detailed - ## information from each drive into the 'smart_attribute' measurement. - # attributes = false - - ## Optionally specify devices to exclude from reporting if disks auto-discovery is performed. - # excludes = [ "/dev/pass6" ] - - ## Optionally specify devices and device type, if unset - ## a scan (smartctl --scan and smartctl --scan -d nvme) for S.M.A.R.T. devices will be done - ## and all found will be included except for the excluded in excludes. - # devices = [ "/dev/ada0 -d atacam", "/dev/nvme0"] - - ## Timeout for the cli command to complete. - # timeout = "30s" -` - -func NewSmart() *Smart { +func newSmart() *Smart { return &Smart{ - Timeout: internal.Duration{Duration: time.Second * 30}, + Timeout: config.Duration(time.Second * 30), + ReadMethod: "concurrent", } } -func (m *Smart) SampleConfig() string { +func (*Smart) SampleConfig() string { return sampleConfig } -func (m *Smart) Description() string { - return "Read metrics from storage devices supporting S.M.A.R.T." -} - +// Init performs one time setup of the plugin and returns an error if the configuration is invalid. 
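+// In particular (summarizing the checks below), the deprecated `path` value is
+// only honored while `path_smartctl` is unset, and `read_method` must be one
+// of the knownReadMethods ("concurrent" or "sequential").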
func (m *Smart) Init() error { //if deprecated `path` (to smartctl binary) is provided in config and `path_smartctl` override does not exist if len(m.Path) > 0 && len(m.PathSmartctl) == 0 { @@ -360,6 +381,10 @@ func (m *Smart) Init() error { m.PathNVMe, _ = exec.LookPath("nvme") } + if !contains(knownReadMethods, m.ReadMethod) { + return fmt.Errorf("provided read method `%s` is not valid", m.ReadMethod) + } + err := validatePath(m.PathSmartctl) if err != nil { m.PathSmartctl = "" @@ -377,6 +402,7 @@ func (m *Smart) Init() error { return nil } +// Gather takes in an accumulator and adds the metrics that the SMART tools gather. func (m *Smart) Gather(acc telegraf.Accumulator) error { var err error var scannedNVMeDevices []string @@ -387,8 +413,6 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { isVendorExtension := len(m.EnableExtensions) != 0 if len(m.Devices) != 0 { - devicesFromConfig = excludeWrongDeviceNames(devicesFromConfig) - m.getAttributes(acc, devicesFromConfig) // if nvme-cli is present, vendor specific attributes can be gathered @@ -397,9 +421,9 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { if err != nil { return err } - NVMeDevices := distinguishNVMeDevices(devicesFromConfig, scannedNVMeDevices) + nvmeDevices := distinguishNVMeDevices(devicesFromConfig, scannedNVMeDevices) - m.getVendorNVMeAttributes(acc, NVMeDevices) + m.getVendorNVMeAttributes(acc, nvmeDevices) } return nil } @@ -418,31 +442,6 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { return nil } -// validate and exclude not correct config device names to avoid unwanted behaviours -func excludeWrongDeviceNames(devices []string) []string { - validSigns := map[string]struct{}{ - " ": {}, - "/": {}, - "\\": {}, - "-": {}, - ",": {}, - } - var wrongDevices []string - - for _, device := range devices { - for _, char := range device { - if unicode.IsLetter(char) || unicode.IsNumber(char) { - continue - } - if _, exist := validSigns[string(char)]; exist { - continue - } - wrongDevices = append(wrongDevices, device) - } - } - return difference(devices, wrongDevices) -} - func (m *Smart) scanAllDevices(ignoreExcludes bool) ([]string, []string, error) { // this will return all devices (including NVMe devices) for smartctl version >= 7.0 // for older versions this will return non NVMe devices @@ -452,28 +451,28 @@ func (m *Smart) scanAllDevices(ignoreExcludes bool) ([]string, []string, error) } // this will return only NVMe devices - NVMeDevices, err := m.scanDevices(ignoreExcludes, "--scan", "--device=nvme") + nvmeDevices, err := m.scanDevices(ignoreExcludes, "--scan", "--device=nvme") if err != nil { return nil, nil, err } // to handle all versions of smartctl this will return only non NVMe devices - nonNVMeDevices := difference(devices, NVMeDevices) - return NVMeDevices, nonNVMeDevices, nil + nonNVMeDevices := difference(devices, nvmeDevices) + return nvmeDevices, nonNVMeDevices, nil } func distinguishNVMeDevices(userDevices []string, availableNVMeDevices []string) []string { - var NVMeDevices []string + var nvmeDevices []string for _, userDevice := range userDevices { - for _, NVMeDevice := range availableNVMeDevices { + for _, availableNVMeDevice := range availableNVMeDevices { // double check. E.g. in case when nvme0 is equal nvme0n1, will check if "nvme0" part is present. 
- if strings.Contains(NVMeDevice, userDevice) || strings.Contains(userDevice, NVMeDevice) { - NVMeDevices = append(NVMeDevices, userDevice) + if strings.Contains(availableNVMeDevice, userDevice) || strings.Contains(userDevice, availableNVMeDevice) { + nvmeDevices = append(nvmeDevices, userDevice) } } } - return NVMeDevices + return nvmeDevices } // Scan for S.M.A.R.T. devices from smartctl @@ -500,12 +499,12 @@ func (m *Smart) scanDevices(ignoreExcludes bool, scanArgs ...string) ([]string, } // Wrap with sudo -var runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { +var runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { cmd := exec.Command(command, args...) if sudo { cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...) } - return internal.CombinedOutputTimeout(cmd, timeout.Duration) + return internal.CombinedOutputTimeout(cmd, time.Duration(timeout)) } func excludedDev(excludes []string, deviceLine string) bool { @@ -524,81 +523,98 @@ func excludedDev(excludes []string, deviceLine string) bool { func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) { var wg sync.WaitGroup wg.Add(len(devices)) - for _, device := range devices { - go gatherDisk(acc, m.Timeout, m.UseSudo, m.Attributes, m.PathSmartctl, m.Nocheck, device, &wg) + switch m.ReadMethod { + case "concurrent": + go m.gatherDisk(acc, device, &wg) + case "sequential": + m.gatherDisk(acc, device, &wg) + default: + wg.Done() + } } wg.Wait() } func (m *Smart) getVendorNVMeAttributes(acc telegraf.Accumulator, devices []string) { - NVMeDevices := getDeviceInfoForNVMeDisks(acc, devices, m.PathNVMe, m.Timeout, m.UseSudo) + nvmeDevices := getDeviceInfoForNVMeDisks(acc, devices, m.PathNVMe, m.Timeout, m.UseSudo) var wg sync.WaitGroup - for _, device := range NVMeDevices { + for _, device := range nvmeDevices { if contains(m.EnableExtensions, "auto-on") { + // nolint:revive // one case switch on purpose to demonstrate potential extensions switch device.vendorID { - case IntelVID: + case intelVID: wg.Add(1) - go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + switch m.ReadMethod { + case "concurrent": + go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + case "sequential": + gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + default: + wg.Done() + } } - } else if contains(m.EnableExtensions, "Intel") && device.vendorID == IntelVID { + } else if contains(m.EnableExtensions, "Intel") && device.vendorID == intelVID { wg.Add(1) - go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + switch m.ReadMethod { + case "concurrent": + go gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + case "sequential": + gatherIntelNVMeDisk(acc, m.Timeout, m.UseSudo, m.PathNVMe, device, &wg) + default: + wg.Done() + } } } wg.Wait() } -func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout internal.Duration, useSudo bool) []NVMeDevice { - var NVMeDevices []NVMeDevice +func getDeviceInfoForNVMeDisks(acc telegraf.Accumulator, devices []string, nvme string, timeout config.Duration, useSudo bool) []nvmeDevice { + var nvmeDevices []nvmeDevice for _, device := range devices { - vid, sn, mn, err := gatherNVMeDeviceInfo(nvme, device, timeout, useSudo) + newDevice, err := gatherNVMeDeviceInfo(nvme, device, timeout, useSudo) if err != nil { acc.AddError(fmt.Errorf("cannot find device info 
for %s device", device)) continue } - newDevice := NVMeDevice{ - name: device, - vendorID: vid, - model: mn, - serialNumber: sn, - } - NVMeDevices = append(NVMeDevices, newDevice) + nvmeDevices = append(nvmeDevices, newDevice) } - return NVMeDevices + return nvmeDevices } -func gatherNVMeDeviceInfo(nvme, device string, timeout internal.Duration, useSudo bool) (string, string, string, error) { +func gatherNVMeDeviceInfo(nvme, deviceName string, timeout config.Duration, useSudo bool) (device nvmeDevice, err error) { args := []string{"id-ctrl"} - args = append(args, strings.Split(device, " ")...) + args = append(args, strings.Split(deviceName, " ")...) out, err := runCmd(timeout, useSudo, nvme, args...) if err != nil { - return "", "", "", err + return device, err } outStr := string(out) - - vid, sn, mn, err := findNVMeDeviceInfo(outStr) - - return vid, sn, mn, err + device, err = findNVMeDeviceInfo(outStr) + if err != nil { + return device, err + } + device.name = deviceName + return device, nil } -func findNVMeDeviceInfo(output string) (string, string, string, error) { +func findNVMeDeviceInfo(output string) (nvmeDevice, error) { scanner := bufio.NewScanner(strings.NewReader(output)) var vid, sn, mn string for scanner.Scan() { line := scanner.Text() - if matches := nvmeIdCtrlExpressionPattern.FindStringSubmatch(line); len(matches) > 2 { + if matches := nvmeIDCtrlExpressionPattern.FindStringSubmatch(line); len(matches) > 2 { matches[1] = strings.TrimSpace(matches[1]) matches[2] = strings.TrimSpace(matches[2]) if matches[1] == "vid" { if _, err := fmt.Sscanf(matches[2], "%s", &vid); err != nil { - return "", "", "", err + return nvmeDevice{}, err } } if matches[1] == "sn" { @@ -609,10 +625,16 @@ func findNVMeDeviceInfo(output string) (string, string, string, error) { } } } - return vid, sn, mn, nil + + newDevice := nvmeDevice{ + vendorID: vid, + model: mn, + serialNumber: sn, + } + return newDevice, nil } -func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo bool, nvme string, device NVMeDevice, wg *sync.WaitGroup) { +func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout config.Duration, usesudo bool, nvme string, device nvmeDevice, wg *sync.WaitGroup) { defer wg.Done() args := []string{"intel", "smart-log-add"} @@ -637,10 +659,31 @@ func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, us tags["model"] = device.model tags["serial_no"] = device.serialNumber - if matches := intelExpressionPattern.FindStringSubmatch(line); len(matches) > 3 { - matches[1] = strings.TrimSpace(matches[1]) + // Create struct to initialize later with intel attributes. + var ( + attr = struct { + ID string + Name string + Parse func(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error + }{} + attrExists bool + ) + + if matches := intelExpressionPattern.FindStringSubmatch(line); len(matches) > 3 && len(matches[1]) > 1 { + // Check if nvme shows metrics in deprecated format or in format with ID. + // Based on that, an attribute map with metrics is chosen. + // If string has more than one character it means it has KEY there, otherwise it's empty string (""). 
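+			// For example (sample smart-log lines, as in the test data later in this patch):
+			//   nvme-cli 1.14+:    "0xab program_fail_count 100 0"  -> ID "0xab", KEY "program_fail_count"
+			//   deprecated format: "program_fail_count : 100% 0"    -> KEY only, no separate ID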
+ if separatedIDAndKey := nvmeIDSeparatePattern.FindStringSubmatch(matches[1]); len(strings.TrimSpace(separatedIDAndKey[2])) > 1 { + matches[1] = strings.TrimSpace(separatedIDAndKey[2]) + attr, attrExists = intelAttributes[matches[1]] + } else { + matches[1] = strings.TrimSpace(matches[1]) + attr, attrExists = intelAttributesDeprecatedFormat[matches[1]] + } + matches[3] = strings.TrimSpace(matches[3]) - if attr, ok := intelAttributes[matches[1]]; ok { + + if attrExists { tags["name"] = attr.Name if attr.ID != "" { tags["id"] = attr.ID @@ -659,18 +702,18 @@ func gatherIntelNVMeDisk(acc telegraf.Accumulator, timeout internal.Duration, us } } -func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) { +func (m *Smart) gatherDisk(acc telegraf.Accumulator, device string, wg *sync.WaitGroup) { defer wg.Done() // smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n - args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nocheck, "--format=brief"} + args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", m.Nocheck, "--format=brief"} args = append(args, strings.Split(device, " ")...) - out, e := runCmd(timeout, usesudo, smartctl, args...) + out, e := runCmd(m.Timeout, m.UseSudo, m.PathSmartctl, args...) outStr := string(out) // Ignore all exit statuses except if it is a command line parse error exitStatus, er := exitStatus(e) if er != nil { - acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", smartctl, strings.Join(args, " "), e, outStr)) + acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", m.PathSmartctl, strings.Join(args, " "), e, outStr)) return } @@ -697,12 +740,12 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co wwn := wwnInfo.FindStringSubmatch(line) if len(wwn) > 1 { - deviceTags["wwn"] = strings.Replace(wwn[1], " ", "", -1) + deviceTags["wwn"] = strings.ReplaceAll(wwn[1], " ", "") } capacity := userCapacityInfo.FindStringSubmatch(line) if len(capacity) > 1 { - deviceTags["capacity"] = strings.Replace(capacity[1], ",", "", -1) + deviceTags["capacity"] = strings.ReplaceAll(capacity[1], ",", "") } enabled := smartEnabledInfo.FindStringSubmatch(line) @@ -715,11 +758,24 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co deviceFields["health_ok"] = health[2] == "PASSED" || health[2] == "OK" } + // checks to see if there is a power mode to print to user + // if not look for Device is in STANDBY which happens when + // nocheck is set to standby (will exit to not spin up the disk) + // otherwise nothing is found so nothing is printed (NVMe does not show power) + if power := powermodeInfo.FindStringSubmatch(line); len(power) > 1 { + deviceTags["power"] = power[1] + } else { + if power := standbyInfo.FindStringSubmatch(line); len(power) > 1 { + deviceTags["power"] = power[1] + } + } + tags := map[string]string{} fields := make(map[string]interface{}) - if collectAttributes { - keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled"} + if m.Attributes { + //add power mode + keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled", "power"} for _, key := range keys { if value, ok := deviceTags[key]; ok { tags[key] = value @@ -729,8 +785,8 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co attr := attribute.FindStringSubmatch(line) if 
len(attr) > 1 {
-			// attribute has been found, add it only if collectAttributes is true
-			if collectAttributes {
+			// attribute has been found, add it only if m.Attributes is true
+			if m.Attributes {
 				tags["id"] = attr[1]
 				tags["name"] = attr[2]
 				tags["flags"] = attr[3]
@@ -763,8 +819,8 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co
 			}
 		} else {
 			// what was found is not a vendor attribute
-			if matches := sasNvmeAttr.FindStringSubmatch(line); len(matches) > 2 {
-				if attr, ok := sasNvmeAttributes[matches[1]]; ok {
+			if matches := sasNVMeAttr.FindStringSubmatch(line); len(matches) > 2 {
+				if attr, ok := sasNVMeAttributes[matches[1]]; ok {
 					tags["name"] = attr.Name
 					if attr.ID != "" {
 						tags["id"] = attr.ID
@@ -776,11 +832,12 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, us
 				}
 				if err := parse(fields, deviceFields, matches[2]); err != nil {
+					acc.AddError(fmt.Errorf("error parsing %s: '%s': %s", attr.Name, matches[2], err.Error()))
 					continue
 				}
 				// if the field is classified as an attribute, only add it
-				// if collectAttributes is true
+				// if m.Attributes is true
-				if collectAttributes {
+				if m.Attributes {
 					acc.AddFields("smart_attribute", fields, tags)
 				}
 			}
@@ -923,8 +980,20 @@ func parseInt(str string) int64 {
 }
 func parseCommaSeparatedInt(fields, _ map[string]interface{}, str string) error {
-	str = strings.Join(strings.Fields(str), "")
-	i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64)
+	// remove any non-utf8 values
+	// '1\xa0292' --> 1292
+	value := strings.ToValidUTF8(strings.Join(strings.Fields(str), ""), "")
+
+	// remove any non-numeric characters
+	// '16,626,888' --> 16626888
+	// '16 829 004' --> 16829004
+	numRegex, err := regexp.Compile(`[^0-9\-]+`)
+	if err != nil {
+		return fmt.Errorf("failed to compile numeric regex")
+	}
+	value = numRegex.ReplaceAllString(value, "")
+
+	i, err := strconv.ParseInt(value, 10, 64)
 	if err != nil {
 		return err
 	}
@@ -939,12 +1008,13 @@ func parsePercentageInt(fields, deviceFields map[string]interface{}, str string)
 }
 func parseDataUnits(fields, deviceFields map[string]interface{}, str string) error {
-	units := strings.Fields(str)[0]
+	// Remove everything after '['
+	units := strings.Split(str, "[")[0]
 	return parseCommaSeparatedInt(fields, deviceFields, units)
 }
 func parseCommaSeparatedIntWithAccumulator(acc telegraf.Accumulator, fields map[string]interface{}, tags map[string]string, str string) error {
-	i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64)
+	i, err := strconv.ParseInt(strings.ReplaceAll(str, ",", ""), 10, 64)
 	if err != nil {
 		return err
 	}
@@ -966,7 +1036,7 @@ func parseTemperature(fields, deviceFields map[string]interface{}, str string) e
 	return nil
 }
-func parseTemperatureSensor(fields, deviceFields map[string]interface{}, str string) error {
+func parseTemperatureSensor(fields, _ map[string]interface{}, str string) error {
 	var temp int64
 	if _, err := fmt.Sscanf(str, "%d C", &temp); err != nil {
 		return err
 	}
@@ -977,13 +1047,13 @@ func parseTemperatureSensor(fields, deviceFields map[string]interface{}, str str
 	return nil
 }
-func validatePath(path string) error {
-	pathInfo, err := os.Stat(path)
+func validatePath(filePath string) error {
+	pathInfo, err := os.Stat(filePath)
 	if os.IsNotExist(err) {
-		return fmt.Errorf("provided path does not exist: [%s]", path)
+		return fmt.Errorf("provided path does not exist: [%s]", filePath)
 	}
 	if mode := pathInfo.Mode(); !mode.IsRegular() {
-		return fmt.Errorf("provided path does not point to a 
regular file: [%s]", path) + return fmt.Errorf("provided path does not point to a regular file: [%s]", filePath) } return nil } @@ -993,7 +1063,7 @@ func init() { _ = os.Setenv("LC_NUMERIC", "en_US.UTF-8") inputs.Add("smart", func() telegraf.Input { - m := NewSmart() + m := newSmart() m.Nocheck = "standby" return m }) diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index 00d8cf0725ea7..c7d1f3ffe348c 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -7,28 +7,28 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGatherAttributes(t *testing.T) { - s := NewSmart() + s := newSmart() s.Attributes = true - assert.Equal(t, time.Second*30, s.Timeout.Duration) + assert.Equal(t, time.Second*30, time.Duration(s.Timeout)) - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { if len(args) > 0 { if args[0] == "--info" && args[7] == "/dev/ada0" { return []byte(mockInfoAttributeData), nil } else if args[0] == "--info" && args[7] == "/dev/nvme0" { - return []byte(smartctlNvmeInfoData), nil + return []byte(smartctlNVMeInfoData), nil } else if args[0] == "--scan" && len(args) == 1 { return []byte(mockScanData), nil } else if args[0] == "--scan" && len(args) >= 2 && args[1] == "--device=nvme" { - return []byte(mockScanNvmeData), nil + return []byte(mockScanNVMeData), nil } } return nil, errors.New("command not found") @@ -45,7 +45,7 @@ func TestGatherAttributes(t *testing.T) { s.PathSmartctl = "smartctl" s.PathNVMe = "" - t.Run("Only non nvme device", func(t *testing.T) { + t.Run("Only non NVMe device", func(t *testing.T) { s.Devices = []string{"/dev/ada0"} var acc testutil.Accumulator @@ -62,7 +62,7 @@ func TestGatherAttributes(t *testing.T) { acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) } }) - t.Run("Only nvme device", func(t *testing.T) { + t.Run("Only NVMe device", func(t *testing.T) { s.Devices = []string{"/dev/nvme0"} var acc testutil.Accumulator @@ -71,28 +71,94 @@ func TestGatherAttributes(t *testing.T) { require.NoError(t, err) assert.Equal(t, 32, acc.NFields(), "Wrong number of fields gathered") - testutil.RequireMetricsEqual(t, testSmartctlNvmeAttributes, acc.GetTelegrafMetrics(), + testutil.RequireMetricsEqual(t, testSmartctlNVMeAttributes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) }) }) } +func TestGatherInParallelMode(t *testing.T) { + s := newSmart() + s.Attributes = true + s.PathSmartctl = "smartctl" + s.PathNVMe = "nvmeIdentifyController" + s.EnableExtensions = append(s.EnableExtensions, "auto-on") + s.Devices = []string{"/dev/nvme0"} + + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { + if len(args) > 0 { + if args[0] == "--info" && args[7] == "/dev/ada0" { + return []byte(mockInfoAttributeData), nil + } else if args[0] == "--info" && args[7] == "/dev/nvmeIdentifyController" { + return []byte(smartctlNVMeInfoData), nil + } else if args[0] == "--scan" && len(args) == 1 { + return []byte(mockScanData), nil + } else if args[0] == "--scan" && len(args) >= 2 && args[1] == "--device=nvme" { + return []byte(mockScanNVMeData), nil + } else if args[0] == 
"intel" && args[1] == "smart-log-add" { + return []byte(nvmeIntelInfoDataMetricsFormat), nil + } else if args[0] == "id-ctrl" { + return []byte(nvmeIdentifyController), nil + } + } + return nil, errors.New("command not found") + } + + t.Run("Gather NVMe device info in goroutine", func(t *testing.T) { + acc := &testutil.Accumulator{} + s.ReadMethod = "concurrent" + + err := s.Gather(acc) + require.NoError(t, err) + + result := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, testIntelNVMeNewFormatAttributes, result, + testutil.SortMetrics(), testutil.IgnoreTime()) + }) + + t.Run("Gather NVMe device info sequentially", func(t *testing.T) { + acc := &testutil.Accumulator{} + s.ReadMethod = "sequential" + + err := s.Gather(acc) + require.NoError(t, err) + + result := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, testIntelNVMeNewFormatAttributes, result, + testutil.SortMetrics(), testutil.IgnoreTime()) + }) + + t.Run("Gather NVMe device info - not known read method", func(t *testing.T) { + acc := &testutil.Accumulator{} + s.ReadMethod = "horizontally" + + err := s.Init() + require.Error(t, err) + + err = s.Gather(acc) + require.NoError(t, err) + + result := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, []telegraf.Metric{}, result) + }) +} + func TestGatherNoAttributes(t *testing.T) { - s := NewSmart() + s := newSmart() s.Attributes = false - assert.Equal(t, time.Second*30, s.Timeout.Duration) + assert.Equal(t, time.Second*30, time.Duration(s.Timeout)) - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { if len(args) > 0 { if args[0] == "--scan" && len(args) == 1 { return []byte(mockScanData), nil } else if args[0] == "--info" && args[7] == "/dev/ada0" { return []byte(mockInfoAttributeData), nil } else if args[0] == "--info" && args[7] == "/dev/nvme0" { - return []byte(smartctlNvmeInfoData), nil + return []byte(smartctlNVMeInfoData), nil } else if args[0] == "--scan" && args[1] == "--device=nvme" { - return []byte(mockScanNvmeData), nil + return []byte(mockScanNVMeData), nil } } return nil, errors.New("command not found") @@ -111,7 +177,7 @@ func TestGatherNoAttributes(t *testing.T) { for _, test := range testsAda0Device { acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) } - for _, test := range testNvmeDevice { + for _, test := range testNVMeDevice { acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) } }) @@ -123,8 +189,18 @@ func TestExcludedDev(t *testing.T) { assert.Equal(t, false, excludedDev([]string{"/dev/pass6"}, "/dev/pass1 -d atacam"), "Shouldn't be excluded.") } +var ( + sampleSmart = Smart{ + PathSmartctl: "", + Nocheck: "", + Attributes: true, + UseSudo: true, + Timeout: config.Duration(time.Second * 30), + } +) + func TestGatherSATAInfo(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData), nil } @@ -134,13 +210,14 @@ func TestGatherSATAInfo(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 101, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") } 
func TestGatherSATAInfo65(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSATAInfoData65), nil } @@ -150,13 +227,13 @@ func TestGatherSATAInfo65(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 91, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherHgstSAS(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(hgstSASInfoData), nil } @@ -166,13 +243,13 @@ func TestGatherHgstSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherHtSAS(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(htSASInfoData), nil } @@ -182,13 +259,13 @@ func TestGatherHtSAS(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) testutil.RequireMetricsEqual(t, testHtsasAtributtes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) } func TestGatherSSD(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(ssdInfoData), nil } @@ -198,13 +275,13 @@ func TestGatherSSD(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 105, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") } func TestGatherSSDRaid(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { return []byte(ssdRaidInfoData), nil } @@ -214,14 +291,14 @@ func TestGatherSSDRaid(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + sampleSmart.gatherDisk(acc, "", wg) assert.Equal(t, 74, acc.NFields(), "Wrong number of fields gathered") assert.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") } -func TestGatherNvme(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { - return []byte(smartctlNvmeInfoData), nil +func TestGatherNVMe(t *testing.T) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(smartctlNVMeInfoData), nil } var ( @@ -230,21 +307,39 
@@ func TestGatherNvme(t *testing.T) { ) wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "nvme0", wg) + sampleSmart.gatherDisk(acc, "nvme0", wg) - testutil.RequireMetricsEqual(t, testSmartctlNvmeAttributes, acc.GetTelegrafMetrics(), + testutil.RequireMetricsEqual(t, testSmartctlNVMeAttributes, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) } -func TestGatherIntelNvme(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { - return []byte(nvmeIntelInfoData), nil +func TestGatherNVMeWindows(t *testing.T) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(smartctlNVMeInfoDataWindows), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + sampleSmart.gatherDisk(acc, "nvme0", wg) + + metrics := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, testSmartctlNVMeWindowsAttributes, metrics, + testutil.SortMetrics(), testutil.IgnoreTime()) +} + +func TestGatherIntelNVMeMetrics(t *testing.T) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(nvmeIntelInfoDataMetricsFormat), nil } var ( acc = &testutil.Accumulator{} wg = &sync.WaitGroup{} - device = NVMeDevice{ + device = nvmeDevice{ name: "nvme0", model: mockModel, serialNumber: mockSerial, @@ -252,20 +347,43 @@ func TestGatherIntelNvme(t *testing.T) { ) wg.Add(1) - gatherIntelNVMeDisk(acc, internal.Duration{Duration: time.Second * 30}, true, "", device, wg) + gatherIntelNVMeDisk(acc, config.Duration(time.Second*30), true, "", device, wg) result := acc.GetTelegrafMetrics() - testutil.RequireMetricsEqual(t, testIntelInvmeAttributes, result, + testutil.RequireMetricsEqual(t, testIntelNVMeNewFormatAttributes, result, + testutil.SortMetrics(), testutil.IgnoreTime()) +} + +func TestGatherIntelNVMeDeprecatedFormatMetrics(t *testing.T) { + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(nvmeIntelInfoDataDeprecatedMetricsFormat), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + device = nvmeDevice{ + name: "nvme0", + model: mockModel, + serialNumber: mockSerial, + } + ) + + wg.Add(1) + gatherIntelNVMeDisk(acc, config.Duration(time.Second*30), true, "", device, wg) + + result := acc.GetTelegrafMetrics() + testutil.RequireMetricsEqual(t, testIntelNVMeAttributes, result, testutil.SortMetrics(), testutil.IgnoreTime()) } func Test_findVIDFromNVMeOutput(t *testing.T) { - vid, sn, mn, err := findNVMeDeviceInfo(nvmeIdentifyController) + device, err := findNVMeDeviceInfo(nvmeIdentifyController) assert.Nil(t, err) - assert.Equal(t, "0x8086", vid) - assert.Equal(t, "CVFT5123456789ABCD", sn) - assert.Equal(t, "INTEL SSDPEDABCDEFG", mn) + assert.Equal(t, "0x8086", device.vendorID) + assert.Equal(t, "CVFT5123456789ABCD", device.serialNumber) + assert.Equal(t, "INTEL SSDPEDABCDEFG", device.model) } func Test_checkForNVMeDevices(t *testing.T) { @@ -275,13 +393,6 @@ func Test_checkForNVMeDevices(t *testing.T) { assert.Equal(t, expectedNVMeDevices, resultNVMeDevices) } -func Test_excludeWrongDeviceNames(t *testing.T) { - devices := []string{"/dev/sda", "/dev/nvme -d nvme", "/dev/sda1 -d megaraid,1", "/dev/sda ; ./suspicious_script.sh"} - validDevices := []string{"/dev/sda", "/dev/nvme -d nvme", "/dev/sda1 -d megaraid,1"} - result := 
excludeWrongDeviceNames(devices) - assert.Equal(t, validDevices, result) -} - func Test_contains(t *testing.T) { devices := []string{"/dev/sda", "/dev/nvme1"} device := "/dev/nvme1" @@ -299,8 +410,8 @@ func Test_difference(t *testing.T) { } func Test_integerOverflow(t *testing.T) { - runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { - return []byte(smartctlNvmeInfoDataWithOverflow), nil + runCmd = func(timeout config.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(smartctlNVMeInfoDataWithOverflow), nil } var ( @@ -310,7 +421,8 @@ func Test_integerOverflow(t *testing.T) { t.Run("If data raw_value is out of int64 range, there should be no metrics for that attribute", func(t *testing.T) { wg.Add(1) - gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "nvme0", wg) + + sampleSmart.gatherDisk(acc, "nvme0", wg) result := acc.GetTelegrafMetrics() testutil.RequireMetricsEqual(t, testOverflowAttributes, result, @@ -663,7 +775,7 @@ var ( mockModel = "INTEL SSDPEDABCDEFG" mockSerial = "CVFT5123456789ABCD" - testSmartctlNvmeAttributes = []telegraf.Metric{ + testSmartctlNVMeAttributes = []telegraf.Metric{ testutil.MustMetric("smart_device", map[string]string{ "device": "nvme0", @@ -1029,6 +1141,253 @@ var ( ), } + testSmartctlNVMeWindowsAttributes = []telegraf.Metric{ + testutil.MustMetric("smart_device", + map[string]string{ + "device": "nvme0", + "model": "Samsung SSD 970 EVO 1TB", + "serial_no": "xxx", + }, + map[string]interface{}{ + "exit_status": 0, + "health_ok": true, + "temp_c": 47, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "id": "9", + "name": "Power_On_Hours", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": 1290, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + "name": "Unsafe_Shutdowns", + }, + map[string]interface{}{ + "raw_value": 9, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "id": "12", + "name": "Power_Cycle_Count", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": 10779, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Media_and_Data_Integrity_Errors", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Error_Information_Log_Entries", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": 979, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Available_Spare", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": 100, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Available_Spare_Threshold", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": 10, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "id": "194", + "name": 
"Temperature_Celsius", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": 47, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Critical_Warning", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": int64(0), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Percentage_Used", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": int64(0), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Data_Units_Read", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": int64(16626888), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Data_Units_Written", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": int64(16829004), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Host_Read_Commands", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": int64(205868508), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Host_Write_Commands", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": int64(228472943), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Controller_Busy_Time", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": int64(686), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "name": "Critical_Temperature_Time", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + }, + map[string]interface{}{ + "raw_value": int64(0), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + "name": "Temperature_Sensor_1", + }, + map[string]interface{}{ + "raw_value": int64(47), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + "name": "Temperature_Sensor_2", + }, + map[string]interface{}{ + "raw_value": int64(68), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": "xxx", + "model": "Samsung SSD 970 EVO 1TB", + "name": "Warning_Temperature_Time", + }, + map[string]interface{}{ + "raw_value": int64(0), + }, + time.Now(), + ), + } + testsAda0Device = []struct { fields map[string]interface{} tags map[string]string @@ -1052,7 +1411,7 @@ var ( }, } - testNvmeDevice = []struct { + testNVMeDevice = []struct { fields map[string]interface{} tags map[string]string }{ @@ -1070,7 +1429,7 @@ var ( }, } - testIntelInvmeAttributes = []telegraf.Metric{ + testIntelNVMeAttributes = []telegraf.Metric{ testutil.MustMetric("smart_attribute", map[string]string{ "device": "nvme0", @@ -1264,11 +1623,146 @@ var ( time.Now(), ), } + + testIntelNVMeNewFormatAttributes = 
[]telegraf.Metric{ + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Program_Fail_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Erase_Fail_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Wear_Leveling_Count", + }, + map[string]interface{}{ + "raw_value": int64(700090417315), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "End_To_End_Error_Detection_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Crc_Error_Count", + }, + map[string]interface{}{ + "raw_value": 13, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Media_Wear_Percentage", + }, + map[string]interface{}{ + "raw_value": 552, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Host_Reads", + }, + map[string]interface{}{ + "raw_value": 73, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Timed_Workload_Timer", + }, + map[string]interface{}{ + "raw_value": int64(2343038), + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Thermal_Throttle_Status", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Retry_Buffer_Overflow_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": "nvme0", + "serial_no": mockSerial, + "model": mockModel, + "name": "Pll_Lock_Loss_Count", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + } // smartctl --scan mockScanData = `/dev/ada0 -d atacam # /dev/ada0, ATA device` // smartctl --scan -d nvme - mockScanNvmeData = `/dev/nvme0 -d nvme # /dev/nvme0, NVMe device` + mockScanNVMeData = `/dev/nvme0 -d nvme # /dev/nvme0, NVMe device` // smartctl --info --health --attributes --tolerance=verypermissive -n standby --format=brief [DEVICE] mockInfoAttributeData = `smartctl 6.5 2016-05-07 r4318 [Darwin 16.4.0 x86_64] (local build) @@ -1677,7 +2171,7 @@ Selective self-test flags (0x0): After scanning selected spans, do NOT read-scan remainder of disk. If Selective self-test is pending on power-up, resume after 0 minute delay. 
` - smartctlNvmeInfoData = `smartctl 6.5 2016-05-07 r4318 [x86_64-linux-4.1.27-gvt-yocto-standard] (local build) + smartctlNVMeInfoData = `smartctl 6.5 2016-05-07 r4318 [x86_64-linux-4.1.27-gvt-yocto-standard] (local build) Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org === START OF INFORMATION SECTION === @@ -1727,14 +2221,82 @@ Temperature Sensor 7: 44 C Temperature Sensor 8: 43 C ` - smartctlNvmeInfoDataWithOverflow = ` + smartctlNVMeInfoDataWindows = `smartctl 7.3 2022-02-28 r5338 [x86_64-w64-mingw32-w10-20H2] (sf-7.3-1) +Copyright (C) 2002-22, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Model Number: Samsung SSD 970 EVO 1TB +Serial Number: xxx +Firmware Version: 2B2QEXE7 +PCI Vendor/Subsystem ID: 0x144d +IEEE OUI Identifier: 0x002538 +Total NVM Capacity: 1 000 204 886 016 [1,00 TB] +Unallocated NVM Capacity: 0 +Controller ID: 4 +NVMe Version: 1.3 +Number of Namespaces: 1 +Namespace 1 Size/Capacity: 1 000 204 886 016 [1,00 TB] +Namespace 1 Utilization: 732 789 374 976 [732 GB] +Namespace 1 Formatted LBA Size: 512 +Namespace 1 IEEE EUI-64: 002538 590141cfa4 +Local Time is: Wed Mar 30 13:30:12 2022 +Firmware Updates (0x16): 3 Slots, no Reset required +Optional Admin Commands (0x0017): Security Format Frmw_DL Self_Test +Optional NVM Commands (0x005f): Comp Wr_Unc DS_Mngmt Wr_Zero Sav/Sel_Feat Timestmp +Log Page Attributes (0x03): S/H_per_NS Cmd_Eff_Lg +Maximum Data Transfer Size: 512 Pages +Warning Comp. Temp. Threshold: 85 Celsius +Critical Comp. Temp. Threshold: 85 Celsius + +Supported Power States +St Op Max Active Idle RL RT WL WT Ent_Lat Ex_Lat + 0 + 6.20W - - 0 0 0 0 0 0 + 1 + 4.30W - - 1 1 1 1 0 0 + 2 + 2.10W - - 2 2 2 2 0 0 + 3 - 0.0400W - - 3 3 3 3 210 1200 + 4 - 0.0050W - - 4 4 4 4 2000 8000 + +Supported LBA Sizes (NSID 0x1) +Id Fmt Data Metadt Rel_Perf + 0 + 512 0 0 + +=== START OF SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART/Health Information (NVMe Log 0x02) +Critical Warning: 0x00 +Temperature: 47 Celsius +Available Spare: 100% +Available Spare Threshold: 10% +Percentage Used: 0% +Data Units Read: 16,626,888 [8,51 TB] +Data Units Written: 16 829 004 [8,61 TB] +Host Read Commands: 205 868 508 +Host Write Commands: 228 472 943 +Controller Busy Time: 686 +Power Cycles: 10�779 +Power On Hours: 1�290 +Unsafe Shutdowns: 9 +Media and Data Integrity Errors: 0 +Error Information Log Entries: 979 +Warning Comp. Temperature Time: 0 +Critical Comp. 
Temperature Time: 0 +Temperature Sensor 1: 47 Celsius +Temperature Sensor 2: 68 Celsius + +Error Information (NVMe Log 0x01, 16 of 64 entries) +Num ErrCount SQId CmdId Status PELoc LBA NSID VS + 0 979 0 0x002a 0x4212 0x028 0 - - +` + + smartctlNVMeInfoDataWithOverflow = ` Temperature Sensor 1: 9223372036854775808 C Temperature Sensor 2: -9223372036854775809 C Temperature Sensor 3: 9223372036854775807 C Temperature Sensor 4: -9223372036854775808 C ` - nvmeIntelInfoData = `Additional Smart Log for NVME device:nvme0 namespace-id:ffffffff + nvmeIntelInfoDataDeprecatedMetricsFormat = `Additional Smart Log for NVME device:nvme0 namespace-id:ffffffff key normalized raw program_fail_count : 100% 0 erase_fail_count : 100% 0 @@ -1749,6 +2311,20 @@ retry_buffer_overflow_count : 100% 0 pll_lock_loss_count : 100% 0 nand_bytes_written : 0% sectors: 0 host_bytes_written : 0% sectors: 0 +` + nvmeIntelInfoDataMetricsFormat = `Additional Smart Log for NVME device:nvme0n1 namespace-id:ffffffff +ID KEY Normalized Raw +0xab program_fail_count 100 0 +0xac erase_fail_count 100 0 +0xad wear_leveling_count 100 700090417315 +0xb8 e2e_error_detect_count 100 0 +0xc7 crc_error_count 100 13 +0xe2 media_wear_percentage 100 552 +0xe3 host_reads 100 73 +0xe4 timed_work_load 100 2343038 +0xea thermal_throttle_status 100 0 +0xf0 retry_buff_overflow_count 100 0 +0xf3 pll_lock_loss_counter 100 0 ` nvmeIdentifyController = `NVME Identify Controller: diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index a0c9155db5432..b85d6e26dbad3 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -4,26 +4,24 @@ The `snmp` input plugin uses polling to gather metrics from SNMP agents. Support for gathering individual OIDs as well as complete SNMP tables is included. -### Prerequisites +## Note about Paths -This plugin uses the `snmptable` and `snmptranslate` programs from the -[net-snmp][] project. These tools will need to be installed into the `PATH` in -order to be located. Other utilities from the net-snmp project may be useful -for troubleshooting, but are not directly used by the plugin. +Path is a global variable, separate snmp instances will append the specified +path onto the global path variable -These programs will load available MIBs on the system. Typically the default -directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a -different location you may need to make the paths known to net-snmp. The -location of these files can be configured in the `snmp.conf` or via the -`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more -information. +## Configuration -### Configuration -```toml +```toml @sample.conf +# Retrieves SNMP values from remote agents [[inputs.snmp]] ## Agent addresses to retrieve values from. + ## format: agents = [":"] + ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. + ## default is udp + ## port: optional ## example: agents = ["udp://127.0.0.1:161"] ## agents = ["tcp://127.0.0.1:161"] + ## agents = ["udp4://v4only-snmp-agent"] agents = ["udp://127.0.0.1:161"] ## Timeout for each request. @@ -32,6 +30,11 @@ information. ## SNMP version; can be 1, 2, or 3. # version = 2 + ## Path to mib files + ## Used by the gosmi translator. + ## To add paths when translating with netsnmp, use the MIBDIRS environment variable + # path = ["/usr/share/snmp/mibs"] + ## SNMP community string. # community = "public" @@ -48,7 +51,7 @@ information. ## ## Security Name. 
# sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA", or "". + ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512" or "". # auth_protocol = "MD5" ## Authentication password. # auth_password = "pass" @@ -56,7 +59,9 @@ information. # sec_level = "authNoPriv" ## Context Name. # context_name = "" - ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". + ### Protocols "AES192", "AES192", "AES256", and "AES256C" require the underlying net-snmp tools + ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) # priv_protocol = "" ## Privacy password used for encrypted messages. # priv_password = "" @@ -84,13 +89,13 @@ information. is_tag = true ``` -#### Configure SNMP Requests +### Configure SNMP Requests This plugin provides two methods for configuring the SNMP requests: `fields` and `tables`. Use the `field` option to gather single ad-hoc variables. To collect SNMP tables, use the `table` option. -##### Field +#### Field Use a `field` to collect a variable by OID. Requests specified with this option operate similar to the `snmpget` utility. @@ -113,19 +118,25 @@ option operate similar to the `snmpget` utility. # is_tag = false ## Apply one of the following conversions to the variable value: - ## float(X) Convert the input value into a float and divides by the - ## Xth power of 10. Effectively just moves the decimal left - ## X places. For example a value of `123` with `float(2)` - ## will result in `1.23`. - ## float: Convert the value into a float with no adjustment. Same - ## as `float(0)`. - ## int: Convert the value into an integer. - ## hwaddr: Convert the value to a MAC address. - ## ipaddr: Convert the value to an IP address. + ## float(X): Convert the input value into a float and divides by the + ## Xth power of 10. Effectively just moves the decimal left + ## X places. For example a value of `123` with `float(2)` + ## will result in `1.23`. + ## float: Convert the value into a float with no adjustment. Same + ## as `float(0)`. + ## int: Convert the value into an integer. + ## hwaddr: Convert the value to a MAC address. + ## ipaddr: Convert the value to an IP address. + ## hextoint:X:Y Convert a hex string value to integer. Where X is the Endian + ## and Y the bit size. For example: hextoint:LittleEndian:uint64 + ## or hextoint:BigEndian:uint32. Valid options for the Endian are: + ## BigEndian and LittleEndian. For the bit size: uint16, uint32 + ## and uint64. + ## # conversion = "" ``` -##### Table +#### Table Use a `table` to configure the collection of a SNMP table. SNMP requests formed with this option operate similarly way to the `snmptable` command. @@ -184,43 +195,168 @@ One [metric][] is created for each row of the SNMP table. ## path segments). Truncates the index after this point to remove non-fixed ## value or length index suffixes. # oid_index_length = 0 + + ## Specifies if the value of given field should be snmptranslated + ## by default no field values are translated + # translate = true + + ## Secondary index table allows to merge data from two tables with + ## different index that this filed will be used to join them. There can + ## be only one secondary index table. + # secondary_index_table = false + + ## This field is using secondary index, and will be later merged with + ## primary index using SecondaryIndexTable. 
+  ## SecondaryIndexUse are exclusive.
+  # secondary_index_use = false
+
+  ## Controls if entries from the secondary table should be added when the
+  ## joining index is missing. If set to true, the join is outer, and the
+  ## index is prepended with "Secondary." for missing values to avoid
+  ## overlapping indexes from both tables. Can be set per field or globally
+  ## with SecondaryIndexTable; a global true overrides a per-field false.
+  # secondary_outer_join = false
 ```
-### Troubleshooting
+#### Two Table Join
-Check that a numeric field can be translated to a textual field:
+The snmp plugin can join two SNMP tables that have different indexes. For this
+to work, one table must have a translation field whose value is the index of
+the second table. Examples of such fields are:
+
+* Cisco portTable with translation field `CISCO-STACK-MIB::portIfIndex`,
+whose value is the ifIndex from ifTable
+* Adva entityFacilityTable with translation field `ADVA-FSPR7-MIB::entityFacilityOneIndex`,
+whose value is the ifIndex from ifTable
+* Cisco cpeExtPsePortTable with translation field `CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex`,
+whose value is the index from entPhysicalTable
+
+Such a field can be used to translate the index to the secondary table by
+setting `secondary_index_table = true`; all fields from the secondary table
+(with the index pointed to by the translation field) should then set
+`secondary_index_use = true`. Telegraf cannot duplicate entries during the
+join, so the translation must be 1-to-1 (not 1-to-many). To add fields from
+the secondary table whose index is not present in the translation table (an
+outer join), there is a second option for the translation index,
+`secondary_outer_join = true`.
+
+##### Example configuration for table joins
+
+CISCO-POWER-ETHERNET-EXT-MIB table before join:
+
+```toml
+[[inputs.snmp.table]]
+name = "ciscoPower"
+index_as_tag = true
+
+[[inputs.snmp.table.field]]
+name = "PortPwrConsumption"
+oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortPwrConsumption"
+
+[[inputs.snmp.table.field]]
+name = "EntPhyIndex"
+oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex"
+```
+
+Partial result (removed agent_host and host columns from all following outputs
+in this section):
+
+```text
+> ciscoPower,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621460628000000000
+> ciscoPower,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621460628000000000
+> ciscoPower,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621460628000000000
+```
+
+Note that the EntPhyIndex column carries the index from the ENTITY-MIB table;
+its config:
+
+```toml
+[[inputs.snmp.table]]
+name = "entityTable"
+index_as_tag = true
+
+[[inputs.snmp.table.field]]
+name = "EntPhysicalName"
+oid = "ENTITY-MIB::entPhysicalName"
 ```
+
+Partial result:
+
+```text
+> entityTable,index=1006 EntPhysicalName="GigabitEthernet1/6" 1621460809000000000
+> entityTable,index=1002 EntPhysicalName="GigabitEthernet1/2" 1621460809000000000
+> entityTable,index=1005 EntPhysicalName="GigabitEthernet1/5" 1621460809000000000
+```
+
+Now, let's join these results into one table. EntPhyIndex matches the index
+from the second table, so let's convert EntPhysicalName into a tag; the second
+table will then only contribute tags to the result. 
Configuration: + +```toml +[[inputs.snmp.table]] +name = "ciscoPowerEntity" +index_as_tag = true + +[[inputs.snmp.table.field]] +name = "PortPwrConsumption" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortPwrConsumption" + +[[inputs.snmp.table.field]] +name = "EntPhyIndex" +oid = "CISCO-POWER-ETHERNET-EXT-MIB::cpeExtPsePortEntPhyIndex" +secondary_index_table = true # enables joining + +[[inputs.snmp.table.field]] +name = "EntPhysicalName" +oid = "ENTITY-MIB::entPhysicalName" +secondary_index_use = true # this tag is indexed from secondary table +is_tag = true +``` + +Result: + +```text +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/2,index=1.2 EntPhyIndex=1002i,PortPwrConsumption=6643i 1621461148000000000 +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/6,index=1.6 EntPhyIndex=1006i,PortPwrConsumption=10287i 1621461148000000000 +> ciscoPowerEntity,EntPhysicalName=GigabitEthernet1/5,index=1.5 EntPhyIndex=1005i,PortPwrConsumption=8358i 1621461148000000000 +``` + +## Troubleshooting + +Check that a numeric field can be translated to a textual field: + +```sh $ snmptranslate .1.3.6.1.2.1.1.3.0 DISMAN-EVENT-MIB::sysUpTimeInstance ``` Request a top-level field: -``` -$ snmpget -v2c -c public 127.0.0.1 sysUpTime.0 + +```sh +snmpget -v2c -c public 127.0.0.1 sysUpTime.0 ``` Request a table: -``` -$ snmptable -v2c -c public 127.0.0.1 ifTable + +```sh +snmptable -v2c -c public 127.0.0.1 ifTable ``` To collect a packet capture, run this command in the background while running Telegraf or one of the above commands. Adjust the interface, host and port as needed: -``` -$ sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 + +```sh +sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 ``` -### Example Output +## Example Output -``` +```shell snmp,agent_host=127.0.0.1,source=loaner uptime=11331974i 1575509815000000000 interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000 interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000 interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000 ``` -[net-snmp]: http://www.net-snmp.org/ -[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK [metric filtering]: /docs/CONFIGURATION.md#metric-filtering [metric]: /docs/METRICS.md diff --git a/plugins/inputs/snmp/gosmi.go b/plugins/inputs/snmp/gosmi.go new file mode 100644 index 
0000000000000..f2de844ce6fc0 --- /dev/null +++ b/plugins/inputs/snmp/gosmi.go @@ -0,0 +1,123 @@ +package snmp + +import ( + "fmt" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/snmp" + "github.com/sleepinggenius2/gosmi" +) + +type gosmiTranslator struct { +} + +func NewGosmiTranslator(paths []string, log telegraf.Logger) (*gosmiTranslator, error) { + err := snmp.LoadMibsFromPath(paths, log, &snmp.GosmiMibLoader{}) + if err == nil { + return &gosmiTranslator{}, nil + } + return nil, err +} + +type gosmiSnmpTranslateCache struct { + mibName string + oidNum string + oidText string + conversion string + node gosmi.SmiNode + err error +} + +var gosmiSnmpTranslateCachesLock sync.Mutex +var gosmiSnmpTranslateCaches map[string]gosmiSnmpTranslateCache + +//nolint:revive +func (g *gosmiTranslator) SnmpTranslate(oid string) (string, string, string, string, error) { + a, b, c, d, _, e := g.SnmpTranslateFull(oid) + return a, b, c, d, e +} + +//nolint:revive +func (g *gosmiTranslator) SnmpTranslateFull(oid string) ( + mibName string, oidNum string, oidText string, + conversion string, + node gosmi.SmiNode, + err error) { + gosmiSnmpTranslateCachesLock.Lock() + if gosmiSnmpTranslateCaches == nil { + gosmiSnmpTranslateCaches = map[string]gosmiSnmpTranslateCache{} + } + + var stc gosmiSnmpTranslateCache + var ok bool + if stc, ok = gosmiSnmpTranslateCaches[oid]; !ok { + // This will result in only one call to snmptranslate running at a time. + // We could speed it up by putting a lock in snmpTranslateCache and then + // returning it immediately, and multiple callers would then release the + // snmpTranslateCachesLock and instead wait on the individual + // snmpTranslation.Lock to release. But I don't know that the extra complexity + // is worth it. Especially when it would slam the system pretty hard if lots + // of lookups are being performed. + + stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.node, stc.err = snmp.SnmpTranslateCall(oid) + gosmiSnmpTranslateCaches[oid] = stc + } + + gosmiSnmpTranslateCachesLock.Unlock() + + return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.node, stc.err +} + +type gosmiSnmpTableCache struct { + mibName string + oidNum string + oidText string + fields []Field + err error +} + +var gosmiSnmpTableCaches map[string]gosmiSnmpTableCache +var gosmiSnmpTableCachesLock sync.Mutex + +// snmpTable resolves the given OID as a table, providing information about the +// table and fields within. 
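+// Results are memoized in gosmiSnmpTableCaches (guarded by
+// gosmiSnmpTableCachesLock), so repeated lookups of the same OID do not
+// re-resolve it.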
+//nolint:revive //Too many return variable but necessary +func (g *gosmiTranslator) SnmpTable(oid string) ( + mibName string, oidNum string, oidText string, + fields []Field, + err error) { + gosmiSnmpTableCachesLock.Lock() + if gosmiSnmpTableCaches == nil { + gosmiSnmpTableCaches = map[string]gosmiSnmpTableCache{} + } + + var stc gosmiSnmpTableCache + var ok bool + if stc, ok = gosmiSnmpTableCaches[oid]; !ok { + stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err = g.SnmpTableCall(oid) + gosmiSnmpTableCaches[oid] = stc + } + + gosmiSnmpTableCachesLock.Unlock() + return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err +} + +//nolint:revive //Too many return variable but necessary +func (g *gosmiTranslator) SnmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { + mibName, oidNum, oidText, _, node, err := g.SnmpTranslateFull(oid) + if err != nil { + return "", "", "", nil, fmt.Errorf("translating: %w", err) + } + + mibPrefix := mibName + "::" + + col, tagOids, err := snmp.GetIndex(oidNum, mibPrefix, node) + + for _, c := range col { + _, isTag := tagOids[mibPrefix+c] + fields = append(fields, Field{Name: c, Oid: mibPrefix + c, IsTag: isTag}) + } + + return mibName, oidNum, oidText, fields, err +} diff --git a/plugins/inputs/snmp/gosmi_test.go b/plugins/inputs/snmp/gosmi_test.go new file mode 100644 index 0000000000000..bca48ffa17094 --- /dev/null +++ b/plugins/inputs/snmp/gosmi_test.go @@ -0,0 +1,943 @@ +package snmp + +import ( + "fmt" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/internal/snmp" + "github.com/influxdata/telegraf/testutil" +) + +func getGosmiTr(t *testing.T) Translator { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) + require.NoError(t, err) + return tr +} + +func TestGosmiTranslator(t *testing.T) { + var tr Translator + var err error + + tr, err = NewGosmiTranslator([]string{"testdata"}, testutil.Logger{}) + require.NoError(t, err) + require.NotNil(t, tr) +} + +//gosmi uses the same connection struct as netsnmp but has a few +//different test cases, so it has its own copy +var gosmiTsc = &testSNMPConnection{ + host: "tsc", + values: map[string]interface{}{ + ".1.3.6.1.2.1.3.1.1.1.0": "foo", + ".1.3.6.1.2.1.3.1.1.1.1": []byte("bar"), + ".1.3.6.1.2.1.3.1.1.1.2": []byte(""), + ".1.3.6.1.2.1.3.1.1.102": "bad", + ".1.3.6.1.2.1.3.1.1.2.0": 1, + ".1.3.6.1.2.1.3.1.1.2.1": 2, + ".1.3.6.1.2.1.3.1.1.2.2": 0, + ".1.3.6.1.2.1.3.1.1.3.0": "1.3.6.1.2.1.3.1.1.3", + ".1.3.6.1.2.1.3.1.1.5.0": 123456, + ".1.0.0.0.1.1.0": "foo", + ".1.0.0.0.1.1.1": []byte("bar"), + ".1.0.0.0.1.1.2": []byte(""), + ".1.0.0.0.1.102": "bad", + ".1.0.0.0.1.2.0": 1, + ".1.0.0.0.1.2.1": 2, + ".1.0.0.0.1.2.2": 0, + ".1.0.0.0.1.3.0": "0.123", + ".1.0.0.0.1.3.1": "0.456", + ".1.0.0.0.1.3.2": "0.000", + ".1.0.0.0.1.3.3": "9.999", + ".1.0.0.0.1.5.0": 123456, + ".1.0.0.1.1": "baz", + ".1.0.0.1.2": 234, + ".1.0.0.1.3": []byte("byte slice"), + ".1.0.0.2.1.5.0.9.9": 11, + ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, + }, +} + +func TestFieldInitGosmi(t *testing.T) { + testDataPath, err := 
filepath.Abs("./testdata") + require.NoError(t, err) + + tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) + require.NoError(t, err) + + translations := []struct { + inputOid string + inputName string + inputConversion string + expectedOid string + expectedName string + expectedConversion string + }{ + {".1.2.3", "foo", "", ".1.2.3", "foo", ""}, + {".iso.2.3", "foo", "", ".1.2.3", "foo", ""}, + {".1.0.0.0.1.1", "", "", ".1.0.0.0.1.1", "server", ""}, + {"IF-MIB::ifPhysAddress.1", "", "", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "hwaddr"}, + {"IF-MIB::ifPhysAddress.1", "", "none", ".1.3.6.1.2.1.2.2.1.6.1", "ifPhysAddress.1", "none"}, + {"BRIDGE-MIB::dot1dTpFdbAddress.1", "", "", ".1.3.6.1.2.1.17.4.3.1.1.1", "dot1dTpFdbAddress.1", "hwaddr"}, + {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, + {".999", "", "", ".999", ".999", ""}, + } + + for _, txl := range translations { + f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} + err := f.init(tr) + require.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) + + assert.Equal(t, txl.expectedOid, f.Oid, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + assert.Equal(t, txl.expectedName, f.Name, "inputOid='%s' inputName='%s' inputConversion='%s'", txl.inputOid, txl.inputName, txl.inputConversion) + } +} + +func TestTableInitGosmi(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + s := &Snmp{ + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + Translator: "gosmi", + }, + Tables: []Table{ + {Oid: ".1.3.6.1.2.1.3.1", + Fields: []Field{ + {Oid: ".999", Name: "foo"}, + {Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true}, + {Oid: "RFC1213-MIB::atPhysAddress", Name: "atPhysAddress"}, + }}, + }, + } + err = s.Init() + require.NoError(t, err) + + assert.Equal(t, "atTable", s.Tables[0].Name) + + assert.Len(t, s.Tables[0].Fields, 5) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".999", Name: "foo", initialized: true}) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", initialized: true, IsTag: true}) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", initialized: true, Conversion: "hwaddr"}) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", initialized: true, IsTag: true}) +} + +func TestSnmpInitGosmi(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + s := &Snmp{ + Tables: []Table{ + {Oid: "RFC1213-MIB::atTable"}, + }, + Fields: []Field{ + {Oid: "RFC1213-MIB::atPhysAddress"}, + }, + ClientConfig: snmp.ClientConfig{ + Path: []string{testDataPath}, + Translator: "gosmi", + }, + } + + err = s.Init() + require.NoError(t, err) + + assert.Len(t, s.Tables[0].Fields, 3) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.1", Name: "atIfIndex", IsTag: true, initialized: true}) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.2", Name: "atPhysAddress", initialized: true, Conversion: "hwaddr"}) + assert.Contains(t, s.Tables[0].Fields, Field{Oid: ".1.3.6.1.2.1.3.1.1.3", Name: "atNetAddress", IsTag: true, initialized: true}) + + assert.Equal(t, Field{ + Oid: ".1.3.6.1.2.1.3.1.1.2", + Name: "atPhysAddress", + Conversion: "hwaddr", + initialized: true, + }, s.Fields[0]) 
+} + +func TestSnmpInit_noTranslateGosmi(t *testing.T) { + s := &Snmp{ + Fields: []Field{ + {Oid: ".9.1.1.1.1", Name: "one", IsTag: true}, + {Oid: ".9.1.1.1.2", Name: "two"}, + {Oid: ".9.1.1.1.3"}, + }, + Tables: []Table{ + {Name: "testing", + Fields: []Field{ + {Oid: ".9.1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".9.1.1.1.5", Name: "five"}, + {Oid: ".9.1.1.1.6"}, + }}, + }, + ClientConfig: snmp.ClientConfig{ + Path: []string{}, + Translator: "gosmi", + }, + } + + err := s.Init() + require.NoError(t, err) + + assert.Equal(t, ".9.1.1.1.1", s.Fields[0].Oid) + assert.Equal(t, "one", s.Fields[0].Name) + assert.Equal(t, true, s.Fields[0].IsTag) + + assert.Equal(t, ".9.1.1.1.2", s.Fields[1].Oid) + assert.Equal(t, "two", s.Fields[1].Name) + assert.Equal(t, false, s.Fields[1].IsTag) + + assert.Equal(t, ".9.1.1.1.3", s.Fields[2].Oid) + assert.Equal(t, ".9.1.1.1.3", s.Fields[2].Name) + assert.Equal(t, false, s.Fields[2].IsTag) + + assert.Equal(t, ".9.1.1.1.4", s.Tables[0].Fields[0].Oid) + assert.Equal(t, "four", s.Tables[0].Fields[0].Name) + assert.Equal(t, true, s.Tables[0].Fields[0].IsTag) + + assert.Equal(t, ".9.1.1.1.5", s.Tables[0].Fields[1].Oid) + assert.Equal(t, "five", s.Tables[0].Fields[1].Name) + assert.Equal(t, false, s.Tables[0].Fields[1].IsTag) + + assert.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Oid) + assert.Equal(t, ".9.1.1.1.6", s.Tables[0].Fields[2].Name) + assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) +} + +//TestTableBuild_walk in snmp_test.go is split into two tests here, +//noTranslate and Translate. +// +//This is only running with gosmi translator but should be valid with +//netsnmp too. +func TestTableBuild_walk_noTranslate(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.0.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.0.1.3", + Conversion: "float", + }, + { + Name: "myfield4", + Oid: ".1.0.0.2.1.5", + OidIndexSuffix: ".9.9", + }, + { + Name: "myfield5", + Oid: ".1.0.0.2.1.5", + OidIndexLength: 1, + }, + }, + } + + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) + require.NoError(t, err) + + tb, err := tbl.Build(gosmiTsc, true, tr) + require.NoError(t, err) + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "foo", + "index": "0", + }, + Fields: map[string]interface{}{ + "myfield2": 1, + "myfield3": float64(0.123), + "myfield4": 11, + "myfield5": 11, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "bar", + "index": "1", + }, + Fields: map[string]interface{}{ + "myfield2": 2, + "myfield3": float64(0.456), + "myfield4": 22, + "myfield5": 22, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "index": "2", + }, + Fields: map[string]interface{}{ + "myfield2": 0, + "myfield3": float64(0.0), + }, + } + rtr4 := RTableRow{ + Tags: map[string]string{ + "index": "3", + }, + Fields: map[string]interface{}{ + "myfield3": float64(9.999), + }, + } + assert.Len(t, tb.Rows, 4) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) + assert.Contains(t, tb.Rows, rtr4) +} + +func TestTableBuild_walk_Translate(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) + require.NoError(t, err) + 
+ tbl := Table{ + Name: "atTable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "ifIndex", + Oid: "1.3.6.1.2.1.3.1.1.1", + IsTag: true, + }, + { + Name: "atPhysAddress", + Oid: "1.3.6.1.2.1.3.1.1.2", + Translate: false, + }, + { + Name: "atNetAddress", + Oid: "1.3.6.1.2.1.3.1.1.3", + Translate: true, + }, + }, + } + + err = tbl.Init(tr) + require.NoError(t, err) + tb, err := tbl.Build(gosmiTsc, true, tr) + require.NoError(t, err) + + require.Equal(t, tb.Name, "atTable") + + rtr1 := RTableRow{ + Tags: map[string]string{ + "ifIndex": "foo", + "index": "0", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 1, + "atNetAddress": "atNetAddress", + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "ifIndex": "bar", + "index": "1", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 2, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "index": "2", + }, + Fields: map[string]interface{}{ + "atPhysAddress": 0, + }, + } + + require.Len(t, tb.Rows, 3) + require.Contains(t, tb.Rows, rtr1) + require.Contains(t, tb.Rows, rtr2) + require.Contains(t, tb.Rows, rtr3) +} + +func TestTableBuild_noWalkGosmi(t *testing.T) { + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) + require.NoError(t, err) + + tbl := Table{ + Name: "mytable", + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.1.2", + IsTag: true, + }, + { + Name: "empty", + Oid: ".1.0.0.0.1.1.2", + }, + { + Name: "noexist", + Oid: ".1.2.3.4.5", + }, + }, + } + + tb, err := tbl.Build(gosmiTsc, false, tr) + require.NoError(t, err) + + rtr := RTableRow{ + Tags: map[string]string{"myfield1": "baz", "myfield3": "234"}, + Fields: map[string]interface{}{"myfield2": 234}, + } + assert.Len(t, tb.Rows, 1) + assert.Contains(t, tb.Rows, rtr) +} + +func TestGatherGosmi(t *testing.T) { + s := &Snmp{ + Agents: []string{"TestGather"}, + Name: "mytable", + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.1.2", + }, + { + Name: "myfield3", + Oid: "1.0.0.1.1", + }, + }, + Tables: []Table{ + { + Name: "myOtherTable", + InheritTags: []string{"myfield1"}, + Fields: []Field{ + { + Name: "myOtherField", + Oid: ".1.0.0.0.1.5", + }, + }, + }, + }, + + connectionCache: []snmpConnection{ + gosmiTsc, + }, + + ClientConfig: snmp.ClientConfig{ + Path: []string{"testdata"}, + Translator: "gosmi", + }, + } + acc := &testutil.Accumulator{} + + tstart := time.Now() + require.NoError(t, s.Gather(acc)) + tstop := time.Now() + + require.Len(t, acc.Metrics, 2) + + m := acc.Metrics[0] + assert.Equal(t, "mytable", m.Measurement) + assert.Equal(t, "tsc", m.Tags[s.AgentHostTag]) + assert.Equal(t, "baz", m.Tags["myfield1"]) + assert.Len(t, m.Fields, 2) + assert.Equal(t, 234, m.Fields["myfield2"]) + assert.Equal(t, "baz", m.Fields["myfield3"]) + assert.False(t, tstart.After(m.Time)) + assert.False(t, tstop.Before(m.Time)) + + m2 := acc.Metrics[1] + assert.Equal(t, "myOtherTable", m2.Measurement) + assert.Equal(t, "tsc", m2.Tags[s.AgentHostTag]) + assert.Equal(t, "baz", m2.Tags["myfield1"]) + assert.Len(t, m2.Fields, 1) + assert.Equal(t, 123456, m2.Fields["myOtherField"]) +} + +func TestGather_hostGosmi(t *testing.T) { + s := &Snmp{ + Agents: []string{"TestGather"}, + Name: "mytable", + Fields: []Field{ + { + Name: "host", + Oid: ".1.0.0.1.1", + IsTag: true, + }, + { + 
Name: "myfield2", + Oid: ".1.0.0.1.2", + }, + }, + + connectionCache: []snmpConnection{ + gosmiTsc, + }, + } + + acc := &testutil.Accumulator{} + + require.NoError(t, s.Gather(acc)) + + require.Len(t, acc.Metrics, 1) + m := acc.Metrics[0] + assert.Equal(t, "baz", m.Tags["host"]) +} + +func TestFieldConvertGosmi(t *testing.T) { + testTable := []struct { + input interface{} + conv string + expected interface{} + }{ + {[]byte("foo"), "", "foo"}, + {"0.123", "float", float64(0.123)}, + {[]byte("0.123"), "float", float64(0.123)}, + {float32(0.123), "float", float64(float32(0.123))}, + {float64(0.123), "float", float64(0.123)}, + {float64(0.123123123123), "float", float64(0.123123123123)}, + {123, "float", float64(123)}, + {123, "float(0)", float64(123)}, + {123, "float(4)", float64(0.0123)}, + {int8(123), "float(3)", float64(0.123)}, + {int16(123), "float(3)", float64(0.123)}, + {int32(123), "float(3)", float64(0.123)}, + {int64(123), "float(3)", float64(0.123)}, + {uint(123), "float(3)", float64(0.123)}, + {uint8(123), "float(3)", float64(0.123)}, + {uint16(123), "float(3)", float64(0.123)}, + {uint32(123), "float(3)", float64(0.123)}, + {uint64(123), "float(3)", float64(0.123)}, + {"123", "int", int64(123)}, + {[]byte("123"), "int", int64(123)}, + {"123123123123", "int", int64(123123123123)}, + {[]byte("123123123123"), "int", int64(123123123123)}, + {float32(12.3), "int", int64(12)}, + {float64(12.3), "int", int64(12)}, + {123, "int", int64(123)}, + {int8(123), "int", int64(123)}, + {int16(123), "int", int64(123)}, + {int32(123), "int", int64(123)}, + {int64(123), "int", int64(123)}, + {uint(123), "int", int64(123)}, + {uint8(123), "int", int64(123)}, + {uint16(123), "int", int64(123)}, + {uint32(123), "int", int64(123)}, + {uint64(123), "int", int64(123)}, + {[]byte("abcdef"), "hwaddr", "61:62:63:64:65:66"}, + {"abcdef", "hwaddr", "61:62:63:64:65:66"}, + {[]byte("abcd"), "ipaddr", "97.98.99.100"}, + {"abcd", "ipaddr", "97.98.99.100"}, + {[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:BigEndian:uint64", uint64(2602423610063712)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:BigEndian:uint32", uint32(605923)}, + {[]byte{0x00, 0x09}, "hextoint:BigEndian:uint16", uint16(9)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:LittleEndian:uint64", uint64(6934371307618175232)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:LittleEndian:uint32", uint32(3812493568)}, + {[]byte{0x00, 0x09}, "hextoint:LittleEndian:uint16", uint16(2304)}, + } + + for _, tc := range testTable { + act, err := fieldConvert(tc.conv, tc.input) + assert.NoError(t, err, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + assert.EqualValues(t, tc.expected, act, "input=%T(%v) conv=%s expected=%T(%v)", tc.input, tc.input, tc.conv, tc.expected, tc.expected) + } +} + +func TestSnmpTranslateCache_missGosmi(t *testing.T) { + gosmiSnmpTranslateCaches = nil + oid := "IF-MIB::ifPhysAddress.1" + mibName, oidNum, oidText, conversion, err := getGosmiTr(t).SnmpTranslate(oid) + assert.Len(t, gosmiSnmpTranslateCaches, 1) + stc := gosmiSnmpTranslateCaches[oid] + assert.NotNil(t, stc) + assert.Equal(t, mibName, stc.mibName) + assert.Equal(t, oidNum, stc.oidNum) + assert.Equal(t, oidText, stc.oidText) + assert.Equal(t, conversion, stc.conversion) + assert.Equal(t, err, stc.err) +} + +func TestSnmpTranslateCache_hitGosmi(t *testing.T) { + gosmiSnmpTranslateCaches = 
map[string]gosmiSnmpTranslateCache{ + "foo": { + mibName: "a", + oidNum: "b", + oidText: "c", + conversion: "d", + err: fmt.Errorf("e"), + }, + } + mibName, oidNum, oidText, conversion, err := getGosmiTr(t).SnmpTranslate("foo") + assert.Equal(t, "a", mibName) + assert.Equal(t, "b", oidNum) + assert.Equal(t, "c", oidText) + assert.Equal(t, "d", conversion) + assert.Equal(t, fmt.Errorf("e"), err) + gosmiSnmpTranslateCaches = nil +} + +func TestSnmpTableCache_missGosmi(t *testing.T) { + gosmiSnmpTableCaches = nil + oid := ".1.0.0.0" + mibName, oidNum, oidText, fields, err := getGosmiTr(t).SnmpTable(oid) + assert.Len(t, gosmiSnmpTableCaches, 1) + stc := gosmiSnmpTableCaches[oid] + assert.NotNil(t, stc) + assert.Equal(t, mibName, stc.mibName) + assert.Equal(t, oidNum, stc.oidNum) + assert.Equal(t, oidText, stc.oidText) + assert.Equal(t, fields, stc.fields) + assert.Equal(t, err, stc.err) +} + +func TestSnmpTableCache_hitGosmi(t *testing.T) { + gosmiSnmpTableCaches = map[string]gosmiSnmpTableCache{ + "foo": { + mibName: "a", + oidNum: "b", + oidText: "c", + fields: []Field{{Name: "d"}}, + err: fmt.Errorf("e"), + }, + } + mibName, oidNum, oidText, fields, err := getGosmiTr(t).SnmpTable("foo") + assert.Equal(t, "a", mibName) + assert.Equal(t, "b", oidNum) + assert.Equal(t, "c", oidText) + assert.Equal(t, []Field{{Name: "d"}}, fields) + assert.Equal(t, fmt.Errorf("e"), err) +} + +func TestTableJoin_walkGosmi(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) + require.NoError(t, err) + + tb, err := tbl.Build(gosmiTsc, true, tr) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} + +func TestTableOuterJoin_walkGosmi(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + SecondaryOuterJoin: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + tr, err := 
NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) + require.NoError(t, err) + + tb, err := tbl.Build(gosmiTsc, true, tr) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + rtr4 := RTableRow{ + Tags: map[string]string{ + "index": "Secondary.0", + "myfield4": "foo", + }, + Fields: map[string]interface{}{ + "myfield5": 1, + }, + } + assert.Len(t, tb.Rows, 4) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) + assert.Contains(t, tb.Rows, rtr4) +} + +func TestTableJoinNoIndexAsTag_walkGosmi(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: false, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + testDataPath, err := filepath.Abs("./testdata") + require.NoError(t, err) + + tr, err := NewGosmiTranslator([]string{testDataPath}, testutil.Logger{}) + require.NoError(t, err) + + tb, err := tbl.Build(gosmiTsc, true, tr) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + //"index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + //"index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + //"index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} + +func BenchmarkMibLoading(b *testing.B) { + log := testutil.Logger{} + path := []string{"testdata"} + for i := 0; i < b.N; i++ { + err := snmp.LoadMibsFromPath(path, log, &snmp.GosmiMibLoader{}) + require.NoError(b, err) + } +} + +func TestCanNotParse(t *testing.T) { + s := &Snmp{ + Fields: []Field{ + {Oid: "RFC1213-MIB::"}, + }, + ClientConfig: snmp.ClientConfig{ + Path: []string{"testdata"}, + Translator: "gosmi", + }, + } + + err := s.Init() + require.Error(t, err) +} + +func TestMissingMibPath(t *testing.T) { + log := testutil.Logger{} + path := []string{"non-existing-directory"} + err := snmp.LoadMibsFromPath(path, log, &snmp.GosmiMibLoader{}) + require.NoError(t, err) +} diff --git a/plugins/inputs/snmp/netsnmp.go b/plugins/inputs/snmp/netsnmp.go new file mode 100644 index 0000000000000..96339956f5c71 --- /dev/null +++ b/plugins/inputs/snmp/netsnmp.go @@ -0,0 +1,256 @@ +package snmp + 
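+// This translator shells out to the net-snmp command-line tools
+// (snmptranslate and snmptable). Purely numeric OIDs remain usable when the
+// tools are not installed; see snmpTranslateCall below.
+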
+import ( + "bufio" + "bytes" + "fmt" + "log" //nolint:revive + "os/exec" + "strings" + "sync" + + "github.com/influxdata/wlog" +) + +//struct that implements the translator interface. This calls existing +//code to exec netsnmp's snmptranslate program +type netsnmpTranslator struct { +} + +func NewNetsnmpTranslator() *netsnmpTranslator { + return &netsnmpTranslator{} +} + +type snmpTableCache struct { + mibName string + oidNum string + oidText string + fields []Field + err error +} + +// execCommand is so tests can mock out exec.Command usage. +var execCommand = exec.Command + +// execCmd executes the specified command, returning the STDOUT content. +// If command exits with error status, the output is captured into the returned error. +func execCmd(arg0 string, args ...string) ([]byte, error) { + if wlog.LogLevel() == wlog.DEBUG { + quoted := make([]string, 0, len(args)) + for _, arg := range args { + quoted = append(quoted, fmt.Sprintf("%q", arg)) + } + log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " ")) + } + + out, err := execCommand(arg0, args...).Output() + if err != nil { + if err, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("%s: %w", bytes.TrimRight(err.Stderr, "\r\n"), err) + } + return nil, err + } + return out, nil +} + +var snmpTableCaches map[string]snmpTableCache +var snmpTableCachesLock sync.Mutex + +// snmpTable resolves the given OID as a table, providing information about the +// table and fields within. +//nolint:revive +func (n *netsnmpTranslator) SnmpTable(oid string) ( + mibName string, oidNum string, oidText string, + fields []Field, + err error) { + snmpTableCachesLock.Lock() + if snmpTableCaches == nil { + snmpTableCaches = map[string]snmpTableCache{} + } + + var stc snmpTableCache + var ok bool + if stc, ok = snmpTableCaches[oid]; !ok { + stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err = n.snmpTableCall(oid) + snmpTableCaches[oid] = stc + } + + snmpTableCachesLock.Unlock() + return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err +} + +//nolint:revive +func (n *netsnmpTranslator) snmpTableCall(oid string) ( + mibName string, oidNum string, oidText string, + fields []Field, + err error) { + mibName, oidNum, oidText, _, err = n.SnmpTranslate(oid) + if err != nil { + return "", "", "", nil, fmt.Errorf("translating: %w", err) + } + + mibPrefix := mibName + "::" + oidFullName := mibPrefix + oidText + + // first attempt to get the table's tags + tagOids := map[string]struct{}{} + // We have to guess that the "entry" oid is `oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info. + if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil { + scanner := bufio.NewScanner(bytes.NewBuffer(out)) + for scanner.Scan() { + line := scanner.Text() + + if !strings.HasPrefix(line, " INDEX") { + continue + } + + i := strings.Index(line, "{ ") + if i == -1 { // parse error + continue + } + line = line[i+2:] + i = strings.Index(line, " }") + if i == -1 { // parse error + continue + } + line = line[:i] + for _, col := range strings.Split(line, ", ") { + tagOids[mibPrefix+col] = struct{}{} + } + } + } + + // this won't actually try to run a query. The `-Ch` will just cause it to dump headers. 
+ out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName) + if err != nil { + return "", "", "", nil, fmt.Errorf("getting table columns: %w", err) + } + scanner := bufio.NewScanner(bytes.NewBuffer(out)) + scanner.Scan() + cols := scanner.Text() + if len(cols) == 0 { + return "", "", "", nil, fmt.Errorf("could not find any columns in table") + } + for _, col := range strings.Split(cols, " ") { + if len(col) == 0 { + continue + } + _, isTag := tagOids[mibPrefix+col] + fields = append(fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag}) + } + + return mibName, oidNum, oidText, fields, err +} + +type snmpTranslateCache struct { + mibName string + oidNum string + oidText string + conversion string + err error +} + +var snmpTranslateCachesLock sync.Mutex +var snmpTranslateCaches map[string]snmpTranslateCache + +// snmpTranslate resolves the given OID. +//nolint:revive +func (n *netsnmpTranslator) SnmpTranslate(oid string) ( + mibName string, oidNum string, oidText string, + conversion string, + err error) { + snmpTranslateCachesLock.Lock() + if snmpTranslateCaches == nil { + snmpTranslateCaches = map[string]snmpTranslateCache{} + } + + var stc snmpTranslateCache + var ok bool + if stc, ok = snmpTranslateCaches[oid]; !ok { + // This will result in only one call to snmptranslate running at a time. + // We could speed it up by putting a lock in snmpTranslateCache and then + // returning it immediately, and multiple callers would then release the + // snmpTranslateCachesLock and instead wait on the individual + // snmpTranslation.Lock to release. But I don't know that the extra complexity + // is worth it. Especially when it would slam the system pretty hard if lots + // of lookups are being performed. + + stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) + snmpTranslateCaches[oid] = stc + } + + snmpTranslateCachesLock.Unlock() + + return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err +} + +//nolint:revive +func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { + var out []byte + if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { + out, err = execCmd("snmptranslate", "-Td", "-Ob", oid) + } else { + out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) + if err, ok := err.(*exec.Error); ok && err.Err == exec.ErrNotFound { + // Silently discard error if snmptranslate not found and we have a numeric OID. + // Meaning we can get by without the lookup. + return "", oid, oid, "", nil + } + } + if err != nil { + return "", "", "", "", err + } + + scanner := bufio.NewScanner(bytes.NewBuffer(out)) + ok := scanner.Scan() + if !ok && scanner.Err() != nil { + return "", "", "", "", fmt.Errorf("getting OID text: %w", scanner.Err()) + } + + oidText = scanner.Text() + + i := strings.Index(oidText, "::") + if i == -1 { + // was not found in MIB. + if bytes.Contains(out, []byte("[TRUNCATED]")) { + return "", oid, oid, "", nil + } + // not truncated, but not fully found. 
We still need to parse out numeric OID, so keep going + oidText = oid + } else { + mibName = oidText[:i] + oidText = oidText[i+2:] + } + + for scanner.Scan() { + line := scanner.Text() + + if strings.HasPrefix(line, " -- TEXTUAL CONVENTION ") { + tc := strings.TrimPrefix(line, " -- TEXTUAL CONVENTION ") + switch tc { + case "MacAddress", "PhysAddress": + conversion = "hwaddr" + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": + conversion = "ipaddr" + } + } else if strings.HasPrefix(line, "::= { ") { + objs := strings.TrimPrefix(line, "::= { ") + objs = strings.TrimSuffix(objs, " }") + + for _, obj := range strings.Split(objs, " ") { + if len(obj) == 0 { + continue + } + if i := strings.Index(obj, "("); i != -1 { + obj = obj[i+1:] + oidNum += "." + obj[:strings.Index(obj, ")")] + } else { + oidNum += "." + obj + } + } + break + } + } + + return mibName, oidNum, oidText, conversion, nil +} diff --git a/plugins/inputs/snmp/sample.conf b/plugins/inputs/snmp/sample.conf new file mode 100644 index 0000000000000..bc52b4371e6d8 --- /dev/null +++ b/plugins/inputs/snmp/sample.conf @@ -0,0 +1,75 @@ +# Retrieves SNMP values from remote agents +[[inputs.snmp]] + ## Agent addresses to retrieve values from. + ## format: agents = ["<scheme://><hostname>:<port>"] + ## scheme: optional, either udp, udp4, udp6, tcp, tcp4, tcp6. + ## default is udp + ## port: optional + ## example: agents = ["udp://127.0.0.1:161"] + ## agents = ["tcp://127.0.0.1:161"] + ## agents = ["udp4://v4only-snmp-agent"] + agents = ["udp://127.0.0.1:161"] + + ## Timeout for each request. + # timeout = "5s" + + ## SNMP version; can be 1, 2, or 3. + # version = 2 + + ## Path to MIB files + ## Used by the gosmi translator. + ## To add paths when translating with netsnmp, use the MIBDIRS environment variable + # path = ["/usr/share/snmp/mibs"] + + ## SNMP community string. + # community = "public" + + ## Agent host tag + # agent_host_tag = "agent_host" + + ## Number of retries to attempt. + # retries = 3 + + ## The GETBULK max-repetitions parameter. + # max_repetitions = 10 + + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA", "SHA224", "SHA256", "SHA384", "SHA512", or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Context Name. + # context_name = "" + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C", or "". + ### Protocols "AES192", "AES192C", "AES256", and "AES256C" require the underlying net-snmp tools + ### to be compiled with --enable-blumenthal-aes (http://www.net-snmp.org/docs/INSTALL.html) + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" + + ## Add fields and tables defining the variables you wish to collect. This + ## example collects the system uptime and interface variables. Reference the + ## full plugin documentation for configuration details.
+ [[inputs.snmp.field]] + oid = "RFC1213-MIB::sysUpTime.0" + name = "uptime" + + [[inputs.snmp.field]] + oid = "RFC1213-MIB::sysName.0" + name = "source" + is_tag = true + + [[inputs.snmp.table]] + oid = "IF-MIB::ifTable" + name = "interface" + inherit_tags = ["source"] + + [[inputs.snmp.table.field]] + oid = "IF-MIB::ifDescr" + name = "ifDescr" + is_tag = true diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 103b23d214485..5fda56fe2a49a 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -1,95 +1,42 @@ +//go:generate ../../../tools/readme_config_includer/generator package snmp import ( - "bufio" - "bytes" + _ "embed" + "encoding/binary" + "errors" "fmt" - "log" "math" "net" - "os/exec" "strconv" "strings" "sync" "time" + "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/wlog" - "github.com/soniah/gosnmp" ) -const description = `Retrieves SNMP values from remote agents` -const sampleConfig = ` - ## Agent addresses to retrieve values from. - ## example: agents = ["udp://127.0.0.1:161"] - ## agents = ["tcp://127.0.0.1:161"] - agents = ["udp://127.0.0.1:161"] - - ## Timeout for each request. - # timeout = "5s" - - ## SNMP version; can be 1, 2, or 3. - # version = 2 - - ## Agent host tag; the tag used to reference the source host - # agent_host_tag = "agent_host" - - ## SNMP community string. - # community = "public" - - ## Number of retries to attempt. - # retries = 3 - - ## The GETBULK max-repetitions parameter. - # max_repetitions = 10 - - ## SNMPv3 authentication and encryption options. - ## - ## Security Name. - # sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA", or "". - # auth_protocol = "MD5" - ## Authentication password. - # auth_password = "pass" - ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". - # sec_level = "authNoPriv" - ## Context Name. - # context_name = "" - ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". - # priv_protocol = "" - ## Privacy password used for encrypted messages. - # priv_password = "" - - ## Add fields and tables defining the variables you wish to collect. This - ## example collects the system uptime and interface variables. Reference the - ## full plugin documentation for configuration details. -` - -// execCommand is so tests can mock out exec.Command usage. -var execCommand = exec.Command - -// execCmd executes the specified command, returning the STDOUT content. -// If command exits with error status, the output is captured into the returned error. -func execCmd(arg0 string, args ...string) ([]byte, error) { - if wlog.LogLevel() == wlog.DEBUG { - quoted := make([]string, 0, len(args)) - for _, arg := range args { - quoted = append(quoted, fmt.Sprintf("%q", arg)) - } - log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " ")) - } - - out, err := execCommand(arg0, args...).Output() - if err != nil { - if err, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("%s: %w", bytes.TrimRight(err.Stderr, "\r\n"), err) - } - return nil, err - } - return out, nil +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +type Translator interface { + SnmpTranslate(oid string) ( + mibName string, oidNum string, oidText string, + conversion string, + err error, + ) + + SnmpTable(oid string) ( + mibName string, oidNum string, oidText string, + fields []Field, + err error, + ) } // Snmp holds the configuration for the plugin. @@ -108,28 +55,48 @@ type Snmp struct { // Name & Fields are the elements of a Table. // Telegraf chokes if we try to embed a Table. So instead we have to embed the // fields of a Table, and construct a Table during runtime. - Name string // deprecated in 1.14; use name_override + Name string `toml:"name"` Fields []Field `toml:"field"` connectionCache []snmpConnection - initialized bool + + Log telegraf.Logger `toml:"-"` + + translator Translator } -func (s *Snmp) init() error { - if s.initialized { - return nil +func (s *Snmp) SetTranslator(name string) { + s.Translator = name +} + +func (*Snmp) SampleConfig() string { + return sampleConfig +} + +func (s *Snmp) Init() error { + var err error + switch s.Translator { + case "gosmi": + s.translator, err = NewGosmiTranslator(s.Path, s.Log) + if err != nil { + return err + } + case "netsnmp": + s.translator = NewNetsnmpTranslator() + default: + return fmt.Errorf("invalid translator value") } s.connectionCache = make([]snmpConnection, len(s.Agents)) for i := range s.Tables { - if err := s.Tables[i].Init(); err != nil { + if err := s.Tables[i].Init(s.translator); err != nil { return fmt.Errorf("initializing table %s: %w", s.Tables[i].Name, err) } } for i := range s.Fields { - if err := s.Fields[i].init(); err != nil { + if err := s.Fields[i].init(s.translator); err != nil { return fmt.Errorf("initializing field %s: %w", s.Fields[i].Name, err) } } @@ -138,7 +105,6 @@ func (s *Snmp) init() error { s.AgentHostTag = "agent_host" } - s.initialized = true return nil } @@ -165,20 +131,33 @@ type Table struct { } // Init() builds & initializes the nested fields. -func (t *Table) Init() error { +func (t *Table) Init(tr Translator) error { + //makes sure oid or name is set in config file + //otherwise snmp will produce metrics with an empty name + if t.Oid == "" && t.Name == "" { + return fmt.Errorf("SNMP table in config file is not named. One or both of the oid and name settings must be set") + } + if t.initialized { return nil } - if err := t.initBuild(); err != nil { + if err := t.initBuild(tr); err != nil { return err } + secondaryIndexTablePresent := false // initialize all the nested fields for i := range t.Fields { - if err := t.Fields[i].init(); err != nil { + if err := t.Fields[i].init(tr); err != nil { return fmt.Errorf("initializing field %s: %w", t.Fields[i].Name, err) } + if t.Fields[i].SecondaryIndexTable { + if secondaryIndexTablePresent { + return fmt.Errorf("only one field can be SecondaryIndexTable") + } + secondaryIndexTablePresent = true + } } t.initialized = true @@ -188,12 +167,12 @@ func (t *Table) Init() error { // initBuild initializes the table if it has an OID configured. If so, the // net-snmp tools will be used to look up the OID and auto-populate the table's // fields. -func (t *Table) initBuild() error { +func (t *Table) initBuild(tr Translator) error { if t.Oid == "" { return nil } - _, _, oidText, fields, err := snmpTable(t.Oid) + _, _, oidText, fields, err := tr.SnmpTable(t.Oid) if err != nil { return err } @@ -237,29 +216,54 @@ type Field struct { // "hwaddr" will convert a 6-byte string to a MAC address. // "ipaddr" will convert the value to an IPv4 or IPv6 address. 
Conversion string + // Translate tells whether the value of the field should be snmptranslated + Translate bool + // Secondary index table allows merging data from two tables with different + // indexes; this field will be used to join them. There can be only one secondary index table. + SecondaryIndexTable bool + // This field uses the secondary index, and will later be merged with the primary index + // using SecondaryIndexTable. SecondaryIndexTable and SecondaryIndexUse are exclusive. + SecondaryIndexUse bool + // Controls whether entries from the secondary table should be added when the joining + // index is not present. If set to true, the join is an outer join, and the + // index is prepended with "Secondary." for missing values to avoid overlapping + // indexes from both tables. + // Can be set per field or globally with SecondaryIndexTable; a global true overrides + // a per-field false. + SecondaryOuterJoin bool initialized bool } // init() converts OID names to numbers, and sets the .Name attribute if unset. -func (f *Field) init() error { +func (f *Field) init(tr Translator) error { if f.initialized { return nil } - _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) - if err != nil { - return fmt.Errorf("translating: %w", err) - } - f.Oid = oidNum - if f.Name == "" { - f.Name = oidText + // check if oid needs translation or name is not set + if strings.ContainsAny(f.Oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") || f.Name == "" { + _, oidNum, oidText, conversion, err := tr.SnmpTranslate(f.Oid) + if err != nil { + return fmt.Errorf("translating: %w", err) + } + f.Oid = oidNum + if f.Name == "" { + f.Name = oidText + } + if f.Conversion == "" { + f.Conversion = conversion + } + //TODO use textual convention conversion from the MIB } - if f.Conversion == "" { - f.Conversion = conversion + + if f.SecondaryIndexTable && f.SecondaryIndexUse { + return fmt.Errorf("SecondaryIndexTable and SecondaryIndexUse are exclusive") } - //TODO use textual convention conversion from the MIB + if !f.SecondaryIndexTable && !f.SecondaryIndexUse && f.SecondaryOuterJoin { + return fmt.Errorf("SecondaryOuterJoin set to true, but field is not being used in join") } f.initialized = true return nil @@ -297,39 +301,10 @@ func (e *walkError) Unwrap() error { return e.err } -func init() { - inputs.Add("snmp", func() telegraf.Input { - return &Snmp{ - Name: "snmp", - ClientConfig: snmp.ClientConfig{ - Retries: 3, - MaxRepetitions: 10, - Timeout: internal.Duration{Duration: 5 * time.Second}, - Version: 2, - Community: "public", - }, - } - }) -} - -// SampleConfig returns the default configuration of the input. -func (s *Snmp) SampleConfig() string { - return sampleConfig -} - -// Description returns a one-sentence description on the input. -func (s *Snmp) Description() string { - return description -} - // Gather retrieves all the configured fields and tables. // Any error encountered does not halt the process. The errors are accumulated // and returned at the end.
func (s *Snmp) Gather(acc telegraf.Accumulator) error { - if err := s.init(); err != nil { - return err - } - var wg sync.WaitGroup for i, agent := range s.Agents { wg.Add(1) @@ -365,7 +340,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, topTags map[string]string, walk bool) error { - rt, err := t.Build(gs, walk) + rt, err := t.Build(gs, walk, s.translator) if err != nil { return err } @@ -394,9 +369,22 @@ func (s *Snmp) gatherTable(acc telegraf.Accumulator, gs snmpConnection, t Table, } // Build retrieves all the fields specified in the table and constructs the RTable. -func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { +func (t Table) Build(gs snmpConnection, walk bool, tr Translator) (*RTable, error) { rows := map[string]RTableRow{} + // translation table for secondary index (when performing a join on two tables) + secIdxTab := make(map[string]string) + secGlobalOuterJoin := false + for i, f := range t.Fields { + if f.SecondaryIndexTable { + secGlobalOuterJoin = f.SecondaryOuterJoin + if i != 0 { + t.Fields[0], t.Fields[i] = t.Fields[i], t.Fields[0] + } + break + } + } + tagCount := 0 for _, f := range t.Fields { if f.IsTag { @@ -424,7 +412,17 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { // empty string. This results in all the non-table fields sharing the same // index, and being added on the same row. if pkt, err := gs.Get([]string{oid}); err != nil { - return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) + if errors.Is(err, gosnmp.ErrUnknownSecurityLevel) { + return nil, fmt.Errorf("unknown security level (sec_level)") + } else if errors.Is(err, gosnmp.ErrUnknownUsername) { + return nil, fmt.Errorf("unknown username (sec_name)") + } else if errors.Is(err, gosnmp.ErrWrongDigest) { + return nil, fmt.Errorf("wrong digest (auth_protocol, auth_password)") + } else if errors.Is(err, gosnmp.ErrDecryption) { + return nil, fmt.Errorf("decryption error (priv_protocol, priv_password)") + } else { + return nil, fmt.Errorf("performing get on field %s: %w", f.Name, err) + } } else if pkt != nil && len(pkt.Variables) > 0 && pkt.Variables[0].Type != gosnmp.NoSuchObject && pkt.Variables[0].Type != gosnmp.NoSuchInstance { ent := pkt.Variables[0] fv, err := fieldConvert(f.Conversion, ent.Value) @@ -451,7 +449,7 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { i := f.OidIndexLength + 1 // leading separator idx = strings.Map(func(r rune) rune { if r == '.'
{ - i -= 1 + i-- } if i < 1 { return -1 @@ -460,6 +458,17 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { }, idx) } + // snmptranslate table field value here + if f.Translate { + if entOid, ok := ent.Value.(string); ok { + _, _, oidText, _, err := tr.SnmpTranslate(entOid) + if err == nil { + // If no error translating, the original value for ent.Value should be replaced + ent.Value = oidText + } + } + } + fv, err := fieldConvert(f.Conversion, ent.Value) if err != nil { return &walkError{ @@ -481,6 +490,16 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } for idx, v := range ifv { + if f.SecondaryIndexUse { + if newidx, ok := secIdxTab[idx]; ok { + idx = newidx + } else { + if !secGlobalOuterJoin && !f.SecondaryOuterJoin { + continue + } + idx = ".Secondary" + idx + } + } rtr, ok := rows[idx] if !ok { rtr = RTableRow{} @@ -505,6 +524,20 @@ func (t Table) Build(gs snmpConnection, walk bool) (*RTable, error) { } else { rtr.Fields[f.Name] = v } + if f.SecondaryIndexTable { + // indexes are stored here with a prepended "." so we need to add it if needed + var vss string + if ok { + vss = "." + vs + } else { + vss = fmt.Sprintf(".%v", v) + } + if idx[0] == '.' { + secIdxTab[vss] = idx + } else { + secIdxTab[vss] = "." + idx + } + } } } } @@ -527,6 +560,7 @@ type snmpConnection interface { //BulkWalkAll(string) ([]gosnmp.SnmpPDU, error) Walk(string, gosnmp.WalkFunc) error Get(oids []string) (*gosnmp.SnmpPacket, error) + Reconnect() error } // getConnection creates a snmpConnection (*gosnmp.GoSNMP) object and caches the @@ -535,6 +569,10 @@ type snmpConnection interface { // more than one goroutine. func (s *Snmp) getConnection(idx int) (snmpConnection, error) { if gs := s.connectionCache[idx]; gs != nil { + if err := gs.Reconnect(); err != nil { + return gs, fmt.Errorf("reconnecting: %w", err) + } + return gs, nil } @@ -546,7 +584,8 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { if err != nil { return nil, err } - gs.SetAgent(agent) + + err = gs.SetAgent(agent) if err != nil { return nil, err } @@ -561,12 +600,6 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { } // fieldConvert converts from any type according to the conv specification -// "float"/"float(0)" will convert the value into a float. -// "float(X)" will convert the value into a float, and then move the decimal before Xth right-most digit. -// "int" will convert the value into an integer. -// "hwaddr" will convert the value into a MAC address. -// "ipaddr" will convert the value into into an IP address. -// "" will convert a byte slice into a string.
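+// The supported conversion values ("float(X)", "float", "int", "hwaddr",
+// "ipaddr", and "hextoint:<Endian>:<bit size>") are documented in the
+// plugin README.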
func fieldConvert(conv string, v interface{}) (interface{}, error) { if conv == "" { if bs, ok := v.([]byte); ok { @@ -581,7 +614,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { case float32: v = float64(vt) / math.Pow10(d) case float64: - v = float64(vt) / math.Pow10(d) + v = vt / math.Pow10(d) case int: v = float64(vt) / math.Pow10(d) case int8: @@ -627,7 +660,7 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { case int32: v = int64(vt) case int64: - v = int64(vt) + v = vt case uint: v = int64(vt) case uint8: @@ -658,6 +691,46 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { return v, nil } + split := strings.Split(conv, ":") + if split[0] == "hextoint" && len(split) == 3 { + endian := split[1] + bit := split[2] + + bv, ok := v.([]byte) + if !ok { + return v, nil + } + + switch endian { + case "LittleEndian": + switch bit { + case "uint64": + v = binary.LittleEndian.Uint64(bv) + case "uint32": + v = binary.LittleEndian.Uint32(bv) + case "uint16": + v = binary.LittleEndian.Uint16(bv) + default: + return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) + } + case "BigEndian": + switch bit { + case "uint64": + v = binary.BigEndian.Uint64(bv) + case "uint32": + v = binary.BigEndian.Uint32(bv) + case "uint16": + v = binary.BigEndian.Uint16(bv) + default: + return nil, fmt.Errorf("invalid bit value (%s) for hex to int conversion", bit) + } + default: + return nil, fmt.Errorf("invalid Endian value (%s) for hex to int conversion", endian) + } + + return v, nil + } + if conv == "ipaddr" { var ipbs []byte @@ -683,221 +756,18 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { return nil, fmt.Errorf("invalid conversion type '%s'", conv) } -type snmpTableCache struct { - mibName string - oidNum string - oidText string - fields []Field - err error -} - -var snmpTableCaches map[string]snmpTableCache -var snmpTableCachesLock sync.Mutex - -// snmpTable resolves the given OID as a table, providing information about the -// table and fields within. -func snmpTable(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { - snmpTableCachesLock.Lock() - if snmpTableCaches == nil { - snmpTableCaches = map[string]snmpTableCache{} - } - - var stc snmpTableCache - var ok bool - if stc, ok = snmpTableCaches[oid]; !ok { - stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err = snmpTableCall(oid) - snmpTableCaches[oid] = stc - } - - snmpTableCachesLock.Unlock() - return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err -} - -func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { - mibName, oidNum, oidText, _, err = SnmpTranslate(oid) - if err != nil { - return "", "", "", nil, fmt.Errorf("translating: %w", err) - } - - mibPrefix := mibName + "::" - oidFullName := mibPrefix + oidText - - // first attempt to get the table's tags - tagOids := map[string]struct{}{} - // We have to guess that the "entry" oid is `oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info. 
- if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil { - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - for scanner.Scan() { - line := scanner.Text() - - if !strings.HasPrefix(line, " INDEX") { - continue - } - - i := strings.Index(line, "{ ") - if i == -1 { // parse error - continue - } - line = line[i+2:] - i = strings.Index(line, " }") - if i == -1 { // parse error - continue - } - line = line[:i] - for _, col := range strings.Split(line, ", ") { - tagOids[mibPrefix+col] = struct{}{} - } - } - } - - // this won't actually try to run a query. The `-Ch` will just cause it to dump headers. - out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName) - if err != nil { - return "", "", "", nil, fmt.Errorf("getting table columns: %w", err) - } - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - scanner.Scan() - cols := scanner.Text() - if len(cols) == 0 { - return "", "", "", nil, fmt.Errorf("could not find any columns in table") - } - for _, col := range strings.Split(cols, " ") { - if len(col) == 0 { - continue - } - _, isTag := tagOids[mibPrefix+col] - fields = append(fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag}) - } - - return mibName, oidNum, oidText, fields, err -} - -type snmpTranslateCache struct { - mibName string - oidNum string - oidText string - conversion string - err error -} - -var snmpTranslateCachesLock sync.Mutex -var snmpTranslateCaches map[string]snmpTranslateCache - -// snmpTranslate resolves the given OID. -func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { - snmpTranslateCachesLock.Lock() - if snmpTranslateCaches == nil { - snmpTranslateCaches = map[string]snmpTranslateCache{} - } - - var stc snmpTranslateCache - var ok bool - if stc, ok = snmpTranslateCaches[oid]; !ok { - // This will result in only one call to snmptranslate running at a time. - // We could speed it up by putting a lock in snmpTranslateCache and then - // returning it immediately, and multiple callers would then release the - // snmpTranslateCachesLock and instead wait on the individual - // snmpTranslation.Lock to release. But I don't know that the extra complexity - // is worth it. Especially when it would slam the system pretty hard if lots - // of lookups are being performed. 
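The cache removed here (it moves behind the new translator type) is the classic mutex-plus-lazy-map memoization that the deleted comment discusses: the lock is held across the whole snmptranslate call, so only one lookup runs at a time. A generic sketch of that pattern, with memoize and lookup as illustrative names:

    package main

    import (
        "fmt"
        "sync"
    )

    var (
        cacheLock sync.Mutex
        cache     map[string]string
    )

    // memoize holds the lock across the expensive lookup, serializing
    // callers; this trades throughput for not slamming the system with
    // many concurrent lookups, as the deleted comment explains.
    func memoize(key string, lookup func(string) string) string {
        cacheLock.Lock()
        defer cacheLock.Unlock()
        if cache == nil {
            cache = map[string]string{}
        }
        v, ok := cache[key]
        if !ok {
            v = lookup(key)
            cache[key] = v
        }
        return v
    }

    func main() {
        fmt.Println(memoize(".1.3.6.1", func(oid string) string { return "resolved:" + oid }))
    }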
- - stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) - snmpTranslateCaches[oid] = stc - } - - snmpTranslateCachesLock.Unlock() - - return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err -} - -func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) { - snmpTranslateCachesLock.Lock() - defer snmpTranslateCachesLock.Unlock() - if snmpTranslateCaches == nil { - snmpTranslateCaches = map[string]snmpTranslateCache{} - } - - var stc snmpTranslateCache - stc.mibName = mibName - stc.oidNum = oidNum - stc.oidText = oidText - stc.conversion = conversion - stc.err = nil - snmpTranslateCaches[oid] = stc -} - -func SnmpTranslateClear() { - snmpTranslateCachesLock.Lock() - defer snmpTranslateCachesLock.Unlock() - snmpTranslateCaches = map[string]snmpTranslateCache{} -} - -func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { - var out []byte - if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { - out, err = execCmd("snmptranslate", "-Td", "-Ob", oid) - } else { - out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid) - if err, ok := err.(*exec.Error); ok && err.Err == exec.ErrNotFound { - // Silently discard error if snmptranslate not found and we have a numeric OID. - // Meaning we can get by without the lookup. - return "", oid, oid, "", nil - } - } - if err != nil { - return "", "", "", "", err - } - - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - ok := scanner.Scan() - if !ok && scanner.Err() != nil { - return "", "", "", "", fmt.Errorf("getting OID text: %w", scanner.Err()) - } - - oidText = scanner.Text() - - i := strings.Index(oidText, "::") - if i == -1 { - // was not found in MIB. - if bytes.Contains(out, []byte("[TRUNCATED]")) { - return "", oid, oid, "", nil - } - // not truncated, but not fully found. We still need to parse out numeric OID, so keep going - oidText = oid - } else { - mibName = oidText[:i] - oidText = oidText[i+2:] - } - - for scanner.Scan() { - line := scanner.Text() - - if strings.HasPrefix(line, " -- TEXTUAL CONVENTION ") { - tc := strings.TrimPrefix(line, " -- TEXTUAL CONVENTION ") - switch tc { - case "MacAddress", "PhysAddress": - conversion = "hwaddr" - case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": - conversion = "ipaddr" - } - } else if strings.HasPrefix(line, "::= { ") { - objs := strings.TrimPrefix(line, "::= { ") - objs = strings.TrimSuffix(objs, " }") - - for _, obj := range strings.Split(objs, " ") { - if len(obj) == 0 { - continue - } - if i := strings.Index(obj, "("); i != -1 { - obj = obj[i+1:] - oidNum += "." + obj[:strings.Index(obj, ")")] - } else { - oidNum += "." 
+ obj - } - } - break +func init() { + inputs.Add("snmp", func() telegraf.Input { + return &Snmp{ + Name: "snmp", + ClientConfig: snmp.ClientConfig{ + Retries: 3, + MaxRepetitions: 10, + Timeout: config.Duration(5 * time.Second), + Version: 2, + Path: []string{"/usr/share/snmp/mibs"}, + Community: "public", + }, } - } - - return mibName, oidNum, oidText, conversion, nil + }) } diff --git a/plugins/inputs/snmp/snmp_mocks_generate.go b/plugins/inputs/snmp/snmp_mocks_generate.go index c09dd004580da..f87f9029b0d06 100644 --- a/plugins/inputs/snmp/snmp_mocks_generate.go +++ b/plugins/inputs/snmp/snmp_mocks_generate.go @@ -1,3 +1,4 @@ +//go:build generate // +build generate package main @@ -23,6 +24,7 @@ var mockedCommands = [][]string{ {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.1.0"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.5"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.2.3"}, + {"snmptranslate", "-Td", "-Ob", "-m", "all", ".1.0.0.0.1.7"}, {"snmptranslate", "-Td", "-Ob", ".iso.2.3"}, {"snmptranslate", "-Td", "-Ob", "-m", "all", ".999"}, {"snmptranslate", "-Td", "-Ob", "TEST::server"}, diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index 56d9326f1d639..850f6b83830bc 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -24,17 +24,17 @@ func mockExecCommand(arg0 string, args ...string) *exec.Cmd { // This is not a real test. This is just a way of mocking out commands. // // Idea based on https://github.com/golang/go/blob/7c31043/src/os/exec/exec_test.go#L568 -func TestMockExecCommand(t *testing.T) { +func TestMockExecCommand(_ *testing.T) { var cmd []string for _, arg := range os.Args { - if string(arg) == "--" { + if arg == "--" { cmd = []string{} continue } if cmd == nil { continue } - cmd = append(cmd, string(arg)) + cmd = append(cmd, arg) } if cmd == nil { return @@ -44,14 +44,20 @@ func TestMockExecCommand(t *testing.T) { mcr, ok := mockedCommandResults[cmd0] if !ok { cv := fmt.Sprintf("%#v", cmd)[8:] // trim `[]string` prefix + //nolint:errcheck,revive fmt.Fprintf(os.Stderr, "Unmocked command. 
Please add the following to `mockedCommands` in snmp_mocks_generate.go, and then run `go generate`:\n\t%s,\n", cv) + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:errcheck,revive fmt.Printf("%s", mcr.stdout) + //nolint:errcheck,revive fmt.Fprintf(os.Stderr, "%s", mcr.stderr) if mcr.exitError { + //nolint:revive // error code is important for this "test" os.Exit(1) } + //nolint:revive // error code is important for this "test" os.Exit(0) } @@ -69,6 +75,7 @@ var mockedCommandResults = map[string]mockedCommandResult{ "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.7": {stdout: "TEST::testTableEntry.7\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) std(0) testOID(0) testTable(0) testTableEntry(1) 7 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, "snmptranslate\x00-Td\x00-Ob\x00TEST::server": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 583b2dc847282..6b10d969cdf9f 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -9,13 +9,10 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal" + "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/snmp" - config "github.com/influxdata/telegraf/internal/snmp" - "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/toml" - "github.com/soniah/gosnmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -60,6 +57,9 @@ func (tsc *testSNMPConnection) Walk(oid string, wf gosnmp.WalkFunc) error { } return nil } +func (tsc *testSNMPConnection) Reconnect() error { + return nil +} var tsc = &testSNMPConnection{ host: "tsc", @@ -81,29 +81,19 @@ var tsc = &testSNMPConnection{ ".1.0.0.1.3": []byte("byte slice"), ".1.0.0.2.1.5.0.9.9": 11, ".1.0.0.2.1.5.1.9.9": 22, + ".1.0.0.0.1.6.0": ".1.0.0.0.1.7", + ".1.0.0.3.1.1.10": "instance", + ".1.0.0.3.1.1.11": "instance2", + ".1.0.0.3.1.1.12": "instance3", + ".1.0.0.3.1.2.10": 10, + ".1.0.0.3.1.2.11": 20, + ".1.0.0.3.1.2.12": 20, + ".1.0.0.3.1.3.10": 1, + ".1.0.0.3.1.3.11": 2, + ".1.0.0.3.1.3.12": 3, }, } -func TestSampleConfig(t *testing.T) { - conf := 
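For readers new to this mocking scheme: the test binary re-executes itself with a "--" separator (see mockExecCommand above), and the requested command line is looked up in mockedCommandResults, keyed by the argv elements joined with NUL bytes. A minimal sketch of how such a key is formed (cmd and key are illustrative names):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        cmd := []string{"snmptranslate", "-Td", "-Ob", "TEST::server"}
        // Matches map keys like "snmptranslate\x00-Td\x00-Ob\x00TEST::server".
        key := strings.Join(cmd, "\x00")
        fmt.Printf("%q\n", key)
    }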
inputs.Inputs["snmp"]() - err := toml.Unmarshal([]byte(conf.SampleConfig()), conf) - require.NoError(t, err) - - expected := &Snmp{ - Agents: []string{"udp://127.0.0.1:161"}, - AgentHostTag: "", - ClientConfig: config.ClientConfig{ - Timeout: internal.Duration{Duration: 5 * time.Second}, - Version: 2, - Community: "public", - MaxRepetitions: 10, - Retries: 3, - }, - Name: "snmp", - } - require.Equal(t, expected, conf) -} - func TestFieldInit(t *testing.T) { translations := []struct { inputOid string @@ -127,9 +117,10 @@ func TestFieldInit(t *testing.T) { {"TCP-MIB::tcpConnectionLocalAddress.1", "", "", ".1.3.6.1.2.1.6.19.1.2.1", "tcpConnectionLocalAddress.1", "ipaddr"}, } + tr := NewNetsnmpTranslator() for _, txl := range translations { f := Field{Oid: txl.inputOid, Name: txl.inputName, Conversion: txl.inputConversion} - err := f.init() + err := f.init(tr) if !assert.NoError(t, err, "inputOid='%s' inputName='%s'", txl.inputOid, txl.inputName) { continue } @@ -146,7 +137,7 @@ func TestTableInit(t *testing.T) { {Oid: "TEST::description", Name: "description", IsTag: true}, }, } - err := tbl.Init() + err := tbl.Init(NewNetsnmpTranslator()) require.NoError(t, err) assert.Equal(t, "testTable", tbl.Name) @@ -167,9 +158,12 @@ func TestSnmpInit(t *testing.T) { Fields: []Field{ {Oid: "TEST::hostname"}, }, + ClientConfig: snmp.ClientConfig{ + Translator: "netsnmp", + }, } - err := s.init() + err := s.Init() require.NoError(t, err) assert.Len(t, s.Tables[0].Fields, 4) @@ -199,15 +193,19 @@ func TestSnmpInit_noTranslate(t *testing.T) { {Oid: ".1.1.1.3"}, }, Tables: []Table{ - {Fields: []Field{ - {Oid: ".1.1.1.4", Name: "four", IsTag: true}, - {Oid: ".1.1.1.5", Name: "five"}, - {Oid: ".1.1.1.6"}, - }}, + {Name: "testing", + Fields: []Field{ + {Oid: ".1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".1.1.1.5", Name: "five"}, + {Oid: ".1.1.1.6"}, + }}, + }, + ClientConfig: snmp.ClientConfig{ + Translator: "netsnmp", }, } - err := s.init() + err := s.Init() require.NoError(t, err) assert.Equal(t, ".1.1.1.1", s.Fields[0].Oid) @@ -235,17 +233,33 @@ func TestSnmpInit_noTranslate(t *testing.T) { assert.Equal(t, false, s.Tables[0].Fields[2].IsTag) } +func TestSnmpInit_noName_noOid(t *testing.T) { + s := &Snmp{ + Tables: []Table{ + {Fields: []Field{ + {Oid: ".1.1.1.4", Name: "four", IsTag: true}, + {Oid: ".1.1.1.5", Name: "five"}, + {Oid: ".1.1.1.6"}, + }}, + }, + } + + err := s.Init() + require.Error(t, err) +} + func TestGetSNMPConnection_v2(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4:567", "1.2.3.4", "udp://127.0.0.1"}, - ClientConfig: config.ClientConfig{ - Timeout: internal.Duration{Duration: 3 * time.Second}, - Retries: 4, - Version: 2, - Community: "foo", + ClientConfig: snmp.ClientConfig{ + Timeout: config.Duration(3 * time.Second), + Retries: 4, + Version: 2, + Community: "foo", + Translator: "netsnmp", }, } - err := s.init() + err := s.Init() require.NoError(t, err) gsc, err := s.getConnection(0) @@ -280,8 +294,11 @@ func TestGetSNMPConnectionTCP(t *testing.T) { s := &Snmp{ Agents: []string{"tcp://127.0.0.1:56789"}, + ClientConfig: snmp.ClientConfig{ + Translator: "netsnmp", + }, } - err := s.init() + err := s.Init() require.NoError(t, err) wg.Add(1) @@ -296,8 +313,14 @@ func TestGetSNMPConnectionTCP(t *testing.T) { func stubTCPServer(wg *sync.WaitGroup) { defer wg.Done() - tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:56789") - tcpServer, _ := net.ListenTCP("tcp", tcpAddr) + tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:56789") + if err != nil { + fmt.Print(err) + } + tcpServer, 
err := net.ListenTCP("tcp", tcpAddr) + if err != nil { + fmt.Print(err) + } defer tcpServer.Close() wg.Done() conn, _ := tcpServer.AcceptTCP() @@ -307,7 +330,7 @@ func stubTCPServer(wg *sync.WaitGroup) { func TestGetSNMPConnection_v3(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4"}, - ClientConfig: config.ClientConfig{ + ClientConfig: snmp.ClientConfig{ Version: 3, MaxRepetitions: 20, ContextName: "mycontext", @@ -320,9 +343,10 @@ func TestGetSNMPConnection_v3(t *testing.T) { EngineID: "myengineid", EngineBoots: 1, EngineTime: 2, + Translator: "netsnmp", }, } - err := s.init() + err := s.Init() require.NoError(t, err) gsc, err := s.getConnection(0) @@ -344,11 +368,137 @@ func TestGetSNMPConnection_v3(t *testing.T) { assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) } +func TestGetSNMPConnection_v3_blumenthal(t *testing.T) { + testCases := []struct { + Name string + Algorithm gosnmp.SnmpV3PrivProtocol + Config *Snmp + }{ + { + Name: "AES192", + Algorithm: gosnmp.AES192, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: snmp.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES192", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + Translator: "netsnmp", + }, + }, + }, + { + Name: "AES192C", + Algorithm: gosnmp.AES192C, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: snmp.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES192C", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + Translator: "netsnmp", + }, + }, + }, + { + Name: "AES256", + Algorithm: gosnmp.AES256, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: snmp.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES256", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + Translator: "netsnmp", + }, + }, + }, + { + Name: "AES256C", + Algorithm: gosnmp.AES256C, + Config: &Snmp{ + Agents: []string{"1.2.3.4"}, + ClientConfig: snmp.ClientConfig{ + Version: 3, + MaxRepetitions: 20, + ContextName: "mycontext", + SecLevel: "authPriv", + SecName: "myuser", + AuthProtocol: "md5", + AuthPassword: "password123", + PrivProtocol: "AES256C", + PrivPassword: "password123", + EngineID: "myengineid", + EngineBoots: 1, + EngineTime: 2, + Translator: "netsnmp", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + s := tc.Config + err := s.Init() + require.NoError(t, err) + + gsc, err := s.getConnection(0) + require.NoError(t, err) + gs := gsc.(snmp.GosnmpWrapper) + assert.Equal(t, gs.Version, gosnmp.Version3) + sp := gs.SecurityParameters.(*gosnmp.UsmSecurityParameters) + assert.Equal(t, "1.2.3.4", gsc.Host()) + assert.EqualValues(t, 20, gs.MaxRepetitions) + assert.Equal(t, "mycontext", gs.ContextName) + assert.Equal(t, gosnmp.AuthPriv, gs.MsgFlags&gosnmp.AuthPriv) + assert.Equal(t, "myuser", sp.UserName) + assert.Equal(t, gosnmp.MD5, sp.AuthenticationProtocol) + assert.Equal(t, "password123", sp.AuthenticationPassphrase) + assert.Equal(t, tc.Algorithm, sp.PrivacyProtocol) + assert.Equal(t, "password123", 
sp.PrivacyPassphrase) + assert.Equal(t, "myengineid", sp.AuthoritativeEngineID) + assert.EqualValues(t, 1, sp.AuthoritativeEngineBoots) + assert.EqualValues(t, 2, sp.AuthoritativeEngineTime) + }) + } +} + func TestGetSNMPConnection_caching(t *testing.T) { s := &Snmp{ Agents: []string{"1.2.3.4", "1.2.3.5", "1.2.3.5"}, + ClientConfig: snmp.ClientConfig{ + Translator: "netsnmp", + }, } - err := s.init() + err := s.Init() require.NoError(t, err) gs1, err := s.getConnection(0) require.NoError(t, err) @@ -358,18 +508,17 @@ func TestGetSNMPConnection_caching(t *testing.T) { require.NoError(t, err) gs4, err := s.getConnection(2) require.NoError(t, err) - assert.True(t, gs1 == gs2) - assert.False(t, gs2 == gs3) - assert.False(t, gs3 == gs4) + assert.Equal(t, gs1, gs2) + assert.NotEqual(t, gs2, gs3) + assert.NotEqual(t, gs3, gs4) } func TestGosnmpWrapper_walk_retry(t *testing.T) { - if testing.Short() { - t.Skip("Skipping test due to random failures.") - } + t.Skip("Skipping test due to random failures.") + srvr, err := net.ListenUDP("udp4", &net.UDPAddr{}) - defer srvr.Close() require.NoError(t, err) + defer srvr.Close() reqCount := 0 // Set up a WaitGroup to wait for the server goroutine to exit and protect // reqCount. @@ -387,7 +536,10 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { } reqCount++ - srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error + // will cause decoding error + if _, err := srvr.WriteTo([]byte{'X'}, addr); err != nil { + return + } } }() @@ -407,10 +559,10 @@ func TestGosnmpWrapper_walk_retry(t *testing.T) { GoSNMP: gs, } err = gsw.Walk(".1.0.0", func(_ gosnmp.SnmpPDU) error { return nil }) - srvr.Close() + require.NoError(t, srvr.Close()) wg.Wait() assert.Error(t, err) - assert.False(t, gs.Conn == conn) + assert.NotEqual(t, gs.Conn, conn) assert.Equal(t, (gs.Retries+1)*2, reqCount) } @@ -418,8 +570,8 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { // TODO: Fix this test t.Skip("Test failing too often, skip for now and revisit later.") srvr, err := net.ListenUDP("udp4", &net.UDPAddr{}) - defer srvr.Close() require.NoError(t, err) + defer srvr.Close() reqCount := 0 // Set up a WaitGroup to wait for the server goroutine to exit and protect // reqCount. 
@@ -437,7 +589,10 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { } reqCount++ - srvr.WriteTo([]byte{'X'}, addr) // will cause decoding error + // will cause decoding error + if _, err := srvr.WriteTo([]byte{'X'}, addr); err != nil { + return + } } }() @@ -457,10 +612,10 @@ func TestGosnmpWrapper_get_retry(t *testing.T) { GoSNMP: gs, } _, err = gsw.Get([]string{".1.0.0"}) - srvr.Close() + require.NoError(t, srvr.Close()) wg.Wait() assert.Error(t, err) - assert.False(t, gs.Conn == conn) + assert.NotEqual(t, gs.Conn, conn) assert.Equal(t, (gs.Retries+1)*2, reqCount) } @@ -493,10 +648,20 @@ func TestTableBuild_walk(t *testing.T) { Oid: ".1.0.0.2.1.5", OidIndexLength: 1, }, + { + Name: "myfield6", + Oid: ".1.0.0.0.1.6", + Translate: true, + }, + { + Name: "myfield7", + Oid: ".1.0.0.0.1.6", + Translate: false, + }, }, } - tb, err := tbl.Build(tsc, true) + tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) require.NoError(t, err) assert.Equal(t, tb.Name, "mytable") @@ -510,6 +675,8 @@ func TestTableBuild_walk(t *testing.T) { "myfield3": float64(0.123), "myfield4": 11, "myfield5": 11, + "myfield6": "testTableEntry.7", + "myfield7": ".1.0.0.0.1.7", }, } rtr2 := RTableRow{ @@ -577,7 +744,7 @@ func TestTableBuild_noWalk(t *testing.T) { }, } - tb, err := tbl.Build(tsc, false) + tb, err := tbl.Build(tsc, false, NewNetsnmpTranslator()) require.NoError(t, err) rtr := RTableRow{ @@ -623,12 +790,11 @@ func TestGather(t *testing.T) { connectionCache: []snmpConnection{ tsc, }, - initialized: true, } acc := &testutil.Accumulator{} tstart := time.Now() - s.Gather(acc) + require.NoError(t, s.Gather(acc)) tstop := time.Now() require.Len(t, acc.Metrics, 2) @@ -640,8 +806,8 @@ func TestGather(t *testing.T) { assert.Len(t, m.Fields, 2) assert.Equal(t, 234, m.Fields["myfield2"]) assert.Equal(t, "baz", m.Fields["myfield3"]) - assert.True(t, tstart.Before(m.Time)) - assert.True(t, tstop.After(m.Time)) + assert.True(t, !tstart.After(m.Time)) + assert.True(t, !tstop.Before(m.Time)) m2 := acc.Metrics[1] assert.Equal(t, "myOtherTable", m2.Measurement) @@ -670,12 +836,11 @@ func TestGather_host(t *testing.T) { connectionCache: []snmpConnection{ tsc, }, - initialized: true, } acc := &testutil.Accumulator{} - s.Gather(acc) + require.NoError(t, s.Gather(acc)) require.Len(t, acc.Metrics, 1) m := acc.Metrics[0] @@ -688,11 +853,12 @@ func TestFieldConvert(t *testing.T) { conv string expected interface{} }{ - {[]byte("foo"), "", string("foo")}, + {[]byte("foo"), "", "foo"}, {"0.123", "float", float64(0.123)}, {[]byte("0.123"), "float", float64(0.123)}, {float32(0.123), "float", float64(float32(0.123))}, {float64(0.123), "float", float64(0.123)}, + {float64(0.123123123123), "float", float64(0.123123123123)}, {123, "float", float64(123)}, {123, "float(0)", float64(123)}, {123, "float(4)", float64(0.0123)}, @@ -726,6 +892,12 @@ func TestFieldConvert(t *testing.T) { {[]byte("abcd"), "ipaddr", "97.98.99.100"}, {"abcd", "ipaddr", "97.98.99.100"}, {[]byte("abcdefghijklmnop"), "ipaddr", "6162:6364:6566:6768:696a:6b6c:6d6e:6f70"}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:BigEndian:uint64", uint64(2602423610063712)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:BigEndian:uint32", uint32(605923)}, + {[]byte{0x00, 0x09}, "hextoint:BigEndian:uint16", uint16(9)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3, 0xF6, 0xD5, 0x3B, 0x60}, "hextoint:LittleEndian:uint64", uint64(6934371307618175232)}, + {[]byte{0x00, 0x09, 0x3E, 0xE3}, "hextoint:LittleEndian:uint32", uint32(3812493568)}, + {[]byte{0x00, 0x09}, 
"hextoint:LittleEndian:uint16", uint16(2304)}, } for _, tc := range testTable { @@ -740,7 +912,7 @@ func TestFieldConvert(t *testing.T) { func TestSnmpTranslateCache_miss(t *testing.T) { snmpTranslateCaches = nil oid := "IF-MIB::ifPhysAddress.1" - mibName, oidNum, oidText, conversion, err := SnmpTranslate(oid) + mibName, oidNum, oidText, conversion, err := NewNetsnmpTranslator().SnmpTranslate(oid) assert.Len(t, snmpTranslateCaches, 1) stc := snmpTranslateCaches[oid] require.NotNil(t, stc) @@ -761,7 +933,7 @@ func TestSnmpTranslateCache_hit(t *testing.T) { err: fmt.Errorf("e"), }, } - mibName, oidNum, oidText, conversion, err := SnmpTranslate("foo") + mibName, oidNum, oidText, conversion, err := NewNetsnmpTranslator().SnmpTranslate("foo") assert.Equal(t, "a", mibName) assert.Equal(t, "b", oidNum) assert.Equal(t, "c", oidText) @@ -773,7 +945,7 @@ func TestSnmpTranslateCache_hit(t *testing.T) { func TestSnmpTableCache_miss(t *testing.T) { snmpTableCaches = nil oid := ".1.0.0.0" - mibName, oidNum, oidText, fields, err := snmpTable(oid) + mibName, oidNum, oidText, fields, err := NewNetsnmpTranslator().SnmpTable(oid) assert.Len(t, snmpTableCaches, 1) stc := snmpTableCaches[oid] require.NotNil(t, stc) @@ -794,10 +966,249 @@ func TestSnmpTableCache_hit(t *testing.T) { err: fmt.Errorf("e"), }, } - mibName, oidNum, oidText, fields, err := snmpTable("foo") + mibName, oidNum, oidText, fields, err := NewNetsnmpTranslator().SnmpTable("foo") assert.Equal(t, "a", mibName) assert.Equal(t, "b", oidNum) assert.Equal(t, "c", oidText) assert.Equal(t, []Field{{Name: "d"}}, fields) assert.Equal(t, fmt.Errorf("e"), err) } + +func TestTableJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} + +func TestTableOuterJoin_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: true, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + SecondaryOuterJoin: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true, 
NewNetsnmpTranslator()) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + "index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + "index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + "index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + rtr4 := RTableRow{ + Tags: map[string]string{ + "index": "Secondary.0", + "myfield4": "foo", + }, + Fields: map[string]interface{}{ + "myfield5": 1, + }, + } + assert.Len(t, tb.Rows, 4) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) + assert.Contains(t, tb.Rows, rtr4) +} + +func TestTableJoinNoIndexAsTag_walk(t *testing.T) { + tbl := Table{ + Name: "mytable", + IndexAsTag: false, + Fields: []Field{ + { + Name: "myfield1", + Oid: ".1.0.0.3.1.1", + IsTag: true, + }, + { + Name: "myfield2", + Oid: ".1.0.0.3.1.2", + }, + { + Name: "myfield3", + Oid: ".1.0.0.3.1.3", + SecondaryIndexTable: true, + }, + { + Name: "myfield4", + Oid: ".1.0.0.0.1.1", + SecondaryIndexUse: true, + IsTag: true, + }, + { + Name: "myfield5", + Oid: ".1.0.0.0.1.2", + SecondaryIndexUse: true, + }, + }, + } + + tb, err := tbl.Build(tsc, true, NewNetsnmpTranslator()) + require.NoError(t, err) + + assert.Equal(t, tb.Name, "mytable") + rtr1 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance", + "myfield4": "bar", + //"index": "10", + }, + Fields: map[string]interface{}{ + "myfield2": 10, + "myfield3": 1, + "myfield5": 2, + }, + } + rtr2 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance2", + //"index": "11", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 2, + "myfield5": 0, + }, + } + rtr3 := RTableRow{ + Tags: map[string]string{ + "myfield1": "instance3", + //"index": "12", + }, + Fields: map[string]interface{}{ + "myfield2": 20, + "myfield3": 3, + }, + } + assert.Len(t, tb.Rows, 3) + assert.Contains(t, tb.Rows, rtr1) + assert.Contains(t, tb.Rows, rtr2) + assert.Contains(t, tb.Rows, rtr3) +} diff --git a/plugins/inputs/snmp/testdata/bridgeMib b/plugins/inputs/snmp/testdata/bridgeMib new file mode 100644 index 0000000000000..96f562732fd6a --- /dev/null +++ b/plugins/inputs/snmp/testdata/bridgeMib @@ -0,0 +1,1467 @@ +BRIDGE-MIB DEFINITIONS ::= BEGIN + +-- ---------------------------------------------------------- -- +-- MIB for IEEE 802.1D devices +-- ---------------------------------------------------------- -- +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, NOTIFICATION-TYPE, + Counter32, Integer32, TimeTicks, mib-2, TEXTUAL-CONVENTION, MacAddress, + MODULE-COMPLIANCE, NOTIFICATION-GROUP, OBJECT-GROUP, InterfaceIndex + FROM bridgeMibImports; + +dot1dBridge MODULE-IDENTITY + LAST-UPDATED "200509190000Z" + ORGANIZATION "IETF Bridge MIB Working Group" + CONTACT-INFO + "Email: bridge-mib@ietf.org + + K.C. Norseth (Editor) + L-3 Communications + Tel: +1 801-594-2809 + Email: kenyon.c.norseth@L-3com.com + Postal: 640 N. 2200 West. + Salt Lake City, Utah 84116-0850 + + Les Bell (Editor) + 3Com Europe Limited + Phone: +44 1442 438025 + Email: elbell@ntlworld.com + Postal: 3Com Centre, Boundary Way + Hemel Hempstead + Herts. 
HP2 7YU + UK + + Send comments to " + DESCRIPTION + "The Bridge MIB module for managing devices that support + IEEE 802.1D. + + Copyright (C) The Internet Society (2005). This version of + this MIB module is part of RFC 4188; see the RFC itself for + full legal notices." + REVISION "200509190000Z" + DESCRIPTION + "Third revision, published as part of RFC 4188. + + The MIB module has been converted to SMIv2 format. + Conformance statements have been added and some + description and reference clauses have been updated. + + The object dot1dStpPortPathCost32 was added to + support IEEE 802.1t and the permissible values of + dot1dStpPriority and dot1dStpPortPriority have been + clarified for bridges supporting IEEE 802.1t or + IEEE 802.1w. + + The interpretation of dot1dStpTimeSinceTopologyChange + has been clarified for bridges supporting the Rapid + Spanning Tree Protocol (RSTP)." + REVISION "199307310000Z" + DESCRIPTION + "Second revision, published as part of RFC 1493." + REVISION "199112310000Z" + DESCRIPTION + "Initial revision, published as part of RFC 1286." + ::= { mib-2 17 } + +-- ---------------------------------------------------------- -- +-- Textual Conventions +-- ---------------------------------------------------------- -- + +BridgeId ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "The Bridge-Identifier, as used in the Spanning Tree + Protocol, to uniquely identify a bridge. Its first two + octets (in network byte order) contain a priority value, + and its last 6 octets contain the MAC address used to + refer to a bridge in a unique fashion (typically, the + numerically smallest MAC address of all ports on the + bridge)." + SYNTAX OCTET STRING (SIZE (8)) + +Timeout ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "A Spanning Tree Protocol (STP) timer in units of 1/100 + seconds. Several objects in this MIB module represent + values of timers used by the Spanning Tree Protocol. + In this MIB, these timers have values in units of + hundredths of a second (i.e., 1/100 secs). + + These timers, when stored in a Spanning Tree Protocol's + BPDU, are in units of 1/256 seconds. Note, however, that + 802.1D-1998 specifies a settable granularity of no more + than one second for these timers. To avoid ambiguity, + a conversion algorithm is defined below for converting + between the different units, which ensures a timer's + value is not distorted by multiple conversions. + + To convert a Timeout value into a value in units of + 1/256 seconds, the following algorithm should be used: + + b = floor( (n * 256) / 100) + + where: + floor = quotient [ignore remainder] + n is the value in 1/100 second units + b is the value in 1/256 second units + + To convert the value from 1/256 second units back to + 1/100 seconds, the following algorithm should be used: + + n = ceiling( (b * 100) / 256) + + where: + ceiling = quotient [if remainder is 0], or + quotient + 1 [if remainder is nonzero] + n is the value in 1/100 second units + + b is the value in 1/256 second units + + Note: it is important that the arithmetic operations are + done in the order specified (i.e., multiply first, + divide second)." 
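The Timeout textual convention above fully specifies the unit conversion; as a quick cross-check of the floor/ceiling arithmetic, a small Go sketch (function names are illustrative):

    package main

    import "fmt"

    // toBPDU: 1/100 s units -> 1/256 s units, b = floor((n * 256) / 100).
    func toBPDU(n int) int { return n * 256 / 100 } // multiply first, then divide

    // fromBPDU: 1/256 s units -> 1/100 s units, n = ceiling((b * 100) / 256).
    func fromBPDU(b int) int { return (b*100 + 255) / 256 }

    func main() {
        n := 200 // two seconds in hundredths
        b := toBPDU(n)
        fmt.Println(b, fromBPDU(b)) // 512 200: the round trip does not distort the value
    }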
+ SYNTAX Integer32 + +-- ---------------------------------------------------------- -- +-- subtrees in the Bridge MIB +-- ---------------------------------------------------------- -- + +dot1dNotifications OBJECT IDENTIFIER ::= { dot1dBridge 0 } + +dot1dBase OBJECT IDENTIFIER ::= { dot1dBridge 1 } +dot1dStp OBJECT IDENTIFIER ::= { dot1dBridge 2 } + +dot1dSr OBJECT IDENTIFIER ::= { dot1dBridge 3 } +-- documented in RFC 1525 + +dot1dTp OBJECT IDENTIFIER ::= { dot1dBridge 4 } +dot1dStatic OBJECT IDENTIFIER ::= { dot1dBridge 5 } + +-- Subtrees used by Bridge MIB Extensions: +-- pBridgeMIB MODULE-IDENTITY ::= { dot1dBridge 6 } +-- qBridgeMIB MODULE-IDENTITY ::= { dot1dBridge 7 } +-- Note that the practice of registering related MIB modules +-- below dot1dBridge has been discouraged since there is no +-- robust mechanism to track such registrations. + +dot1dConformance OBJECT IDENTIFIER ::= { dot1dBridge 8 } + +-- ---------------------------------------------------------- -- +-- the dot1dBase subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dBase subtree is mandatory for all +-- bridges. +-- ---------------------------------------------------------- -- + +dot1dBaseBridgeAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The MAC address used by this bridge when it must be + referred to in a unique fashion. It is recommended + that this be the numerically smallest MAC address of + all ports that belong to this bridge. However, it is only + + required to be unique. When concatenated with + dot1dStpPriority, a unique BridgeIdentifier is formed, + which is used in the Spanning Tree Protocol." + REFERENCE + "IEEE 802.1D-1998: clauses 14.4.1.1.3 and 7.12.5" + ::= { dot1dBase 1 } + +dot1dBaseNumPorts OBJECT-TYPE + SYNTAX Integer32 + UNITS "ports" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of ports controlled by this bridging + entity." + REFERENCE + "IEEE 802.1D-1998: clause 14.4.1.1.3" + ::= { dot1dBase 2 } + +dot1dBaseType OBJECT-TYPE + SYNTAX INTEGER { + unknown(1), + transparent-only(2), + sourceroute-only(3), + srt(4) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "Indicates what type of bridging this bridge can + perform. If a bridge is actually performing a + certain type of bridging, this will be indicated by + entries in the port table for the given type." + ::= { dot1dBase 3 } + +-- ---------------------------------------------------------- -- +-- The Generic Bridge Port Table +-- ---------------------------------------------------------- -- +dot1dBasePortTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dBasePortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains generic information about every + port that is associated with this bridge. Transparent, + source-route, and srt ports are included." + ::= { dot1dBase 4 } + +dot1dBasePortEntry OBJECT-TYPE + SYNTAX Dot1dBasePortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information for each port of the bridge." 
+ REFERENCE + "IEEE 802.1D-1998: clause 14.4.2, 14.6.1" + INDEX { dot1dBasePort } + ::= { dot1dBasePortTable 1 } + +Dot1dBasePortEntry ::= + SEQUENCE { + dot1dBasePort + Integer32, + dot1dBasePortIfIndex + InterfaceIndex, + dot1dBasePortCircuit + OBJECT IDENTIFIER, + dot1dBasePortDelayExceededDiscards + Counter32, + dot1dBasePortMtuExceededDiscards + Counter32 + } + +dot1dBasePort OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port for which this entry + contains bridge management information." + ::= { dot1dBasePortEntry 1 } + +dot1dBasePortIfIndex OBJECT-TYPE + SYNTAX InterfaceIndex + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The value of the instance of the ifIndex object, + defined in IF-MIB, for the interface corresponding + to this port." + ::= { dot1dBasePortEntry 2 } + +dot1dBasePortCircuit OBJECT-TYPE + SYNTAX OBJECT IDENTIFIER + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "For a port that (potentially) has the same value of + dot1dBasePortIfIndex as another port on the same bridge. + This object contains the name of an object instance + unique to this port. For example, in the case where + multiple ports correspond one-to-one with multiple X.25 + virtual circuits, this value might identify an (e.g., + the first) object instance associated with the X.25 + virtual circuit corresponding to this port. + + For a port which has a unique value of + dot1dBasePortIfIndex, this object can have the value + { 0 0 }." + ::= { dot1dBasePortEntry 3 } + +dot1dBasePortDelayExceededDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames discarded by this port due + to excessive transit delay through the bridge. It + is incremented by both transparent and source + route bridges." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dBasePortEntry 4 } + +dot1dBasePortMtuExceededDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of frames discarded by this port due + to an excessive size. It is incremented by both + transparent and source route bridges." + REFERENCE + "IEEE 802.1D-1998: clause 14.6.1.1.3" + ::= { dot1dBasePortEntry 5 } + +-- ---------------------------------------------------------- -- +-- the dot1dStp subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dStp subtree is optional. It is +-- implemented by those bridges that support the Spanning Tree +-- Protocol. +-- ---------------------------------------------------------- -- + +dot1dStpProtocolSpecification OBJECT-TYPE + SYNTAX INTEGER { + unknown(1), + decLb100(2), + ieee8021d(3) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "An indication of what version of the Spanning Tree + Protocol is being run. The value 'decLb100(2)' + indicates the DEC LANbridge 100 Spanning Tree protocol. + IEEE 802.1D implementations will return 'ieee8021d(3)'. + If future versions of the IEEE Spanning Tree Protocol + that are incompatible with the current version + are released a new value will be defined." + ::= { dot1dStp 1 } + +dot1dStpPriority OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value of the write-able portion of the Bridge ID + (i.e., the first two octets of the (8 octet long) Bridge + ID). The other (last) 6 octets of the Bridge ID are + given by the value of dot1dBaseBridgeAddress. 
+ On bridges supporting IEEE 802.1t or IEEE 802.1w, + permissible values are 0-61440, in steps of 4096." + REFERENCE + "IEEE 802.1D-1998 clause 8.10.2, Table 8-4, + IEEE 802.1t clause 8.10.2, Table 8-4, clause 14.3." + ::= { dot1dStp 2 } + +dot1dStpTimeSinceTopologyChange OBJECT-TYPE + SYNTAX TimeTicks + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The time (in hundredths of a second) since the + last time a topology change was detected by the + bridge entity. + For RSTP, this reports the time since the tcWhile + timer for any port on this Bridge was nonzero." + REFERENCE + "IEEE 802.1D-1998 clause 14.8.1.1., + IEEE 802.1w clause 14.8.1.1." + ::= { dot1dStp 3 } + +dot1dStpTopChanges OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of topology changes detected by + this bridge since the management entity was last + reset or initialized." + REFERENCE + "IEEE 802.1D-1998 clause 14.8.1.1." + ::= { dot1dStp 4 } + +dot1dStpDesignatedRoot OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The bridge identifier of the root of the spanning + tree, as determined by the Spanning Tree Protocol, + as executed by this node. This value is used as + the Root Identifier parameter in all Configuration + Bridge PDUs originated by this node." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.1" + ::= { dot1dStp 5 } + +dot1dStpRootCost OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The cost of the path to the root as seen from + this bridge." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.2" + ::= { dot1dStp 6 } + +dot1dStpRootPort OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port that offers the lowest + cost path from this bridge to the root bridge." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.3" + ::= { dot1dStp 7 } + +dot1dStpMaxAge OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum age of Spanning Tree Protocol information + learned from the network on any port before it is + discarded, in units of hundredths of a second. This is + the actual value that this bridge is currently using." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.4" + ::= { dot1dStp 8 } + +dot1dStpHelloTime OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The amount of time between the transmission of + Configuration bridge PDUs by this node on any port when + it is the root of the spanning tree, or trying to become + so, in units of hundredths of a second. This is the + actual value that this bridge is currently using." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.5" + ::= { dot1dStp 9 } + +dot1dStpHoldTime OBJECT-TYPE + SYNTAX Integer32 + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "This time value determines the interval length + during which no more than two Configuration bridge + PDUs shall be transmitted by this node, in units + of hundredths of a second." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.14" + ::= { dot1dStp 10 } + +dot1dStpForwardDelay OBJECT-TYPE + SYNTAX Timeout + UNITS "centi-seconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "This time value, measured in units of hundredths of a + second, controls how fast a port changes its spanning + state when moving towards the Forwarding state. 
The + value determines how long the port stays in each of the + Listening and Learning states, which precede the + Forwarding state. This value is also used when a + topology change has been detected and is underway, to + age all dynamic entries in the Forwarding Database. + [Note that this value is the one that this bridge is + currently using, in contrast to + dot1dStpBridgeForwardDelay, which is the value that this + bridge and all others would start using if/when this + bridge were to become the root.]" + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.6" + ::= { dot1dStp 11 } + +dot1dStpBridgeMaxAge OBJECT-TYPE + SYNTAX Timeout (600..4000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for MaxAge when this + bridge is acting as the root. Note that 802.1D-1998 + specifies that the range for this parameter is related + to the value of dot1dStpBridgeHelloTime. The + granularity of this timer is specified by 802.1D-1998 to + be 1 second. An agent may return a badValue error if a + set is attempted to a value that is not a whole number + of seconds." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.8" + ::= { dot1dStp 12 } + +dot1dStpBridgeHelloTime OBJECT-TYPE + SYNTAX Timeout (100..1000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for HelloTime when this + bridge is acting as the root. The granularity of this + timer is specified by 802.1D-1998 to be 1 second. An + agent may return a badValue error if a set is attempted + + to a value that is not a whole number of seconds." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.9" + ::= { dot1dStp 13 } + +dot1dStpBridgeForwardDelay OBJECT-TYPE + SYNTAX Timeout (400..3000) + UNITS "centi-seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value that all bridges use for ForwardDelay when + this bridge is acting as the root. Note that + 802.1D-1998 specifies that the range for this parameter + is related to the value of dot1dStpBridgeMaxAge. The + granularity of this timer is specified by 802.1D-1998 to + be 1 second. An agent may return a badValue error if a + set is attempted to a value that is not a whole number + of seconds." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.3.10" + ::= { dot1dStp 14 } + +-- ---------------------------------------------------------- -- +-- The Spanning Tree Port Table +-- ---------------------------------------------------------- -- + +dot1dStpPortTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dStpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains port-specific information + for the Spanning Tree Protocol." + ::= { dot1dStp 15 } + +dot1dStpPortEntry OBJECT-TYPE + SYNTAX Dot1dStpPortEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of information maintained by every port about + the Spanning Tree Protocol state for that port." 
+ INDEX { dot1dStpPort } + ::= { dot1dStpPortTable 1 } + +Dot1dStpPortEntry ::= + SEQUENCE { + + dot1dStpPort + Integer32, + dot1dStpPortPriority + Integer32, + dot1dStpPortState + INTEGER, + dot1dStpPortEnable + INTEGER, + dot1dStpPortPathCost + Integer32, + dot1dStpPortDesignatedRoot + BridgeId, + dot1dStpPortDesignatedCost + Integer32, + dot1dStpPortDesignatedBridge + BridgeId, + dot1dStpPortDesignatedPort + OCTET STRING, + dot1dStpPortForwardTransitions + Counter32, + dot1dStpPortPathCost32 + Integer32 + } + +dot1dStpPort OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port number of the port for which this entry + contains Spanning Tree Protocol management information." + REFERENCE + "IEEE 802.1D-1998: clause 14.8.2.1.2" + ::= { dot1dStpPortEntry 1 } + +dot1dStpPortPriority OBJECT-TYPE + SYNTAX Integer32 (0..255) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The value of the priority field that is contained in + the first (in network byte order) octet of the (2 octet + long) Port ID. The other octet of the Port ID is given + by the value of dot1dStpPort. + On bridges supporting IEEE 802.1t or IEEE 802.1w, + permissible values are 0-240, in steps of 16." + REFERENCE + "IEEE 802.1D-1998 clause 8.10.2, Table 8-4, + IEEE 802.1t clause 8.10.2, Table 8-4, clause 14.3." + ::= { dot1dStpPortEntry 2 } + +dot1dStpPortState OBJECT-TYPE + SYNTAX INTEGER { + disabled(1), + blocking(2), + listening(3), + learning(4), + forwarding(5), + broken(6) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The port's current state, as defined by application of + the Spanning Tree Protocol. This state controls what + action a port takes on reception of a frame. If the + bridge has detected a port that is malfunctioning, it + will place that port into the broken(6) state. For + ports that are disabled (see dot1dStpPortEnable), this + object will have a value of disabled(1)." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.2" + ::= { dot1dStpPortEntry 3 } + +dot1dStpPortEnable OBJECT-TYPE + SYNTAX INTEGER { + enabled(1), + disabled(2) + } + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The enabled/disabled status of the port." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.2" + ::= { dot1dStpPortEntry 4 } + +dot1dStpPortPathCost OBJECT-TYPE + SYNTAX Integer32 (1..65535) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The contribution of this port to the path cost of + paths towards the spanning tree root which include + this port. 802.1D-1998 recommends that the default + value of this parameter be in inverse proportion to + + the speed of the attached LAN. + + New implementations should support dot1dStpPortPathCost32. + If the port path costs exceeds the maximum value of this + object then this object should report the maximum value, + namely 65535. Applications should try to read the + dot1dStpPortPathCost32 object if this object reports + the maximum value." + REFERENCE "IEEE 802.1D-1998: clause 8.5.5.3" + ::= { dot1dStpPortEntry 5 } + +dot1dStpPortDesignatedRoot OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The unique Bridge Identifier of the Bridge + recorded as the Root in the Configuration BPDUs + transmitted by the Designated Bridge for the + segment to which the port is attached." 
+ REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.4" + ::= { dot1dStpPortEntry 6 } + +dot1dStpPortDesignatedCost OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The path cost of the Designated Port of the segment + connected to this port. This value is compared to the + Root Path Cost field in received bridge PDUs." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.5" + ::= { dot1dStpPortEntry 7 } + +dot1dStpPortDesignatedBridge OBJECT-TYPE + SYNTAX BridgeId + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The Bridge Identifier of the bridge that this + port considers to be the Designated Bridge for + this port's segment." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.6" + ::= { dot1dStpPortEntry 8 } + +dot1dStpPortDesignatedPort OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (2)) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The Port Identifier of the port on the Designated + Bridge for this port's segment." + REFERENCE + "IEEE 802.1D-1998: clause 8.5.5.7" + ::= { dot1dStpPortEntry 9 } + +dot1dStpPortForwardTransitions OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times this port has transitioned + from the Learning state to the Forwarding state." + ::= { dot1dStpPortEntry 10 } + +dot1dStpPortPathCost32 OBJECT-TYPE + SYNTAX Integer32 (1..200000000) + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The contribution of this port to the path cost of + paths towards the spanning tree root which include + this port. 802.1D-1998 recommends that the default + value of this parameter be in inverse proportion to + the speed of the attached LAN. + + This object replaces dot1dStpPortPathCost to support + IEEE 802.1t." + REFERENCE + "IEEE 802.1t clause 8.10.2, Table 8-5." + ::= { dot1dStpPortEntry 11 } + +-- ---------------------------------------------------------- -- +-- the dot1dTp subtree +-- ---------------------------------------------------------- -- +-- Implementation of the dot1dTp subtree is optional. It is +-- implemented by those bridges that support the transparent +-- bridging mode. A transparent or SRT bridge will implement +-- this subtree. +-- ---------------------------------------------------------- -- + +dot1dTpLearnedEntryDiscards OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of Forwarding Database entries that + have been or would have been learned, but have been + discarded due to a lack of storage space in the + Forwarding Database. If this counter is increasing, it + indicates that the Forwarding Database is regularly + becoming full (a condition that has unpleasant + performance effects on the subnetwork). If this counter + has a significant value but is not presently increasing, + it indicates that the problem has been occurring but is + not persistent." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.1.1.3" + ::= { dot1dTp 1 } + +dot1dTpAgingTime OBJECT-TYPE + SYNTAX Integer32 (10..1000000) + UNITS "seconds" + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The timeout period in seconds for aging out + dynamically-learned forwarding information. + 802.1D-1998 recommends a default of 300 seconds." 
+ REFERENCE + "IEEE 802.1D-1998: clause 14.7.1.1.3" + ::= { dot1dTp 2 } + +-- ---------------------------------------------------------- -- +-- The Forwarding Database for Transparent Bridges +-- ---------------------------------------------------------- -- + +dot1dTpFdbTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dTpFdbEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table that contains information about unicast + entries for which the bridge has forwarding and/or + filtering information. This information is used + by the transparent bridging function in + determining how to propagate a received frame." + ::= { dot1dTp 3 } + +dot1dTpFdbEntry OBJECT-TYPE + SYNTAX Dot1dTpFdbEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "Information about a specific unicast MAC address + for which the bridge has some forwarding and/or + filtering information." + INDEX { dot1dTpFdbAddress } + ::= { dot1dTpFdbTable 1 } + +Dot1dTpFdbEntry ::= + SEQUENCE { + dot1dTpFdbAddress + MacAddress, + dot1dTpFdbPort + Integer32, + dot1dTpFdbStatus + INTEGER + } + +dot1dTpFdbAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "A unicast MAC address for which the bridge has + forwarding and/or filtering information." + REFERENCE + "IEEE 802.1D-1998: clause 7.9.1, 7.9.2" + ::= { dot1dTpFdbEntry 1 } + +dot1dTpFdbPort OBJECT-TYPE + SYNTAX Integer32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "Either the value '0', or the port number of the port on + which a frame having a source address equal to the value + of the corresponding instance of dot1dTpFdbAddress has + been seen. A value of '0' indicates that the port + number has not been learned, but that the bridge does + have some forwarding/filtering information about this + address (e.g., in the dot1dStaticTable). Implementors + are encouraged to assign the port value to this object + whenever it is learned, even for addresses for which the + corresponding value of dot1dTpFdbStatus is not + learned(3)." + ::= { dot1dTpFdbEntry 2 } + +dot1dTpFdbStatus OBJECT-TYPE + SYNTAX INTEGER { + other(1), + invalid(2), + learned(3), + self(4), + mgmt(5) + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The status of this entry. The meanings of the + values are: + other(1) - none of the following. This would + include the case where some other MIB object + (not the corresponding instance of + dot1dTpFdbPort, nor an entry in the + dot1dStaticTable) is being used to determine if + and how frames addressed to the value of the + corresponding instance of dot1dTpFdbAddress are + being forwarded. + invalid(2) - this entry is no longer valid (e.g., + it was learned but has since aged out), but has + not yet been flushed from the table. + learned(3) - the value of the corresponding instance + of dot1dTpFdbPort was learned, and is being + used. + self(4) - the value of the corresponding instance of + dot1dTpFdbAddress represents one of the bridge's + addresses. The corresponding instance of + dot1dTpFdbPort indicates which of the bridge's + ports has this address. + mgmt(5) - the value of the corresponding instance of + dot1dTpFdbAddress is also the value of an + existing instance of dot1dStaticAddress." 
+    ::= { dot1dTpFdbEntry 3 }
+
+-- ---------------------------------------------------------- --
+-- Port Table for Transparent Bridges
+-- ---------------------------------------------------------- --
+
+dot1dTpPortTable OBJECT-TYPE
+    SYNTAX SEQUENCE OF Dot1dTpPortEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION
+        "A table that contains information about every port that
+        is associated with this transparent bridge."
+    ::= { dot1dTp 4 }
+
+dot1dTpPortEntry OBJECT-TYPE
+    SYNTAX Dot1dTpPortEntry
+    MAX-ACCESS not-accessible
+    STATUS current
+    DESCRIPTION
+        "A list of information for each port of a transparent
+        bridge."
+    INDEX { dot1dTpPort }
+    ::= { dot1dTpPortTable 1 }
+
+Dot1dTpPortEntry ::=
+    SEQUENCE {
+        dot1dTpPort
+            Integer32,
+        dot1dTpPortMaxInfo
+            Integer32,
+        dot1dTpPortInFrames
+            Counter32,
+        dot1dTpPortOutFrames
+            Counter32,
+        dot1dTpPortInDiscards
+            Counter32
+    }
+
+dot1dTpPort OBJECT-TYPE
+    SYNTAX Integer32 (1..65535)
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION
+        "The port number of the port for which this entry
+        contains Transparent bridging management information."
+    ::= { dot1dTpPortEntry 1 }
+
+-- It would be nice if we could use ifMtu as the size of the
+-- largest INFO field, but we can't because ifMtu is defined
+-- to be the size that the (inter-)network layer can use, which
+-- can differ from the MAC layer (especially if several layers
+-- of encapsulation are used).
+
+dot1dTpPortMaxInfo OBJECT-TYPE
+    SYNTAX Integer32
+    UNITS "bytes"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION
+        "The maximum size of the INFO (non-MAC) field that
+        this port will receive or transmit."
+    ::= { dot1dTpPortEntry 2 }
+
+dot1dTpPortInFrames OBJECT-TYPE
+    SYNTAX Counter32
+    UNITS "frames"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION
+        "The number of frames that have been received by this
+        port from its segment. Note that a frame received on the
+        interface corresponding to this port is counted by this
+        object if and only if it is for a protocol being
+        processed by the local bridging function, including
+        bridge management frames."
+    REFERENCE
+        "IEEE 802.1D-1998: clause 14.6.1.1.3"
+    ::= { dot1dTpPortEntry 3 }
+
+dot1dTpPortOutFrames OBJECT-TYPE
+    SYNTAX Counter32
+    UNITS "frames"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION
+        "The number of frames that have been transmitted by this
+        port to its segment. Note that a frame transmitted on
+        the interface corresponding to this port is counted by
+        this object if and only if it is for a protocol being
+        processed by the local bridging function, including
+        bridge management frames."
+    REFERENCE
+        "IEEE 802.1D-1998: clause 14.6.1.1.3"
+    ::= { dot1dTpPortEntry 4 }
+
+dot1dTpPortInDiscards OBJECT-TYPE
+    SYNTAX Counter32
+    UNITS "frames"
+    MAX-ACCESS read-only
+    STATUS current
+    DESCRIPTION
+        "Count of received valid frames that were discarded
+        (i.e., filtered) by the Forwarding Process."
+    REFERENCE
+        "IEEE 802.1D-1998: clause 14.6.1.1.3"
+    ::= { dot1dTpPortEntry 5 }
+
+-- ---------------------------------------------------------- --
+-- The Static (Destination-Address Filtering) Database
+-- ---------------------------------------------------------- --
+-- Implementation of this subtree is optional.
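Editor's note: the dot1dTpFdbStatus enumeration defined above is typical of the integer-coded states a consumer of this MIB has to translate before the values are readable. A minimal Go sketch of that mapping follows; it is illustrative only, not part of this patch, and the function name fdbStatusName is invented here.

    package main

    import "fmt"

    // fdbStatusName maps dot1dTpFdbStatus values to the labels in the
    // BRIDGE-MIB enumeration above. Unknown codes are reported
    // explicitly rather than silently dropped.
    func fdbStatusName(v int) string {
        switch v {
        case 1:
            return "other"
        case 2:
            return "invalid"
        case 3:
            return "learned"
        case 4:
            return "self"
        case 5:
            return "mgmt"
        default:
            return fmt.Sprintf("unknown(%d)", v)
        }
    }

    func main() {
        fmt.Println(fdbStatusName(3)) // learned
    }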
+-- ---------------------------------------------------------- -- + +dot1dStaticTable OBJECT-TYPE + SYNTAX SEQUENCE OF Dot1dStaticEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing filtering information configured + into the bridge by (local or network) management + specifying the set of ports to which frames received + from specific ports and containing specific destination + addresses are allowed to be forwarded. The value of + zero in this table, as the port number from which frames + with a specific destination address are received, is + used to specify all ports for which there is no specific + entry in this table for that particular destination + address. Entries are valid for unicast and for + group/broadcast addresses." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.2" + ::= { dot1dStatic 1 } + +dot1dStaticEntry OBJECT-TYPE + SYNTAX Dot1dStaticEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "Filtering information configured into the bridge by + (local or network) management specifying the set of + ports to which frames received from a specific port and + containing a specific destination address are allowed to + be forwarded." + REFERENCE + "IEEE 802.1D-1998: clause 14.7.2" + INDEX { dot1dStaticAddress, dot1dStaticReceivePort } + ::= { dot1dStaticTable 1 } + +Dot1dStaticEntry ::= + SEQUENCE { + dot1dStaticAddress MacAddress, + dot1dStaticReceivePort Integer32, + dot1dStaticAllowedToGoTo OCTET STRING, + dot1dStaticStatus INTEGER + } + +dot1dStaticAddress OBJECT-TYPE + SYNTAX MacAddress + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The destination MAC address in a frame to which this + entry's filtering information applies. This object can + take the value of a unicast address, a group address, or + the broadcast address." + REFERENCE + "IEEE 802.1D-1998: clause 7.9.1, 7.9.2" + ::= { dot1dStaticEntry 1 } + +dot1dStaticReceivePort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "Either the value '0', or the port number of the port + from which a frame must be received in order for this + entry's filtering information to apply. A value of zero + indicates that this entry applies on all ports of the + bridge for which there is no other applicable entry." + ::= { dot1dStaticEntry 2 } + +dot1dStaticAllowedToGoTo OBJECT-TYPE + SYNTAX OCTET STRING (SIZE (0..512)) + MAX-ACCESS read-create + STATUS current + DESCRIPTION + "The set of ports to which frames received from a + specific port and destined for a specific MAC address, + are allowed to be forwarded. Each octet within the + value of this object specifies a set of eight ports, + with the first octet specifying ports 1 through 8, the + second octet specifying ports 9 through 16, etc. Within + each octet, the most significant bit represents the + lowest numbered port, and the least significant bit + represents the highest numbered port. Thus, each port + of the bridge is represented by a single bit within the + value of this object. If that bit has a value of '1', + then that port is included in the set of ports; the port + is not included if its bit has a value of '0'. (Note + that the setting of the bit corresponding to the port + from which a frame is received is irrelevant.) The + default value of this object is a string of ones of + appropriate length. 
+
+        The value of this object may exceed the required minimum
+        maximum message size of some SNMP transport (484 bytes,
+        in the case of SNMP over UDP, see RFC 3417, section 3.2).
+        SNMP engines on bridges supporting a large number of
+        ports must support appropriate maximum message sizes."
+    ::= { dot1dStaticEntry 3 }
+
+dot1dStaticStatus OBJECT-TYPE
+    SYNTAX INTEGER {
+        other(1),
+        invalid(2),
+        permanent(3),
+        deleteOnReset(4),
+        deleteOnTimeout(5)
+    }
+    MAX-ACCESS read-create
+    STATUS current
+    DESCRIPTION
+        "This object indicates the status of this entry.
+        The default value is permanent(3).
+        other(1) - this entry is currently in use but the
+            conditions under which it will remain so are
+            different from each of the following values.
+        invalid(2) - writing this value to the object
+            removes the corresponding entry.
+        permanent(3) - this entry is currently in use and
+            will remain so after the next reset of the
+            bridge.
+        deleteOnReset(4) - this entry is currently in use
+            and will remain so until the next reset of the
+            bridge.
+        deleteOnTimeout(5) - this entry is currently in use
+            and will remain so until it is aged out."
+    ::= { dot1dStaticEntry 4 }
+
+-- ---------------------------------------------------------- --
+-- Notifications for use by Bridges
+-- ---------------------------------------------------------- --
+-- Notifications for the Spanning Tree Protocol
+-- ---------------------------------------------------------- --
+
+newRoot NOTIFICATION-TYPE
+    -- OBJECTS { }
+    STATUS current
+    DESCRIPTION
+        "The newRoot trap indicates that the sending agent has
+        become the new root of the Spanning Tree; the trap is
+        sent by a bridge soon after its election as the new
+        root, e.g., upon expiration of the Topology Change Timer,
+        immediately subsequent to its election. Implementation
+        of this trap is optional."
+    ::= { dot1dNotifications 1 }
+
+topologyChange NOTIFICATION-TYPE
+    -- OBJECTS { }
+    STATUS current
+    DESCRIPTION
+        "A topologyChange trap is sent by a bridge when any of
+        its configured ports transitions from the Learning state
+        to the Forwarding state, or from the Forwarding state to
+        the Blocking state. The trap is not sent if a newRoot
+        trap is sent for the same transition. Implementation of
+        this trap is optional."
+    ::= { dot1dNotifications 2 }
+
+-- ---------------------------------------------------------- --
+-- IEEE 802.1D MIB - Conformance Information
+-- ---------------------------------------------------------- --
+
+dot1dGroups OBJECT IDENTIFIER ::= { dot1dConformance 1 }
+dot1dCompliances OBJECT IDENTIFIER ::= { dot1dConformance 2 }
+
+-- ---------------------------------------------------------- --
+-- units of conformance
+-- ---------------------------------------------------------- --
+
+-- ---------------------------------------------------------- --
+-- the dot1dBase group
+-- ---------------------------------------------------------- --
+
+dot1dBaseBridgeGroup OBJECT-GROUP
+    OBJECTS {
+        dot1dBaseBridgeAddress,
+        dot1dBaseNumPorts,
+        dot1dBaseType
+    }
+    STATUS current
+    DESCRIPTION
+        "Bridge level information for this device."
+    ::= { dot1dGroups 1 }
+
+dot1dBasePortGroup OBJECT-GROUP
+    OBJECTS {
+        dot1dBasePort,
+        dot1dBasePortIfIndex,
+        dot1dBasePortCircuit,
+        dot1dBasePortDelayExceededDiscards,
+        dot1dBasePortMtuExceededDiscards
+    }
+    STATUS current
+    DESCRIPTION
+        "Information for each port on this device."
+ ::= { dot1dGroups 2 } + +-- ---------------------------------------------------------- -- +-- the dot1dStp group +-- ---------------------------------------------------------- -- + +dot1dStpBridgeGroup OBJECT-GROUP + OBJECTS { + dot1dStpProtocolSpecification, + dot1dStpPriority, + dot1dStpTimeSinceTopologyChange, + dot1dStpTopChanges, + dot1dStpDesignatedRoot, + dot1dStpRootCost, + dot1dStpRootPort, + dot1dStpMaxAge, + dot1dStpHelloTime, + dot1dStpHoldTime, + dot1dStpForwardDelay, + dot1dStpBridgeMaxAge, + dot1dStpBridgeHelloTime, + dot1dStpBridgeForwardDelay + } + STATUS current + DESCRIPTION + "Bridge level Spanning Tree data for this device." + ::= { dot1dGroups 3 } + +dot1dStpPortGroup OBJECT-GROUP + OBJECTS { + dot1dStpPort, + dot1dStpPortPriority, + dot1dStpPortState, + dot1dStpPortEnable, + dot1dStpPortPathCost, + dot1dStpPortDesignatedRoot, + dot1dStpPortDesignatedCost, + dot1dStpPortDesignatedBridge, + dot1dStpPortDesignatedPort, + dot1dStpPortForwardTransitions + } + STATUS current + DESCRIPTION + "Spanning Tree data for each port on this device." + ::= { dot1dGroups 4 } + +dot1dStpPortGroup2 OBJECT-GROUP + OBJECTS { + dot1dStpPort, + dot1dStpPortPriority, + dot1dStpPortState, + dot1dStpPortEnable, + dot1dStpPortDesignatedRoot, + dot1dStpPortDesignatedCost, + dot1dStpPortDesignatedBridge, + dot1dStpPortDesignatedPort, + dot1dStpPortForwardTransitions, + dot1dStpPortPathCost32 + } + STATUS current + DESCRIPTION + "Spanning Tree data for each port on this device." + ::= { dot1dGroups 5 } + +dot1dStpPortGroup3 OBJECT-GROUP + OBJECTS { + dot1dStpPortPathCost32 + } + STATUS current + DESCRIPTION + "Spanning Tree data for devices supporting 32-bit + path costs." + ::= { dot1dGroups 6 } + +-- ---------------------------------------------------------- -- +-- the dot1dTp group +-- ---------------------------------------------------------- -- + +dot1dTpBridgeGroup OBJECT-GROUP + OBJECTS { + dot1dTpLearnedEntryDiscards, + dot1dTpAgingTime + } + STATUS current + DESCRIPTION + "Bridge level Transparent Bridging data." + ::= { dot1dGroups 7 } + +dot1dTpFdbGroup OBJECT-GROUP + OBJECTS { + + dot1dTpFdbAddress, + dot1dTpFdbPort, + dot1dTpFdbStatus + } + STATUS current + DESCRIPTION + "Filtering Database information for the Bridge." + ::= { dot1dGroups 8 } + +dot1dTpGroup OBJECT-GROUP + OBJECTS { + dot1dTpPort, + dot1dTpPortMaxInfo, + dot1dTpPortInFrames, + dot1dTpPortOutFrames, + dot1dTpPortInDiscards + } + STATUS current + DESCRIPTION + "Dynamic Filtering Database information for each port of + the Bridge." + ::= { dot1dGroups 9 } + +-- ---------------------------------------------------------- -- +-- The Static (Destination-Address Filtering) Database +-- ---------------------------------------------------------- -- + +dot1dStaticGroup OBJECT-GROUP + OBJECTS { + dot1dStaticAddress, + dot1dStaticReceivePort, + dot1dStaticAllowedToGoTo, + dot1dStaticStatus + } + STATUS current + DESCRIPTION + "Static Filtering Database information for each port of + the Bridge." + ::= { dot1dGroups 10 } + +-- ---------------------------------------------------------- -- +-- The Trap Notification Group +-- ---------------------------------------------------------- -- + +dot1dNotificationGroup NOTIFICATION-GROUP + NOTIFICATIONS { + newRoot, + topologyChange + } + STATUS current + DESCRIPTION + "Group of objects describing notifications (traps)." 
+    ::= { dot1dGroups 11 }
+
+-- ---------------------------------------------------------- --
+-- compliance statements
+-- ---------------------------------------------------------- --
+
+bridgeCompliance1493 MODULE-COMPLIANCE
+    STATUS current
+    DESCRIPTION
+        "The compliance statement for device support of bridging
+        services, as per RFC1493."
+
+    MODULE
+    MANDATORY-GROUPS {
+        dot1dBaseBridgeGroup,
+        dot1dBasePortGroup
+    }
+
+    GROUP dot1dStpBridgeGroup
+    DESCRIPTION
+        "Implementation of this group is mandatory for bridges
+        that support the Spanning Tree Protocol."
+
+    GROUP dot1dStpPortGroup
+    DESCRIPTION
+        "Implementation of this group is mandatory for bridges
+        that support the Spanning Tree Protocol."
+
+    GROUP dot1dTpBridgeGroup
+    DESCRIPTION
+        "Implementation of this group is mandatory for bridges
+        that support the transparent bridging mode. A
+        transparent or SRT bridge will implement this group."
+
+    GROUP dot1dTpFdbGroup
+    DESCRIPTION
+        "Implementation of this group is mandatory for bridges
+        that support the transparent bridging mode. A
+        transparent or SRT bridge will implement this group."
+
+    GROUP dot1dTpGroup
+    DESCRIPTION
+        "Implementation of this group is mandatory for bridges
+        that support the transparent bridging mode. A
+        transparent or SRT bridge will implement this group."
+
+    GROUP dot1dStaticGroup
+    DESCRIPTION
+        "Implementation of this group is optional."
+
+    GROUP dot1dNotificationGroup
+    DESCRIPTION
+        "Implementation of this group is optional."
+    ::= { dot1dCompliances 1 }
+
+bridgeCompliance4188 MODULE-COMPLIANCE
+    STATUS current
+    DESCRIPTION
+        "The compliance statement for device support of bridging
+        services. This supports 32-bit Path Cost values and the
+        more restricted bridge and port priorities, as per IEEE
+        802.1t.
+
+        Full support for the 802.1D management objects requires that
+        the SNMPv2-MIB [RFC3418] objects sysDescr and sysUpTime, as
+        well as the IF-MIB [RFC2863] objects ifIndex, ifType,
+        ifDescr, ifPhysAddress, and ifLastChange are implemented."
+
+    MODULE
+    MANDATORY-GROUPS {
+        dot1dBaseBridgeGroup,
+        dot1dBasePortGroup
+    }
+
+    GROUP dot1dStpBridgeGroup
+    DESCRIPTION
+        "Implementation of this group is mandatory for
+        bridges that support the Spanning Tree Protocol."
+
+    OBJECT dot1dStpPriority
+    SYNTAX Integer32 (0|4096|8192|12288|16384|20480|24576
+        |28672|32768|36864|40960|45056|49152
+        |53248|57344|61440)
+    DESCRIPTION
+        "The possible values defined by IEEE 802.1t."
+
+    GROUP dot1dStpPortGroup2
+    DESCRIPTION
+        "Implementation of this group is mandatory for
+        bridges that support the Spanning Tree Protocol."
+
+    GROUP dot1dStpPortGroup3
+    DESCRIPTION
+        "Implementation of this group is mandatory for bridges
+        that support the Spanning Tree Protocol and 32-bit path
+        costs. In particular, this includes devices supporting
+        IEEE 802.1t and IEEE 802.1w."
+
+    OBJECT dot1dStpPortPriority
+    SYNTAX Integer32 (0|16|32|48|64|80|96|112|128
+        |144|160|176|192|208|224|240)
+    DESCRIPTION
+        "The possible values defined by IEEE 802.1t."
+
+    GROUP dot1dTpBridgeGroup
+    DESCRIPTION
+        "Implementation of this group is mandatory for
+        bridges that support the transparent bridging
+        mode. A transparent or SRT bridge will implement
+        this group."
+
+    GROUP dot1dTpFdbGroup
+    DESCRIPTION
+        "Implementation of this group is mandatory for
+        bridges that support the transparent bridging
+        mode. A transparent or SRT bridge will implement
+        this group."
+ + GROUP dot1dTpGroup + DESCRIPTION + "Implementation of this group is mandatory for + bridges that support the transparent bridging + mode. A transparent or SRT bridge will implement + this group." + + GROUP dot1dStaticGroup + DESCRIPTION + "Implementation of this group is optional." + + GROUP dot1dNotificationGroup + DESCRIPTION + "Implementation of this group is optional." + ::= { dot1dCompliances 2 } + +END diff --git a/plugins/inputs/snmp/testdata/bridgeMibImports b/plugins/inputs/snmp/testdata/bridgeMibImports new file mode 100644 index 0000000000000..8f6a52bd36058 --- /dev/null +++ b/plugins/inputs/snmp/testdata/bridgeMibImports @@ -0,0 +1,554 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. + +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) 
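Editor's note on the BRIDGE-MIB that closed above: the dot1dStaticAllowedToGoTo port bitmap (first octet covers ports 1 through 8; within each octet the most significant bit is the lowest-numbered port) is easy to decode backwards. A minimal Go sketch, assuming the octet string has already been fetched; allowedPorts is a name invented for this illustration, not a telegraf API.

    package main

    import "fmt"

    // allowedPorts expands a dot1dStaticAllowedToGoTo octet string
    // into the bridge port numbers whose bit is set. Per the MIB
    // description, octet i covers ports 8*i+1 .. 8*i+8 and the most
    // significant bit of each octet is the lowest-numbered port.
    func allowedPorts(bitmap []byte) []int {
        var ports []int
        for i, octet := range bitmap {
            for bit := 0; bit < 8; bit++ {
                if octet&(0x80>>uint(bit)) != 0 {
                    ports = append(ports, i*8+bit+1)
                }
            }
        }
        return ports
    }

    func main() {
        // 0xA0 = 1010 0000: ports 1 and 3; 0x01: port 16.
        fmt.Println(allowedPorts([]byte{0xA0, 0x01})) // [1 3 16]
    }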
+ +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + +-- application-wide types + +ApplicationSyntax ::= + CHOICE { + ipAddress-value + IpAddress, + counter-value + Counter32, + timeticks-value + TimeTicks, + arbitrary-value + Opaque, + big-counter-value + Counter64, + unsigned-integer-value -- includes Gauge32 + Unsigned32 + } + +-- in network-byte order + +-- (this is a tagged type for historical reasons) +IpAddress ::= + [APPLICATION 0] + IMPLICIT OCTET STRING (SIZE (4)) + +-- this wraps +Counter32 ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + +-- this doesn't wrap +Gauge32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- an unsigned 32-bit quantity +-- indistinguishable from Gauge32 +Unsigned32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- hundredths of seconds since an epoch +TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + +-- for backward-compatibility only +Opaque ::= + [APPLICATION 4] + IMPLICIT OCTET STRING + +-- for counters that wrap in less than one hour with only 32 bits +Counter64 ::= + [APPLICATION 6] + IMPLICIT INTEGER (0..18446744073709551615) + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + 
value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions for notifications + +NOTIFICATION-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE NotificationName) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + | empty + Objects ::= + Object + + | Objects "," Object + Object ::= + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions of administrative identifiers + +zeroDotZero OBJECT-IDENTITY + STATUS current + DESCRIPTION + "A value used for null identifiers." + ::= { 0 0 } + + + +TEXTUAL-CONVENTION MACRO ::= + +BEGIN + TYPE NOTATION ::= + DisplayPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + "SYNTAX" Syntax + + VALUE NOTATION ::= + value(VALUE Syntax) -- adapted ASN.1 + + DisplayPart ::= + "DISPLAY-HINT" Text + | empty + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + +END + +MODULE-COMPLIANCE MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + ReferPart + ModulePart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + ModulePart ::= + Modules + Modules ::= + Module + | Modules Module + Module ::= + -- name of module -- + "MODULE" ModuleName + MandatoryPart + CompliancePart + + ModuleName ::= + -- identifier must start with uppercase letter + identifier ModuleIdentifier + -- must not be empty unless contained + -- in MIB Module + | empty + ModuleIdentifier ::= + value(OBJECT IDENTIFIER) + | empty + + MandatoryPart ::= + "MANDATORY-GROUPS" "{" Groups "}" + | empty + + Groups ::= + + Group + | Groups "," Group + Group ::= + value(OBJECT IDENTIFIER) + + CompliancePart ::= + Compliances + | empty + + Compliances ::= + Compliance + | Compliances Compliance + Compliance ::= + ComplianceGroup + | Object + + ComplianceGroup ::= + "GROUP" value(OBJECT IDENTIFIER) + "DESCRIPTION" Text + + Object ::= + "OBJECT" value(ObjectName) + SyntaxPart + WriteSyntaxPart + AccessPart + "DESCRIPTION" Text + + -- must be a refinement for object's SYNTAX clause + SyntaxPart ::= "SYNTAX" Syntax + | empty + + -- must be a refinement for object's SYNTAX clause + WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax + | empty + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + AccessPart ::= + "MIN-ACCESS" Access + | empty + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + -- a character string as 
defined in [2] + Text ::= value(IA5String) +END + +OBJECT-GROUP MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + Objects ::= + Object + | Objects "," Object + Object ::= + + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +InterfaceIndex ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "A unique value, greater than zero, for each interface or + interface sub-layer in the managed system. It is + recommended that values are assigned contiguously starting + from 1. The value for each interface sub-layer must remain + constant at least from one re-initialization of the entity's + network management system to the next re-initialization." + SYNTAX Integer32 (1..2147483647) + + + +MacAddress ::= TEXTUAL-CONVENTION + DISPLAY-HINT "1x:" + STATUS current + DESCRIPTION + "Represents an 802 MAC address represented in the + `canonical' order defined by IEEE 802.1a, i.e., as if it + were transmitted least significant bit first, even though + 802.5 (in contrast to other 802.x protocols) requires MAC + addresses to be transmitted most significant bit first." + SYNTAX OCTET STRING (SIZE (6)) + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/foo b/plugins/inputs/snmp/testdata/foo new file mode 100644 index 0000000000000..4e9bf7f9d16f9 --- /dev/null +++ b/plugins/inputs/snmp/testdata/foo @@ -0,0 +1,30 @@ +FOOTEST-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +fooTestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +fooMIBObjects OBJECT IDENTIFIER ::= { iso 2 } +fooOne OBJECT IDENTIFIER ::= { iso 1 } +six OBJECT IDENTIFIER ::= { fooOne 1 } +three OBJECT IDENTIFIER ::= { six 3 } + +foo OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "foo mib for testing" + ::= { fooMIBObjects 3 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/fooImports b/plugins/inputs/snmp/testdata/fooImports new file mode 100644 index 0000000000000..6cbed24de4b95 --- /dev/null +++ b/plugins/inputs/snmp/testdata/fooImports @@ -0,0 +1,169 @@ +fooImports DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 1 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 2 } +internet OBJECT IDENTIFIER ::= { dod 3 } + +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
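Editor's note: the ExtUTCTime comment above fully specifies both accepted layouts, so a parser is short. A hedged Go sketch follows (parseExtUTCTime is invented for this illustration, not a telegraf function); conveniently, Go's two-digit-year rule, which maps 69-99 to 19xx, agrees with the MIB's restriction of the short form to 1900-1999.

    package main

    import (
        "fmt"
        "time"
    )

    // parseExtUTCTime handles both ExtUTCTime layouts described above:
    // YYMMDDHHMMZ (11 characters) and YYYYMMDDHHMMZ (13 characters).
    func parseExtUTCTime(s string) (time.Time, error) {
        switch len(s) {
        case 13:
            return time.Parse("200601021504Z", s)
        case 11:
            return time.Parse("0601021504Z", s)
        default:
            return time.Time{}, fmt.Errorf("ExtUTCTime: bad length %d", len(s))
        }
    }

    func main() {
        t, _ := parseExtUTCTime("9502192015Z")
        fmt.Println(t.UTC()) // 1995-02-19 20:15:00 +0000 UTC
    }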
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/ifPhysAddress b/plugins/inputs/snmp/testdata/ifPhysAddress new file mode 100644 index 0000000000000..8ac5b5a2e9489 --- /dev/null +++ b/plugins/inputs/snmp/testdata/ifPhysAddress @@ -0,0 +1,84 @@ +IF-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32, mib-2, + PhysAddress FROM ifPhysAddressImports; + +ifMIB MODULE-IDENTITY + LAST-UPDATED "200006140000Z" + ORGANIZATION "IETF Interfaces MIB Working Group" + CONTACT-INFO + " Keith McCloghrie + Cisco Systems, Inc. + 170 West Tasman Drive + San Jose, CA 95134-1706 + US + + 408-526-5260 + kzm@cisco.com" + DESCRIPTION + "The MIB module to describe generic objects for network + interface sub-layers. This MIB is an updated version of + MIB-II's ifTable, and incorporates the extensions defined in + RFC 1229." + + REVISION "200006140000Z" + DESCRIPTION + "Clarifications agreed upon by the Interfaces MIB WG, and + published as RFC 2863." 
+ REVISION "199602282155Z" + DESCRIPTION + "Revisions made by the Interfaces MIB WG, and published in + RFC 2233." + REVISION "199311082155Z" + DESCRIPTION + "Initial revision, published as part of RFC 1573." + ::= { mib-2 31 } + +ifMIBObjects OBJECT IDENTIFIER ::= { ifMIB 1 } + +interfaces OBJECT IDENTIFIER ::= { mib-2 2 } + + +ifTable OBJECT-TYPE + SYNTAX SEQUENCE OF IfEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A list of interface entries. The number of entries is + given by the value of ifNumber." + ::= { interfaces 2 } + +ifEntry OBJECT-TYPE + SYNTAX IfEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "An entry containing management information applicable to a + particular interface." + INDEX { ifIndex } + ::= { ifTable 1 } + + + +ifPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interface's address at the protocol layer + immediately `below' the network layer in the + protocol stack. For interfaces which do not have + such an address (e.g., a serial line), this object + should contain an octet string of zero length." + ::= { ifEntry 6 } + +foo OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "foo mib for testing" + ::= { ifEntry 9 } + +END diff --git a/plugins/inputs/snmp/testdata/ifPhysAddressImports b/plugins/inputs/snmp/testdata/ifPhysAddressImports new file mode 100644 index 0000000000000..316f665b4f916 --- /dev/null +++ b/plugins/inputs/snmp/testdata/ifPhysAddressImports @@ -0,0 +1,254 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
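Editor's note: this import stub re-declares the standard OID tree, and the declarations compose mechanically into the familiar numeric prefixes (org = 1.3, dod = 1.3.6, internet = 1.3.6.1, mib-2 = 1.3.6.1.2.1). A small Go sketch of that walk; the nodes table and numericOID are invented for this illustration.

    package main

    import (
        "fmt"
        "strings"
    )

    // Each name above is one subidentifier appended to its parent;
    // "iso" is the root and equals 1.
    var nodes = map[string]struct {
        parent string
        sub    int
    }{
        "org":      {"iso", 3},
        "dod":      {"org", 6},
        "internet": {"dod", 1},
        "mgmt":     {"internet", 2},
        "mib-2":    {"mgmt", 1},
    }

    // numericOID walks the declarations back to iso and returns the
    // dotted numeric form.
    func numericOID(name string) string {
        var subs []string
        for name != "iso" {
            n, ok := nodes[name]
            if !ok {
                return "unknown node: " + name
            }
            subs = append([]string{fmt.Sprint(n.sub)}, subs...)
            name = n.parent
        }
        return ".1." + strings.Join(subs, ".")
    }

    func main() {
        fmt.Println(numericOID("mib-2")) // .1.3.6.1.2.1
    }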
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + 
| empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +PhysAddress ::= TEXTUAL-CONVENTION + DISPLAY-HINT "1x:" + STATUS current + DESCRIPTION + "Represents media- or physical-level addresses." + SYNTAX OCTET STRING + + +END diff --git a/plugins/inputs/snmp/testdata/server b/plugins/inputs/snmp/testdata/server new file mode 100644 index 0000000000000..4f97618d62ef3 --- /dev/null +++ b/plugins/inputs/snmp/testdata/server @@ -0,0 +1,57 @@ +TEST DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +TestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +testingObjects OBJECT IDENTIFIER ::= { iso 0 } +testObjects OBJECT IDENTIFIER ::= { testingObjects 0 } +hostnameone OBJECT IDENTIFIER ::= {testObjects 1 } +hostname OBJECT IDENTIFIER ::= { hostnameone 1 } +testTable OBJECT IDENTIFIER ::= { testObjects 0 } +testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 } + + +server OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 1 } + +connections OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 2 } + +latency OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 3 } + +description OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 4 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/serverImports b/plugins/inputs/snmp/testdata/serverImports new file mode 100644 index 0000000000000..6bfb238234f07 --- /dev/null +++ b/plugins/inputs/snmp/testdata/serverImports @@ -0,0 +1,174 @@ +fooImports DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 1 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 1 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 1 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } + +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
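Editor's note: both PhysAddress (defined just above) and MacAddress earlier in this patch carry DISPLAY-HINT "1x:", meaning each octet is rendered as two hex digits with colons between octets. A minimal Go rendering of that hint; displayHint1xColon is a name invented for this illustration.

    package main

    import (
        "fmt"
        "strings"
    )

    // displayHint1xColon formats an octet string per DISPLAY-HINT
    // "1x:": one octet at a time, in lowercase hex, colon-separated.
    func displayHint1xColon(octets []byte) string {
        parts := make([]string, len(octets))
        for i, o := range octets {
            parts[i] = fmt.Sprintf("%02x", o)
        }
        return strings.Join(parts, ":")
    }

    func main() {
        mac := []byte{0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e}
        fmt.Println(displayHint1xColon(mac)) // 00:1b:21:3c:4d:5e
    }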
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + + + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/snmpd.conf b/plugins/inputs/snmp/testdata/snmpd.conf deleted file mode 100644 index 3f3151a6550c0..0000000000000 --- a/plugins/inputs/snmp/testdata/snmpd.conf +++ /dev/null @@ -1,17 +0,0 @@ -# This config provides the data represented in the plugin documentation -# Requires net-snmp >= 5.7 - -#agentaddress UDP:127.0.0.1:1161 -rocommunity public - -override .1.0.0.0.1.1.0 octet_str "foo" -override .1.0.0.0.1.1.1 octet_str "bar" -override .1.0.0.0.1.102 octet_str "bad" -override .1.0.0.0.1.2.0 integer 1 -override .1.0.0.0.1.2.1 integer 2 -override .1.0.0.0.1.3.0 octet_str "0.123" -override .1.0.0.0.1.3.1 octet_str "0.456" -override .1.0.0.0.1.3.2 octet_str "9.999" -override .1.0.0.1.1 octet_str "baz" -override .1.0.0.1.2 uinteger 54321 -override .1.0.0.1.3 uinteger 234 diff --git a/plugins/inputs/snmp/testdata/tableBuild b/plugins/inputs/snmp/testdata/tableBuild new file mode 100644 index 0000000000000..0551bfd6dd1d4 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableBuild @@ -0,0 +1,57 @@ +TEST DEFINITIONS 
::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM fooImports; + +TestMIB MODULE-IDENTITY + LAST-UPDATED "2021090800Z" + ORGANIZATION "influx" + CONTACT-INFO + "EMail: influx@email.com" + DESCRIPTION + "MIB module for testing snmp plugin + for telegraf + " + ::= { iso 1 } + +testingObjects OBJECT IDENTIFIER ::= { iso 0 } +testObjects OBJECT IDENTIFIER ::= { testingObjects 0 } +hostnameone OBJECT IDENTIFIER ::= {testObjects 1 } +hostname OBJECT IDENTIFIER ::= { hostnameone 1 } +testTable OBJECT IDENTIFIER ::= { testObjects 0 } +testMIBObjects OBJECT IDENTIFIER ::= { testTable 1 } + + +myfield1 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 1 } + +myfield2 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 2 } + +myfield3 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 3 } + +myfield4 OBJECT-TYPE + SYNTAX Integer32 + ACCESS read-only + STATUS current + DESCRIPTION + "server mib for testing" + ::= { testMIBObjects 4 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tableMib b/plugins/inputs/snmp/testdata/tableMib new file mode 100644 index 0000000000000..be13c1c1cc510 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableMib @@ -0,0 +1,2613 @@ +RFC1213-MIB DEFINITIONS ::= BEGIN + +IMPORTS + mgmt, NetworkAddress, IpAddress, Counter, Gauge, + TimeTicks + FROM RFC1155-SMI + OBJECT-TYPE + FROM fooImports; + +-- This MIB module uses the extended OBJECT-TYPE macro as +-- defined in [14]; + +-- MIB-II (same prefix as MIB-I) + +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } + +-- textual conventions + +DisplayString ::= + OCTET STRING +-- This data type is used to model textual information taken +-- from the NVT ASCII character set. By convention, objects +-- with this syntax are declared as having + +-- +-- SIZE (0..255) + +PhysAddress ::= + OCTET STRING +-- This data type is used to model media addresses. For many +-- types of media, this will be in a binary representation. +-- For example, an ethernet address would be represented as +-- a string of 6 octets. + +-- groups in MIB-II + +system OBJECT IDENTIFIER ::= { mib-2 1 } + +interfaces OBJECT IDENTIFIER ::= { mib-2 2 } + +at OBJECT IDENTIFIER ::= { mib-2 3 } + +ip OBJECT IDENTIFIER ::= { mib-2 4 } + +icmp OBJECT IDENTIFIER ::= { mib-2 5 } + +tcp OBJECT IDENTIFIER ::= { mib-2 6 } + +udp OBJECT IDENTIFIER ::= { mib-2 7 } + +egp OBJECT IDENTIFIER ::= { mib-2 8 } + +-- historical (some say hysterical) +-- cmot OBJECT IDENTIFIER ::= { mib-2 9 } + +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +snmp OBJECT IDENTIFIER ::= { mib-2 11 } + +-- the System group + +-- Implementation of the System group is mandatory for all +-- systems. If an agent is not configured to have a value +-- for any of these variables, a string of length 0 is +-- returned. + +sysDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual description of the entity. This value + should include the full name and version + identification of the system's hardware type, + software operating-system, and networking + software. It is mandatory that this only contain + printable ASCII characters." 
+    ::= { system 1 }
+
+sysObjectID OBJECT-TYPE
+    SYNTAX OBJECT IDENTIFIER
+    ACCESS read-only
+    STATUS mandatory
+    DESCRIPTION
+        "The vendor's authoritative identification of the
+        network management subsystem contained in the
+        entity. This value is allocated within the SMI
+        enterprises subtree (1.3.6.1.4.1) and provides an
+        easy and unambiguous means for determining `what
+        kind of box' is being managed. For example, if
+        vendor `Flintstones, Inc.' was assigned the
+        subtree 1.3.6.1.4.1.4242, it could assign the
+        identifier 1.3.6.1.4.1.4242.1.1 to its `Fred
+        Router'."
+    ::= { system 2 }
+
+sysUpTime OBJECT-TYPE
+    SYNTAX TimeTicks
+    ACCESS read-only
+    STATUS mandatory
+    DESCRIPTION
+        "The time (in hundredths of a second) since the
+        network management portion of the system was last
+        re-initialized."
+    ::= { system 3 }
+
+sysContact OBJECT-TYPE
+    SYNTAX DisplayString (SIZE (0..255))
+    ACCESS read-write
+    STATUS mandatory
+    DESCRIPTION
+        "The textual identification of the contact person
+        for this managed node, together with information
+        on how to contact this person."
+    ::= { system 4 }
+
+sysName OBJECT-TYPE
+    SYNTAX DisplayString (SIZE (0..255))
+    ACCESS read-write
+    STATUS mandatory
+    DESCRIPTION
+        "An administratively-assigned name for this
+        managed node. By convention, this is the node's
+        fully-qualified domain name."
+    ::= { system 5 }
+
+sysLocation OBJECT-TYPE
+    SYNTAX DisplayString (SIZE (0..255))
+    ACCESS read-write
+    STATUS mandatory
+    DESCRIPTION
+        "The physical location of this node (e.g.,
+        `telephone closet, 3rd floor')."
+    ::= { system 6 }
+
+sysServices OBJECT-TYPE
+    SYNTAX INTEGER (0..127)
+    ACCESS read-only
+    STATUS mandatory
+    DESCRIPTION
+        "A value which indicates the set of services that
+        this entity primarily offers.
+
+        The value is a sum. This sum initially takes the
+        value zero. Then, for each layer, L, in the range
+        1 through 7, that this node performs transactions
+        for, 2 raised to (L - 1) is added to the sum. For
+        example, a node which performs primarily routing
+        functions would have a value of 4 (2^(3-1)). In
+        contrast, a node which is a host offering
+        application services would have a value of 72
+        (2^(4-1) + 2^(7-1)). Note that in the context of
+        the Internet suite of protocols, values should be
+        calculated accordingly:
+
+            layer  functionality
+              1    physical (e.g., repeaters)
+              2    datalink/subnetwork (e.g., bridges)
+              3    internet (e.g., IP gateways)
+              4    end-to-end (e.g., IP hosts)
+              7    applications (e.g., mail relays)
+
+        For systems including OSI protocols, layers 5 and
+        6 may also be counted."
+    ::= { system 7 }
+
+-- the Interfaces group
+
+-- Implementation of the Interfaces group is mandatory for
+-- all systems.
+
+ifNumber OBJECT-TYPE
+    SYNTAX INTEGER
+    ACCESS read-only
+    STATUS mandatory
+    DESCRIPTION
+        "The number of network interfaces (regardless of
+        their current state) present on this system."
+    ::= { interfaces 1 }
+
+-- the Interfaces table
+
+-- The Interfaces table contains information on the entity's
+-- interfaces. Each interface is thought of as being
+-- attached to a `subnetwork'. Note that this term should
+-- not be confused with `subnet' which refers to an
+-- addressing partitioning scheme used in the Internet suite
+-- of protocols.
+
+ifTable OBJECT-TYPE
+    SYNTAX SEQUENCE OF IfEntry
+    ACCESS not-accessible
+    STATUS mandatory
+    DESCRIPTION
+        "A list of interface entries. The number of
+        entries is given by the value of ifNumber."
+ ::= { interfaces 2 } + +ifEntry OBJECT-TYPE + SYNTAX IfEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "An interface entry containing objects at the + subnetwork layer and below for a particular + interface." + INDEX { ifIndex } + ::= { ifTable 1 } + +IfEntry ::= + SEQUENCE { + ifIndex + INTEGER, + ifDescr + DisplayString, + ifType + INTEGER, + ifMtu + INTEGER, + ifSpeed + Gauge, + ifPhysAddress + PhysAddress, + ifAdminStatus + INTEGER, + ifOperStatus + INTEGER, + ifLastChange + TimeTicks, + ifInOctets + Counter, + ifInUcastPkts + Counter, + ifInNUcastPkts + Counter, + ifInDiscards + Counter, + ifInErrors + Counter, + ifInUnknownProtos + Counter, + ifOutOctets + Counter, + ifOutUcastPkts + Counter, + ifOutNUcastPkts + Counter, + ifOutDiscards + Counter, + ifOutErrors + Counter, + ifOutQLen + Gauge, + ifSpecific + OBJECT IDENTIFIER + } + +ifIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A unique value for each interface. Its value + ranges between 1 and the value of ifNumber. The + value for each interface must remain constant at + least from one re-initialization of the entity's + network management system to the next re- + initialization." + ::= { ifEntry 1 } + +ifDescr OBJECT-TYPE + SYNTAX DisplayString (SIZE (0..255)) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "A textual string containing information about the + interface. This string should include the name of + the manufacturer, the product name and the version + of the hardware interface." + ::= { ifEntry 2 } + +ifType OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + regular1822(2), + hdh1822(3), + ddn-x25(4), + rfc877-x25(5), + ethernet-csmacd(6), + iso88023-csmacd(7), + iso88024-tokenBus(8), + iso88025-tokenRing(9), + iso88026-man(10), + starLan(11), + proteon-10Mbit(12), + proteon-80Mbit(13), + hyperchannel(14), + fddi(15), + lapb(16), + sdlc(17), + ds1(18), -- T-1 + e1(19), -- european equiv. of T-1 + basicISDN(20), + primaryISDN(21), -- proprietary serial + propPointToPointSerial(22), + ppp(23), + softwareLoopback(24), + eon(25), -- CLNP over IP [11] + ethernet-3Mbit(26), + nsip(27), -- XNS over IP + slip(28), -- generic SLIP + ultra(29), -- ULTRA technologies + ds3(30), -- T-3 + sip(31), -- SMDS + frame-relay(32) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The type of interface, distinguished according to + the physical/link protocol(s) immediately `below' + the network layer in the protocol stack." + ::= { ifEntry 3 } + +ifMtu OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The size of the largest datagram which can be + sent/received on the interface, specified in + octets. For interfaces that are used for + transmitting network datagrams, this is the size + of the largest network datagram that can be sent + on the interface." + ::= { ifEntry 4 } + +ifSpeed OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "An estimate of the interface's current bandwidth + in bits per second. For interfaces which do not + vary in bandwidth or for those where no accurate + estimation can be made, this object should contain + the nominal bandwidth." + ::= { ifEntry 5 } + +ifPhysAddress OBJECT-TYPE + SYNTAX PhysAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interface's address at the protocol layer + immediately `below' the network layer in the + protocol stack. 
For interfaces which do not have + + such an address (e.g., a serial line), this object + should contain an octet string of zero length." + ::= { ifEntry 6 } + +ifAdminStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The desired state of the interface. The + testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 7 } + +ifOperStatus OBJECT-TYPE + SYNTAX INTEGER { + up(1), -- ready to pass packets + down(2), + testing(3) -- in some test mode + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The current operational state of the interface. + The testing(3) state indicates that no operational + packets can be passed." + ::= { ifEntry 8 } + +ifLastChange OBJECT-TYPE + SYNTAX TimeTicks + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The value of sysUpTime at the time the interface + entered its current operational state. If the + current state was entered prior to the last re- + initialization of the local network management + subsystem, then this object contains a zero + value." + ::= { ifEntry 9 } + +ifInOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets received on the + interface, including framing characters." + ::= { ifEntry 10 } + +ifInUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of subnetwork-unicast packets + delivered to a higher-layer protocol." + ::= { ifEntry 11 } + +ifInNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of non-unicast (i.e., subnetwork- + broadcast or subnetwork-multicast) packets + delivered to a higher-layer protocol." + ::= { ifEntry 12 } + +ifInDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets which were chosen + to be discarded even though no errors had been + detected to prevent their being deliverable to a + higher-layer protocol. One possible reason for + discarding such a packet could be to free up + buffer space." + ::= { ifEntry 13 } + +ifInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of inbound packets that contained + errors preventing them from being deliverable to a + higher-layer protocol." + ::= { ifEntry 14 } + +ifInUnknownProtos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of packets received via the interface + which were discarded because of an unknown or + unsupported protocol." + ::= { ifEntry 15 } + +ifOutOctets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of octets transmitted out of the + interface, including framing characters." + ::= { ifEntry 16 } + +ifOutUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a + subnetwork-unicast address, including those that + were discarded or not sent." + ::= { ifEntry 17 } + +ifOutNUcastPkts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of packets that higher-level + protocols requested be transmitted to a non- + unicast (i.e., a subnetwork-broadcast or + subnetwork-multicast) address, including those + that were discarded or not sent." 
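+-- Non-normative usage note (added for illustration; not part of
+-- RFC 1213): a management application commonly derives interface
+-- utilization from two polls of the octet counters taken t
+-- seconds apart, for example:
+--     utilization = (delta(ifInOctets) + delta(ifOutOctets)) * 8
+--                   / (t * ifSpeed)
+-- Counter objects wrap at 2^32 - 1, so the deltas must be
+-- computed modulo 2^32.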
+    ::= { ifEntry 18 }
+
+ifOutDiscards OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of outbound packets which were chosen
+            to be discarded even though no errors had been
+            detected to prevent their being transmitted. One
+            possible reason for discarding such a packet could
+            be to free up buffer space."
+    ::= { ifEntry 19 }
+
+ifOutErrors OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of outbound packets that could not be
+            transmitted because of errors."
+    ::= { ifEntry 20 }
+
+ifOutQLen OBJECT-TYPE
+    SYNTAX  Gauge
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The length of the output packet queue (in
+            packets)."
+    ::= { ifEntry 21 }
+
+ifSpecific OBJECT-TYPE
+    SYNTAX  OBJECT IDENTIFIER
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "A reference to MIB definitions specific to the
+            particular media being used to realize the
+            interface. For example, if the interface is
+            realized by an ethernet, then the value of this
+            object refers to a document defining objects
+            specific to ethernet. If this information is not
+            present, its value should be set to the OBJECT
+            IDENTIFIER { 0 0 }, which is a syntactically valid
+            object identifier, and any conformant
+            implementation of ASN.1 and BER must be able to
+            generate and recognize this value."
+    ::= { ifEntry 22 }
+
+-- the Address Translation group
+
+-- Implementation of the Address Translation group is
+-- mandatory for all systems. Note however that this group
+-- is deprecated by MIB-II. That is, it is being included
+-- solely for compatibility with MIB-I nodes, and will most
+-- likely be excluded from MIB-III nodes. From MIB-II and
+-- onwards, each network protocol group contains its own
+-- address translation tables.
+
+-- The Address Translation group contains one table which is
+-- the union across all interfaces of the translation tables
+-- for converting a NetworkAddress (e.g., an IP address) into
+-- a subnetwork-specific address. For lack of a better term,
+-- this document refers to such a subnetwork-specific address
+-- as a `physical' address.
+
+-- Examples of such translation tables are: for broadcast
+-- media where ARP is in use, the translation table is
+-- equivalent to the ARP cache; or, on an X.25 network where
+-- non-algorithmic translation to X.121 addresses is
+-- required, the translation table contains the
+-- NetworkAddress to X.121 address equivalences.
+
+atTable OBJECT-TYPE
+    SYNTAX  SEQUENCE OF AtEntry
+    ACCESS  not-accessible
+    STATUS  deprecated
+    DESCRIPTION
+            "The Address Translation tables contain the
+            NetworkAddress to `physical' address equivalences.
+            Some interfaces do not use translation tables for
+            determining address equivalences (e.g., DDN-X.25
+            has an algorithmic method); if all interfaces are
+            of this type, then the Address Translation table
+            is empty, i.e., has zero entries."
+    ::= { at 1 }
+
+atEntry OBJECT-TYPE
+    SYNTAX  AtEntry
+    ACCESS  not-accessible
+    STATUS  deprecated
+    DESCRIPTION
+            "Each entry contains one NetworkAddress to
+            `physical' address equivalence."
+    INDEX   { atIfIndex,
+              atNetAddress }
+    ::= { atTable 1 }
+
+AtEntry ::=
+    SEQUENCE {
+        atIfIndex
+            INTEGER,
+        atPhysAddress
+            PhysAddress,
+        atNetAddress
+            NetworkAddress
+    }
+
+atIfIndex OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-write
+    STATUS  deprecated
+    DESCRIPTION
+            "The interface on which this entry's equivalence
+            is effective. The interface identified by a
+            particular value of this index is the same
+            interface as identified by the same value of
+            ifIndex."
+    ::= { atEntry 1 }
+
+atPhysAddress OBJECT-TYPE
+    SYNTAX  PhysAddress
+    ACCESS  read-write
+    STATUS  deprecated
+    DESCRIPTION
+            "The media-dependent `physical' address.
+
+            Setting this object to a null string (one of zero
+            length) has the effect of invalidating the
+            corresponding entry in the atTable object. That
+            is, it effectively disassociates the interface
+            identified with said entry from the mapping
+            identified with said entry. It is an
+            implementation-specific matter as to whether the
+            agent removes an invalidated entry from the table.
+            Accordingly, management stations must be prepared
+            to receive tabular information from agents that
+            corresponds to entries not currently in use.
+            Proper interpretation of such entries requires
+            examination of the relevant atPhysAddress object."
+    ::= { atEntry 2 }
+
+atNetAddress OBJECT-TYPE
+    SYNTAX  NetworkAddress
+    ACCESS  read-write
+    STATUS  deprecated
+    DESCRIPTION
+            "The NetworkAddress (e.g., the IP address)
+            corresponding to the media-dependent `physical'
+            address."
+    ::= { atEntry 3 }
+
+-- the IP group
+
+-- Implementation of the IP group is mandatory for all
+-- systems.
+
+ipForwarding OBJECT-TYPE
+    SYNTAX  INTEGER {
+                forwarding(1),    -- acting as a gateway
+                not-forwarding(2) -- NOT acting as a gateway
+            }
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "The indication of whether this entity is acting
+            as an IP gateway in respect to the forwarding of
+            datagrams received by, but not addressed to, this
+            entity. IP gateways forward datagrams. IP hosts
+            do not (except those source-routed via the host).
+
+            Note that for some managed nodes, this object may
+            take on only a subset of the values possible.
+            Accordingly, it is appropriate for an agent to
+            return a `badValue' response if a management
+            station attempts to change this object to an
+            inappropriate value."
+    ::= { ip 1 }
+
+ipDefaultTTL OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "The default value inserted into the Time-To-Live
+            field of the IP header of datagrams originated at
+            this entity, whenever a TTL value is not supplied
+            by the transport layer protocol."
+    ::= { ip 2 }
+
+ipInReceives OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of input datagrams received from
+            interfaces, including those received in error."
+    ::= { ip 3 }
+
+ipInHdrErrors OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of input datagrams discarded due to
+            errors in their IP headers, including bad
+            checksums, version number mismatch, other format
+            errors, time-to-live exceeded, errors discovered
+            in processing their IP options, etc."
+    ::= { ip 4 }
+
+ipInAddrErrors OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of input datagrams discarded because
+            the IP address in their IP header's destination
+            field was not a valid address to be received at
+            this entity. This count includes invalid
+            addresses (e.g., 0.0.0.0) and addresses of
+            unsupported Classes (e.g., Class E). For entities
+            which are not IP Gateways and therefore do not
+            forward datagrams, this counter includes datagrams
+            discarded because the destination address was not
+            a local address."
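+-- Non-normative note (added for illustration; not part of
+-- RFC 1213): in a typical implementation the input counters
+-- roughly decompose as
+--     ipInReceives ~= ipInHdrErrors + ipInAddrErrors
+--                     + ipForwDatagrams + ipInUnknownProtos
+--                     + ipInDiscards + ipInDelivers
+-- The relation is only approximate; reassembly, for example,
+-- turns several received fragments into one delivered datagram.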
+    ::= { ip 5 }
+
+ipForwDatagrams OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of input datagrams for which this
+            entity was not their final IP destination, as a
+            result of which an attempt was made to find a
+            route to forward them to that final destination.
+            In entities which do not act as IP Gateways, this
+            counter will include only those packets which were
+            Source-Routed via this entity, and the Source-
+            Route option processing was successful."
+    ::= { ip 6 }
+
+ipInUnknownProtos OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of locally-addressed datagrams
+            received successfully but discarded because of an
+            unknown or unsupported protocol."
+    ::= { ip 7 }
+
+ipInDiscards OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of input IP datagrams for which no
+            problems were encountered to prevent their
+            continued processing, but which were discarded
+            (e.g., for lack of buffer space). Note that this
+            counter does not include any datagrams discarded
+            while awaiting re-assembly."
+    ::= { ip 8 }
+
+ipInDelivers OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of input datagrams successfully
+            delivered to IP user-protocols (including ICMP)."
+    ::= { ip 9 }
+
+ipOutRequests OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of IP datagrams which local IP
+            user-protocols (including ICMP) supplied to IP in
+            requests for transmission. Note that this counter
+            does not include any datagrams counted in
+            ipForwDatagrams."
+    ::= { ip 10 }
+
+ipOutDiscards OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of output IP datagrams for which no
+            problem was encountered to prevent their
+            transmission to their destination, but which were
+            discarded (e.g., for lack of buffer space). Note
+            that this counter would include datagrams counted
+            in ipForwDatagrams if any such packets met this
+            (discretionary) discard criterion."
+    ::= { ip 11 }
+
+ipOutNoRoutes OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of IP datagrams discarded because no
+            route could be found to transmit them to their
+            destination. Note that this counter includes any
+            packets counted in ipForwDatagrams which meet this
+            `no-route' criterion. Note that this includes any
+            datagrams which a host cannot route because all of
+            its default gateways are down."
+    ::= { ip 12 }
+
+ipReasmTimeout OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The maximum number of seconds which received
+            fragments are held while they are awaiting
+            reassembly at this entity."
+    ::= { ip 13 }
+
+ipReasmReqds OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of IP fragments received which needed
+            to be reassembled at this entity."
+    ::= { ip 14 }
+
+ipReasmOKs OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of IP datagrams successfully re-
+            assembled."
+    ::= { ip 15 }
+
+ipReasmFails OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of failures detected by the IP re-
+            assembly algorithm (for whatever reason: timed
+            out, errors, etc). Note that this is not
+            necessarily a count of discarded IP fragments
+            since some algorithms (notably the algorithm in
+            RFC 815) can lose track of the number of fragments
+            by combining them as they are received."
+    ::= { ip 16 }
+
+ipFragOKs OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of IP datagrams that have been
+            successfully fragmented at this entity."
+    ::= { ip 17 }
+
+ipFragFails OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of IP datagrams that have been
+            discarded because they needed to be fragmented at
+            this entity but could not be, e.g., because their
+            Don't Fragment flag was set."
+    ::= { ip 18 }
+
+ipFragCreates OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of IP datagram fragments that have
+            been generated as a result of fragmentation at
+            this entity."
+    ::= { ip 19 }
+
+-- the IP address table
+
+-- The IP address table contains this entity's IP addressing
+-- information.
+
+ipAddrTable OBJECT-TYPE
+    SYNTAX  SEQUENCE OF IpAddrEntry
+    ACCESS  not-accessible
+    STATUS  mandatory
+    DESCRIPTION
+            "The table of addressing information relevant to
+            this entity's IP addresses."
+    ::= { ip 20 }
+
+ipAddrEntry OBJECT-TYPE
+    SYNTAX  IpAddrEntry
+    ACCESS  not-accessible
+    STATUS  mandatory
+    DESCRIPTION
+            "The addressing information for one of this
+            entity's IP addresses."
+    INDEX   { ipAdEntAddr }
+    ::= { ipAddrTable 1 }
+
+IpAddrEntry ::=
+    SEQUENCE {
+        ipAdEntAddr
+            IpAddress,
+        ipAdEntIfIndex
+            INTEGER,
+        ipAdEntNetMask
+            IpAddress,
+        ipAdEntBcastAddr
+            INTEGER,
+        ipAdEntReasmMaxSize
+            INTEGER (0..65535)
+    }
+
+ipAdEntAddr OBJECT-TYPE
+    SYNTAX  IpAddress
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The IP address to which this entry's addressing
+            information pertains."
+    ::= { ipAddrEntry 1 }
+
+ipAdEntIfIndex OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The index value which uniquely identifies the
+            interface to which this entry is applicable. The
+            interface identified by a particular value of this
+            index is the same interface as identified by the
+            same value of ifIndex."
+    ::= { ipAddrEntry 2 }
+
+ipAdEntNetMask OBJECT-TYPE
+    SYNTAX  IpAddress
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The subnet mask associated with the IP address of
+            this entry. The value of the mask is an IP
+            address with all the network bits set to 1 and all
+            the host bits set to 0."
+    ::= { ipAddrEntry 3 }
+
+ipAdEntBcastAddr OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The value of the least-significant bit in the IP
+            broadcast address used for sending datagrams on
+            the (logical) interface associated with the IP
+            address of this entry. For example, when the
+            Internet standard all-ones broadcast address is
+            used, the value will be 1. This value applies to
+            both the subnet and network broadcast addresses
+            used by the entity on this (logical) interface."
+    ::= { ipAddrEntry 4 }
+
+ipAdEntReasmMaxSize OBJECT-TYPE
+    SYNTAX  INTEGER (0..65535)
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The size of the largest IP datagram which this
+            entity can re-assemble from incoming IP fragmented
+            datagrams received on this interface."
+    ::= { ipAddrEntry 5 }
+
+-- the IP routing table
+
+-- The IP routing table contains an entry for each route
+-- presently known to this entity.
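+-- Non-normative example (added for illustration; not part of
+-- RFC 1213): a default route through gateway 192.0.2.1 would
+-- appear in the table below as, e.g.,
+--     ipRouteDest    = 0.0.0.0
+--     ipRouteMask    = 0.0.0.0
+--     ipRouteNextHop = 192.0.2.1
+--     ipRouteType    = indirect(4)
+--     ipRouteProto   = local(2)
+-- (192.0.2.0/24 is the documentation address range of RFC 5737.)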
+ +ipRouteTable OBJECT-TYPE + SYNTAX SEQUENCE OF IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "This entity's IP Routing table." + ::= { ip 21 } + +ipRouteEntry OBJECT-TYPE + SYNTAX IpRouteEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A route to a particular destination." + INDEX { ipRouteDest } + ::= { ipRouteTable 1 } + +IpRouteEntry ::= + SEQUENCE { + ipRouteDest + IpAddress, + ipRouteIfIndex + INTEGER, + ipRouteMetric1 + INTEGER, + ipRouteMetric2 + INTEGER, + ipRouteMetric3 + INTEGER, + ipRouteMetric4 + INTEGER, + ipRouteNextHop + IpAddress, + ipRouteType + INTEGER, + ipRouteProto + INTEGER, + ipRouteAge + INTEGER, + ipRouteMask + IpAddress, + ipRouteMetric5 + INTEGER, + ipRouteInfo + OBJECT IDENTIFIER + } + +ipRouteDest OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The destination IP address of this route. An + entry with a value of 0.0.0.0 is considered a + default route. Multiple routes to a single + destination can appear in the table, but access to + such multiple entries is dependent on the table- + access mechanisms defined by the network + management protocol in use." + ::= { ipRouteEntry 1 } + +ipRouteIfIndex OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The index value which uniquely identifies the + local interface through which the next hop of this + route should be reached. The interface identified + by a particular value of this index is the same + interface as identified by the same value of + ifIndex." + ::= { ipRouteEntry 2 } + +ipRouteMetric1 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The primary routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 3 } + +ipRouteMetric2 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 4 } + +ipRouteMetric3 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 5 } + +ipRouteMetric4 OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-write + STATUS mandatory + DESCRIPTION + "An alternate routing metric for this route. The + semantics of this metric are determined by the + routing-protocol specified in the route's + ipRouteProto value. If this metric is not used, + its value should be set to -1." + ::= { ipRouteEntry 6 } + +ipRouteNextHop OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The IP address of the next hop of this route. 
+    (In the case of a route bound to an interface
+    which is realized via a broadcast media, the value
+    of this field is the agent's IP address on that
+    interface.)"
+    ::= { ipRouteEntry 7 }
+
+ipRouteType OBJECT-TYPE
+    SYNTAX  INTEGER {
+                other(1),    -- none of the following
+
+                invalid(2),  -- an invalidated route
+
+                             -- route to directly
+                direct(3),   -- connected (sub-)network
+
+                             -- route to a non-local
+                indirect(4)  -- host/network/sub-network
+            }
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "The type of route. Note that the values
+            direct(3) and indirect(4) refer to the notion of
+            direct and indirect routing in the IP
+            architecture.
+
+            Setting this object to the value invalid(2) has
+            the effect of invalidating the corresponding entry
+            in the ipRouteTable object. That is, it
+            effectively disassociates the destination
+            identified with said entry from the route
+            identified with said entry. It is an
+            implementation-specific matter as to whether the
+            agent removes an invalidated entry from the table.
+            Accordingly, management stations must be prepared
+            to receive tabular information from agents that
+            corresponds to entries not currently in use.
+            Proper interpretation of such entries requires
+            examination of the relevant ipRouteType object."
+    ::= { ipRouteEntry 8 }
+
+ipRouteProto OBJECT-TYPE
+    SYNTAX  INTEGER {
+                other(1),      -- none of the following
+
+                               -- non-protocol information,
+                               -- e.g., manually configured
+                local(2),      -- entries
+
+                               -- set via a network
+                netmgmt(3),    -- management protocol
+
+                               -- obtained via ICMP,
+                icmp(4),       -- e.g., Redirect
+
+                               -- the remaining values are
+                               -- all gateway routing
+                               -- protocols
+                egp(5),
+                ggp(6),
+                hello(7),
+                rip(8),
+                is-is(9),
+                es-is(10),
+                ciscoIgrp(11),
+                bbnSpfIgp(12),
+                ospf(13),
+                bgp(14)
+            }
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The routing mechanism via which this route was
+            learned. Inclusion of values for gateway routing
+            protocols is not intended to imply that hosts
+            should support those protocols."
+    ::= { ipRouteEntry 9 }
+
+ipRouteAge OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "The number of seconds since this route was last
+            updated or otherwise determined to be correct.
+            Note that no semantics of `too old' can be implied
+            except through knowledge of the routing protocol
+            by which the route was learned."
+    ::= { ipRouteEntry 10 }
+
+ipRouteMask OBJECT-TYPE
+    SYNTAX  IpAddress
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "Indicates the mask to be logical-ANDed with the
+            destination address before being compared to the
+            value in the ipRouteDest field. For those systems
+            that do not support arbitrary subnet masks, an
+            agent constructs the value of the ipRouteMask by
+            determining whether the value of the correspondent
+            ipRouteDest field belongs to a class-A, B, or C
+            network, and then using one of:
+
+                 mask           network
+                 255.0.0.0      class-A
+                 255.255.0.0    class-B
+                 255.255.255.0  class-C
+
+            If the value of the ipRouteDest is 0.0.0.0 (a
+            default route), then the mask value is also
+            0.0.0.0. It should be noted that all IP routing
+            subsystems implicitly use this mechanism."
+    ::= { ipRouteEntry 11 }
+
+ipRouteMetric5 OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "An alternate routing metric for this route. The
+            semantics of this metric are determined by the
+            routing-protocol specified in the route's
+            ipRouteProto value. If this metric is not used,
+            its value should be set to -1."
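+-- Non-normative worked example for the ipRouteMask object above
+-- (added for illustration; not part of RFC 1213): to match a
+-- packet for 192.0.2.7 against a route with ipRouteDest =
+-- 192.0.2.0 and ipRouteMask = 255.255.255.0:
+--     192.0.2.7 AND 255.255.255.0 = 192.0.2.0
+-- which equals ipRouteDest, so the route applies. 192.0.2.0
+-- falls in the historical class-C range, hence the default
+-- mask of 255.255.255.0 above.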
+    ::= { ipRouteEntry 12 }
+
+ipRouteInfo OBJECT-TYPE
+    SYNTAX  OBJECT IDENTIFIER
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "A reference to MIB definitions specific to the
+            particular routing protocol which is responsible
+            for this route, as determined by the value
+            specified in the route's ipRouteProto value. If
+            this information is not present, its value should
+            be set to the OBJECT IDENTIFIER { 0 0 }, which is
+            a syntactically valid object identifier, and any
+            conformant implementation of ASN.1 and BER must be
+            able to generate and recognize this value."
+    ::= { ipRouteEntry 13 }
+
+-- the IP Address Translation table
+
+-- The IP address translation table contains the IpAddress to
+-- `physical' address equivalences. Some interfaces do not
+-- use translation tables for determining address
+-- equivalences (e.g., DDN-X.25 has an algorithmic method);
+-- if all interfaces are of this type, then the Address
+-- Translation table is empty, i.e., has zero entries.
+
+ipNetToMediaTable OBJECT-TYPE
+    SYNTAX  SEQUENCE OF IpNetToMediaEntry
+    ACCESS  not-accessible
+    STATUS  mandatory
+    DESCRIPTION
+            "The IP Address Translation table used for mapping
+            from IP addresses to physical addresses."
+    ::= { ip 22 }
+
+ipNetToMediaEntry OBJECT-TYPE
+    SYNTAX  IpNetToMediaEntry
+    ACCESS  not-accessible
+    STATUS  mandatory
+    DESCRIPTION
+            "Each entry contains one IpAddress to `physical'
+            address equivalence."
+    INDEX   { ipNetToMediaIfIndex,
+              ipNetToMediaNetAddress }
+    ::= { ipNetToMediaTable 1 }
+
+IpNetToMediaEntry ::=
+    SEQUENCE {
+        ipNetToMediaIfIndex
+            INTEGER,
+        ipNetToMediaPhysAddress
+            PhysAddress,
+        ipNetToMediaNetAddress
+            IpAddress,
+        ipNetToMediaType
+            INTEGER
+    }
+
+ipNetToMediaIfIndex OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "The interface on which this entry's equivalence
+            is effective. The interface identified by a
+            particular value of this index is the same
+            interface as identified by the same value of
+            ifIndex."
+    ::= { ipNetToMediaEntry 1 }
+
+ipNetToMediaPhysAddress OBJECT-TYPE
+    SYNTAX  PhysAddress
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "The media-dependent `physical' address."
+    ::= { ipNetToMediaEntry 2 }
+
+ipNetToMediaNetAddress OBJECT-TYPE
+    SYNTAX  IpAddress
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "The IpAddress corresponding to the media-
+            dependent `physical' address."
+    ::= { ipNetToMediaEntry 3 }
+
+ipNetToMediaType OBJECT-TYPE
+    SYNTAX  INTEGER {
+                other(1),   -- none of the following
+                invalid(2), -- an invalidated mapping
+                dynamic(3),
+                static(4)
+            }
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "The type of mapping.
+
+            Setting this object to the value invalid(2) has
+            the effect of invalidating the corresponding entry
+            in the ipNetToMediaTable. That is, it effectively
+            disassociates the interface identified with said
+            entry from the mapping identified with said entry.
+            It is an implementation-specific matter as to
+            whether the agent removes an invalidated entry
+            from the table. Accordingly, management stations
+            must be prepared to receive tabular information
+            from agents that corresponds to entries not
+            currently in use. Proper interpretation of such
+            entries requires examination of the relevant
+            ipNetToMediaType object."
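+-- Non-normative example (added for illustration; not part of
+-- RFC 1213): on a broadcast medium using ARP, one cache entry
+-- in the table above might read
+--     ipNetToMediaIfIndex     = 2
+--     ipNetToMediaNetAddress  = 192.0.2.7
+--     ipNetToMediaPhysAddress = '00005E005301'H  -- a MAC address
+--     ipNetToMediaType        = dynamic(3)
+-- (Addresses drawn from the documentation ranges of RFC 5737
+-- and RFC 7042.)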
+ ::= { ipNetToMediaEntry 4 } + +-- additional IP objects + +ipRoutingDiscards OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of routing entries which were chosen + to be discarded even though they are valid. One + possible reason for discarding such an entry could + be to free-up buffer space for other routing + + entries." + ::= { ip 23 } + +-- the ICMP group + +-- Implementation of the ICMP group is mandatory for all +-- systems. + +icmpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which the + entity received. Note that this counter includes + all those counted by icmpInErrors." + ::= { icmp 1 } + +icmpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which the entity + received but determined as having ICMP-specific + errors (bad ICMP checksums, bad length, etc.)." + ::= { icmp 2 } + +icmpInDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages received." + ::= { icmp 3 } + +icmpInTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages + received." + ::= { icmp 4 } + +icmpInParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + received." + ::= { icmp 5 } + +icmpInSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages + received." + ::= { icmp 6 } + +icmpInRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages received." + ::= { icmp 7 } + +icmpInEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages + received." + ::= { icmp 8 } + +icmpInEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages received." + ::= { icmp 9 } + +icmpInTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + received." + ::= { icmp 10 } + +icmpInTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + received." + ::= { icmp 11 } + +icmpInAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + received." + ::= { icmp 12 } + +icmpInAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + received." + ::= { icmp 13 } + +icmpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of ICMP messages which this + entity attempted to send. Note that this counter + includes all those counted by icmpOutErrors." + ::= { icmp 14 } + +icmpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP messages which this entity did + not send due to problems discovered within ICMP + + such as a lack of buffers. 
This value should not + include errors discovered outside the ICMP layer + such as the inability of IP to route the resultant + datagram. In some implementations there may be no + types of error which contribute to this counter's + value." + ::= { icmp 15 } + +icmpOutDestUnreachs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Destination Unreachable + messages sent." + ::= { icmp 16 } + +icmpOutTimeExcds OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Time Exceeded messages sent." + ::= { icmp 17 } + +icmpOutParmProbs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Parameter Problem messages + sent." + ::= { icmp 18 } + +icmpOutSrcQuenchs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Source Quench messages sent." + ::= { icmp 19 } + +icmpOutRedirects OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Redirect messages sent. For a + + host, this object will always be zero, since hosts + do not send redirects." + ::= { icmp 20 } + +icmpOutEchos OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo (request) messages sent." + ::= { icmp 21 } + +icmpOutEchoReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Echo Reply messages sent." + ::= { icmp 22 } + +icmpOutTimestamps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp (request) messages + sent." + ::= { icmp 23 } + +icmpOutTimestampReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Timestamp Reply messages + sent." + ::= { icmp 24 } + +icmpOutAddrMasks OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Request messages + sent." + ::= { icmp 25 } + +icmpOutAddrMaskReps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of ICMP Address Mask Reply messages + sent." + ::= { icmp 26 } + +-- the TCP group + +-- Implementation of the TCP group is mandatory for all +-- systems that implement the TCP. + +-- Note that instances of object types that represent +-- information about a particular TCP connection are +-- transient; they persist only as long as the connection +-- in question. + +tcpRtoAlgorithm OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + + constant(2), -- a constant rto + rsre(3), -- MIL-STD-1778, Appendix B + vanj(4) -- Van Jacobson's algorithm [10] + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The algorithm used to determine the timeout value + used for retransmitting unacknowledged octets." + ::= { tcp 1 } + +tcpRtoMin OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The minimum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + LBOUND quantity described in RFC 793." 
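+-- Non-normative note (added for illustration; not part of
+-- RFC 1213): in the classic retransmission-timeout computation
+-- of RFC 793, tcpRtoMin and tcpRtoMax correspond to the LBOUND
+-- and UBOUND terms of
+--     RTO = min( UBOUND, max( LBOUND, BETA * SRTT ) )
+-- where SRTT is the smoothed round-trip time and BETA is a
+-- delay variance factor (RFC 793 suggests a value near 2).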
+ ::= { tcp 2 } + +tcpRtoMax OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The maximum value permitted by a TCP + implementation for the retransmission timeout, + measured in milliseconds. More refined semantics + for objects of this type depend upon the algorithm + used to determine the retransmission timeout. In + particular, when the timeout algorithm is rsre(3), + an object of this type has the semantics of the + UBOUND quantity described in RFC 793." + ::= { tcp 3 } + +tcpMaxConn OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The limit on the total number of TCP connections + the entity can support. In entities where the + maximum number of connections is dynamic, this + object should contain the value -1." + ::= { tcp 4 } + +tcpActiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-SENT state from the + CLOSED state." + ::= { tcp 5 } + +tcpPassiveOpens OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the SYN-RCVD state from the + LISTEN state." + ::= { tcp 6 } + +tcpAttemptFails OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the SYN-SENT state or the SYN-RCVD state, plus the + number of times TCP connections have made a direct + transition to the LISTEN state from the SYN-RCVD + state." + ::= { tcp 7 } + +tcpEstabResets OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of times TCP connections have made a + direct transition to the CLOSED state from either + the ESTABLISHED state or the CLOSE-WAIT state." + ::= { tcp 8 } + +tcpCurrEstab OBJECT-TYPE + SYNTAX Gauge + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP connections for which the + current state is either ESTABLISHED or CLOSE- + WAIT." + ::= { tcp 9 } + +tcpInSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received, including + those received in error. This count includes + segments received on currently established + connections." + ::= { tcp 10 } + +tcpOutSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments sent, including + those on current connections but excluding those + containing only retransmitted octets." + ::= { tcp 11 } + +tcpRetransSegs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments retransmitted - that + is, the number of TCP segments transmitted + containing one or more previously transmitted + octets." + ::= { tcp 12 } + +-- the TCP Connection table + +-- The TCP connection table contains information about this +-- entity's existing TCP connections. + +tcpConnTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing TCP connection-specific + information." + ::= { tcp 13 } + +tcpConnEntry OBJECT-TYPE + SYNTAX TcpConnEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current TCP + connection. 
An object of this type is transient, + in that it ceases to exist when (or soon after) + the connection makes the transition to the CLOSED + state." + INDEX { tcpConnLocalAddress, + tcpConnLocalPort, + tcpConnRemAddress, + tcpConnRemPort } + ::= { tcpConnTable 1 } + +TcpConnEntry ::= + SEQUENCE { + tcpConnState + INTEGER, + tcpConnLocalAddress + IpAddress, + tcpConnLocalPort + INTEGER (0..65535), + tcpConnRemAddress + IpAddress, + tcpConnRemPort + INTEGER (0..65535) + } + +tcpConnState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + ACCESS read-write + STATUS mandatory + DESCRIPTION + "The state of this TCP connection. + + The only value which may be set by a management + station is deleteTCB(12). Accordingly, it is + appropriate for an agent to return a `badValue' + response if a management station attempts to set + this object to any other value. + + If a management station sets this object to the + value deleteTCB(12), then this has the effect of + deleting the TCB (as defined in RFC 793) of the + corresponding connection on the managed node, + resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST + + segment may be sent from the managed node to the + other TCP endpoint (note however that RST segments + are not sent reliably)." + ::= { tcpConnEntry 1 } + +tcpConnLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this TCP connection. In + the case of a connection in the listen state which + is willing to accept connections for any IP + interface associated with the node, the value + 0.0.0.0 is used." + ::= { tcpConnEntry 2 } + +tcpConnLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnEntry 3 } + +tcpConnRemAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote IP address for this TCP connection." + ::= { tcpConnEntry 4 } + +tcpConnRemPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnEntry 5 } + +-- additional TCP objects + +tcpInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of segments received in error + (e.g., bad TCP checksums)." + ::= { tcp 14 } + +tcpOutRsts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of TCP segments sent containing the + RST flag." + ::= { tcp 15 } + +-- the UDP group + +-- Implementation of the UDP group is mandatory for all +-- systems which implement the UDP. + +udpInDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams delivered to + UDP users." + ::= { udp 1 } + +udpNoPorts OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of received UDP datagrams for + which there was no application at the destination + port." 
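+-- Non-normative note (added for illustration; not part of
+-- RFC 1213): in a typical implementation every received UDP
+-- datagram is counted exactly once, so approximately
+--     total received ~= udpInDatagrams + udpNoPorts + udpInErrors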
+ ::= { udp 2 } + +udpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of received UDP datagrams that could + not be delivered for reasons other than the lack + of an application at the destination port." + ::= { udp 3 } + +udpOutDatagrams OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of UDP datagrams sent from this + entity." + ::= { udp 4 } + +-- the UDP Listener table + +-- The UDP listener table contains information about this +-- entity's UDP end-points on which a local application is +-- currently accepting datagrams. + +udpTable OBJECT-TYPE + SYNTAX SEQUENCE OF UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "A table containing UDP listener information." + ::= { udp 5 } + +udpEntry OBJECT-TYPE + SYNTAX UdpEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about a particular current UDP + listener." + INDEX { udpLocalAddress, udpLocalPort } + ::= { udpTable 1 } + +UdpEntry ::= + SEQUENCE { + udpLocalAddress + IpAddress, + udpLocalPort + INTEGER (0..65535) + } + +udpLocalAddress OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local IP address for this UDP listener. In + + the case of a UDP listener which is willing to + accept datagrams for any IP interface associated + with the node, the value 0.0.0.0 is used." + ::= { udpEntry 1 } + +udpLocalPort OBJECT-TYPE + SYNTAX INTEGER (0..65535) + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The local port number for this UDP listener." + ::= { udpEntry 2 } + +-- the EGP group + +-- Implementation of the EGP group is mandatory for all +-- systems which implement the EGP. + +egpInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without + error." + ::= { egp 1 } + +egpInErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received that proved + to be in error." + ::= { egp 2 } + +egpOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The total number of locally generated EGP + messages." + ::= { egp 3 } + +egpOutErrors OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent due to resource limitations within an EGP + entity." + ::= { egp 4 } + +-- the EGP Neighbor table + +-- The EGP neighbor table contains information about this +-- entity's EGP neighbors. + +egpNeighTable OBJECT-TYPE + SYNTAX SEQUENCE OF EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "The EGP neighbor table." + ::= { egp 5 } + +egpNeighEntry OBJECT-TYPE + SYNTAX EgpNeighEntry + ACCESS not-accessible + STATUS mandatory + DESCRIPTION + "Information about this entity's relationship with + a particular EGP neighbor." 
+ INDEX { egpNeighAddr } + ::= { egpNeighTable 1 } + +EgpNeighEntry ::= + SEQUENCE { + egpNeighState + INTEGER, + egpNeighAddr + IpAddress, + egpNeighAs + INTEGER, + egpNeighInMsgs + Counter, + egpNeighInErrs + Counter, + egpNeighOutMsgs + Counter, + egpNeighOutErrs + Counter, + egpNeighInErrMsgs + Counter, + egpNeighOutErrMsgs + Counter, + egpNeighStateUps + Counter, + egpNeighStateDowns + Counter, + egpNeighIntervalHello + INTEGER, + egpNeighIntervalPoll + INTEGER, + egpNeighMode + INTEGER, + egpNeighEventTrigger + INTEGER + } + +egpNeighState OBJECT-TYPE + SYNTAX INTEGER { + idle(1), + acquisition(2), + down(3), + up(4), + cease(5) + } + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The EGP state of the local system with respect to + this entry's EGP neighbor. Each EGP state is + represented by a value that is one greater than + the numerical value associated with said state in + RFC 904." + ::= { egpNeighEntry 1 } + +egpNeighAddr OBJECT-TYPE + SYNTAX IpAddress + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The IP address of this entry's EGP neighbor." + ::= { egpNeighEntry 2 } + +egpNeighAs OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The autonomous system of this EGP peer. Zero + should be specified if the autonomous system + number of the neighbor is not yet known." + ::= { egpNeighEntry 3 } + +egpNeighInMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received without error + from this EGP peer." + ::= { egpNeighEntry 4 } + +egpNeighInErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP messages received from this EGP + peer that proved to be in error (e.g., bad EGP + checksum)." + ::= { egpNeighEntry 5 } + +egpNeighOutMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages to + this EGP peer." + ::= { egpNeighEntry 6 } + +egpNeighOutErrs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of locally generated EGP messages not + sent to this EGP peer due to resource limitations + within an EGP entity." + ::= { egpNeighEntry 7 } + +egpNeighInErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages received + from this EGP peer." + ::= { egpNeighEntry 8 } + +egpNeighOutErrMsgs OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP-defined error messages sent to + this EGP peer." + ::= { egpNeighEntry 9 } + +egpNeighStateUps OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions to the UP + state with this EGP peer." + ::= { egpNeighEntry 10 } + +egpNeighStateDowns OBJECT-TYPE + SYNTAX Counter + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The number of EGP state transitions from the UP + state to any other state with this EGP peer." + ::= { egpNeighEntry 11 } + +egpNeighIntervalHello OBJECT-TYPE + SYNTAX INTEGER + ACCESS read-only + STATUS mandatory + DESCRIPTION + "The interval between EGP Hello command + retransmissions (in hundredths of a second). This + represents the t1 timer as defined in RFC 904." 
+    ::= { egpNeighEntry 12 }
+
+egpNeighIntervalPoll OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The interval between EGP poll command
+            retransmissions (in hundredths of a second). This
+            represents the t3 timer as defined in RFC 904."
+    ::= { egpNeighEntry 13 }
+
+egpNeighMode OBJECT-TYPE
+    SYNTAX  INTEGER { active(1), passive(2) }
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The polling mode of this EGP entity, either
+            passive or active."
+    ::= { egpNeighEntry 14 }
+
+egpNeighEventTrigger OBJECT-TYPE
+    SYNTAX  INTEGER { start(1), stop(2) }
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "A control variable used to trigger operator-
+            initiated Start and Stop events. When read, this
+            variable always returns the most recent value that
+            egpNeighEventTrigger was set to. If it has not
+            been set since the last initialization of the
+            network management subsystem on the node, it
+            returns a value of `stop'.
+
+            When set, this variable causes a Start or Stop
+            event on the specified neighbor, as specified on
+            pages 8-10 of RFC 904. Briefly, a Start event
+            causes an Idle peer to begin neighbor acquisition
+            and a non-Idle peer to reinitiate neighbor
+            acquisition. A stop event causes a non-Idle peer
+            to return to the Idle state until a Start event
+            occurs, either via egpNeighEventTrigger or
+            otherwise."
+    ::= { egpNeighEntry 15 }
+
+-- additional EGP objects
+
+egpAs OBJECT-TYPE
+    SYNTAX  INTEGER
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The autonomous system number of this EGP entity."
+    ::= { egp 6 }
+
+-- the Transmission group
+
+-- Based on the transmission media underlying each interface
+-- on a system, the corresponding portion of the Transmission
+-- group is mandatory for that system.
+
+-- When Internet-standard definitions for managing
+-- transmission media are defined, the transmission group is
+-- used to provide a prefix for the names of those objects.
+
+-- Typically, such definitions reside in the experimental
+-- portion of the MIB until they are "proven", then as a
+-- part of the Internet standardization process, the
+-- definitions are accordingly elevated and a new object
+-- identifier, under the transmission group is defined. By
+-- convention, the name assigned is:
+--
+--     type OBJECT IDENTIFIER ::= { transmission number }
+--
+-- where "type" is the symbolic value used for the media in
+-- the ifType column of the ifTable object, and "number" is
+-- the actual integer value corresponding to the symbol.
+
+-- the SNMP group
+
+-- Implementation of the SNMP group is mandatory for all
+-- systems which support an SNMP protocol entity. Some of
+-- the objects defined below will be zero-valued in those
+-- SNMP implementations that are optimized to support only
+-- those functions specific to either a management agent or
+-- a management station. In particular, it should be
+-- observed that the objects below refer to an SNMP entity,
+-- and there may be several SNMP entities residing on a
+-- managed node (e.g., if the node is acting as
+-- a management station).
+
+snmpInPkts OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of Messages delivered to the
+            SNMP entity from the transport service."
+    ::= { snmp 1 }
+
+snmpOutPkts OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Messages which were
+            passed from the SNMP protocol entity to the
+            transport service."
+    ::= { snmp 2 }
+
+snmpInBadVersions OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Messages which were
+            delivered to the SNMP protocol entity and were for
+            an unsupported SNMP version."
+    ::= { snmp 3 }
+
+snmpInBadCommunityNames OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Messages delivered to
+            the SNMP protocol entity which used an SNMP
+            community name not known to said entity."
+    ::= { snmp 4 }
+
+snmpInBadCommunityUses OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Messages delivered to
+            the SNMP protocol entity which represented an SNMP
+            operation which was not allowed by the SNMP
+            community named in the Message."
+    ::= { snmp 5 }
+
+snmpInASNParseErrs OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of ASN.1 or BER errors
+            encountered by the SNMP protocol entity when
+            decoding received SNMP Messages."
+    ::= { snmp 6 }
+
+-- { snmp 7 } is not used
+
+snmpInTooBigs OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP PDUs which were
+            delivered to the SNMP protocol entity and for
+            which the value of the error-status field is
+            `tooBig'."
+    ::= { snmp 8 }
+
+snmpInNoSuchNames OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP PDUs which were
+            delivered to the SNMP protocol entity and for
+            which the value of the error-status field is
+            `noSuchName'."
+    ::= { snmp 9 }
+
+snmpInBadValues OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP PDUs which were
+            delivered to the SNMP protocol entity and for
+            which the value of the error-status field is
+            `badValue'."
+    ::= { snmp 10 }
+
+snmpInReadOnlys OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of valid SNMP PDUs which were
+            delivered to the SNMP protocol entity and for
+            which the value of the error-status field is
+            `readOnly'. It should be noted that it is a
+            protocol error to generate an SNMP PDU which
+            contains the value `readOnly' in the error-status
+            field; as such, this object is provided as a means
+            of detecting incorrect implementations of the
+            SNMP."
+    ::= { snmp 11 }
+
+snmpInGenErrs OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP PDUs which were
+            delivered to the SNMP protocol entity and for
+            which the value of the error-status field is
+            `genErr'."
+    ::= { snmp 12 }
+
+snmpInTotalReqVars OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of MIB objects which have been
+            retrieved successfully by the SNMP protocol entity
+            as the result of receiving valid SNMP Get-Request
+            and Get-Next PDUs."
+    ::= { snmp 13 }
+
+snmpInTotalSetVars OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of MIB objects which have been
+            altered successfully by the SNMP protocol entity
+            as the result of receiving valid SNMP Set-Request
+            PDUs."
+    ::= { snmp 14 }
+
+snmpInGetRequests OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Get-Request PDUs which
+            have been accepted and processed by the SNMP
+            protocol entity."
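+-- Non-normative worked example (added for illustration; not
+-- part of RFC 1213): an agent that successfully processes one
+-- Get-Request PDU naming three variables increments
+-- snmpInGetRequests by 1 and snmpInTotalReqVars by 3.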
+    ::= { snmp 15 }
+
+snmpInGetNexts OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Get-Next PDUs which have
+            been accepted and processed by the SNMP protocol
+            entity."
+    ::= { snmp 16 }
+
+snmpInSetRequests OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Set-Request PDUs which
+            have been accepted and processed by the SNMP
+            protocol entity."
+    ::= { snmp 17 }
+
+snmpInGetResponses OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Get-Response PDUs which
+            have been accepted and processed by the SNMP
+            protocol entity."
+    ::= { snmp 18 }
+
+snmpInTraps OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Trap PDUs which have
+            been accepted and processed by the SNMP protocol
+            entity."
+    ::= { snmp 19 }
+
+snmpOutTooBigs OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP PDUs which were
+            generated by the SNMP protocol entity and for
+            which the value of the error-status field is
+            `tooBig'."
+    ::= { snmp 20 }
+
+snmpOutNoSuchNames OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP PDUs which were
+            generated by the SNMP protocol entity and for
+            which the value of the error-status field is
+            `noSuchName'."
+    ::= { snmp 21 }
+
+snmpOutBadValues OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP PDUs which were
+            generated by the SNMP protocol entity and for
+            which the value of the error-status field is
+            `badValue'."
+    ::= { snmp 22 }
+
+-- { snmp 23 } is not used
+
+snmpOutGenErrs OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP PDUs which were
+            generated by the SNMP protocol entity and for
+            which the value of the error-status field is
+            `genErr'."
+    ::= { snmp 24 }
+
+snmpOutGetRequests OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Get-Request PDUs which
+            have been generated by the SNMP protocol entity."
+    ::= { snmp 25 }
+
+snmpOutGetNexts OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Get-Next PDUs which have
+            been generated by the SNMP protocol entity."
+    ::= { snmp 26 }
+
+snmpOutSetRequests OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Set-Request PDUs which
+            have been generated by the SNMP protocol entity."
+    ::= { snmp 27 }
+
+snmpOutGetResponses OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Get-Response PDUs which
+            have been generated by the SNMP protocol entity."
+    ::= { snmp 28 }
+
+snmpOutTraps OBJECT-TYPE
+    SYNTAX  Counter
+    ACCESS  read-only
+    STATUS  mandatory
+    DESCRIPTION
+            "The total number of SNMP Trap PDUs which have
+            been generated by the SNMP protocol entity."
+    ::= { snmp 29 }
+
+snmpEnableAuthenTraps OBJECT-TYPE
+    SYNTAX  INTEGER { enabled(1), disabled(2) }
+    ACCESS  read-write
+    STATUS  mandatory
+    DESCRIPTION
+            "Indicates whether the SNMP agent process is
+            permitted to generate authentication-failure
+            traps.
The value of this object overrides any + configuration information; as such, it provides a + means whereby all authentication-failure traps may + be disabled. + + Note that it is strongly recommended that this + object be stored in non-volatile memory so that it + remains constant between re-initializations of the + network management system." + ::= { snmp 30 } + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tableMibImports b/plugins/inputs/snmp/testdata/tableMibImports new file mode 100644 index 0000000000000..1516e7cbb840f --- /dev/null +++ b/plugins/inputs/snmp/testdata/tableMibImports @@ -0,0 +1,119 @@ +RFC1155-SMI DEFINITIONS ::= BEGIN + +EXPORTS -- EVERYTHING + internet, directory, mgmt, + experimental, private, enterprises, + OBJECT-TYPE, ObjectName, ObjectSyntax, SimpleSyntax, + ApplicationSyntax, NetworkAddress, IpAddress, + Counter, Gauge, TimeTicks, Opaque; + + -- the path to the root + + internet OBJECT IDENTIFIER ::= { iso org(3) dod(6) 1 } + + directory OBJECT IDENTIFIER ::= { internet 1 } + + mgmt OBJECT IDENTIFIER ::= { internet 2 } + + experimental OBJECT IDENTIFIER ::= { internet 3 } + + private OBJECT IDENTIFIER ::= { internet 4 } + enterprises OBJECT IDENTIFIER ::= { private 1 } + + -- definition of object types + + OBJECT-TYPE MACRO ::= + BEGIN + TYPE NOTATION ::= "SYNTAX" type (TYPE ObjectSyntax) + "ACCESS" Access + "STATUS" Status + VALUE NOTATION ::= value (VALUE ObjectName) + + Access ::= "read-only" + | "read-write" + | "write-only" + | "not-accessible" + Status ::= "mandatory" + | "optional" + | "obsolete" + END + + -- names of objects in the MIB + + ObjectName ::= + OBJECT IDENTIFIER + + -- syntax of objects in the MIB + + ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that simple SEQUENCEs are not directly + -- mentioned here to keep things simple (i.e., + -- prevent mis-use). 
However, application-wide + -- types which are IMPLICITly encoded simple + -- SEQUENCEs may appear in the following CHOICE + + application-wide + ApplicationSyntax + } + + SimpleSyntax ::= + CHOICE { + number + INTEGER, + string + OCTET STRING, + object + OBJECT IDENTIFIER, + empty + NULL + } + + ApplicationSyntax ::= + CHOICE { + address + NetworkAddress, + counter + Counter, + gauge + Gauge, + ticks + TimeTicks, + arbitrary + Opaque + + -- other application-wide types, as they are + -- defined, will be added here + } + + -- application-wide types + + NetworkAddress ::= + CHOICE { + internet + IpAddress + } + + IpAddress ::= + [APPLICATION 0] -- in network-byte order + IMPLICIT OCTET STRING (SIZE (4)) + + Counter ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + + Gauge ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + + TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + + Opaque ::= + [APPLICATION 4] -- arbitrary ASN.1 value, + IMPLICIT OCTET STRING -- "double-wrapped" + + END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/tcpMib b/plugins/inputs/snmp/testdata/tcpMib new file mode 100644 index 0000000000000..03c47224da153 --- /dev/null +++ b/plugins/inputs/snmp/testdata/tcpMib @@ -0,0 +1,786 @@ +TCP-MIB DEFINITIONS ::= BEGIN + +IMPORTS + MODULE-IDENTITY, OBJECT-TYPE, Integer32, Unsigned32, + Gauge32, Counter32, Counter64, IpAddress, mib-2, + MODULE-COMPLIANCE, OBJECT-GROUP, InetAddress, + InetAddressType, InetPortNumber + FROM tcpMibImports; + + + +tcpMIB MODULE-IDENTITY + LAST-UPDATED "200502180000Z" -- 18 February 2005 + ORGANIZATION + "IETF IPv6 MIB Revision Team + http://www.ietf.org/html.charters/ipv6-charter.html" + CONTACT-INFO + "Rajiv Raghunarayan (editor) + + Cisco Systems Inc. + 170 West Tasman Drive + San Jose, CA 95134 + + Phone: +1 408 853 9612 + Email: + + Send comments to " + DESCRIPTION + "The MIB module for managing TCP implementations. + + Copyright (C) The Internet Society (2005). This version + of this MIB module is a part of RFC 4022; see the RFC + itself for full legal notices." + REVISION "200502180000Z" -- 18 February 2005 + DESCRIPTION + "IP version neutral revision, published as RFC 4022." + REVISION "9411010000Z" + DESCRIPTION + "Initial SMIv2 version, published as RFC 2012." + REVISION "9103310000Z" + DESCRIPTION + "The initial revision of this MIB module was part of + MIB-II." + ::= { mib-2 49 } + +-- the TCP base variables group + +tcp OBJECT IDENTIFIER ::= { mib-2 6 } + +-- Scalars + +tcpRtoAlgorithm OBJECT-TYPE + SYNTAX INTEGER { + other(1), -- none of the following + constant(2), -- a constant rto + rsre(3), -- MIL-STD-1778, Appendix B + vanj(4), -- Van Jacobson's algorithm + rfc2988(5) -- RFC 2988 + } + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The algorithm used to determine the timeout value used for + retransmitting unacknowledged octets." + ::= { tcp 1 } + +tcpRtoMin OBJECT-TYPE + SYNTAX Integer32 (0..2147483647) + UNITS "milliseconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The minimum value permitted by a TCP implementation for + the retransmission timeout, measured in milliseconds. + More refined semantics for objects of this type depend + on the algorithm used to determine the retransmission + timeout; in particular, the IETF standard algorithm + rfc2988(5) provides a minimum value." 
+ ::= { tcp 2 } + +tcpRtoMax OBJECT-TYPE + SYNTAX Integer32 (0..2147483647) + UNITS "milliseconds" + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The maximum value permitted by a TCP implementation for + the retransmission timeout, measured in milliseconds. + More refined semantics for objects of this type depend + on the algorithm used to determine the retransmission + timeout; in particular, the IETF standard algorithm + rfc2988(5) provides an upper bound (as part of an + adaptive backoff algorithm)." + ::= { tcp 3 } + +tcpMaxConn OBJECT-TYPE + SYNTAX Integer32 (-1 | 0..2147483647) + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The limit on the total number of TCP connections the entity + can support. In entities where the maximum number of + connections is dynamic, this object should contain the + value -1." + ::= { tcp 4 } + +tcpActiveOpens OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the SYN-SENT state from the CLOSED state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 5 } + +tcpPassiveOpens OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times TCP connections have made a direct + transition to the SYN-RCVD state from the LISTEN state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 6 } + +tcpAttemptFails OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the CLOSED state from either the SYN-SENT + state or the SYN-RCVD state, plus the number of times that + TCP connections have made a direct transition to the + LISTEN state from the SYN-RCVD state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 7 } + +tcpEstabResets OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of times that TCP connections have made a direct + transition to the CLOSED state from either the ESTABLISHED + state or the CLOSE-WAIT state. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 8 } + +tcpCurrEstab OBJECT-TYPE + SYNTAX Gauge32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of TCP connections for which the current state + is either ESTABLISHED or CLOSE-WAIT." + ::= { tcp 9 } + +tcpInSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received, including those + received in error. This count includes segments received + on currently established connections. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 10 } + +tcpOutSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments sent, including those on + current connections but excluding those containing only + retransmitted octets. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." 
+ ::= { tcp 11 } + +tcpRetransSegs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments retransmitted; that is, the + number of TCP segments transmitted containing one or more + previously transmitted octets. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 12 } + +tcpInErrs OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received in error (e.g., bad + TCP checksums). + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 14 } + +tcpOutRsts OBJECT-TYPE + SYNTAX Counter32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The number of TCP segments sent containing the RST flag. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 15 } + +-- { tcp 16 } was used to represent the ipv6TcpConnTable in RFC 2452, +-- which has since been obsoleted. It MUST not be used. + +tcpHCInSegs OBJECT-TYPE + SYNTAX Counter64 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments received, including those + received in error. This count includes segments received + + on currently established connections. This object is + the 64-bit equivalent of tcpInSegs. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 17 } + +tcpHCOutSegs OBJECT-TYPE + SYNTAX Counter64 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The total number of segments sent, including those on + current connections but excluding those containing only + retransmitted octets. This object is the 64-bit + equivalent of tcpOutSegs. + + Discontinuities in the value of this counter are + indicated via discontinuities in the value of sysUpTime." + ::= { tcp 18 } + +-- The TCP Connection table + +tcpConnectionTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnectionEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing information about existing TCP + connections. Note that unlike earlier TCP MIBs, there + is a separate table for connections in the LISTEN state." + ::= { tcp 19 } + +tcpConnectionEntry OBJECT-TYPE + SYNTAX TcpConnectionEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A conceptual row of the tcpConnectionTable containing + information about a particular current TCP connection. + Each row of this table is transient in that it ceases to + exist when (or soon after) the connection makes the + transition to the CLOSED state." + INDEX { tcpConnectionLocalAddressType, + tcpConnectionLocalAddress, + tcpConnectionLocalPort, + tcpConnectionRemAddressType, + tcpConnectionRemAddress, + tcpConnectionRemPort } + ::= { tcpConnectionTable 1 } + +TcpConnectionEntry ::= SEQUENCE { + tcpConnectionLocalAddressType InetAddressType, + tcpConnectionLocalAddress InetAddress, + tcpConnectionLocalPort InetPortNumber, + tcpConnectionRemAddressType InetAddressType, + tcpConnectionRemAddress InetAddress, + tcpConnectionRemPort InetPortNumber, + tcpConnectionState INTEGER, + tcpConnectionProcess Unsigned32 + } + +tcpConnectionLocalAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpConnectionLocalAddress." 
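+    -- Illustration (not part of the original RFC text): for IPv4 endpoints
+    -- 10.0.0.1:80 and 10.0.0.2:1234, the six INDEX components of
+    -- tcpConnectionEntry above encode as 1.4.10.0.0.1.80.1.4.10.0.0.2.1234
+    -- (address type, length-prefixed address, then port, for each side),
+    -- which is why the 128-subidentifier caution below matters.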
+ ::= { tcpConnectionEntry 1 } + +tcpConnectionLocalAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local IP address for this TCP connection. The type + of this address is determined by the value of + tcpConnectionLocalAddressType. + + As this object is used in the index for the + tcpConnectionTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3." + ::= { tcpConnectionEntry 2 } + +tcpConnectionLocalPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnectionEntry 3 } + +tcpConnectionRemAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpConnectionRemAddress." + ::= { tcpConnectionEntry 4 } + +tcpConnectionRemAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The remote IP address for this TCP connection. The type + of this address is determined by the value of + tcpConnectionRemAddressType. + + As this object is used in the index for the + tcpConnectionTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3." + ::= { tcpConnectionEntry 5 } + +tcpConnectionRemPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnectionEntry 6 } + +tcpConnectionState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + MAX-ACCESS read-write + STATUS current + DESCRIPTION + "The state of this TCP connection. + + The value listen(2) is included only for parallelism to the + old tcpConnTable and should not be used. A connection in + LISTEN state should be present in the tcpListenerTable. + + The only value that may be set by a management station is + deleteTCB(12). Accordingly, it is appropriate for an agent + to return a `badValue' response if a management station + attempts to set this object to any other value. + + If a management station sets this object to the value + deleteTCB(12), then the TCB (as defined in [RFC793]) of + the corresponding connection on the managed node is + deleted, resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST segment may be + sent from the managed node to the other TCP endpoint (note, + however, that RST segments are not sent reliably)." + ::= { tcpConnectionEntry 7 } + +tcpConnectionProcess OBJECT-TYPE + SYNTAX Unsigned32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The system's process ID for the process associated with + this connection, or zero if there is no such process. This + value is expected to be the same as HOST-RESOURCES-MIB:: + hrSWRunIndex or SYSAPPL-MIB::sysApplElmtRunIndex for some + row in the appropriate tables." 
+ ::= { tcpConnectionEntry 8 } + +-- The TCP Listener table + +tcpListenerTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpListenerEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A table containing information about TCP listeners. A + listening application can be represented in three + possible ways: + + 1. An application that is willing to accept both IPv4 and + IPv6 datagrams is represented by + + a tcpListenerLocalAddressType of unknown (0) and + a tcpListenerLocalAddress of ''h (a zero-length + octet-string). + + 2. An application that is willing to accept only IPv4 or + IPv6 datagrams is represented by a + tcpListenerLocalAddressType of the appropriate address + type and a tcpListenerLocalAddress of '0.0.0.0' or '::' + respectively. + + 3. An application that is listening for data destined + only to a specific IP address, but from any remote + system, is represented by a tcpListenerLocalAddressType + of an appropriate address type, with + tcpListenerLocalAddress as the specific local address. + + NOTE: The address type in this table represents the + address type used for the communication, irrespective + of the higher-layer abstraction. For example, an + application using IPv6 'sockets' to communicate via + IPv4 between ::ffff:10.0.0.1 and ::ffff:10.0.0.2 would + use InetAddressType ipv4(1))." + ::= { tcp 20 } + +tcpListenerEntry OBJECT-TYPE + SYNTAX TcpListenerEntry + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "A conceptual row of the tcpListenerTable containing + information about a particular TCP listener." + INDEX { tcpListenerLocalAddressType, + tcpListenerLocalAddress, + tcpListenerLocalPort } + ::= { tcpListenerTable 1 } + +TcpListenerEntry ::= SEQUENCE { + tcpListenerLocalAddressType InetAddressType, + tcpListenerLocalAddress InetAddress, + tcpListenerLocalPort InetPortNumber, + tcpListenerProcess Unsigned32 + } + +tcpListenerLocalAddressType OBJECT-TYPE + SYNTAX InetAddressType + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The address type of tcpListenerLocalAddress. The value + should be unknown (0) if connection initiations to all + local IP addresses are accepted." + ::= { tcpListenerEntry 1 } + +tcpListenerLocalAddress OBJECT-TYPE + SYNTAX InetAddress + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local IP address for this TCP connection. + + The value of this object can be represented in three + possible ways, depending on the characteristics of the + listening application: + + 1. For an application willing to accept both IPv4 and + IPv6 datagrams, the value of this object must be + ''h (a zero-length octet-string), with the value + of the corresponding tcpListenerLocalAddressType + object being unknown (0). + + 2. For an application willing to accept only IPv4 or + IPv6 datagrams, the value of this object must be + '0.0.0.0' or '::' respectively, with + tcpListenerLocalAddressType representing the + appropriate address type. + + 3. For an application which is listening for data + destined only to a specific IP address, the value + of this object is the specific local address, with + tcpListenerLocalAddressType representing the + appropriate address type. + + As this object is used in the index for the + tcpListenerTable, implementors should be + careful not to create entries that would result in OIDs + with more than 128 subidentifiers; otherwise the information + cannot be accessed, using SNMPv1, SNMPv2c, or SNMPv3." 
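+    -- Illustration (not part of the original RFC text): an application
+    -- accepting IPv4 connections on port 22 for all local addresses
+    -- (case 2 above) is represented as tcpListenerLocalAddressType =
+    -- ipv4(1), tcpListenerLocalAddress = '0.0.0.0', and
+    -- tcpListenerLocalPort = 22.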
+ ::= { tcpListenerEntry 2 } + +tcpListenerLocalPort OBJECT-TYPE + SYNTAX InetPortNumber + MAX-ACCESS not-accessible + STATUS current + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpListenerEntry 3 } + +tcpListenerProcess OBJECT-TYPE + SYNTAX Unsigned32 + MAX-ACCESS read-only + STATUS current + DESCRIPTION + "The system's process ID for the process associated with + this listener, or zero if there is no such process. This + value is expected to be the same as HOST-RESOURCES-MIB:: + hrSWRunIndex or SYSAPPL-MIB::sysApplElmtRunIndex for some + row in the appropriate tables." + ::= { tcpListenerEntry 4 } + +-- The deprecated TCP Connection table + +tcpConnTable OBJECT-TYPE + SYNTAX SEQUENCE OF TcpConnEntry + MAX-ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "A table containing information about existing IPv4-specific + TCP connections or listeners. This table has been + deprecated in favor of the version neutral + tcpConnectionTable." + ::= { tcp 13 } + +tcpConnEntry OBJECT-TYPE + SYNTAX TcpConnEntry + MAX-ACCESS not-accessible + STATUS deprecated + DESCRIPTION + "A conceptual row of the tcpConnTable containing information + about a particular current IPv4 TCP connection. Each row + of this table is transient in that it ceases to exist when + (or soon after) the connection makes the transition to the + CLOSED state." + INDEX { tcpConnLocalAddress, + tcpConnLocalPort, + tcpConnRemAddress, + tcpConnRemPort } + ::= { tcpConnTable 1 } + +TcpConnEntry ::= SEQUENCE { + tcpConnState INTEGER, + tcpConnLocalAddress IpAddress, + tcpConnLocalPort Integer32, + tcpConnRemAddress IpAddress, + tcpConnRemPort Integer32 + + } + +tcpConnState OBJECT-TYPE + SYNTAX INTEGER { + closed(1), + listen(2), + synSent(3), + synReceived(4), + established(5), + finWait1(6), + finWait2(7), + closeWait(8), + lastAck(9), + closing(10), + timeWait(11), + deleteTCB(12) + } + MAX-ACCESS read-write + STATUS deprecated + DESCRIPTION + "The state of this TCP connection. + + The only value that may be set by a management station is + deleteTCB(12). Accordingly, it is appropriate for an agent + to return a `badValue' response if a management station + attempts to set this object to any other value. + + If a management station sets this object to the value + deleteTCB(12), then the TCB (as defined in [RFC793]) of + the corresponding connection on the managed node is + deleted, resulting in immediate termination of the + connection. + + As an implementation-specific option, a RST segment may be + sent from the managed node to the other TCP endpoint (note, + however, that RST segments are not sent reliably)." + ::= { tcpConnEntry 1 } + +tcpConnLocalAddress OBJECT-TYPE + SYNTAX IpAddress + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The local IP address for this TCP connection. In the case + of a connection in the listen state willing to + accept connections for any IP interface associated with the + node, the value 0.0.0.0 is used." + ::= { tcpConnEntry 2 } + +tcpConnLocalPort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The local port number for this TCP connection." + ::= { tcpConnEntry 3 } + +tcpConnRemAddress OBJECT-TYPE + SYNTAX IpAddress + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The remote IP address for this TCP connection." 
+ ::= { tcpConnEntry 4 } + +tcpConnRemPort OBJECT-TYPE + SYNTAX Integer32 (0..65535) + MAX-ACCESS read-only + STATUS deprecated + DESCRIPTION + "The remote port number for this TCP connection." + ::= { tcpConnEntry 5 } + +-- conformance information + +tcpMIBConformance OBJECT IDENTIFIER ::= { tcpMIB 2 } + +tcpMIBCompliances OBJECT IDENTIFIER ::= { tcpMIBConformance 1 } +tcpMIBGroups OBJECT IDENTIFIER ::= { tcpMIBConformance 2 } + +-- compliance statements + +tcpMIBCompliance2 MODULE-COMPLIANCE + STATUS current + DESCRIPTION + "The compliance statement for systems that implement TCP. + + A number of INDEX objects cannot be + represented in the form of OBJECT clauses in SMIv2 but + have the following compliance requirements, + expressed in OBJECT clause form in this description + clause: + + -- OBJECT tcpConnectionLocalAddressType + -- SYNTAX InetAddressType { ipv4(1), ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + + -- and IPv6 address types. + -- + -- OBJECT tcpConnectionRemAddressType + -- SYNTAX InetAddressType { ipv4(1), ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + -- and IPv6 address types. + -- + -- OBJECT tcpListenerLocalAddressType + -- SYNTAX InetAddressType { unknown(0), ipv4(1), + -- ipv6(2) } + -- DESCRIPTION + -- This MIB requires support for only global IPv4 + -- and IPv6 address types. The type unknown also + -- needs to be supported to identify a special + -- case in the listener table: a listen using + -- both IPv4 and IPv6 addresses on the device. + -- + " + MODULE -- this module + MANDATORY-GROUPS { tcpBaseGroup, tcpConnectionGroup, + tcpListenerGroup } + GROUP tcpHCGroup + DESCRIPTION + "This group is mandatory for systems that are capable + of receiving or transmitting more than 1 million TCP + segments per second. 1 million segments per second will + cause a Counter32 to wrap in just over an hour." + OBJECT tcpConnectionState + SYNTAX INTEGER { closed(1), listen(2), synSent(3), + synReceived(4), established(5), + finWait1(6), finWait2(7), closeWait(8), + lastAck(9), closing(10), timeWait(11) } + MIN-ACCESS read-only + DESCRIPTION + "Write access is not required, nor is support for the value + deleteTCB (12)." + ::= { tcpMIBCompliances 2 } + +tcpMIBCompliance MODULE-COMPLIANCE + STATUS deprecated + DESCRIPTION + "The compliance statement for IPv4-only systems that + implement TCP. In order to be IP version independent, this + compliance statement is deprecated in favor of + tcpMIBCompliance2. However, agents are still encouraged + to implement these objects in order to interoperate with + the deployed base of managers." + + MODULE -- this module + MANDATORY-GROUPS { tcpGroup } + OBJECT tcpConnState + MIN-ACCESS read-only + DESCRIPTION + "Write access is not required." + ::= { tcpMIBCompliances 1 } + +-- units of conformance + +tcpGroup OBJECT-GROUP + OBJECTS { tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax, + tcpMaxConn, tcpActiveOpens, + tcpPassiveOpens, tcpAttemptFails, + tcpEstabResets, tcpCurrEstab, tcpInSegs, + tcpOutSegs, tcpRetransSegs, tcpConnState, + tcpConnLocalAddress, tcpConnLocalPort, + tcpConnRemAddress, tcpConnRemPort, + tcpInErrs, tcpOutRsts } + STATUS deprecated + DESCRIPTION + "The tcp group of objects providing for management of TCP + entities." 
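+    -- A note on the tcpHCGroup sizing rule above: a Counter32 wraps after
+    -- 2^32 = 4294967296 counts, so at 10^6 segments per second it rolls
+    -- over after roughly 4295 seconds, i.e. just over 71 minutes.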
+ ::= { tcpMIBGroups 1 } + +tcpBaseGroup OBJECT-GROUP + OBJECTS { tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax, + tcpMaxConn, tcpActiveOpens, + tcpPassiveOpens, tcpAttemptFails, + tcpEstabResets, tcpCurrEstab, tcpInSegs, + tcpOutSegs, tcpRetransSegs, + tcpInErrs, tcpOutRsts } + STATUS current + DESCRIPTION + "The group of counters common to TCP entities." + ::= { tcpMIBGroups 2 } + +tcpConnectionGroup OBJECT-GROUP + OBJECTS { tcpConnectionState, tcpConnectionProcess } + STATUS current + DESCRIPTION + "The group provides general information about TCP + connections." + ::= { tcpMIBGroups 3 } + +tcpListenerGroup OBJECT-GROUP + OBJECTS { tcpListenerProcess } + STATUS current + DESCRIPTION + "This group has objects providing general information about + TCP listeners." + ::= { tcpMIBGroups 4 } + +tcpHCGroup OBJECT-GROUP + OBJECTS { tcpHCInSegs, tcpHCOutSegs } + STATUS current + DESCRIPTION + "The group of objects providing for counters of high speed + TCP implementations." + ::= { tcpMIBGroups 5 } + +END diff --git a/plugins/inputs/snmp/testdata/tcpMibImports b/plugins/inputs/snmp/testdata/tcpMibImports new file mode 100644 index 0000000000000..f3b6b9d8d52fd --- /dev/null +++ b/plugins/inputs/snmp/testdata/tcpMibImports @@ -0,0 +1,639 @@ +SNMPv2-SMI DEFINITIONS ::= BEGIN + +-- the path to the root + +org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 +dod OBJECT IDENTIFIER ::= { org 6 } +internet OBJECT IDENTIFIER ::= { dod 1 } + +directory OBJECT IDENTIFIER ::= { internet 1 } + +mgmt OBJECT IDENTIFIER ::= { internet 2 } +mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } +transmission OBJECT IDENTIFIER ::= { mib-2 10 } + +experimental OBJECT IDENTIFIER ::= { internet 3 } + +private OBJECT IDENTIFIER ::= { internet 4 } +enterprises OBJECT IDENTIFIER ::= { private 1 } + +security OBJECT IDENTIFIER ::= { internet 5 } + +snmpV2 OBJECT IDENTIFIER ::= { internet 6 } + +-- transport domains +snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } + +-- transport proxies +snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } + +-- module identities +snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } + +-- Extended UTCTime, to allow dates with four-digit years +-- (Note that this definition of ExtUTCTime is not to be IMPORTed +-- by MIB modules.) +ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) + -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ + + -- where: YY - last two digits of year (only years + -- between 1900-1999) + -- YYYY - last four digits of the year (any year) + -- MM - month (01 through 12) + -- DD - day of month (01 through 31) + -- HH - hours (00 through 23) + -- MM - minutes (00 through 59) + -- Z - denotes GMT (the ASCII character Z) + -- + -- For example, "9502192015Z" and "199502192015Z" represent + -- 8:15pm GMT on 19 February 1995. Years after 1999 must use + -- the four digit year format. Years 1900-1999 may use the + -- two or four digit format. 
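+
+-- For reference: chaining the assignments above, mib-2 resolves to
+-- iso(1).org(3).dod(6).internet(1).mgmt(2).mib-2(1), i.e. 1.3.6.1.2.1
+-- (so transmission is 1.3.6.1.2.1.10, and the importing TCP-MIB's
+-- tcpMIB, { mib-2 49 }, is 1.3.6.1.2.1.49).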
+ +-- definitions for information modules + +MODULE-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "LAST-UPDATED" value(Update ExtUTCTime) + "ORGANIZATION" Text + "CONTACT-INFO" Text + "DESCRIPTION" Text + RevisionPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + RevisionPart ::= + Revisions + | empty + Revisions ::= + Revision + | Revisions Revision + Revision ::= + "REVISION" value(Update ExtUTCTime) + "DESCRIPTION" Text + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +OBJECT-IDENTITY MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- names of objects +-- (Note that these definitions of ObjectName and NotificationName +-- are not to be IMPORTed by MIB modules.) + +ObjectName ::= + OBJECT IDENTIFIER + +NotificationName ::= + OBJECT IDENTIFIER + +-- syntax of objects + +-- the "base types" defined here are: +-- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER +-- 8 application-defined types: Integer32, IpAddress, Counter32, +-- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 + +ObjectSyntax ::= + CHOICE { + simple + SimpleSyntax, + -- note that SEQUENCEs for conceptual tables and + -- rows are not mentioned here... + + application-wide + ApplicationSyntax + } + +-- built-in ASN.1 types + +SimpleSyntax ::= + CHOICE { + -- INTEGERs with a more restrictive range + -- may also be used + integer-value -- includes Integer32 + INTEGER (-2147483648..2147483647), + -- OCTET STRINGs with a more restrictive size + -- may also be used + string-value + OCTET STRING (SIZE (0..65535)), + objectID-value + OBJECT IDENTIFIER + } + +-- indistinguishable from INTEGER, but never needs more than +-- 32-bits for a two's complement representation +Integer32 ::= + INTEGER (-2147483648..2147483647) + +-- application-wide types + +ApplicationSyntax ::= + CHOICE { + ipAddress-value + IpAddress, + counter-value + Counter32, + timeticks-value + TimeTicks, + arbitrary-value + Opaque, + big-counter-value + Counter64, + unsigned-integer-value -- includes Gauge32 + Unsigned32 + } + +-- in network-byte order + +-- (this is a tagged type for historical reasons) +IpAddress ::= + [APPLICATION 0] + IMPLICIT OCTET STRING (SIZE (4)) + +-- this wraps +Counter32 ::= + [APPLICATION 1] + IMPLICIT INTEGER (0..4294967295) + +-- this doesn't wrap +Gauge32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- an unsigned 32-bit quantity +-- indistinguishable from Gauge32 +Unsigned32 ::= + [APPLICATION 2] + IMPLICIT INTEGER (0..4294967295) + +-- hundredths of seconds since an epoch +TimeTicks ::= + [APPLICATION 3] + IMPLICIT INTEGER (0..4294967295) + +-- for backward-compatibility only +Opaque ::= + [APPLICATION 4] + IMPLICIT OCTET STRING + +-- for counters that wrap in less than one hour with only 32 bits +Counter64 ::= + [APPLICATION 6] + IMPLICIT INTEGER (0..18446744073709551615) + +-- definition for objects + +OBJECT-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + "SYNTAX" Syntax + UnitsPart + "MAX-ACCESS" Access + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + IndexPart + DefValPart + + VALUE NOTATION ::= + value(VALUE ObjectName) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its 
refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + UnitsPart ::= + "UNITS" Text + | empty + + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + IndexPart ::= + "INDEX" "{" IndexTypes "}" + | "AUGMENTS" "{" Entry "}" + | empty + IndexTypes ::= + IndexType + | IndexTypes "," IndexType + IndexType ::= + "IMPLIED" Index + | Index + + Index ::= + -- use the SYNTAX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + Entry ::= + -- use the INDEX value of the + -- correspondent OBJECT-TYPE invocation + value(ObjectName) + + DefValPart ::= "DEFVAL" "{" Defvalue "}" + | empty + + Defvalue ::= -- must be valid for the type specified in + -- SYNTAX clause of same OBJECT-TYPE macro + value(ObjectSyntax) + | "{" BitsValue "}" + + BitsValue ::= BitNames + | empty + + BitNames ::= BitName + | BitNames "," BitName + + BitName ::= identifier + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions for notifications + +NOTIFICATION-TYPE MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE NotificationName) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + | empty + Objects ::= + Object + + | Objects "," Object + Object ::= + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in section 3.1.1 + Text ::= value(IA5String) +END + +-- definitions of administrative identifiers + +zeroDotZero OBJECT-IDENTITY + STATUS current + DESCRIPTION + "A value used for null identifiers." 
+ ::= { 0 0 } + + + +TEXTUAL-CONVENTION MACRO ::= + +BEGIN + TYPE NOTATION ::= + DisplayPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + "SYNTAX" Syntax + + VALUE NOTATION ::= + value(VALUE Syntax) -- adapted ASN.1 + + DisplayPart ::= + "DISPLAY-HINT" Text + | empty + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + +END + +MODULE-COMPLIANCE MACRO ::= +BEGIN + TYPE NOTATION ::= + "STATUS" Status + "DESCRIPTION" Text + ReferPart + ModulePart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + ModulePart ::= + Modules + Modules ::= + Module + | Modules Module + Module ::= + -- name of module -- + "MODULE" ModuleName + MandatoryPart + CompliancePart + + ModuleName ::= + -- identifier must start with uppercase letter + identifier ModuleIdentifier + -- must not be empty unless contained + -- in MIB Module + | empty + ModuleIdentifier ::= + value(OBJECT IDENTIFIER) + | empty + + MandatoryPart ::= + "MANDATORY-GROUPS" "{" Groups "}" + | empty + + Groups ::= + + Group + | Groups "," Group + Group ::= + value(OBJECT IDENTIFIER) + + CompliancePart ::= + Compliances + | empty + + Compliances ::= + Compliance + | Compliances Compliance + Compliance ::= + ComplianceGroup + | Object + + ComplianceGroup ::= + "GROUP" value(OBJECT IDENTIFIER) + "DESCRIPTION" Text + + Object ::= + "OBJECT" value(ObjectName) + SyntaxPart + WriteSyntaxPart + AccessPart + "DESCRIPTION" Text + + -- must be a refinement for object's SYNTAX clause + SyntaxPart ::= "SYNTAX" Syntax + | empty + + -- must be a refinement for object's SYNTAX clause + WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax + | empty + + Syntax ::= -- Must be one of the following: + -- a base type (or its refinement), + -- a textual convention (or its refinement), or + -- a BITS pseudo-type + type + | "BITS" "{" NamedBits "}" + + NamedBits ::= NamedBit + | NamedBits "," NamedBit + + NamedBit ::= identifier "(" number ")" -- number is nonnegative + + AccessPart ::= + "MIN-ACCESS" Access + | empty + Access ::= + "not-accessible" + | "accessible-for-notify" + | "read-only" + | "read-write" + | "read-create" + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +OBJECT-GROUP MACRO ::= +BEGIN + TYPE NOTATION ::= + ObjectsPart + "STATUS" Status + "DESCRIPTION" Text + ReferPart + + VALUE NOTATION ::= + value(VALUE OBJECT IDENTIFIER) + + ObjectsPart ::= + "OBJECTS" "{" Objects "}" + Objects ::= + Object + | Objects "," Object + Object ::= + + value(ObjectName) + + Status ::= + "current" + | "deprecated" + | "obsolete" + + ReferPart ::= + "REFERENCE" Text + | empty + + -- a character string as defined in [2] + Text ::= value(IA5String) +END + +InetPortNumber ::= TEXTUAL-CONVENTION + DISPLAY-HINT "d" + STATUS current + DESCRIPTION + "Represents a 16 bit port number of an Internet transport + + layer protocol. Port numbers are assigned by IANA. A + current list of all assignments is available from + . + + The value zero is object-specific and must be defined as + part of the description of any object that uses this + syntax. 
Examples of the usage of zero might include + situations where a port number is unknown, or when the + value zero is used as a wildcard in a filter." + REFERENCE "STD 6 (RFC 768), STD 7 (RFC 793) and RFC 2960" + SYNTAX Unsigned32 (0..65535) + + +InetAddress ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "Denotes a generic Internet address. + + An InetAddress value is always interpreted within the context + of an InetAddressType value. Every usage of the InetAddress + textual convention is required to specify the InetAddressType + object that provides the context. It is suggested that the + InetAddressType object be logically registered before the + object(s) that use the InetAddress textual convention, if + they appear in the same logical row. + + The value of an InetAddress object must always be + consistent with the value of the associated InetAddressType + object. Attempts to set an InetAddress object to a value + inconsistent with the associated InetAddressType + must fail with an inconsistentValue error. + + When this textual convention is used as the syntax of an + index object, there may be issues with the limit of 128 + sub-identifiers specified in SMIv2, STD 58. In this case, + the object definition MUST include a 'SIZE' clause to + limit the number of potential instance sub-identifiers; + otherwise the applicable constraints MUST be stated in + the appropriate conceptual row DESCRIPTION clauses, or + in the surrounding documentation if there is no single + DESCRIPTION clause that is appropriate." + SYNTAX OCTET STRING (SIZE (0..255)) + +InetAddressType ::= TEXTUAL-CONVENTION + STATUS current + DESCRIPTION + "A value that represents a type of Internet address. + + unknown(0) An unknown address type. This value MUST + be used if the value of the corresponding + InetAddress object is a zero-length string. + It may also be used to indicate an IP address + that is not in one of the formats defined + below. + + ipv4(1) An IPv4 address as defined by the + InetAddressIPv4 textual convention. + + ipv6(2) An IPv6 address as defined by the + InetAddressIPv6 textual convention. + + ipv4z(3) A non-global IPv4 address including a zone + index as defined by the InetAddressIPv4z + textual convention. + + ipv6z(4) A non-global IPv6 address including a zone + index as defined by the InetAddressIPv6z + textual convention. + + dns(16) A DNS domain name as defined by the + InetAddressDNS textual convention. + + Each definition of a concrete InetAddressType value must be + accompanied by a definition of a textual convention for use + with that InetAddressType. + + To support future extensions, the InetAddressType textual + convention SHOULD NOT be sub-typed in object type definitions. + It MAY be sub-typed in compliance statements in order to + require only a subset of these address types for a compliant + implementation. + + Implementations must ensure that InetAddressType objects + and any dependent objects (e.g., InetAddress objects) are + consistent. An inconsistentValue error must be generated + if an attempt to change an InetAddressType object would, + for example, lead to an undefined InetAddress value. In + + particular, InetAddressType/InetAddress pairs must be + changed together if the address type changes (e.g., from + ipv6(2) to ipv4(1))." 
+ SYNTAX INTEGER { + unknown(0), + ipv4(1), + ipv6(2), + ipv4z(3), + ipv6z(4), + dns(16) + } + + + + + +END \ No newline at end of file diff --git a/plugins/inputs/snmp/testdata/test.mib b/plugins/inputs/snmp/testdata/test.mib deleted file mode 100644 index 7c3758d66d9a1..0000000000000 --- a/plugins/inputs/snmp/testdata/test.mib +++ /dev/null @@ -1,58 +0,0 @@ -TEST DEFINITIONS ::= BEGIN - -testOID ::= { 1 0 0 } - -testTable OBJECT-TYPE - SYNTAX SEQUENCE OF testTableEntry - MAX-ACCESS not-accessible - STATUS current - ::= { testOID 0 } - -testTableEntry OBJECT-TYPE - SYNTAX TestTableEntry - MAX-ACCESS not-accessible - STATUS current - INDEX { - server - } - ::= { testTable 1 } - -TestTableEntry ::= - SEQUENCE { - server OCTET STRING, - connections INTEGER, - latency OCTET STRING, - description OCTET STRING, - } - -server OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 1 } - -connections OBJECT-TYPE - SYNTAX INTEGER - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 2 } - -latency OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 3 } - -description OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testTableEntry 4 } - -hostname OBJECT-TYPE - SYNTAX OCTET STRING - MAX-ACCESS read-only - STATUS current - ::= { testOID 1 1 } - -END diff --git a/plugins/inputs/snmp_legacy/README.md b/plugins/inputs/snmp_legacy/README.md index 06bebbcad6176..0eb2335ff032e 100644 --- a/plugins/inputs/snmp_legacy/README.md +++ b/plugins/inputs/snmp_legacy/README.md @@ -1,61 +1,136 @@ # SNMP Legacy Input Plugin -The SNMP input plugin gathers metrics from SNMP agents - -### Configuration: - +**Deprecated in version 1.0. Use [SNMP input plugin][]** -#### Very simple example - -In this example, the plugin will gather value of OIDS: - - - `.1.3.6.1.2.1.2.2.1.4.1` +The SNMP input plugin gathers metrics from SNMP agents -```toml -# Very Simple Example -[[inputs.snmp]] +## Configuration +```toml @sample.conf +[[inputs.snmp_legacy]] + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" [[inputs.snmp.host]] - address = "127.0.0.1:161" + address = "192.168.2.2:161" # SNMP community community = "public" # default public # SNMP version (1, 2 or 3) # Version 3 not supported yet version = 2 # default 2 + # SNMP response timeout + timeout = 2.0 # default 2.0 + # SNMP request retries + retries = 2 # default 2 + # Which get/bulk do you want to collect for this host + collect = ["mybulk", "sysservices", "sysdescr"] # Simple list of OIDs to get, in addition to "collect" - get_oids = [".1.3.6.1.2.1.2.2.1.4.1"] + get_oids = [] + [[inputs.snmp.host]] + address = "192.168.2.3:161" + community = "public" + version = 2 + timeout = 2.0 + retries = 2 + collect = ["mybulk"] + get_oids = [ + "ifNumber", + ".1.3.6.1.2.1.1.3.0", + ] + [[inputs.snmp.get]] + name = "ifnumber" + oid = "ifNumber" + [[inputs.snmp.get]] + name = "interface_speed" + oid = "ifSpeed" + instance = "0" + [[inputs.snmp.get]] + name = "sysuptime" + oid = ".1.3.6.1.2.1.1.3.0" + unit = "second" + [[inputs.snmp.bulk]] + name = "mybulk" + max_repetition = 127 + oid = ".1.3.6.1.2.1.1" + [[inputs.snmp.bulk]] + name = "ifoutoctets" + max_repetition = 127 + 
oid = "ifOutOctets" + [[inputs.snmp.host]] + address = "192.168.2.13:161" + #address = "127.0.0.1:161" + community = "public" + version = 2 + timeout = 2.0 + retries = 2 + #collect = ["mybulk", "sysservices", "sysdescr", "systype"] + collect = ["sysuptime" ] + [[inputs.snmp.host.table]] + name = "iftable3" + include_instances = ["enp5s0", "eth1"] + # SNMP TABLEs + # table without mapping neither subtables + [[inputs.snmp.table]] + name = "iftable1" + oid = ".1.3.6.1.2.1.31.1.1.1" + # table without mapping but with subtables + [[inputs.snmp.table]] + name = "iftable2" + oid = ".1.3.6.1.2.1.31.1.1.1" + sub_tables = [".1.3.6.1.2.1.2.2.1.13"] + # table with mapping but without subtables + [[inputs.snmp.table]] + name = "iftable3" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty. get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty, get all subtables + # table with both mapping and subtables + [[inputs.snmp.table]] + name = "iftable4" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty get all subtables + # sub_tables could be not "real subtables" + sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] ``` +In the previous example, the plugin will gather value of OIDS: + +- `.1.3.6.1.2.1.2.2.1.4.1` -#### Simple example +### Simple example In this example, Telegraf gathers value of OIDS: - - named **ifnumber** - - named **interface_speed** +- named **ifnumber** +- named **interface_speed** With **inputs.snmp.get** section the plugin gets the oid number: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* As you can see *ifSpeed* is not a valid OID. In order to get the valid OID, the plugin uses `snmptranslate_file` to match the OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` Also as the plugin will append `instance` to the corresponding OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` In this example, the plugin will gather value of OIDS: - `.1.3.6.1.2.1.2.1.0` - `.1.3.6.1.2.1.2.2.1.5.1` - ```toml # Simple example [[inputs.snmp]] @@ -86,36 +161,35 @@ In this example, the plugin will gather value of OIDS: ``` - -#### Simple bulk example +### Simple bulk example In this example, Telegraf gathers value of OIDS: - - named **ifnumber** - - named **interface_speed** - - named **if_out_octets** +- named **ifnumber** +- named **interface_speed** +- named **if_out_octets** With **inputs.snmp.get** section the plugin gets oid number: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* With **inputs.snmp.bulk** section the plugin gets the oid number: - - **if_out_octets** => *ifOutOctets* +- **if_out_octets** => *ifOutOctets* As you can see *ifSpeed* and *ifOutOctets* are not a valid OID. 
In order to get the valid OID, the plugin uses `snmptranslate_file` to match the OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` - - **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5` +- **if_out_octets** => *ifOutOctets* => `.1.3.6.1.2.1.2.2.1.16` Also, the plugin will append `instance` to the corresponding OID: - - **ifnumber** => `.1.3.6.1.2.1.2.1.0` - - **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` +- **ifnumber** => `.1.3.6.1.2.1.2.1.0` +- **interface_speed** => *ifSpeed* => `.1.3.6.1.2.1.2.2.1.5.1` And **if_out_octets** is a bulk request, the plugin will gathers all OIDS in the table. @@ -138,7 +212,6 @@ In this example, the plugin will gather value of OIDS: - `.1.3.6.1.2.1.2.2.1.16.5` - `...` - ```toml # Simple bulk example [[inputs.snmp]] @@ -172,8 +245,7 @@ In this example, the plugin will gather value of OIDS: oid = "ifOutOctets" ``` - -#### Table example +### Table example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. @@ -183,11 +255,11 @@ other configuration Telegraf gathers value of OIDS of the table: - - named **iftable1** +- named **iftable1** With **inputs.snmp.table** section the plugin gets oid number: - - **iftable1** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable1** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable1** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -237,8 +309,7 @@ OIDS in the table and in the subtables oid = ".1.3.6.1.2.1.31.1.1.1" ``` - -#### Table with subtable example +### Table with subtable example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. @@ -248,12 +319,12 @@ other configuration Telegraf gathers value of OIDS of the table: - - named **iftable2** +- named **iftable2** With **inputs.snmp.table** section *AND* **sub_tables** attribute, the plugin will get OIDS from subtables: - - **iftable2** => `.1.3.6.1.2.1.2.2.1.13` +- **iftable2** => `.1.3.6.1.2.1.2.2.1.13` Also **iftable2** is a table, the plugin will gathers all OIDS in subtables: @@ -264,7 +335,6 @@ OIDS in subtables: - `.1.3.6.1.2.1.2.2.1.13.4` - `.1.3.6.1.2.1.2.2.1.13....` - ```toml # Table with subtable example [[inputs.snmp]] @@ -293,19 +363,18 @@ OIDS in subtables: # oid attribute is useless ``` - -#### Table with mapping example +### Table with mapping example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. Telegraf gathers value of OIDS of the table: - - named **iftable3** +- named **iftable3** With **inputs.snmp.table** section the plugin gets oid number: - - **iftable3** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable3** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable2** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -332,11 +401,12 @@ will be gathered; As you see, there is an other attribute, `mapping_table`. `include_instances` and `mapping_table` permit to build a hash table to filter only OIDS you want. 
Let's say, we have the following data on SNMP server: - - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` + +- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` The plugin will build the following hash table: @@ -397,20 +467,19 @@ Note: the plugin will add instance name as tag *instance* # if empty, get all subtables ``` - -#### Table with both mapping and subtable example +### Table with both mapping and subtable example In this example, we remove collect attribute to the host section, but you can still use it in combination of the following part. Telegraf gathers value of OIDS of the table: - - named **iftable4** +- named **iftable4** With **inputs.snmp.table** section *AND* **sub_tables** attribute, the plugin will get OIDS from subtables: - - **iftable4** => `.1.3.6.1.2.1.31.1.1.1` +- **iftable4** => `.1.3.6.1.2.1.31.1.1.1` Also **iftable2** is a table, the plugin will gathers all OIDS in the table and in the subtables @@ -431,11 +500,12 @@ will be gathered; As you see, there is an other attribute, `mapping_table`. `include_instances` and `mapping_table` permit to build a hash table to filter only OIDS you want. Let's say, we have the following data on SNMP server: - - OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` - - OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` + +- OID: `.1.3.6.1.2.1.31.1.1.1.1.1` has as value: `enp5s0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.2` has as value: `enp5s1` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.3` has as value: `enp5s2` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.4` has as value: `eth0` +- OID: `.1.3.6.1.2.1.31.1.1.1.1.5` has as value: `eth1` The plugin will build the following hash table: @@ -457,8 +527,6 @@ the following OIDS: Note: the plugin will add instance name as tag *instance* - - ```toml # Table with both mapping and subtable example [[inputs.snmp]] @@ -486,7 +554,7 @@ Note: the plugin will add instance name as tag *instance* # if empty get all instances mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" # if empty get all subtables - # sub_tables could be not "real subtables" + # sub_tables could be not "real subtables" sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] # note # oid attribute is useless @@ -503,7 +571,7 @@ Note: the plugin will add instance name as tag *instance* unit = "octets" ``` -#### Configuration notes +### Configuration notes - In **inputs.snmp.table** section, the `oid` attribute is useless if the `sub_tables` attributes is defined @@ -511,39 +579,41 @@ Note: the plugin will add instance name as tag *instance* - In **inputs.snmp.subtable** section, you can put a name from `snmptranslate_file` as `oid` attribute instead of a valid OID -### Measurements & Fields: +## Metrics With the last example (Table with both mapping and subtable example): - ifHCOutOctets - - ifHCOutOctets + - ifHCOutOctets - ifInDiscards - - ifInDiscards + - ifInDiscards - ifHCInOctets - - ifHCInOctets 
+ - ifHCInOctets -### Tags: +### Tags With the last example (Table with both mapping and subtable example): - ifHCOutOctets - - host - - instance - - unit + - host + - instance + - unit - ifInDiscards - - host - - instance + - host + - instance - ifHCInOctets - - host - - instance - - unit + - host + - instance + - unit -### Example Output: +## Example Output With the last example (Table with both mapping and subtable example): -``` +```shell ifHCOutOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCOutOctets=10565628i 1456878706044462901 ifInDiscards,host=127.0.0.1,instance=enp5s0 ifInDiscards=0i 1456878706044510264 ifHCInOctets,host=127.0.0.1,instance=enp5s0,unit=octets ifHCInOctets=76351777i 1456878706044531312 ``` + +[SNMP input plugin]: /plugins/inputs/snmp diff --git a/plugins/inputs/snmp_legacy/sample.conf b/plugins/inputs/snmp_legacy/sample.conf new file mode 100644 index 0000000000000..f48d5c564b285 --- /dev/null +++ b/plugins/inputs/snmp_legacy/sample.conf @@ -0,0 +1,90 @@ +[[inputs.snmp_legacy]] + ## Use 'oids.txt' file to translate oids to names + ## To generate 'oids.txt' you need to run: + ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt + ## Or if you have an other MIB folder with custom MIBs + ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt + snmptranslate_file = "/tmp/oids.txt" + [[inputs.snmp.host]] + address = "192.168.2.2:161" + # SNMP community + community = "public" # default public + # SNMP version (1, 2 or 3) + # Version 3 not supported yet + version = 2 # default 2 + # SNMP response timeout + timeout = 2.0 # default 2.0 + # SNMP request retries + retries = 2 # default 2 + # Which get/bulk do you want to collect for this host + collect = ["mybulk", "sysservices", "sysdescr"] + # Simple list of OIDs to get, in addition to "collect" + get_oids = [] + [[inputs.snmp.host]] + address = "192.168.2.3:161" + community = "public" + version = 2 + timeout = 2.0 + retries = 2 + collect = ["mybulk"] + get_oids = [ + "ifNumber", + ".1.3.6.1.2.1.1.3.0", + ] + [[inputs.snmp.get]] + name = "ifnumber" + oid = "ifNumber" + [[inputs.snmp.get]] + name = "interface_speed" + oid = "ifSpeed" + instance = "0" + [[inputs.snmp.get]] + name = "sysuptime" + oid = ".1.3.6.1.2.1.1.3.0" + unit = "second" + [[inputs.snmp.bulk]] + name = "mybulk" + max_repetition = 127 + oid = ".1.3.6.1.2.1.1" + [[inputs.snmp.bulk]] + name = "ifoutoctets" + max_repetition = 127 + oid = "ifOutOctets" + [[inputs.snmp.host]] + address = "192.168.2.13:161" + #address = "127.0.0.1:161" + community = "public" + version = 2 + timeout = 2.0 + retries = 2 + #collect = ["mybulk", "sysservices", "sysdescr", "systype"] + collect = ["sysuptime" ] + [[inputs.snmp.host.table]] + name = "iftable3" + include_instances = ["enp5s0", "eth1"] + # SNMP TABLEs + # table without mapping neither subtables + [[inputs.snmp.table]] + name = "iftable1" + oid = ".1.3.6.1.2.1.31.1.1.1" + # table without mapping but with subtables + [[inputs.snmp.table]] + name = "iftable2" + oid = ".1.3.6.1.2.1.31.1.1.1" + sub_tables = [".1.3.6.1.2.1.2.2.1.13"] + # table with mapping but without subtables + [[inputs.snmp.table]] + name = "iftable3" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty. 
get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty, get all subtables + # table with both mapping and subtables + [[inputs.snmp.table]] + name = "iftable4" + oid = ".1.3.6.1.2.1.31.1.1.1" + # if empty get all instances + mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" + # if empty get all subtables + # sub_tables could be not "real subtables" + sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 62a3966fa451a..0c0bbbf40d708 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -1,19 +1,25 @@ +//go:generate ../../../tools/readme_config_includer/generator package snmp_legacy import ( - "io/ioutil" + _ "embed" "log" "net" + "os" "strconv" "strings" "time" + "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" - - "github.com/soniah/gosnmp" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // Snmp is a snmp plugin type Snmp struct { Host []Host @@ -46,9 +52,9 @@ type Host struct { // Table Table []HostTable // Oids - getOids []Data - bulkOids []Data - tables []HostTable + internalGetOids []Data + bulkOids []Data + tables []HostTable // array of processed oids // to skip oid duplication processedOids []string @@ -102,7 +108,7 @@ type Data struct { // Unit Unit string // SNMP getbulk max repetition - MaxRepetition uint8 `toml:"max_repetition"` + MaxRepetition uint32 `toml:"max_repetition"` // SNMP Instance (default 0) // (only used with GET request and if // OID is a name from snmptranslate file) @@ -117,140 +123,27 @@ type Node struct { subnodes map[string]Node } -var sampleConfig = ` - ## Use 'oids.txt' file to translate oids to names - ## To generate 'oids.txt' you need to run: - ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt - ## Or if you have an other MIB folder with custom MIBs - ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt - snmptranslate_file = "/tmp/oids.txt" - [[inputs.snmp.host]] - address = "192.168.2.2:161" - # SNMP community - community = "public" # default public - # SNMP version (1, 2 or 3) - # Version 3 not supported yet - version = 2 # default 2 - # SNMP response timeout - timeout = 2.0 # default 2.0 - # SNMP request retries - retries = 2 # default 2 - # Which get/bulk do you want to collect for this host - collect = ["mybulk", "sysservices", "sysdescr"] - # Simple list of OIDs to get, in addition to "collect" - get_oids = [] - - [[inputs.snmp.host]] - address = "192.168.2.3:161" - community = "public" - version = 2 - timeout = 2.0 - retries = 2 - collect = ["mybulk"] - get_oids = [ - "ifNumber", - ".1.3.6.1.2.1.1.3.0", - ] - - [[inputs.snmp.get]] - name = "ifnumber" - oid = "ifNumber" - - [[inputs.snmp.get]] - name = "interface_speed" - oid = "ifSpeed" - instance = "0" - - [[inputs.snmp.get]] - name = "sysuptime" - oid = ".1.3.6.1.2.1.1.3.0" - unit = "second" - - [[inputs.snmp.bulk]] - name = "mybulk" - max_repetition = 127 - oid = ".1.3.6.1.2.1.1" - - [[inputs.snmp.bulk]] - name = "ifoutoctets" - max_repetition = 127 - oid = "ifOutOctets" - - [[inputs.snmp.host]] - address = "192.168.2.13:161" - #address = "127.0.0.1:161" - community = "public" - version = 2 - timeout = 2.0 - retries = 2 - #collect = ["mybulk", "sysservices", "sysdescr", "systype"] - collect = ["sysuptime" ] - 
[[inputs.snmp.host.table]] - name = "iftable3" - include_instances = ["enp5s0", "eth1"] - - # SNMP TABLEs - # table without mapping neither subtables - [[inputs.snmp.table]] - name = "iftable1" - oid = ".1.3.6.1.2.1.31.1.1.1" - - # table without mapping but with subtables - [[inputs.snmp.table]] - name = "iftable2" - oid = ".1.3.6.1.2.1.31.1.1.1" - sub_tables = [".1.3.6.1.2.1.2.2.1.13"] - - # table with mapping but without subtables - [[inputs.snmp.table]] - name = "iftable3" - oid = ".1.3.6.1.2.1.31.1.1.1" - # if empty. get all instances - mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" - # if empty, get all subtables - - # table with both mapping and subtables - [[inputs.snmp.table]] - name = "iftable4" - oid = ".1.3.6.1.2.1.31.1.1.1" - # if empty get all instances - mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" - # if empty get all subtables - # sub_tables could be not "real subtables" - sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] -` - -// SampleConfig returns sample configuration message -func (s *Snmp) SampleConfig() string { - return sampleConfig -} - -// Description returns description of Zookeeper plugin -func (s *Snmp) Description() string { - return `DEPRECATED! PLEASE USE inputs.snmp INSTEAD.` -} - -func fillnode(parentNode Node, oid_name string, ids []string) { +func fillnode(parentNode Node, oidName string, ids []string) { // ids = ["1", "3", "6", ...] id, ids := ids[0], ids[1:] node, ok := parentNode.subnodes[id] - if ok == false { + if !ok { node = Node{ id: id, name: "", subnodes: make(map[string]Node), } if len(ids) == 0 { - node.name = oid_name + node.name = oidName } parentNode.subnodes[id] = node } if len(ids) > 0 { - fillnode(node, oid_name, ids) + fillnode(node, oidName, ids) } } -func findnodename(node Node, ids []string) (string, string) { +func findNodeName(node Node, ids []string) (oidName string, instance string) { // ids = ["1", "3", "6", ...] 
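+	// Recursive descent over the translate tree: each call consumes one
+	// sub-identifier, and once a named node is reached the remaining ids
+	// form the instance suffix returned to the caller.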
if len(ids) == 1 { return node.name, ids[0] @@ -259,7 +152,7 @@ func findnodename(node Node, ids []string) (string, string) { // Get node subnode, ok := node.subnodes[id] if ok { - return findnodename(subnode, ids) + return findNodeName(subnode, ids) } // We got a node // Get node name @@ -268,7 +161,7 @@ func findnodename(node Node, ids []string) (string, string) { return node.name, "0" } else if node.name != "" && len(ids) == 0 && id != "0" { // node with an instance - return node.name, string(id) + return node.name, id } else if node.name != "" && len(ids) > 0 { // node with subinstances return node.name, strings.Join(ids, ".") @@ -277,6 +170,10 @@ func findnodename(node Node, ids []string) (string, string) { return node.name, "" } +func (*Snmp) SampleConfig() string { + return sampleConfig +} + func (s *Snmp) Gather(acc telegraf.Accumulator) error { // TODO put this in cache on first run // Create subtables mapping @@ -296,19 +193,19 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { subnodes: make(map[string]Node), } - data, err := ioutil.ReadFile(s.SnmptranslateFile) + data, err := os.ReadFile(s.SnmptranslateFile) if err != nil { s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error()) return err - } else { - for _, line := range strings.Split(string(data), "\n") { - oids := strings.Fields(string(line)) - if len(oids) == 2 && oids[1] != "" { - oid_name := oids[0] - oid := oids[1] - fillnode(s.initNode, oid_name, strings.Split(string(oid), ".")) - s.nameToOid[oid_name] = oid - } + } + + for _, line := range strings.Split(string(data), "\n") { + oids := strings.Fields(line) + if len(oids) == 2 && oids[1] != "" { + oidName := oids[0] + oid := oids[1] + fillnode(s.initNode, oidName, strings.Split(oid, ".")) + s.nameToOid[oidName] = oid } } } @@ -339,19 +236,19 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } else { oid.Name = oidstring oid.Oid = oidstring - if string(oidstring[:1]) != "." { + if oidstring[:1] != "." { oid.rawOid = "." + oidstring } else { oid.rawOid = oidstring } } - host.getOids = append(host.getOids, oid) + host.internalGetOids = append(host.internalGetOids, oid) } - for _, oid_name := range host.Collect { + for _, oidName := range host.Collect { // Get GET oids for _, oid := range s.Get { - if oid.Name == oid_name { + if oid.Name == oidName { if val, ok := s.nameToOid[oid.Oid]; ok { // TODO should we add the 0 instance ? if oid.Instance != "" { @@ -362,12 +259,12 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } else { oid.rawOid = oid.Oid } - host.getOids = append(host.getOids, oid) + host.internalGetOids = append(host.internalGetOids, oid) } } // Get GETBULK oids for _, oid := range s.Bulk { - if oid.Name == oid_name { + if oid.Name == oidName { if val, ok := s.nameToOid[oid.Oid]; ok { oid.rawOid = "." 
+ val } else { @@ -395,7 +292,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // to do it only the first time // only if len(s.OidInstanceMapping) == 0 if len(host.OidInstanceMapping) >= 0 { - if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { + if err := host.SNMPMap(s.nameToOid, s.subTableMap); err != nil { s.Log.Errorf("Mapping error for host %q: %s", host.Address, err.Error()) continue } @@ -412,7 +309,6 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { } func (h *Host) SNMPMap( - acc telegraf.Accumulator, nameToOid map[string]string, subTableMap map[string]Subtable, ) error { @@ -464,7 +360,7 @@ func (h *Host) SNMPMap( } // TODO check oid validity - // Add the new oid to getOids list + // Add the new oid to bulkOids list h.bulkOids = append(h.bulkOids, oid) } } @@ -473,15 +369,15 @@ func (h *Host) SNMPMap( // We need to query this table // To get mapping between instance id // and instance name - oid_asked := table.mappingTable - oid_next := oid_asked - need_more_requests := true + oidAsked := table.mappingTable + oidNext := oidAsked + needMoreRequests := true // Set max repetition - maxRepetition := uint8(32) + maxRepetition := uint32(32) // Launch requests - for need_more_requests { + for needMoreRequests { // Launch request - result, err3 := snmpClient.GetBulk([]string{oid_next}, 0, maxRepetition) + result, err3 := snmpClient.GetBulk([]string{oidNext}, 0, maxRepetition) if err3 != nil { return err3 } @@ -489,7 +385,7 @@ func (h *Host) SNMPMap( lastOid := "" for _, variable := range result.Variables { lastOid = variable.Name - if strings.HasPrefix(variable.Name, oid_asked) { + if strings.HasPrefix(variable.Name, oidAsked) { switch variable.Type { // handle instance names case gosnmp.OctetString: @@ -519,7 +415,7 @@ func (h *Host) SNMPMap( // remove oid table from the complete oid // in order to get the current instance id - key := strings.Replace(variable.Name, oid_asked, "", 1) + key := strings.Replace(variable.Name, oidAsked, "", 1) if len(table.subTables) == 0 { // We have a mapping table @@ -570,8 +466,8 @@ func (h *Host) SNMPMap( } // TODO check oid validity - // Add the new oid to getOids list - h.getOids = append(h.getOids, oid) + // Add the new oid to internalGetOids list + h.internalGetOids = append(h.internalGetOids, oid) } } default: @@ -581,11 +477,11 @@ func (h *Host) SNMPMap( } } // Determine if we need more requests - if strings.HasPrefix(lastOid, oid_asked) { - need_more_requests = true - oid_next = lastOid + if strings.HasPrefix(lastOid, oidAsked) { + needMoreRequests = true + oidNext = lastOid } else { - need_more_requests = false + needMoreRequests = false } } } @@ -607,7 +503,7 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { defer snmpClient.Conn.Close() // Prepare OIDs oidsList := make(map[string]Data) - for _, oid := range h.getOids { + for _, oid := range h.internalGetOids { oidsList[oid.rawOid] = oid } oidsNameList := make([]string, 0, len(oidsList)) @@ -617,15 +513,15 @@ func (h *Host) SNMPGet(acc telegraf.Accumulator, initNode Node) error { // gosnmp.MAX_OIDS == 60 // TODO use gosnmp.MAX_OIDS instead of hard coded value - max_oids := 60 + maxOids := 60 // limit 60 (MAX_OIDS) oids by requests - for i := 0; i < len(oidsList); i = i + max_oids { + for i := 0; i < len(oidsList); i = i + maxOids { // Launch request - max_index := i + max_oids - if i+max_oids > len(oidsList) { - max_index = len(oidsList) + maxIndex := i + maxOids + if i+maxOids > len(oidsList) { + maxIndex = len(oidsList) } - 
result, err3 := snmpClient.Get(oidsNameList[i:max_index]) // Get() accepts up to g.MAX_OIDS + result, err3 := snmpClient.Get(oidsNameList[i:maxIndex]) // Get() accepts up to g.MAX_OIDS if err3 != nil { return err3 } @@ -658,31 +554,31 @@ func (h *Host) SNMPBulk(acc telegraf.Accumulator, initNode Node) error { // TODO Trying to make requests with more than one OID // to reduce the number of requests for _, oid := range oidsNameList { - oid_asked := oid - need_more_requests := true + oidAsked := oid + needMoreRequests := true // Set max repetition maxRepetition := oidsList[oid].MaxRepetition if maxRepetition <= 0 { maxRepetition = 32 } // Launch requests - for need_more_requests { + for needMoreRequests { // Launch request result, err3 := snmpClient.GetBulk([]string{oid}, 0, maxRepetition) if err3 != nil { return err3 } // Handle response - last_oid, err := h.HandleResponse(oidsList, result, acc, initNode) + lastOid, err := h.HandleResponse(oidsList, result, acc, initNode) if err != nil { return err } // Determine if we need more requests - if strings.HasPrefix(last_oid, oid_asked) { - need_more_requests = true - oid = last_oid + if strings.HasPrefix(lastOid, oidAsked) { + needMoreRequests = true + oid = lastOid } else { - need_more_requests = false + needMoreRequests = false } } } @@ -700,16 +596,16 @@ func (h *Host) GetSNMPClient() (*gosnmp.GoSNMP, error) { version = gosnmp.Version2c } // Prepare host and port - host, port_str, err := net.SplitHostPort(h.Address) + host, portStr, err := net.SplitHostPort(h.Address) if err != nil { - port_str = string("161") + portStr = "161" } // convert port_str to port in uint16 - port_64, err := strconv.ParseUint(port_str, 10, 16) + port64, err := strconv.ParseUint(portStr, 10, 16) if err != nil { return nil, err } - port := uint16(port_64) + port := uint16(port64) // Get SNMP client snmpClient := &gosnmp.GoSNMP{ Target: host, @@ -739,7 +635,7 @@ func (h *Host) HandleResponse( lastOid = variable.Name nextresult: // Get only oid wanted - for oid_key, oid := range oids { + for oidKey, oid := range oids { // Skip oids already processed for _, processedOid := range h.processedOids { if variable.Name == processedOid { @@ -750,7 +646,7 @@ func (h *Host) HandleResponse( // OR // the result is SNMP table which "." comes right after oid_key. 
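+					// (i.e. the returned OID lies inside the subtree of oid_key, as for table cells)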
 					// ex: oid_key: .1.3.6.1.2.1.2.2.1.16, variable.Name: .1.3.6.1.2.1.2.2.1.16.1
-			if variable.Name == oid_key || strings.HasPrefix(variable.Name, oid_key+".") {
+			if variable.Name == oidKey || strings.HasPrefix(variable.Name, oidKey+".") {
 				switch variable.Type {
 				// handle Metrics
 				case gosnmp.Boolean, gosnmp.Integer, gosnmp.Counter32, gosnmp.Gauge32,
@@ -761,19 +657,19 @@ func (h *Host) HandleResponse(
 						tags["unit"] = oid.Unit
 					}
 					// Get name and instance
-					var oid_name string
+					var oidName string
 					var instance string
 					// Get oidname and instance from translate file
-					oid_name, instance = findnodename(initNode,
-						strings.Split(string(variable.Name[1:]), "."))
+					oidName, instance = findNodeName(initNode,
+						strings.Split(variable.Name[1:], "."))
 					// Set instance tag
 					// From mapping table
-					mapping, inMappingNoSubTable := h.OidInstanceMapping[oid_key]
+					mapping, inMappingNoSubTable := h.OidInstanceMapping[oidKey]
 					if inMappingNoSubTable {
 						// filter if the instance is not in
 						// OidInstanceMapping mapping map
-						if instance_name, exists := mapping[instance]; exists {
-							tags["instance"] = instance_name
+						if instanceName, exists := mapping[instance]; exists {
+							tags["instance"] = instanceName
 						} else {
 							continue
 						}
@@ -788,24 +684,24 @@ func (h *Host) HandleResponse(
 					}
 
 					// Set name
-					var field_name string
-					if oid_name != "" {
+					var fieldName string
+					if oidName != "" {
 						// Set fieldname as oid name from translate file
-						field_name = oid_name
+						fieldName = oidName
 					} else {
 						// Set fieldname as oid name from inputs.snmp.get section
 						// Because the result oid is equal to inputs.snmp.get section
-						field_name = oid.Name
+						fieldName = oid.Name
 					}
 					tags["snmp_host"], _, _ = net.SplitHostPort(h.Address)
 					fields := make(map[string]interface{})
-					fields[string(field_name)] = variable.Value
+					fields[fieldName] = variable.Value
 
 					h.processedOids = append(h.processedOids, variable.Name)
-					acc.AddFields(field_name, fields, tags)
+					acc.AddFields(fieldName, fields, tags)
 				case gosnmp.NoSuchObject, gosnmp.NoSuchInstance:
 					// Oid not found
-					log.Printf("E! [inputs.snmp_legacy] oid %q not found", oid_key)
+					log.Printf("E! [inputs.snmp_legacy] oid %q not found", oidKey)
 				default:
 					// delete other data
 				}
diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md
index 0680376c400db..67f3fec0ef954 100644
--- a/plugins/inputs/snmp_trap/README.md
+++ b/plugins/inputs/snmp_trap/README.md
@@ -6,22 +6,15 @@ notifications (traps and inform requests).
 
 Notifications are received on plain UDP. The port to listen on is
 configurable.
 
-### Prerequisites
-
-This plugin uses the `snmptranslate` programs from the
-[net-snmp][] project. These tools will need to be installed into the `PATH` in
-order to be located. Other utilities from the net-snmp project may be useful
-for troubleshooting, but are not directly used by the plugin.
-
-These programs will load available MIBs on the system. Typically the default
-directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a
-different location you may need to make the paths known to net-snmp. The
-location of these files can be configured in the `snmp.conf` or via the
-`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more
-information.
-
-### Configuration
-```toml
+## Note about Paths
+
+Path is a global variable; separate snmp instances will append the specified
+path onto the global path variable.
+
+## Configuration
+
+```toml @sample.conf
+# Receive SNMP traps
 [[inputs.snmp_trap]]
   ## Transport, local address, and port to listen on.
Transport must ## be "udp://". Omit local address to listen on all interfaces. @@ -31,6 +24,13 @@ information. ## 1024. See README.md for details ## # service_address = "udp://:162" + ## + ## Path to mib files + ## Used by the gosmi translator. + ## To add paths when translating with netsnmp, use the MIBDIRS environment variable + # path = ["/usr/share/snmp/mibs"] + ## + ## Deprecated in 1.20.0; no longer running snmptranslate ## Timeout running snmptranslate command # timeout = "5s" ## Snmp version @@ -51,7 +51,7 @@ information. # priv_password = "" ``` -#### Using a Privileged Port +### Using a Privileged Port On many operating systems, listening on a privileged port (a port number less than 1024) requires extra permission. Since the default @@ -69,32 +69,33 @@ the privileged port. To use a privileged port on Linux, you can use setcap to enable the CAP_NET_BIND_SERVICE capability on the telegraf binary: -``` +```shell setcap cap_net_bind_service=+ep /usr/bin/telegraf ``` On Mac OS, listening on privileged ports is unrestricted on versions 10.14 and later. -### Metrics +## Metrics - snmp_trap - tags: - - source (string, IP address of trap source) - - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) - - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) - - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) - - version (string, "1" or "2c" or "3") - - context_name (string, value from v3 trap) - - engine_id (string, value from v3 trap) - - community (string, value from 1 or 2c trap) + - source (string, IP address of trap source) + - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) + - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) + - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) + - version (string, "1" or "2c" or "3") + - context_name (string, value from v3 trap) + - engine_id (string, value from v3 trap) + - community (string, value from 1 or 2c trap) - fields: - - Fields are mapped from variables in the trap. Field names are + - Fields are mapped from variables in the trap. Field names are the trap variable names after MIB lookup. Field values are trap variable values. 
-### Example Output -``` +## Example Output + +```shell snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c,community=public snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c,community=public sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 ``` diff --git a/plugins/inputs/snmp_trap/gosmi.go b/plugins/inputs/snmp_trap/gosmi.go new file mode 100644 index 0000000000000..7acc8201ef866 --- /dev/null +++ b/plugins/inputs/snmp_trap/gosmi.go @@ -0,0 +1,21 @@ +package snmp_trap + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/snmp" +) + +type gosmiTranslator struct { +} + +func (t *gosmiTranslator) lookup(oid string) (snmp.MibEntry, error) { + return snmp.TrapLookup(oid) +} + +func newGosmiTranslator(paths []string, log telegraf.Logger) (*gosmiTranslator, error) { + err := snmp.LoadMibsFromPath(paths, log, &snmp.GosmiMibLoader{}) + if err == nil { + return &gosmiTranslator{}, nil + } + return nil, err +} diff --git a/plugins/inputs/snmp_trap/netsnmp.go b/plugins/inputs/snmp_trap/netsnmp.go new file mode 100644 index 0000000000000..25a5ba3e0a3c3 --- /dev/null +++ b/plugins/inputs/snmp_trap/netsnmp.go @@ -0,0 +1,89 @@ +package snmp_trap + +import ( + "bufio" + "bytes" + "fmt" + "os/exec" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/snmp" +) + +type execer func(config.Duration, string, ...string) ([]byte, error) + +func realExecCmd(timeout config.Duration, arg0 string, args ...string) ([]byte, error) { + cmd := exec.Command(arg0, args...) + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return nil, err + } + return out.Bytes(), nil +} + +type netsnmpTranslator struct { + // Each translator has its own cache and each plugin instance has + // its own translator. This is different than the snmp plugin + // which has one global cache. + // + // We may want to change snmp_trap to + // have a global cache although it's not as important for + // snmp_trap to be global because there is usually only one + // instance, while it's common to configure many snmp instances. + cacheLock sync.Mutex + cache map[string]snmp.MibEntry + execCmd execer + Timeout config.Duration +} + +func (s *netsnmpTranslator) lookup(oid string) (e snmp.MibEntry, err error) { + s.cacheLock.Lock() + defer s.cacheLock.Unlock() + var ok bool + if e, ok = s.cache[oid]; !ok { + // cache miss. 
exec snmptranslate + e, err = s.snmptranslate(oid) + if err == nil { + s.cache[oid] = e + } + return e, err + } + return e, nil +} + +func (s *netsnmpTranslator) snmptranslate(oid string) (e snmp.MibEntry, err error) { + var out []byte + out, err = s.execCmd(s.Timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid) + + if err != nil { + return e, err + } + + scanner := bufio.NewScanner(bytes.NewBuffer(out)) + ok := scanner.Scan() + if err = scanner.Err(); !ok && err != nil { + return e, err + } + + e.OidText = scanner.Text() + + i := strings.Index(e.OidText, "::") + if i == -1 { + return e, fmt.Errorf("not found") + } + e.MibName = e.OidText[:i] + e.OidText = e.OidText[i+2:] + return e, nil +} + +func newNetsnmpTranslator() *netsnmpTranslator { + return &netsnmpTranslator{ + execCmd: realExecCmd, + } +} diff --git a/plugins/inputs/snmp_trap/sample.conf b/plugins/inputs/snmp_trap/sample.conf new file mode 100644 index 0000000000000..a9aadde84564b --- /dev/null +++ b/plugins/inputs/snmp_trap/sample.conf @@ -0,0 +1,35 @@ +# Receive SNMP traps +[[inputs.snmp_trap]] + ## Transport, local address, and port to listen on. Transport must + ## be "udp://". Omit local address to listen on all interfaces. + ## example: "udp://127.0.0.1:1234" + ## + ## Special permissions may be required to listen on a port less than + ## 1024. See README.md for details + ## + # service_address = "udp://:162" + ## + ## Path to mib files + ## Used by the gosmi translator. + ## To add paths when translating with netsnmp, use the MIBDIRS environment variable + # path = ["/usr/share/snmp/mibs"] + ## + ## Deprecated in 1.20.0; no longer running snmptranslate + ## Timeout running snmptranslate command + # timeout = "5s" + ## Snmp version + # version = "2c" + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA" or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index d380d582bad66..05cc7b319e9bb 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -1,37 +1,36 @@ +//go:generate ../../../tools/readme_config_includer/generator package snmp_trap import ( - "bufio" - "bytes" + _ "embed" "fmt" "net" - "os/exec" "strconv" "strings" - "sync" "time" + "github.com/gosnmp/gosnmp" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/plugins/inputs" - - "github.com/soniah/gosnmp" ) -var defaultTimeout = internal.Duration{Duration: time.Second * 5} - -type handler func(*gosnmp.SnmpPacket, *net.UDPAddr) -type execer func(internal.Duration, string, ...string) ([]byte, error) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string -type mibEntry struct { - mibName string - oidText string +type translator interface { + lookup(oid string) (snmp.MibEntry, error) } type SnmpTrap struct { - ServiceAddress string `toml:"service_address"` - Timeout internal.Duration `toml:"timeout"` - Version string `toml:"version"` + ServiceAddress string `toml:"service_address"` + Timeout config.Duration `toml:"timeout" deprecated:"1.20.0;unused option"` + Version string `toml:"version"` + Translator string `toml:"-"` + Path []string `toml:"path"` // Settings for version 3 // Values: "noAuthNoPriv", "authNoPriv", "authPriv" @@ -49,53 +48,17 @@ type SnmpTrap struct { timeFunc func() time.Time errCh chan error - makeHandlerWrapper func(handler) handler + makeHandlerWrapper func(gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc Log telegraf.Logger `toml:"-"` - cacheLock sync.Mutex - cache map[string]mibEntry - - execCmd execer + translator translator //nolint:revive } -var sampleConfig = ` - ## Transport, local address, and port to listen on. Transport must - ## be "udp://". Omit local address to listen on all interfaces. - ## example: "udp://127.0.0.1:1234" - ## - ## Special permissions may be required to listen on a port less than - ## 1024. See README.md for details - ## - # service_address = "udp://:162" - ## Timeout running snmptranslate command - # timeout = "5s" - ## Snmp version, defaults to 2c - # version = "2c" - ## SNMPv3 authentication and encryption options. - ## - ## Security Name. - # sec_name = "myuser" - ## Authentication protocol; one of "MD5", "SHA" or "". - # auth_protocol = "MD5" - ## Authentication password. - # auth_password = "pass" - ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". - # sec_level = "authNoPriv" - ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". - # priv_protocol = "" - ## Privacy password used for encrypted messages. - # priv_password = "" -` - -func (s *SnmpTrap) SampleConfig() string { +func (*SnmpTrap) SampleConfig() string { return sampleConfig } -func (s *SnmpTrap) Description() string { - return "Receive SNMP traps" -} - func (s *SnmpTrap) Gather(_ telegraf.Accumulator) error { return nil } @@ -105,26 +68,33 @@ func init() { return &SnmpTrap{ timeFunc: time.Now, ServiceAddress: "udp://:162", - Timeout: defaultTimeout, + Path: []string{"/usr/share/snmp/mibs"}, Version: "2c", } }) } -func realExecCmd(Timeout internal.Duration, arg0 string, args ...string) ([]byte, error) { - cmd := exec.Command(arg0, args...) 
- var out bytes.Buffer - cmd.Stdout = &out - err := internal.RunTimeout(cmd, Timeout.Duration) - if err != nil { - return nil, err - } - return out.Bytes(), nil +func (s *SnmpTrap) SetTranslator(name string) { + s.Translator = name } func (s *SnmpTrap) Init() error { - s.cache = map[string]mibEntry{} - s.execCmd = realExecCmd + var err error + switch s.Translator { + case "gosmi": + s.translator, err = newGosmiTranslator(s.Path, s.Log) + if err != nil { + return err + } + case "netsnmp": + s.translator = newNetsnmpTranslator() + default: + return fmt.Errorf("invalid translator value") + } + + if err != nil { + s.Log.Errorf("Could not get path %v", err) + } return nil } @@ -206,7 +176,6 @@ func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { AuthenticationPassphrase: s.AuthPassword, AuthenticationProtocol: authenticationProtocol, } - } // wrap the handler, used in unit tests @@ -255,13 +224,13 @@ func (s *SnmpTrap) Stop() { } } -func setTrapOid(tags map[string]string, oid string, e mibEntry) { +func setTrapOid(tags map[string]string, oid string, e snmp.MibEntry) { tags["oid"] = oid - tags["name"] = e.oidText - tags["mib"] = e.mibName + tags["name"] = e.OidText + tags["mib"] = e.MibName } -func makeTrapHandler(s *SnmpTrap) handler { +func makeTrapHandler(s *SnmpTrap) gosnmp.TrapHandlerFunc { return func(packet *gosnmp.SnmpPacket, addr *net.UDPAddr) { tm := s.timeFunc() fields := map[string]interface{}{} @@ -282,9 +251,9 @@ func makeTrapHandler(s *SnmpTrap) handler { } if trapOid != "" { - e, err := s.lookup(trapOid) + e, err := s.translator.lookup(trapOid) if err != nil { - s.Log.Errorf("Error resolving V1 OID: %v", err) + s.Log.Errorf("Error resolving V1 OID, oid=%s, source=%s: %v", trapOid, tags["source"], err) return } setTrapOid(tags, trapOid, e) @@ -318,15 +287,15 @@ func makeTrapHandler(s *SnmpTrap) handler { return } - var e mibEntry + var e snmp.MibEntry var err error - e, err = s.lookup(val) + e, err = s.translator.lookup(val) if nil != err { - s.Log.Errorf("Error resolving value OID: %v", err) + s.Log.Errorf("Error resolving value OID, oid=%s, source=%s: %v", val, tags["source"], err) return } - value = e.oidText + value = e.OidText // 1.3.6.1.6.3.1.1.4.1.0 is SNMPv2-MIB::snmpTrapOID.0. // If v.Name is this oid, set a tag of the trap name. @@ -338,13 +307,13 @@ func makeTrapHandler(s *SnmpTrap) handler { value = v.Value } - e, err := s.lookup(v.Name) + e, err := s.translator.lookup(v.Name) if nil != err { - s.Log.Errorf("Error resolving OID: %v", err) + s.Log.Errorf("Error resolving OID oid=%s, source=%s: %v", v.Name, tags["source"], err) return } - name := e.oidText + name := e.OidText fields[name] = value } @@ -366,55 +335,3 @@ func makeTrapHandler(s *SnmpTrap) handler { s.acc.AddFields("snmp_trap", fields, tags, tm) } } - -func (s *SnmpTrap) lookup(oid string) (e mibEntry, err error) { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - var ok bool - if e, ok = s.cache[oid]; !ok { - // cache miss. 
exec snmptranslate - e, err = s.snmptranslate(oid) - if err == nil { - s.cache[oid] = e - } - return e, err - } - return e, nil -} - -func (s *SnmpTrap) clear() { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - s.cache = map[string]mibEntry{} -} - -func (s *SnmpTrap) load(oid string, e mibEntry) { - s.cacheLock.Lock() - defer s.cacheLock.Unlock() - s.cache[oid] = e -} - -func (s *SnmpTrap) snmptranslate(oid string) (e mibEntry, err error) { - var out []byte - out, err = s.execCmd(s.Timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid) - - if err != nil { - return e, err - } - - scanner := bufio.NewScanner(bytes.NewBuffer(out)) - ok := scanner.Scan() - if err = scanner.Err(); !ok && err != nil { - return e, err - } - - e.oidText = scanner.Text() - - i := strings.Index(e.oidText, "::") - if i == -1 { - return e, fmt.Errorf("not found") - } - e.mibName = e.oidText[:i] - e.oidText = e.oidText[i+2:] - return e, nil -} diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index b5f8da27aa7b3..a3a16f0de97bb 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -8,152 +8,149 @@ import ( "testing" "time" - "github.com/soniah/gosnmp" + "github.com/gosnmp/gosnmp" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/snmp" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/require" ) -func TestLoad(t *testing.T) { - s := &SnmpTrap{} - require.Nil(t, s.Init()) +type entry struct { + oid string + e snmp.MibEntry +} - defer s.clear() - s.load( - ".1.3.6.1.6.3.1.1.5.1", - mibEntry{ - "SNMPv2-MIB", - "coldStart", - }, - ) +type testTranslator struct { + entries []entry +} - e, err := s.lookup(".1.3.6.1.6.3.1.1.5.1") - require.NoError(t, err) - require.Equal(t, "SNMPv2-MIB", e.mibName) - require.Equal(t, "coldStart", e.oidText) +func (t *testTranslator) lookup(input string) (snmp.MibEntry, error) { + for _, entry := range t.entries { + if input == entry.oid { + return snmp.MibEntry{MibName: entry.e.MibName, OidText: entry.e.OidText}, nil + } + } + return snmp.MibEntry{}, fmt.Errorf("unexpected oid") } -func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) { - return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) +func newTestTranslator(entries []entry) *testTranslator { + return &testTranslator{entries: entries} } -func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion, secLevel string, username string, authProto string, authPass string, privProto string, privPass string, contextName string, engineID string) { - var s gosnmp.GoSNMP +func newMsgFlagsV3(secLevel string) gosnmp.SnmpV3MsgFlags { + var msgFlags gosnmp.SnmpV3MsgFlags + switch strings.ToLower(secLevel) { + case "noauthnopriv", "": + msgFlags = gosnmp.NoAuthNoPriv + case "authnopriv": + msgFlags = gosnmp.AuthNoPriv + case "authpriv": + msgFlags = gosnmp.AuthPriv + default: + msgFlags = gosnmp.NoAuthNoPriv + } - if version == gosnmp.Version3 { - var msgFlags gosnmp.SnmpV3MsgFlags - switch strings.ToLower(secLevel) { - case "noauthnopriv", "": - msgFlags = gosnmp.NoAuthNoPriv - case "authnopriv": - msgFlags = gosnmp.AuthNoPriv - case "authpriv": - msgFlags = gosnmp.AuthPriv - default: - msgFlags = gosnmp.NoAuthNoPriv - } + return msgFlags +} - var authenticationProtocol gosnmp.SnmpV3AuthProtocol - switch strings.ToLower(authProto) { - 
case "md5": - authenticationProtocol = gosnmp.MD5 - case "sha": - authenticationProtocol = gosnmp.SHA - //case "sha224": - // authenticationProtocol = gosnmp.SHA224 - //case "sha256": - // authenticationProtocol = gosnmp.SHA256 - //case "sha384": - // authenticationProtocol = gosnmp.SHA384 - //case "sha512": - // authenticationProtocol = gosnmp.SHA512 - case "": - authenticationProtocol = gosnmp.NoAuth - default: - authenticationProtocol = gosnmp.NoAuth - } +func newUsmSecurityParametersForV3(authProto string, privProto string, username string, privPass string, authPass string) *gosnmp.UsmSecurityParameters { + var authenticationProtocol gosnmp.SnmpV3AuthProtocol + switch strings.ToLower(authProto) { + case "md5": + authenticationProtocol = gosnmp.MD5 + case "sha": + authenticationProtocol = gosnmp.SHA + //case "sha224": + // authenticationProtocol = gosnmp.SHA224 + //case "sha256": + // authenticationProtocol = gosnmp.SHA256 + //case "sha384": + // authenticationProtocol = gosnmp.SHA384 + //case "sha512": + // authenticationProtocol = gosnmp.SHA512 + case "": + authenticationProtocol = gosnmp.NoAuth + default: + authenticationProtocol = gosnmp.NoAuth + } - var privacyProtocol gosnmp.SnmpV3PrivProtocol - switch strings.ToLower(privProto) { - case "aes": - privacyProtocol = gosnmp.AES - case "des": - privacyProtocol = gosnmp.DES - case "aes192": - privacyProtocol = gosnmp.AES192 - case "aes192c": - privacyProtocol = gosnmp.AES192C - case "aes256": - privacyProtocol = gosnmp.AES256 - case "aes256c": - privacyProtocol = gosnmp.AES256C - case "": - privacyProtocol = gosnmp.NoPriv - default: - privacyProtocol = gosnmp.NoPriv - } + var privacyProtocol gosnmp.SnmpV3PrivProtocol + switch strings.ToLower(privProto) { + case "aes": + privacyProtocol = gosnmp.AES + case "des": + privacyProtocol = gosnmp.DES + case "aes192": + privacyProtocol = gosnmp.AES192 + case "aes192c": + privacyProtocol = gosnmp.AES192C + case "aes256": + privacyProtocol = gosnmp.AES256 + case "aes256c": + privacyProtocol = gosnmp.AES256C + case "": + privacyProtocol = gosnmp.NoPriv + default: + privacyProtocol = gosnmp.NoPriv + } - sp := &gosnmp.UsmSecurityParameters{ - AuthoritativeEngineID: "1", - AuthoritativeEngineBoots: 1, - AuthoritativeEngineTime: 1, - UserName: username, - PrivacyProtocol: privacyProtocol, - PrivacyPassphrase: privPass, - AuthenticationPassphrase: authPass, - AuthenticationProtocol: authenticationProtocol, - } - s = gosnmp.GoSNMP{ - Port: port, - Version: version, - Timeout: time.Duration(2) * time.Second, - Retries: 1, - MaxOids: gosnmp.MaxOids, - Target: "127.0.0.1", - SecurityParameters: sp, - SecurityModel: gosnmp.UserSecurityModel, - MsgFlags: msgFlags, - ContextName: contextName, - ContextEngineID: engineID, - } - } else { - s = gosnmp.GoSNMP{ - Port: port, - Version: version, - Timeout: time.Duration(2) * time.Second, - Retries: 1, - MaxOids: gosnmp.MaxOids, - Target: "127.0.0.1", - Community: "public", - } + return &gosnmp.UsmSecurityParameters{ + AuthoritativeEngineID: "1", + AuthoritativeEngineBoots: 1, + AuthoritativeEngineTime: 1, + UserName: username, + PrivacyProtocol: privacyProtocol, + PrivacyPassphrase: privPass, + AuthenticationPassphrase: authPass, + AuthenticationProtocol: authenticationProtocol, + } +} + +func newGoSNMPV3(port uint16, contextName string, engineID string, msgFlags gosnmp.SnmpV3MsgFlags, sp *gosnmp.UsmSecurityParameters) gosnmp.GoSNMP { + return gosnmp.GoSNMP{ + Port: port, + Version: gosnmp.Version3, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + 
MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + SecurityParameters: sp, + SecurityModel: gosnmp.UserSecurityModel, + MsgFlags: msgFlags, + ContextName: contextName, + ContextEngineID: engineID, + } +} + +func newGoSNMP(version gosnmp.SnmpVersion, port uint16) gosnmp.GoSNMP { + return gosnmp.GoSNMP{ + Port: port, + Version: version, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + Community: "public", } +} - err := s.Connect() +func sendTrap(t *testing.T, goSNMP gosnmp.GoSNMP, trap gosnmp.SnmpTrap) { + err := goSNMP.Connect() if err != nil { t.Errorf("Connect() err: %v", err) } - defer s.Conn.Close() + defer goSNMP.Conn.Close() - _, err = s.SendTrap(trap) + _, err = goSNMP.SendTrap(trap) if err != nil { t.Errorf("SendTrap() err: %v", err) } } func TestReceiveTrap(t *testing.T) { - var now uint32 - now = 123123123 - - var fakeTime time.Time - fakeTime = time.Unix(456456456, 456) - - type entry struct { - oid string - e mibEntry - } + now := uint32(123123123) + fakeTime := time.Unix(456456456, 456) // If the first pdu isn't type TimeTicks, gosnmp.SendTrap() will // prepend one with time.Now() @@ -200,23 +197,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -283,16 +280,16 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { ".1.2.3.4.5", - mibEntry{ - "valueMIB", - "valueOID", + snmp.MibEntry{ + MibName: "valueMIB", + OidText: "valueOID", }, }, { ".1.2.3.0.55", - mibEntry{ - "enterpriseMIB", - "enterpriseOID", + snmp.MibEntry{ + MibName: "enterpriseMIB", + OidText: "enterpriseOID", }, }, }, @@ -337,16 +334,16 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { ".1.2.3.4.5", - mibEntry{ - "valueMIB", - "valueOID", + snmp.MibEntry{ + MibName: "valueMIB", + OidText: "valueOID", }, }, { ".1.3.6.1.6.3.1.1.5.1", - mibEntry{ - "coldStartMIB", - "coldStartOID", + snmp.MibEntry{ + MibName: "coldStartMIB", + OidText: "coldStartOID", }, }, }, @@ -395,23 +392,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -459,23 +456,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + 
OidText: "sysUpTimeInstance", }, }, }, @@ -522,23 +519,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -584,23 +581,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -646,23 +643,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -708,23 +705,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -770,23 +767,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -832,23 +829,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -896,23 +893,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: 
".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -960,23 +957,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1024,23 +1021,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1088,23 +1085,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1152,23 +1149,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1216,23 +1213,23 @@ func TestReceiveTrap(t *testing.T) { entries: []entry{ { oid: ".1.3.6.1.6.3.1.1.4.1.0", - e: mibEntry{ - "SNMPv2-MIB", - "snmpTrapOID.0", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "snmpTrapOID.0", }, }, { oid: ".1.3.6.1.6.3.1.1.5.1", - e: mibEntry{ - "SNMPv2-MIB", - "coldStart", + e: snmp.MibEntry{ + MibName: "SNMPv2-MIB", + OidText: "coldStart", }, }, { oid: ".1.3.6.1.2.1.1.3.0", - e: mibEntry{ - "UNUSED_MIB_NAME", - "sysUpTimeInstance", + e: snmp.MibEntry{ + MibName: "UNUSED_MIB_NAME", + OidText: "sysUpTimeInstance", }, }, }, @@ -1266,7 +1263,7 @@ func TestReceiveTrap(t *testing.T) { // Hook into the trap handler so the test knows when the // trap has been received received := make(chan int) - wrap := func(f handler) handler { + 
wrap := func(f gosnmp.TrapHandlerFunc) gosnmp.TrapHandlerFunc { return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { f(p, a) received <- 0 @@ -1280,6 +1277,7 @@ func TestReceiveTrap(t *testing.T) { timeFunc: func() time.Time { return fakeTime }, + //if cold start be answer otherwise err Log: testutil.Logger{}, Version: tt.version.String(), SecName: tt.secName, @@ -1288,22 +1286,29 @@ func TestReceiveTrap(t *testing.T) { AuthPassword: tt.authPass, PrivProtocol: tt.privProto, PrivPassword: tt.privPass, + Translator: "netsnmp", } - require.Nil(t, s.Init()) - // Don't look up oid with snmptranslate. - s.execCmd = fakeExecCmd + + require.NoError(t, s.Init()) + + //inject test translator + s.translator = newTestTranslator(tt.entries) + var acc testutil.Accumulator require.Nil(t, s.Start(&acc)) defer s.Stop() - // Preload the cache with the oids we'll use in this test - // so snmptranslate and mibs don't need to be installed. - for _, entry := range tt.entries { - s.load(entry.oid, entry.e) + var goSNMP gosnmp.GoSNMP + if tt.version == gosnmp.Version3 { + msgFlags := newMsgFlagsV3(tt.secLevel) + sp := newUsmSecurityParametersForV3(tt.authProto, tt.privProto, tt.secName, tt.privPass, tt.authPass) + goSNMP = newGoSNMPV3(port, tt.contextName, tt.engineID, msgFlags, sp) + } else { + goSNMP = newGoSNMP(tt.version, port) } // Send the trap - sendTrap(t, port, now, tt.trap, tt.version, tt.secLevel, tt.secName, tt.authProto, tt.authPass, tt.privProto, tt.privPass, tt.contextName, tt.engineID) + sendTrap(t, goSNMP, tt.trap) // Wait for trap to be received select { @@ -1318,5 +1323,4 @@ func TestReceiveTrap(t *testing.T) { testutil.SortMetrics()) }) } - } diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md index f5189a195af9d..92c7351dfd329 100644 --- a/plugins/inputs/socket_listener/README.md +++ b/plugins/inputs/socket_listener/README.md @@ -3,14 +3,12 @@ The Socket Listener is a service input plugin that listens for messages from streaming (tcp, unix) or datagram (udp, unixgram) protocols. -The plugin expects messages in the -[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +The plugin expects messages in the [Telegraf Input Data +Formats](../../../docs/DATA_FORMATS_INPUT.md). -### Configuration: +## Configuration -This is a sample configuration for the plugin. - -```toml +```toml @sample.conf # Generic socket listener capable of handling multiple socket types. [[inputs.socket_listener]] ## URL to listen on @@ -74,25 +72,25 @@ This is a sample configuration for the plugin. ## A Note on UDP OS Buffer Sizes -The `read_buffer_size` config option can be used to adjust the size of the socket -buffer, but this number is limited by OS settings. On Linux, `read_buffer_size` -will default to `rmem_default` and will be capped by `rmem_max`. On BSD systems, -`read_buffer_size` is capped by `maxsockbuf`, and there is no OS default -setting. +The `read_buffer_size` config option can be used to adjust the size of the +socket buffer, but this number is limited by OS settings. On Linux, +`read_buffer_size` will default to `rmem_default` and will be capped by +`rmem_max`. On BSD systems, `read_buffer_size` is capped by `maxsockbuf`, and +there is no OS default setting. Instructions on how to adjust these OS settings are available below. Some OSes (most notably, Linux) place very restrictive limits on the performance -of UDP protocols. 
It is _highly_ recommended that you increase these OS limits to -at least 8MB before trying to run large amounts of UDP traffic to your instance. -8MB is just a recommendation, and can be adjusted higher. +of UDP protocols. It is _highly_ recommended that you increase these OS limits +to at least 8MB before trying to run large amounts of UDP traffic to your +instance. 8MB is just a recommendation, and can be adjusted higher. ### Linux Check the current UDP/IP receive buffer limit & default by typing the following commands: -``` +```sh sysctl net.core.rmem_max sysctl net.core.rmem_default ``` @@ -100,7 +98,7 @@ sysctl net.core.rmem_default If the values are less than 8388608 bytes you should add the following lines to the /etc/sysctl.conf file: -``` +```text net.core.rmem_max=8388608 net.core.rmem_default=8388608 ``` @@ -108,7 +106,7 @@ net.core.rmem_default=8388608 Changes to /etc/sysctl.conf do not take effect until reboot. To update the values immediately, type the following commands as root: -``` +```sh sysctl -w net.core.rmem_max=8388608 sysctl -w net.core.rmem_default=8388608 ``` @@ -117,26 +115,27 @@ sysctl -w net.core.rmem_default=8388608 On BSD/Darwin systems you need to add about a 15% padding to the kernel limit socket buffer. Meaning if you want an 8MB buffer (8388608 bytes) you need to set -the kernel limit to `8388608*1.15 = 9646900`. This is not documented anywhere but -happens -[in the kernel here.](https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64) +the kernel limit to `8388608*1.15 = 9646900`. This is not documented anywhere +but can be seen [in the kernel source code][1]. Check the current UDP/IP buffer limit by typing the following command: -``` +```sh sysctl kern.ipc.maxsockbuf ``` If the value is less than 9646900 bytes you should add the following lines to the /etc/sysctl.conf file (create it if necessary): -``` +```text kern.ipc.maxsockbuf=9646900 ``` Changes to /etc/sysctl.conf do not take effect until reboot. To update the values immediately, type the following command as root: -``` +```sh sysctl -w kern.ipc.maxsockbuf=9646900 ``` + +[1]: https://github.com/freebsd/freebsd/blob/master/sys/kern/uipc_sockbuf.c#L63-L64 diff --git a/plugins/inputs/socket_listener/sample.conf b/plugins/inputs/socket_listener/sample.conf new file mode 100644 index 0000000000000..ce297a5b5d1ef --- /dev/null +++ b/plugins/inputs/socket_listener/sample.conf @@ -0,0 +1,59 @@ +# Generic socket listener capable of handling multiple socket types. +[[inputs.socket_listener]] + ## URL to listen on + # service_address = "tcp://:8094" + # service_address = "tcp://127.0.0.1:http" + # service_address = "tcp4://:8094" + # service_address = "tcp6://:8094" + # service_address = "tcp6://[2001:db8::1]:8094" + # service_address = "udp://:8094" + # service_address = "udp4://:8094" + # service_address = "udp6://:8094" + # service_address = "unix:///tmp/telegraf.sock" + # service_address = "unixgram:///tmp/telegraf.sock" + + ## Change the file mode bits on unix sockets. These permissions may not be + ## respected by some platforms, to safely restrict write permissions it is best + ## to place the socket into a directory that has previously been created + ## with the desired permissions. + ## ex: socket_mode = "777" + # socket_mode = "" + + ## Maximum number of concurrent connections. + ## Only applies to stream sockets (e.g. TCP). + ## 0 (default) is unlimited. + # max_connections = 1024 + + ## Read timeout. + ## Only applies to stream sockets (e.g. TCP). 
+ ## 0 (default) is unlimited. + # read_timeout = "30s" + + ## Optional TLS configuration. + ## Only applies to stream sockets (e.g. TCP). + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Enables client authentication if set. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Maximum socket buffer size (in bytes when no unit specified). + ## For stream sockets, once the buffer fills up, the sender will start backing up. + ## For datagram sockets, once the buffer fills up, metrics will start dropping. + ## Defaults to the OS default. + # read_buffer_size = "64KiB" + + ## Period between keep alive probes. + ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + # data_format = "influx" + + ## Content encoding for message payloads, can be set to "gzip" to or + ## "identity" to apply no encoding. + # content_encoding = "identity" diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index e412996f38e6e..603db83f01e34 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -1,8 +1,10 @@ +//go:generate ../../../tools/readme_config_includer/generator package socket_listener import ( "bufio" "crypto/tls" + _ "embed" "fmt" "io" "net" @@ -13,12 +15,17 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type setReadBufferer interface { SetReadBuffer(bytes int) error } @@ -47,9 +54,12 @@ func (ssl *streamSocketListener) listen() { break } - if ssl.ReadBufferSize.Size > 0 { + if ssl.ReadBufferSize > 0 { if srb, ok := c.(setReadBufferer); ok { - srb.SetReadBuffer(int(ssl.ReadBufferSize.Size)) + if err := srb.SetReadBuffer(int(ssl.ReadBufferSize)); err != nil { + ssl.Log.Error(err.Error()) + break + } } else { ssl.Log.Warnf("Unable to set read buffer on a %s socket", ssl.sockType) } @@ -58,6 +68,8 @@ func (ssl *streamSocketListener) listen() { ssl.connectionsMtx.Lock() if ssl.MaxConnections > 0 && len(ssl.connections) >= ssl.MaxConnections { ssl.connectionsMtx.Unlock() + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() continue } @@ -77,6 +89,8 @@ func (ssl *streamSocketListener) listen() { ssl.connectionsMtx.Lock() for _, c := range ssl.connections { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive c.Close() } ssl.connectionsMtx.Unlock() @@ -92,13 +106,13 @@ func (ssl *streamSocketListener) setKeepAlive(c net.Conn) error { if !ok { return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(ssl.ServiceAddress, "://", 2)[0]) } - if ssl.KeepAlivePeriod.Duration == 0 { + if *ssl.KeepAlivePeriod == 0 { return tcpc.SetKeepAlive(false) } if err := tcpc.SetKeepAlive(true); err != nil { return err } - return tcpc.SetKeepAlivePeriod(ssl.KeepAlivePeriod.Duration) + return tcpc.SetKeepAlivePeriod(time.Duration(*ssl.KeepAlivePeriod)) } func (ssl *streamSocketListener) removeConnection(c net.Conn) { @@ -114,12 +128,16 @@ func (ssl *streamSocketListener) read(c net.Conn) { decoder, err := internal.NewStreamContentDecoder(ssl.ContentEncoding, c) if err != nil { ssl.Log.Error("Read error: %v", err) + return } scnr := bufio.NewScanner(decoder) for { - if ssl.ReadTimeout != nil && ssl.ReadTimeout.Duration > 0 { - c.SetReadDeadline(time.Now().Add(ssl.ReadTimeout.Duration)) + if ssl.ReadTimeout != nil && *ssl.ReadTimeout > 0 { + if err := c.SetReadDeadline(time.Now().Add(time.Duration(*ssl.ReadTimeout))); err != nil { + ssl.Log.Errorf("setting read deadline failed: %v", err) + return + } } if !scnr.Scan() { break @@ -182,13 +200,13 @@ func (psl *packetSocketListener) listen() { } type SocketListener struct { - ServiceAddress string `toml:"service_address"` - MaxConnections int `toml:"max_connections"` - ReadBufferSize internal.Size `toml:"read_buffer_size"` - ReadTimeout *internal.Duration `toml:"read_timeout"` - KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"` - SocketMode string `toml:"socket_mode"` - ContentEncoding string `toml:"content_encoding"` + ServiceAddress string `toml:"service_address"` + MaxConnections int `toml:"max_connections"` + ReadBufferSize config.Size `toml:"read_buffer_size"` + ReadTimeout *config.Duration `toml:"read_timeout"` + KeepAlivePeriod *config.Duration `toml:"keep_alive_period"` + SocketMode string `toml:"socket_mode"` + ContentEncoding string `toml:"content_encoding"` tlsint.ServerConfig wg sync.WaitGroup @@ -200,70 +218,8 @@ type SocketListener struct { io.Closer } -func (sl *SocketListener) Description() string { - return "Generic socket listener capable of handling multiple socket types."
-} - -func (sl *SocketListener) SampleConfig() string { - return ` - ## URL to listen on - # service_address = "tcp://:8094" - # service_address = "tcp://127.0.0.1:http" - # service_address = "tcp4://:8094" - # service_address = "tcp6://:8094" - # service_address = "tcp6://[2001:db8::1]:8094" - # service_address = "udp://:8094" - # service_address = "udp4://:8094" - # service_address = "udp6://:8094" - # service_address = "unix:///tmp/telegraf.sock" - # service_address = "unixgram:///tmp/telegraf.sock" - - ## Change the file mode bits on unix sockets. These permissions may not be - ## respected by some platforms, to safely restrict write permissions it is best - ## to place the socket into a directory that has previously been created - ## with the desired permissions. - ## ex: socket_mode = "777" - # socket_mode = "" - - ## Maximum number of concurrent connections. - ## Only applies to stream sockets (e.g. TCP). - ## 0 (default) is unlimited. - # max_connections = 1024 - - ## Read timeout. - ## Only applies to stream sockets (e.g. TCP). - ## 0 (default) is unlimited. - # read_timeout = "30s" - - ## Optional TLS configuration. - ## Only applies to stream sockets (e.g. TCP). - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Enables client authentication if set. - # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Maximum socket buffer size (in bytes when no unit specified). - ## For stream sockets, once the buffer fills up, the sender will start backing up. - ## For datagram sockets, once the buffer fills up, metrics will start dropping. - ## Defaults to the OS default. - # read_buffer_size = "64KiB" - - ## Period between keep alive probes. - ## Only applies to TCP sockets. - ## 0 disables keep alive probes. - ## Defaults to the OS configuration. - # keep_alive_period = "5m" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - # data_format = "influx" - - ## Content encoding for message payloads, can be set to "gzip" to or - ## "identity" to apply no encoding. - # content_encoding = "identity" -` +func (*SocketListener) SampleConfig() string { + return sampleConfig } func (sl *SocketListener) Gather(_ telegraf.Accumulator) error { @@ -288,6 +244,7 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { // no good way of testing for "file does not exist". // Instead just ignore error and blow up when we try to listen, which will // indicate "address already in use" if file existed and we couldn't remove. 
+ //nolint:errcheck,revive os.Remove(addr) } @@ -318,7 +275,9 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } - os.Chmod(spl[1], os.FileMode(uint32(i))) + if err := os.Chmod(spl[1], os.FileMode(uint32(i))); err != nil { + return err + } } ssl := &streamSocketListener{ @@ -353,12 +312,16 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return err } - os.Chmod(spl[1], os.FileMode(uint32(i))) + if err := os.Chmod(spl[1], os.FileMode(uint32(i))); err != nil { + return err + } } - if sl.ReadBufferSize.Size > 0 { + if sl.ReadBufferSize > 0 { if srb, ok := pc.(setReadBufferer); ok { - srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) + if err := srb.SetReadBuffer(int(sl.ReadBufferSize)); err != nil { + sl.Log.Warnf("Setting read buffer on a %s socket failed: %v", protocol, err) + } } else { sl.Log.Warnf("Unable to set read buffer on a %s socket", protocol) } @@ -417,6 +380,8 @@ func udpListen(network string, address string) (net.PacketConn, error) { func (sl *SocketListener) Stop() { if sl.Closer != nil { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive sl.Close() sl.Closer = nil } @@ -438,7 +403,9 @@ type unixCloser struct { func (uc unixCloser) Close() error { err := uc.closer.Close() - os.Remove(uc.path) // ignore error + // Ignore the error if e.g. the file does not exist + //nolint:errcheck,revive + os.Remove(uc.path) return err } diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index a46add15cf61b..91062c6265a29 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -4,26 +4,26 @@ import ( "bytes" "crypto/tls" "io" - "io/ioutil" "log" "net" "os" - "path/filepath" + "runtime" "testing" "time" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/wlog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) var pki = testutil.NewPKI("../../../testutil/pki") -// testEmptyLog is a helper function to ensure no data is written to log. +// prepareLog is a helper function to ensure no data is written to log. // Should be called at the start of the test, and returns a function which should run at the end. 
-func testEmptyLog(t *testing.T) func() { +func prepareLog(t *testing.T) func() { buf := bytes.NewBuffer(nil) log.SetOutput(wlog.NewWriter(buf)) @@ -36,16 +36,17 @@ func testEmptyLog(t *testing.T) func() { for { line, err := buf.ReadBytes('\n') if err != nil { - assert.Equal(t, io.EOF, err) + require.Equal(t, io.EOF, err) break } - assert.Empty(t, string(line), "log not empty") + require.Empty(t, string(line), "log not empty") } } } func TestSocketListener_tcp_tls(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} @@ -67,10 +68,7 @@ func TestSocketListener_tcp_tls(t *testing.T) { } func TestSocketListener_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") + sock := testutil.TempSocket(t) sl := newSocketListener() sl.Log = testutil.Logger{} @@ -78,13 +76,13 @@ func TestSocketListener_unix_tls(t *testing.T) { sl.ServerConfig = *pki.TLSServerConfig() acc := &testutil.Accumulator{} - err = sl.Start(acc) + err := sl.Start(acc) require.NoError(t, err) defer sl.Stop() tlsCfg, err := pki.TLSClientConfig().TLSConfig() - tlsCfg.InsecureSkipVerify = true require.NoError(t, err) + tlsCfg.InsecureSkipVerify = true secureClient, err := tls.Dial("unix", sock, tlsCfg) require.NoError(t, err) @@ -93,12 +91,13 @@ func TestSocketListener_unix_tls(t *testing.T) { } func TestSocketListener_tcp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -112,12 +111,13 @@ func TestSocketListener_tcp(t *testing.T) { } func TestSocketListener_udp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -131,21 +131,20 @@ func TestSocketListener_udp(t *testing.T) { } func TestSocketListener_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") + sock := testutil.TempSocket(t) - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() - os.Create(sock) + f, _ := os.Create(sock) + require.NoError(t, f.Close()) sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} - err = sl.Start(acc) + err := sl.Start(acc) require.NoError(t, err) defer sl.Stop() @@ -156,18 +155,23 @@ func TestSocketListener_unix(t *testing.T) { } func TestSocketListener_unixgram(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } - defer testEmptyLog(t)() + sock := testutil.TempSocket(t) + + testEmptyLog := 
prepareLog(t) + defer testEmptyLog() + + f, err := os.Create(sock) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, f.Close()) }) - os.Create(sock) sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "unixgram://" + sock - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) acc := &testutil.Accumulator{} err = sl.Start(acc) @@ -181,12 +185,13 @@ func TestSocketListener_unixgram(t *testing.T) { } func TestSocketListenerDecode_tcp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) sl.ContentEncoding = "gzip" acc := &testutil.Accumulator{} @@ -201,12 +206,13 @@ func TestSocketListenerDecode_tcp(t *testing.T) { } func TestSocketListenerDecode_udp(t *testing.T) { - defer testEmptyLog(t)() + testEmptyLog := prepareLog(t) + defer testEmptyLog() sl := newSocketListener() sl.Log = testutil.Logger{} sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ReadBufferSize = config.Size(1024) sl.ContentEncoding = "gzip" acc := &testutil.Accumulator{} @@ -236,9 +242,10 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { require.NoError(t, err) } - client.Write(mstr12) - client.Write(mstr3) - + _, err := client.Write(mstr12) + require.NoError(t, err) + _, err = client.Write(mstr3) + require.NoError(t, err) acc := sl.Accumulator.(*testutil.Accumulator) acc.Wait(3) @@ -248,18 +255,18 @@ func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { m3 := acc.Metrics[2] acc.Unlock() - assert.Equal(t, "test", m1.Measurement) - assert.Equal(t, map[string]string{"foo": "bar"}, m1.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(1)}, m1.Fields) - assert.True(t, time.Unix(0, 123456789).Equal(m1.Time)) + require.Equal(t, "test", m1.Measurement) + require.Equal(t, map[string]string{"foo": "bar"}, m1.Tags) + require.Equal(t, map[string]interface{}{"v": int64(1)}, m1.Fields) + require.True(t, time.Unix(0, 123456789).Equal(m1.Time)) - assert.Equal(t, "test", m2.Measurement) - assert.Equal(t, map[string]string{"foo": "baz"}, m2.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(2)}, m2.Fields) - assert.True(t, time.Unix(0, 123456790).Equal(m2.Time)) + require.Equal(t, "test", m2.Measurement) + require.Equal(t, map[string]string{"foo": "baz"}, m2.Tags) + require.Equal(t, map[string]interface{}{"v": int64(2)}, m2.Fields) + require.True(t, time.Unix(0, 123456790).Equal(m2.Time)) - assert.Equal(t, "test", m3.Measurement) - assert.Equal(t, map[string]string{"foo": "zab"}, m3.Tags) - assert.Equal(t, map[string]interface{}{"v": int64(3)}, m3.Fields) - assert.True(t, time.Unix(0, 123456791).Equal(m3.Time)) + require.Equal(t, "test", m3.Measurement) + require.Equal(t, map[string]string{"foo": "zab"}, m3.Tags) + require.Equal(t, map[string]interface{}{"v": int64(3)}, m3.Fields) + require.True(t, time.Unix(0, 123456791).Equal(m3.Time)) } diff --git a/plugins/inputs/socketstat/README.md b/plugins/inputs/socketstat/README.md new file mode 100644 index 0000000000000..369e50fdab918 --- /dev/null +++ b/plugins/inputs/socketstat/README.md @@ -0,0 +1,59 @@ +# SocketStat Input Plugin + +The socketstat plugin gathers indicators from established connections, using +iproute2's `ss` command. + +The `ss` command does not require specific privileges. 
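+
+Under the hood the plugin executes one `ss` invocation per configured socket
+type, roughly like this (see `socketList` in `socketstat.go`; the exact path
+to `ss` is resolved at startup):
+
+```sh
+ss -in --tcp
+ss -in --udp
+```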
+ +**WARNING: The output format will produce series with very high cardinality.** You should either store those in an engine that doesn't suffer from high cardinality, use a +short retention policy, or apply appropriate filtering. + +## Configuration + +```toml @sample.conf +# Gather indicators from established connections, using iproute2's ss command. +[[inputs.socketstat]] + ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets + ## Specify here the types you want to gather + socket_types = [ "tcp", "udp" ] + ## The default timeout of 1s for ss execution can be overridden here: + # timeout = "1s" +``` + +## Measurements & Fields + +- socketstat + - state (string) (for tcp, dccp and sctp protocols) + - If ss provides it (it depends on the protocol and ss version): + - bytes_acked (integer, bytes) + - bytes_received (integer, bytes) + - segs_out (integer, count) + - segs_in (integer, count) + - data_segs_out (integer, count) + - data_segs_in (integer, count) + +## Tags + +- All measurements have the following tags: + - proto + - local_addr + - local_port + - remote_addr + - remote_port + +## Example Output + +### Recent ss version (iproute2 4.3.0) + +```sh +./telegraf --config telegraf.conf --input-filter socketstat --test +> socketstat,host=ubuntu-xenial,local_addr=10.6.231.226,local_port=42716,proto=tcp,remote_addr=192.168.2.21,remote_port=80 bytes_acked=184i,bytes_received=2624519595i,recv_q=4344i,segs_in=1812580i,segs_out=661642i,send_q=0i,state="ESTAB" 1606457205000000000 +``` + +### Older ss version (iproute2 3.12.0) + +```sh +./telegraf --config telegraf.conf --input-filter socketstat --test +> socketstat,host=ubuntu-trusty,local_addr=10.6.231.163,local_port=35890,proto=tcp,remote_addr=192.168.2.21,remote_port=80 recv_q=0i,send_q=0i,state="ESTAB" 1606456977000000000 +``` diff --git a/plugins/inputs/socketstat/sample.conf b/plugins/inputs/socketstat/sample.conf new file mode 100644 index 0000000000000..ed99025caa29b --- /dev/null +++ b/plugins/inputs/socketstat/sample.conf @@ -0,0 +1,7 @@ +# Gather indicators from established connections, using iproute2's ss command. +[[inputs.socketstat]] + ## ss can display information about tcp, udp, raw, unix, packet, dccp and sctp sockets + ## Specify here the types you want to gather + socket_types = [ "tcp", "udp" ] + ## The default timeout of 1s for ss execution can be overridden here: + # timeout = "1s" diff --git a/plugins/inputs/socketstat/socketstat.go b/plugins/inputs/socketstat/socketstat.go new file mode 100644 index 0000000000000..3f905882234f8 --- /dev/null +++ b/plugins/inputs/socketstat/socketstat.go @@ -0,0 +1,216 @@ +//go:generate ../../../tools/readme_config_includer/generator +//go:build !windows +// +build !windows + +// iproute2 doesn't exist on Windows + +package socketstat + +import ( + "bufio" + "bytes" + _ "embed" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + +const measurement = "socketstat" + +// Socketstat is a telegraf plugin to gather indicators from established connections, using iproute2's `ss` command.
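+// For each configured protocol it runs `ss -in --<protocol>` (see socketList
+// below) and emits one measurement per connection found in the output.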
+type Socketstat struct { + SocketProto []string `toml:"protocols"` + Timeout config.Duration `toml:"timeout"` + Log telegraf.Logger `toml:"-"` + + isNewConnection *regexp.Regexp + validValues *regexp.Regexp + cmdName string + lister socketLister +} + +type socketLister func(cmdName string, proto string, timeout config.Duration) (*bytes.Buffer, error) + +func (*Socketstat) SampleConfig() string { + return sampleConfig +} + +// Gather gathers indicators from established connections +func (ss *Socketstat) Gather(acc telegraf.Accumulator) error { + // Best effort: we continue through the protocols even if an error is encountered, + // reporting each error to the accumulator. + for _, proto := range ss.SocketProto { + out, err := ss.lister(ss.cmdName, proto, ss.Timeout) + if err != nil { + acc.AddError(err) + continue + } + ss.parseAndGather(acc, out, proto) + } + return nil +} + +func socketList(cmdName string, proto string, timeout config.Duration) (*bytes.Buffer, error) { + // Run ss for the given protocol, return the output as bytes.Buffer + args := []string{"-in", "--" + proto} + cmd := exec.Command(cmdName, args...) + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, time.Duration(timeout)) + if err != nil { + return &out, fmt.Errorf("error running ss -in --%s: %v", proto, err) + } + return &out, nil +} + +func (ss *Socketstat) parseAndGather(acc telegraf.Accumulator, data *bytes.Buffer, proto string) { + scanner := bufio.NewScanner(data) + tags := map[string]string{} + fields := make(map[string]interface{}) + + // ss output can have blank lines, and/or socket basic info lines alternating with more + // advanced statistics lines. + // In all non-empty lines, we can have metrics, so we need to group those relevant to + // the same connection. + // To achieve this, we're using the flushData variable which indicates if we should add + // a new measurement or postpone it to a later line. + + // The first line is only headers + scanner.Scan() + + flushData := false + for scanner.Scan() { + line := scanner.Text() + if line == "" { + continue + } + words := strings.Fields(line) + + if ss.isNewConnection.MatchString(line) { + // A line with starting whitespace means metrics about the current connection. + // We should never get 2 consecutive such lines. If we do, log a warning and, as + // a best effort, extend the metrics from the 1st line with the metrics of the 2nd + // one, possibly overwriting. + for _, word := range words { + if !ss.validValues.MatchString(word) { + continue + } + // kv will have 2 fields because it matched the regexp + kv := strings.Split(word, ":") + v, err := strconv.ParseUint(kv[1], 10, 64) + if err != nil { + ss.Log.Infof("Couldn't parse metric %q: %v", word, err) + continue + } + fields[kv[0]] = v + } + if !flushData { + ss.Log.Warnf("Found orphaned metrics: %s", words) + ss.Log.Warn("Added them to the last known connection.") + } + acc.AddFields(measurement, fields, tags) + flushData = false + continue + } + // A line with no starting whitespace means we're going to parse a new connection. + // Flush what we gathered about the previous one, if any. + if flushData { + acc.AddFields(measurement, fields, tags) + } + + // Delegate the real parsing to getTagsAndState, which manages various + // formats depending on the protocol. + tags, fields = getTagsAndState(proto, words, ss.Log) + + // This line contained metrics, so record that.
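+ // For example, the connection line "ESTAB 0 0 127.0.0.1:7778 127.0.0.1:50378"
+ // (see testdata/tcp_traffic.txt) just yielded the proto/local_addr/local_port/
+ // remote_addr/remote_port tags plus the state, recv_q and send_q fields.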
+ flushData = true + } + if flushData { + acc.AddFields(measurement, fields, tags) + } +} + +func getTagsAndState(proto string, words []string, log telegraf.Logger) (map[string]string, map[string]interface{}) { + tags := map[string]string{ + "proto": proto, + } + fields := make(map[string]interface{}) + switch proto { + case "udp", "raw": + words = append([]string{"dummy"}, words...) + case "tcp", "dccp", "sctp": + fields["state"] = words[0] + } + switch proto { + case "tcp", "udp", "raw", "dccp", "sctp": + // Local and remote addresses are fields 3 and 4 + // Separate addresses and ports with the last ':' + localIndex := strings.LastIndex(words[3], ":") + remoteIndex := strings.LastIndex(words[4], ":") + tags["local_addr"] = words[3][:localIndex] + tags["local_port"] = words[3][localIndex+1:] + tags["remote_addr"] = words[4][:remoteIndex] + tags["remote_port"] = words[4][remoteIndex+1:] + case "unix", "packet": + fields["netid"] = words[0] + tags["local_addr"] = words[4] + tags["local_port"] = words[5] + tags["remote_addr"] = words[6] + tags["remote_port"] = words[7] + } + v, err := strconv.ParseUint(words[1], 10, 64) + if err != nil { + log.Warnf("Couldn't read recv_q in %q: %v", words, err) + } else { + fields["recv_q"] = v + } + v, err = strconv.ParseUint(words[2], 10, 64) + if err != nil { + log.Warnf("Couldn't read send_q in %q: %v", words, err) + } else { + fields["send_q"] = v + } + return tags, fields +} + +func (ss *Socketstat) Init() error { + if len(ss.SocketProto) == 0 { + ss.SocketProto = []string{"tcp", "udp"} + } + + // Initialize regexps to validate input data + validFields := "(bytes_acked|bytes_received|segs_out|segs_in|data_segs_in|data_segs_out)" + ss.validValues = regexp.MustCompile("^" + validFields + ":[0-9]+$") + ss.isNewConnection = regexp.MustCompile(`^\s+.*$`) + + ss.lister = socketList + + // Check that ss is installed, get its path. + // Do it last, because in test environments where `ss` might not be available, + // we still want the other Init() actions to be performed. 
+ ssPath, err := exec.LookPath("ss") + if err != nil { + return err + } + ss.cmdName = ssPath + + return nil +} + +func init() { + inputs.Add("socketstat", func() telegraf.Input { + return &Socketstat{Timeout: config.Duration(time.Second)} + }) +} diff --git a/plugins/inputs/socketstat/socketstat_test.go b/plugins/inputs/socketstat/socketstat_test.go new file mode 100644 index 0000000000000..bd73051d77a9b --- /dev/null +++ b/plugins/inputs/socketstat/socketstat_test.go @@ -0,0 +1,126 @@ +//go:build !windows +// +build !windows + +package socketstat + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "testing" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSocketstat_Gather(t *testing.T) { + tests := []struct { + name string + proto []string + filename string + tags []map[string]string + fields [][]map[string]interface{} + err error + }{ + { + name: "tcp - no sockets => no results", + proto: []string{"tcp"}, + filename: "tcp_no_sockets.txt", + }, + { + name: "udp - no sockets => no results", + proto: []string{"udp"}, + filename: "udp_no_sockets.txt", + }, + { + name: "tcp sockets captured", + proto: []string{"tcp"}, + filename: "tcp_traffic.txt", + tags: []map[string]string{ + {"proto": "tcp", "local_addr": "192.168.1.21", "local_port": "6514", "remote_addr": "192.168.1.21", "remote_port": "443"}, + {"proto": "tcp", "local_addr": "192.168.122.1", "local_port": "55194", "remote_addr": "192.168.122.1", "remote_port": "6514"}, + {"proto": "tcp", "local_addr": "127.0.0.1", "local_port": "7778", "remote_addr": "127.0.0.1", "remote_port": "50378"}, + }, + fields: [][]map[string]interface{}{ + {map[string]interface{}{"state": "ESTAB", "bytes_acked": uint64(1126), "bytes_received": uint64(532644751), "segs_out": uint64(211249), "segs_in": uint64(211254), "data_segs_out": uint64(2), "data_segs_in": uint64(211251), "recv_q": uint64(0), "send_q": uint64(0)}}, + {map[string]interface{}{"state": "ESTAB", "bytes_acked": uint64(790782896), "bytes_received": uint64(1126), "segs_out": uint64(333361), "segs_in": uint64(333361), "data_segs_out": uint64(333358), "data_segs_in": uint64(2), "recv_q": uint64(0), "send_q": uint64(0)}}, + {map[string]interface{}{"state": "ESTAB", "bytes_acked": uint64(19983121), "bytes_received": uint64(266383), "segs_out": uint64(15431), "segs_in": uint64(17633), "data_segs_out": uint64(15119), "data_segs_in": uint64(5098), "recv_q": uint64(0), "send_q": uint64(0)}}, + }, + }, + { + name: "udp packets captured", + proto: []string{"udp"}, + filename: "udp_traffic.txt", + tags: []map[string]string{ + {"proto": "udp", "local_addr": "10.10.0.4", "local_port": "33149", "remote_addr": "10.10.0.5", "remote_port": "53"}, + {"proto": "udp", "local_addr": "10.10.0.4", "local_port": "54276", "remote_addr": "10.10.0.6", "remote_port": "53"}, + {"proto": "udp", "local_addr": "10.10.0.4", "local_port": "38312", "remote_addr": "10.10.0.7", "remote_port": "53"}, + }, + fields: [][]map[string]interface{}{ + {map[string]interface{}{"recv_q": uint64(0), "send_q": uint64(0)}}, + {map[string]interface{}{"recv_q": uint64(0), "send_q": uint64(0)}}, + {map[string]interface{}{"recv_q": uint64(0), "send_q": uint64(0)}}, + }, + }, + } + for i, tt := range tests { + octets, err := os.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) + + t.Run(tt.name, func(t *testing.T) { + i++ + ss := &Socketstat{ + SocketProto: tt.proto, + } + acc := new(testutil.Accumulator) + + err := ss.Init() 
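+ // Init fails when the ss binary cannot be found in PATH (e.g. on machines
+ // without iproute2); the lister is replaced below, so only that specific
+ // error is tolerated here.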
+ if err != nil { + require.EqualError(t, err, "exec: \"ss\": executable file not found in $PATH") + } + ss.lister = func(cmdName string, proto string, timeout config.Duration) (*bytes.Buffer, error) { + return bytes.NewBuffer(octets), nil + } + + err = acc.GatherError(ss.Gather) + require.ErrorIs(t, err, tt.err) + if len(tt.proto) == 0 { + n := acc.NFields() + require.Equalf(t, 0, n, "%d: expected 0 values got %d", i, n) + return + } + if len(tt.tags) == 0 { + n := acc.NFields() + require.Equalf(t, 0, n, "%d: expected 0 values got %d", i, n) + return + } + n := 0 + for j, tags := range tt.tags { + for k, fields := range tt.fields[j] { + require.Greater(t, len(acc.Metrics), n) + m := acc.Metrics[n] + require.Equal(t, measurement, m.Measurement, "%d %d %d: expected measurement '%#v' got '%#v'\n", i, j, k, measurement, m.Measurement) + require.Equal(t, tags, m.Tags, "%d %d %d: expected tags\n%#v got\n%#v\n", i, j, k, tags, m.Tags) + require.Equal(t, fields, m.Fields, "%d %d %d: expected fields\n%#v got\n%#v\n", i, j, k, fields, m.Fields) + n++ + } + } + }) + } +} + +func TestSocketstat_Gather_listerError(t *testing.T) { + errorMessage := "error foobar" + errFoo := errors.New(errorMessage) + ss := &Socketstat{ + SocketProto: []string{"foobar"}, + } + ss.lister = func(cmdName string, proto string, timeout config.Duration) (*bytes.Buffer, error) { + return new(bytes.Buffer), errFoo + } + acc := new(testutil.Accumulator) + err := acc.GatherError(ss.Gather) + require.EqualError(t, err, errorMessage) +} diff --git a/plugins/inputs/socketstat/socketstat_windows.go b/plugins/inputs/socketstat/socketstat_windows.go new file mode 100644 index 0000000000000..4804257c9f5d6 --- /dev/null +++ b/plugins/inputs/socketstat/socketstat_windows.go @@ -0,0 +1,4 @@ +//go:build windows +// +build windows + +package socketstat diff --git a/plugins/inputs/socketstat/testdata/tcp_no_sockets.txt b/plugins/inputs/socketstat/testdata/tcp_no_sockets.txt new file mode 100644 index 0000000000000..c8fafec2aa7c8 --- /dev/null +++ b/plugins/inputs/socketstat/testdata/tcp_no_sockets.txt @@ -0,0 +1 @@ +State Recv-Q Send-Q Local Address:Port Peer Address:Port diff --git a/plugins/inputs/socketstat/testdata/tcp_traffic.txt b/plugins/inputs/socketstat/testdata/tcp_traffic.txt new file mode 100644 index 0000000000000..eb4bb874e2676 --- /dev/null +++ b/plugins/inputs/socketstat/testdata/tcp_traffic.txt @@ -0,0 +1,7 @@ +State Recv-Q Send-Q Local Address:Port Peer Address:Port +ESTAB 0 0 192.168.1.21:6514 192.168.1.21:443 + cubic wscale:7,7 rto:204 rtt:0.057/0.033 ato:40 mss:22976 cwnd:10 bytes_acked:1126 bytes_received:532644751 segs_out:211249 segs_in:211254 data_segs_out:2 data_segs_in:211251 send 32247.0Mbps lastsnd:299082764 lastrcv:5248 lastack:5252 rcv_rtt:3.532 rcv_space:186557 minrtt:0.047 +ESTAB 0 0 192.168.122.1:55194 192.168.122.1:6514 + cubic wscale:7,7 rto:204 rtt:0.034/0.01 ato:40 mss:65483 cwnd:10 bytes_acked:790782896 bytes_received:1126 segs_out:333361 segs_in:333361 data_segs_out:333358 data_segs_in:2 send 154077.6Mbps lastsnd:5248 lastrcv:443892492 lastack:5248 rcv_rtt:250 rcv_space:43690 minrtt:0.009 +ESTAB 0 0 127.0.0.1:7778 127.0.0.1:50378 + cubic wscale:7,7 rto:220 rtt:16.009/21.064 ato:44 mss:65483 cwnd:10 bytes_acked:19983121 bytes_received:266383 segs_out:15431 segs_in:17633 data_segs_out:15119 data_segs_in:5098 send 327.2Mbps lastsnd:9792 lastrcv:9840 lastack:9748 pacing_rate 654.4Mbps retrans:0/1 rcv_rtt:129800 rcv_space:44057 minrtt:0.043 diff --git a/plugins/inputs/socketstat/testdata/udp_no_sockets.txt 
b/plugins/inputs/socketstat/testdata/udp_no_sockets.txt new file mode 100644 index 0000000000000..0065bceb4bd4d --- /dev/null +++ b/plugins/inputs/socketstat/testdata/udp_no_sockets.txt @@ -0,0 +1 @@ +Recv-Q Send-Q Local Address:Port Peer Address:Port diff --git a/plugins/inputs/socketstat/testdata/udp_traffic.txt b/plugins/inputs/socketstat/testdata/udp_traffic.txt new file mode 100644 index 0000000000000..e0ad7b2eb5480 --- /dev/null +++ b/plugins/inputs/socketstat/testdata/udp_traffic.txt @@ -0,0 +1,4 @@ +Recv-Q Send-Q Local Address:Port Peer Address:Port +0 0 10.10.0.4:33149 10.10.0.5:53 +0 0 10.10.0.4:54276 10.10.0.6:53 +0 0 10.10.0.4:38312 10.10.0.7:53 diff --git a/plugins/inputs/solr/README.md b/plugins/inputs/solr/README.md index c20fa92836c70..00ca7f4b0f1c5 100644 --- a/plugins/inputs/solr/README.md +++ b/plugins/inputs/solr/README.md @@ -1,15 +1,20 @@ # Solr Input Plugin -The [solr](http://lucene.apache.org/solr/) plugin collects stats via the -[MBean Request Handler](https://cwiki.apache.org/confluence/display/solr/MBean+Request+Handler) +The [solr](http://lucene.apache.org/solr/) plugin collects stats via the [MBean +Request Handler][1]. -More about [performance statistics](https://cwiki.apache.org/confluence/display/solr/Performance+Statistics+Reference) +More about [performance statistics][2]. Tested from 3.5 to 7.* -### Configuration: +[1]: https://cwiki.apache.org/confluence/display/solr/MBean+Request+Handler -```toml +[2]: https://cwiki.apache.org/confluence/display/solr/Performance+Statistics+Reference + +## Configuration + +```toml @sample.conf +# Read stats from one or more Solr servers or cores [[inputs.solr]] ## specify a list of one or more Solr servers servers = ["http://localhost:8983"] @@ -22,9 +27,9 @@ Tested from 3.5 to 7.* # password = "pa$$word" ``` -### Example output of gathered metrics: +## Example output of gathered metrics -``` +```shell ➜ ~ telegraf -config telegraf.conf -input-filter solr -test * Plugin: solr, Collection 1 > solr_core,core=main,handler=searcher,host=testhost deleted_docs=17616645i,max_docs=261848363i,num_docs=244231718i 1478214949000000000 diff --git a/plugins/inputs/solr/sample.conf b/plugins/inputs/solr/sample.conf new file mode 100644 index 0000000000000..30643f7496177 --- /dev/null +++ b/plugins/inputs/solr/sample.conf @@ -0,0 +1,11 @@ +# Read stats from one or more Solr servers or cores +[[inputs.solr]] + ## specify a list of one or more Solr servers + servers = ["http://localhost:8983"] + ## + ## specify a list of one or more Solr cores (default - all) + # cores = ["main"] + ## + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index ce44fa0869c20..7b4e74658b614 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package solr import ( + _ "embed" "encoding/json" "fmt" "math" @@ -11,36 +13,25 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + const mbeansPath = "/admin/mbeans?stats=true&wt=json&cat=CORE&cat=QUERYHANDLER&cat=UPDATEHANDLER&cat=CACHE" const adminCoresPath = "/solr/admin/cores?action=STATUS&wt=json" -type node struct { - Host string `json:"host"` -} - -const sampleConfig = ` - ## specify a list of one or more Solr servers - servers = ["http://localhost:8983"] - - ## specify a list of one or more Solr cores (default - all) - # cores = ["main"] - - ## Optional HTTP Basic Auth Credentials - # username = "username" - # password = "pa$$word" -` - // Solr is a plugin to read stats from one or many Solr servers type Solr struct { Local bool Servers []string Username string Password string - HTTPTimeout internal.Duration + HTTPTimeout config.Duration Cores []string client *http.Client } @@ -125,20 +116,14 @@ type Cache struct { // NewSolr return a new instance of Solr func NewSolr() *Solr { return &Solr{ - HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPTimeout: config.Duration(time.Second * 5), } } -// SampleConfig returns sample configuration for this plugin. -func (s *Solr) SampleConfig() string { +func (*Solr) SampleConfig() string { return sampleConfig } -// Description returns the plugin description. -func (s *Solr) Description() string { - return "Read stats from one or more Solr servers or cores" -} - // Gather reads the stats from Solr and writes it to the // Accumulator. func (s *Solr) Gather(acc telegraf.Accumulator) error { @@ -205,7 +190,7 @@ func getCoresFromStatus(adminCoresStatus *AdminCoresStatus) []string { // Add core metrics from admin to accumulator // This is the only point where size_in_bytes is available (as far as I checked) -func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCoresStatus, time time.Time) { +func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCoresStatus, measurementTime time.Time) { for core, metrics := range adminCoreStatus.Status { coreFields := map[string]interface{}{ "deleted_docs": metrics.Index.DeletedDocs, @@ -217,13 +202,13 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo "solr_admin", coreFields, map[string]string{"core": core}, - time, + measurementTime, ) } } // Add core metrics section to accumulator -func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var coreMetrics map[string]Core if len(mBeansData.SolrMbeans) < 2 { return fmt.Errorf("no core metric data to unmarshal") @@ -246,14 +231,14 @@ func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBea map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil } // Add query metrics section to accumulator -func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var queryMetrics map[string]QueryHandler if len(mBeansData.SolrMbeans) < 4 { @@ -287,9 +272,8 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) - } return nil } @@ -328,7 +312,7 @@ func convertQueryHandlerMap(value map[string]interface{}) map[string]interface{} } // Add 
update metrics section to accumulator -func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { var updateMetrics map[string]UpdateHandler if len(mBeansData.SolrMbeans) < 6 { @@ -367,7 +351,7 @@ func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansD map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil @@ -408,7 +392,7 @@ func getInt(unk interface{}) int64 { } // Add cache metrics section to accumulator -func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { +func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, measurementTime time.Time) error { if len(mBeansData.SolrMbeans) < 8 { return fmt.Errorf("no cache metric data to unmarshal") } @@ -448,7 +432,7 @@ func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBe map[string]string{ "core": core, "handler": name}, - time, + measurementTime, ) } return nil @@ -466,11 +450,11 @@ func (s *Solr) mbeansURL(server string, core string) string { func (s *Solr) createHTTPClient() *http.Client { tr := &http.Transport{ - ResponseHeaderTimeout: s.HTTPTimeout.Duration, + ResponseHeaderTimeout: time.Duration(s.HTTPTimeout), } client := &http.Client{ Transport: tr, - Timeout: s.HTTPTimeout.Duration, + Timeout: time.Duration(s.HTTPTimeout), } return client @@ -497,10 +481,8 @@ func (s *Solr) gatherData(url string, v interface{}) error { return fmt.Errorf("solr: API responded with status-code %d, expected %d, url %s", r.StatusCode, http.StatusOK, url) } - if err = json.NewDecoder(r.Body).Decode(v); err != nil { - return err - } - return nil + + return json.NewDecoder(r.Body).Decode(v) } func init() { diff --git a/plugins/inputs/solr/solr_test.go b/plugins/inputs/solr/solr_test.go index 270816909c37d..42a6753c9b999 100644 --- a/plugins/inputs/solr/solr_test.go +++ b/plugins/inputs/solr/solr_test.go @@ -104,22 +104,29 @@ func TestNoCoreDataHandling(t *testing.T) { acc.AssertDoesNotContainMeasurement(t, "solr_queryhandler") acc.AssertDoesNotContainMeasurement(t, "solr_updatehandler") acc.AssertDoesNotContainMeasurement(t, "solr_handler") - } func createMockServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansMainResponse) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansCore1Response) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -130,15 +137,23 @@ func createMockNoCoreDataServer() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { 
w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, nodata) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, nodata) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -148,15 +163,23 @@ func createMockSolr3Server() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr3MainResponse) } else if strings.Contains(r.URL.Path, "solr/core1/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr3MainResponse) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) @@ -166,12 +189,18 @@ func createMockSolr7Server() *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/solr/admin/cores") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, statusResponse) } else if strings.Contains(r.URL.Path, "solr/main/admin") { w.WriteHeader(http.StatusOK) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, mBeansSolr7Response) } else { w.WriteHeader(http.StatusNotFound) + // Ignore the returned error as the test will fail anyway + //nolint:errcheck,revive fmt.Fprintln(w, "nope") } })) diff --git a/plugins/inputs/solr/testdata3_test.go b/plugins/inputs/solr/testdata3_test.go index cc5a4960d4849..308333534bfc7 100644 --- a/plugins/inputs/solr/testdata3_test.go +++ b/plugins/inputs/solr/testdata3_test.go @@ -727,8 +727,7 @@ const mBeansSolr3MainResponse = `{ } } ] -} -` +}` var solr3CoreExpected = map[string]interface{}{ "num_docs": int64(117166), diff --git a/plugins/inputs/solr/testdata7_test.go b/plugins/inputs/solr/testdata7_test.go index b58b3f131bd77..b8beb89485964 100644 --- a/plugins/inputs/solr/testdata7_test.go +++ b/plugins/inputs/solr/testdata7_test.go @@ -41,8 +41,7 @@ const mBeansSolr7Response = ` } } ] -} -` +}` var solr7CacheExpected = map[string]interface{}{ "evictions": int64(141485), diff --git a/plugins/inputs/solr/testdata_test.go b/plugins/inputs/solr/testdata_test.go index 30ae0127ac2d2..19fa1b16370b5 100644 --- a/plugins/inputs/solr/testdata_test.go +++ b/plugins/inputs/solr/testdata_test.go @@ -62,8 +62,7 @@ const statusResponse = ` "QTime": 13, "status": 0 } -} -` +}` const mBeansMainResponse = `{ "solr-mbeans": [ @@ -905,8 +904,7 @@ const mBeansMainResponse = `{ "QTime": 8, "status": 0 } 
-} -` +}` const mBeansCore1Response = `{ "solr-mbeans": [ @@ -1748,8 +1746,7 @@ const mBeansCore1Response = `{ "QTime": 5, "status": 0 } -} -` +}` var solrAdminMainCoreStatusExpected = map[string]interface{}{ "num_docs": int64(168943425), diff --git a/plugins/inputs/sql/README.md b/plugins/inputs/sql/README.md new file mode 100644 index 0000000000000..aeece26d9fd75 --- /dev/null +++ b/plugins/inputs/sql/README.md @@ -0,0 +1,178 @@ +# SQL Input Plugin + +This plugin reads metrics by performing SQL queries against a SQL +server. Different server types are supported and their settings might differ +(especially the connection parameters). Please check the list of [supported SQL +drivers](../../../docs/SQL_DRIVERS_INPUT.md) for the `driver` name and +for the data-source-name (`dsn`) options. + +## Configuration + +```toml @sample.conf +# Read metrics from SQL queries +[[inputs.sql]] + ## Database Driver + ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for + ## a list of supported drivers. + driver = "mysql" + + ## Data source name for connecting + ## The syntax and supported options depend on the selected driver. + dsn = "username:password@mysqlserver:3307/dbname?param=value" + + ## Timeout for any operation + ## Note that the timeout for queries is per query, not per gather. + # timeout = "5s" + + ## Connection time limits + ## By default the maximum idle time and maximum lifetime of a connection are unlimited, i.e. the connections + ## will not be closed automatically. If you specify a positive time, the connections will be closed after + ## idling or existing for at least that amount of time, respectively. + # connection_max_idle_time = "0s" + # connection_max_life_time = "0s" + + ## Connection count limits + ## By default the number of open connections is not limited and the number of maximum idle connections + ## will be inferred from the number of queries specified. If you specify a positive number for any of the + ## two options, connections will be closed when reaching the specified limit. The number of idle connections + ## will be clipped to the maximum number of connections limit if any. + # connection_max_open = 0 + # connection_max_idle = auto + + [[inputs.sql.query]] + ## Query to perform on the server + query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" + ## As an alternative to specifying the query directly, you can select a file here containing the SQL query. + ## Only one of 'query' and 'query_script' can be specified! + # query_script = "/path/to/sql/script.sql" + + ## Name of the measurement + ## In case both measurement and 'measurement_column' are given, the latter takes precedence. + # measurement = "sql" + + ## Column name containing the name of the measurement + ## If given, this will take precedence over the 'measurement' setting. In case a query result + ## does not contain the specified column, we fall back to the 'measurement' setting. + # measurement_column = "" + + ## Column name containing the time of the measurement + ## If omitted, the time of the query will be used. + # time_column = "" + + ## Format of the time contained in 'time_column' + ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. + ## See https://golang.org/pkg/time/#Time.Format for details. + # time_format = "unix" + + ## Column names containing tags + ## An empty include list will reject all columns and an empty exclude list will not exclude any column. + ## I.e.
by default no columns will be returned as tags and the tags are empty. + # tag_columns_include = [] + # tag_columns_exclude = [] + + ## Column names containing fields (explicit types) + ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + # field_columns_float = [] + # field_columns_int = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] + + ## Column names containing fields (automatic types) + ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty + ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. + ## NOTE: We rely on the database driver to perform automatic datatype conversion. + # field_columns_include = [] + # field_columns_exclude = [] +``` + +## Options + +### Driver + +The `driver` and `dsn` options specify how to connect to the database. As +especially the `dsn` format and values vary with the `driver`, refer to the list +of [supported SQL drivers](../../../docs/SQL_DRIVERS_INPUT.md) for possible +values and more details. + +### Connection limits + +With these options you can limit the number of connections kept open by this +plugin. Details about the exact workings can be found in the [golang sql +documentation](https://golang.org/pkg/database/sql/#DB.SetConnMaxIdleTime). + +### Query sections + +Multiple `query` sections can be specified for this plugin. Each specified query +will first be prepared on the server and then executed in every interval using +the column mappings specified. Please note that `tag` and `field` columns are +not exclusive, i.e. a column can be added to both. When using both `include` and +`exclude` lists, the `exclude` list takes precedence over the `include` +list. I.e. if you specify `foo` in both lists, `foo` will _never_ pass the +filter. In case any of the columns specified in `measurement_column` or `time_column` are +_not_ returned by the query, the plugin falls back to the documented +defaults. Fields or tags specified in the include lists but missing in +the query result are silently ignored. + +## Types + +This plugin relies on the driver to do the type conversion. For the different +properties of the metric the following types are accepted. + +### Measurement + +Only columns of type `string` are accepted. + +### Time + +For the metric time, columns of type `time` are accepted directly. For numeric +columns, `time_format` should be set to any of `unix`, `unix_ms`, `unix_ns` or +`unix_us` accordingly. By default a timestamp in `unix` format is +expected. For string columns, please specify the `time_format` accordingly. See +the [golang time documentation](https://golang.org/pkg/time/#Time.Format) for +details. + +### Tags + +For tags, columns with textual values (`string` and `bytes`), signed and unsigned +integers (8, 16, 32 and 64 bit), floating-point (32 and 64 bit), `boolean` and +`time` values are accepted. Those values will be converted to string. + +### Fields + +For fields, columns with textual values (`string` and `bytes`), signed and +unsigned integers (8, 16, 32 and 64 bit), floating-point (32 and 64 bit), +`boolean` and `time` values are accepted. Here `bytes` will be converted to +`string`, signed and unsigned integer values will be converted to `int64` or +`uint64` respectively.
Floating-point values are converted to `float64` and +`time` is converted to a nanosecond timestamp of type `int64`. + +## Example Output + +Using the [MariaDB sample database][maria-sample] and the configuration + +```toml +[[inputs.sql]] + driver = "mysql" + dsn = "root:password@/nation" + + [[inputs.sql.query]] + query="SELECT * FROM guests" + measurement = "nation" + tag_columns_include = ["name"] + field_columns_exclude = ["name"] +``` + +Telegraf will output the following metrics + +```shell +nation,host=Hugin,name=John guest_id=1i 1611332164000000000 +nation,host=Hugin,name=Jane guest_id=2i 1611332164000000000 +nation,host=Hugin,name=Jean guest_id=3i 1611332164000000000 +nation,host=Hugin,name=Storm guest_id=4i 1611332164000000000 +nation,host=Hugin,name=Beast guest_id=5i 1611332164000000000 +``` + +[maria-sample]: https://www.mariadbtutorial.com/getting-started/mariadb-sample-database diff --git a/plugins/inputs/sql/drivers.go b/plugins/inputs/sql/drivers.go new file mode 100644 index 0000000000000..635e2a0318f84 --- /dev/null +++ b/plugins/inputs/sql/drivers.go @@ -0,0 +1,9 @@ +package sql + +import ( + // Blank imports to register the drivers + _ "github.com/ClickHouse/clickhouse-go" + _ "github.com/denisenkom/go-mssqldb" + _ "github.com/go-sql-driver/mysql" + _ "github.com/jackc/pgx/v4/stdlib" +) diff --git a/plugins/inputs/sql/drivers_sqlite.go b/plugins/inputs/sql/drivers_sqlite.go new file mode 100644 index 0000000000000..355b870d54cfb --- /dev/null +++ b/plugins/inputs/sql/drivers_sqlite.go @@ -0,0 +1,14 @@ +//go:build !arm && !mips && !mipsle && !mips64 && !mips64le && !ppc64 && !(freebsd && arm64) +// +build !arm +// +build !mips +// +build !mipsle +// +build !mips64 +// +build !mips64le +// +build !ppc64 +// +build !freebsd !arm64 + +package sql + +import ( + _ "modernc.org/sqlite" +) diff --git a/plugins/inputs/sql/drivers_sqlite_other.go b/plugins/inputs/sql/drivers_sqlite_other.go new file mode 100644 index 0000000000000..7f927a64484ff --- /dev/null +++ b/plugins/inputs/sql/drivers_sqlite_other.go @@ -0,0 +1,4 @@ +//go:build arm || mips || mipsle || mips64 || mips64le || ppc64 || (freebsd && arm64) +// +build arm mips mipsle mips64 mips64le ppc64 freebsd,arm64 + +package sql diff --git a/plugins/inputs/sql/sample.conf b/plugins/inputs/sql/sample.conf new file mode 100644 index 0000000000000..af7a9df8c5dec --- /dev/null +++ b/plugins/inputs/sql/sample.conf @@ -0,0 +1,77 @@ +# Read metrics from SQL queries +[[inputs.sql]] + ## Database Driver + ## See https://github.com/influxdata/telegraf/blob/master/docs/SQL_DRIVERS_INPUT.md for + ## a list of supported drivers. + driver = "mysql" + + ## Data source name for connecting + ## The syntax and supported options depend on the selected driver. + dsn = "username:password@mysqlserver:3307/dbname?param=value" + + ## Timeout for any operation + ## Note that the timeout for queries is per query, not per gather. + # timeout = "5s" + + ## Connection time limits + ## By default the maximum idle time and maximum lifetime of a connection are unlimited, i.e. the connections + ## will not be closed automatically. If you specify a positive time, the connections will be closed after + ## idling or existing for at least that amount of time, respectively. + # connection_max_idle_time = "0s" + # connection_max_life_time = "0s" + + ## Connection count limits + ## By default the number of open connections is not limited and the number of maximum idle connections + ## will be inferred from the number of queries specified.
If you specify a positive number for any of the + ## two options, connections will be closed when reaching the specified limit. The number of idle connections + ## will be clipped to the maximum number of connections limit if any. + # connection_max_open = 0 + # connection_max_idle = auto + + [[inputs.sql.query]] + ## Query to perform on the server + query="SELECT user,state,latency,score FROM Scoreboard WHERE application > 0" + ## As an alternative to specifying the query directly, you can select a file here containing the SQL query. + ## Only one of 'query' and 'query_script' can be specified! + # query_script = "/path/to/sql/script.sql" + + ## Name of the measurement + ## In case both measurement and 'measurement_column' are given, the latter takes precedence. + # measurement = "sql" + + ## Column name containing the name of the measurement + ## If given, this will take precedence over the 'measurement' setting. In case a query result + ## does not contain the specified column, we fall back to the 'measurement' setting. + # measurement_column = "" + + ## Column name containing the time of the measurement + ## If omitted, the time of the query will be used. + # time_column = "" + + ## Format of the time contained in 'time_column' + ## The time must be 'unix', 'unix_ms', 'unix_us', 'unix_ns', or a golang time format. + ## See https://golang.org/pkg/time/#Time.Format for details. + # time_format = "unix" + + ## Column names containing tags + ## An empty include list will reject all columns and an empty exclude list will not exclude any column. + ## I.e. by default no columns will be returned as tags and the tags are empty. + # tag_columns_include = [] + # tag_columns_exclude = [] + + ## Column names containing fields (explicit types) + ## Convert the given columns to the corresponding type. Explicit type conversions take precedence over + ## the automatic (driver-based) conversion below. + ## NOTE: Columns should not be specified for multiple types or the resulting type is undefined. + # field_columns_float = [] + # field_columns_int = [] + # field_columns_uint = [] + # field_columns_bool = [] + # field_columns_string = [] + + ## Column names containing fields (automatic types) + ## An empty include list is equivalent to '[*]' and all returned columns will be accepted. An empty + ## exclude list will not exclude any column. I.e. by default all columns will be returned as fields. + ## NOTE: We rely on the database driver to perform automatic datatype conversion. + # field_columns_include = [] + # field_columns_exclude = [] diff --git a/plugins/inputs/sql/sql.go b/plugins/inputs/sql/sql.go new file mode 100644 index 0000000000000..aa4a7e9f42085 --- /dev/null +++ b/plugins/inputs/sql/sql.go @@ -0,0 +1,464 @@ +//go:generate ../../../tools/readme_config_includer/generator +package sql + +import ( + "context" + dbsql "database/sql" + _ "embed" + "errors" + "fmt" + "os" + "sort" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf +var sampleConfig string + +const magicIdleCount int = (-int(^uint(0) >> 1)) + +type Query struct { + Query string `toml:"query"` + Script string `toml:"query_script"` + Measurement string `toml:"measurement"` + MeasurementColumn string `toml:"measurement_column"` + TimeColumn string `toml:"time_column"` + TimeFormat string `toml:"time_format"` + TagColumnsInclude []string `toml:"tag_columns_include"` + TagColumnsExclude []string `toml:"tag_columns_exclude"` + FieldColumnsInclude []string `toml:"field_columns_include"` + FieldColumnsExclude []string `toml:"field_columns_exclude"` + FieldColumnsFloat []string `toml:"field_columns_float"` + FieldColumnsInt []string `toml:"field_columns_int"` + FieldColumnsUint []string `toml:"field_columns_uint"` + FieldColumnsBool []string `toml:"field_columns_bool"` + FieldColumnsString []string `toml:"field_columns_string"` + + statement *dbsql.Stmt + tagFilter filter.Filter + fieldFilter filter.Filter + fieldFilterFloat filter.Filter + fieldFilterInt filter.Filter + fieldFilterUint filter.Filter + fieldFilterBool filter.Filter + fieldFilterString filter.Filter +} + +func (q *Query) parse(ctx context.Context, acc telegraf.Accumulator, rows *dbsql.Rows, t time.Time) (int, error) { + columnNames, err := rows.Columns() + if err != nil { + return 0, err + } + + // Prepare the list of datapoints according to the received row + columnData := make([]interface{}, len(columnNames)) + columnDataPtr := make([]interface{}, len(columnNames)) + + for i := range columnData { + columnDataPtr[i] = &columnData[i] + } + + rowCount := 0 + for rows.Next() { + measurement := q.Measurement + timestamp := t + tags := make(map[string]string) + fields := make(map[string]interface{}, len(columnNames)) + + // Do the parsing with (hopefully) automatic type conversion + if err := rows.Scan(columnDataPtr...); err != nil { + return 0, err + } + + for i, name := range columnNames { + if q.MeasurementColumn != "" && name == q.MeasurementColumn { + var ok bool + if measurement, ok = columnData[i].(string); !ok { + return 0, fmt.Errorf("measurement column type \"%T\" unsupported", columnData[i]) + } + } + + if q.TimeColumn != "" && name == q.TimeColumn { + var fieldvalue interface{} + var skipParsing bool + + switch v := columnData[i].(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: + fieldvalue = v + case []byte: + fieldvalue = string(v) + case time.Time: + timestamp = v + skipParsing = true + case fmt.Stringer: + fieldvalue = v.String() + default: + return 0, fmt.Errorf("time column %q of type \"%T\" unsupported", name, columnData[i]) + } + if !skipParsing { + if timestamp, err = internal.ParseTimestamp(q.TimeFormat, fieldvalue, ""); err != nil { + return 0, fmt.Errorf("parsing time failed: %v", err) + } + } + } + + if q.tagFilter.Match(name) { + tagvalue, err := internal.ToString(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting tag column %q failed: %v", name, err) + } + if v := strings.TrimSpace(tagvalue); v != "" { + tags[name] = v + } + } + + // Explicit type conversions take precedence + if q.fieldFilterFloat.Match(name) { + v, err := internal.ToFloat64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to float failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterInt.Match(name) { + v, err := internal.ToInt64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to int failed: %v", name, 
err) + } + fields[name] = v + continue + } + + if q.fieldFilterUint.Match(name) { + v, err := internal.ToUint64(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to uint failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterBool.Match(name) { + v, err := internal.ToBool(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to bool failed: %v", name, err) + } + fields[name] = v + continue + } + + if q.fieldFilterString.Match(name) { + v, err := internal.ToString(columnData[i]) + if err != nil { + return 0, fmt.Errorf("converting field column %q to string failed: %v", name, err) + } + fields[name] = v + continue + } + + // Try automatic conversion for all remaining fields + if q.fieldFilter.Match(name) { + var fieldvalue interface{} + switch v := columnData[i].(type) { + case string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool: + fieldvalue = v + case []byte: + fieldvalue = string(v) + case time.Time: + fieldvalue = v.UnixNano() + case nil: + fieldvalue = nil + case fmt.Stringer: + fieldvalue = v.String() + default: + return 0, fmt.Errorf("field column %q of type \"%T\" unsupported", name, columnData[i]) + } + if fieldvalue != nil { + fields[name] = fieldvalue + } + } + } + acc.AddFields(measurement, fields, tags, timestamp) + rowCount++ + } + + if err := rows.Err(); err != nil { + return rowCount, err + } + + return rowCount, nil +} + +type SQL struct { + Driver string `toml:"driver"` + Dsn string `toml:"dsn"` + Timeout config.Duration `toml:"timeout"` + MaxIdleTime config.Duration `toml:"connection_max_idle_time"` + MaxLifetime config.Duration `toml:"connection_max_life_time"` + MaxOpenConnections int `toml:"connection_max_open"` + MaxIdleConnections int `toml:"connection_max_idle"` + Queries []Query `toml:"query"` + Log telegraf.Logger `toml:"-"` + + driverName string + db *dbsql.DB +} + +func (*SQL) SampleConfig() string { + return sampleConfig +} + +func (s *SQL) Init() error { + // Option handling + if s.Driver == "" { + return errors.New("missing SQL driver option") + } + + if s.Dsn == "" { + return errors.New("missing data source name (DSN) option") + } + + if s.Timeout <= 0 { + s.Timeout = config.Duration(5 * time.Second) + } + + if s.MaxIdleConnections == magicIdleCount { + // Determine the number by the number of queries + the golang default value + s.MaxIdleConnections = len(s.Queries) + 2 + } + + for i, q := range s.Queries { + if q.Query == "" && q.Script == "" { + return errors.New("neither 'query' nor 'query_script' specified") + } + + if q.Query != "" && q.Script != "" { + return errors.New("only one of 'query' and 'query_script' can be specified") + } + + // In case we got a script, we should read the query now. 
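+		// The file content replaces the 'query' setting, so the statement
+		// preparation in Start() can treat both cases identically.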
+ if q.Script != "" { + query, err := os.ReadFile(q.Script) + if err != nil { + return fmt.Errorf("reading script %q failed: %v", q.Script, err) + } + s.Queries[i].Query = string(query) + } + + // Time format + if q.TimeFormat == "" { + s.Queries[i].TimeFormat = "unix" + } + + // Compile the tag-filter + tagfilter, err := filter.NewIncludeExcludeFilterDefaults(q.TagColumnsInclude, q.TagColumnsExclude, false, false) + if err != nil { + return fmt.Errorf("creating tag filter failed: %v", err) + } + s.Queries[i].tagFilter = tagfilter + + // Compile the explicit type field-filter + fieldfilterFloat, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsFloat, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for float failed: %v", err) + } + s.Queries[i].fieldFilterFloat = fieldfilterFloat + + fieldfilterInt, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsInt, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for int failed: %v", err) + } + s.Queries[i].fieldFilterInt = fieldfilterInt + + fieldfilterUint, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsUint, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for uint failed: %v", err) + } + s.Queries[i].fieldFilterUint = fieldfilterUint + + fieldfilterBool, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsBool, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for bool failed: %v", err) + } + s.Queries[i].fieldFilterBool = fieldfilterBool + + fieldfilterString, err := filter.NewIncludeExcludeFilterDefaults(q.FieldColumnsString, nil, false, false) + if err != nil { + return fmt.Errorf("creating field filter for string failed: %v", err) + } + s.Queries[i].fieldFilterString = fieldfilterString + + // Compile the field-filter + fieldfilter, err := filter.NewIncludeExcludeFilter(q.FieldColumnsInclude, q.FieldColumnsExclude) + if err != nil { + return fmt.Errorf("creating field filter failed: %v", err) + } + s.Queries[i].fieldFilter = fieldfilter + + if q.Measurement == "" { + s.Queries[i].Measurement = "sql" + } + } + + // Derive the sql-framework driver name from our config name. This abstracts the actual driver + // from the database-type the user wants. 
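+	// For example, both "postgres" and "cockroach" resolve to the registered
+	// "pgx" driver, while "maria" and "tidb" resolve to "mysql".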
+ aliases := map[string]string{ + "cockroach": "pgx", + "tidb": "mysql", + "mssql": "sqlserver", + "maria": "mysql", + "postgres": "pgx", + } + s.driverName = s.Driver + if driver, ok := aliases[s.Driver]; ok { + s.driverName = driver + } + + availDrivers := dbsql.Drivers() + if !choice.Contains(s.driverName, availDrivers) { + for d, r := range aliases { + if choice.Contains(r, availDrivers) { + availDrivers = append(availDrivers, d) + } + } + + // Sort the list of drivers and make them unique + sort.Strings(availDrivers) + last := 0 + for _, d := range availDrivers { + if d != availDrivers[last] { + last++ + availDrivers[last] = d + } + } + availDrivers = availDrivers[:last+1] + + return fmt.Errorf("driver %q not supported use one of %v", s.Driver, availDrivers) + } + + return nil +} + +func (s *SQL) Start(_ telegraf.Accumulator) error { + var err error + + // Connect to the database server + s.Log.Debugf("Connecting to %q...", s.Dsn) + s.db, err = dbsql.Open(s.driverName, s.Dsn) + if err != nil { + return err + } + + // Set the connection limits + // s.db.SetConnMaxIdleTime(time.Duration(s.MaxIdleTime)) // Requires go >= 1.15 + s.db.SetConnMaxLifetime(time.Duration(s.MaxLifetime)) + s.db.SetMaxOpenConns(s.MaxOpenConnections) + s.db.SetMaxIdleConns(s.MaxIdleConnections) + + // Test if the connection can be established + s.Log.Debugf("Testing connectivity...") + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + err = s.db.PingContext(ctx) + cancel() + if err != nil { + return fmt.Errorf("connecting to database failed: %v", err) + } + + // Prepare the statements + for i, q := range s.Queries { + s.Log.Debugf("Preparing statement %q...", q.Query) + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + stmt, err := s.db.PrepareContext(ctx, q.Query) //nolint:sqlclosecheck // Closed in Stop() + cancel() + if err != nil { + return fmt.Errorf("preparing query %q failed: %v", q.Query, err) + } + s.Queries[i].statement = stmt + } + + return nil +} + +func (s *SQL) Stop() { + // Free the statements + for _, q := range s.Queries { + if q.statement != nil { + if err := q.statement.Close(); err != nil { + s.Log.Errorf("closing statement for query %q failed: %v", q.Query, err) + } + } + } + + // Close the connection to the server + if s.db != nil { + if err := s.db.Close(); err != nil { + s.Log.Errorf("closing database connection failed: %v", err) + } + } +} + +func (s *SQL) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + tstart := time.Now() + for _, query := range s.Queries { + wg.Add(1) + go func(q Query) { + defer wg.Done() + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(s.Timeout)) + defer cancel() + if err := s.executeQuery(ctx, acc, q, tstart); err != nil { + acc.AddError(err) + } + }(query) + } + wg.Wait() + s.Log.Debugf("Executed %d queries in %s", len(s.Queries), time.Since(tstart).String()) + + return nil +} + +func init() { + inputs.Add("sql", func() telegraf.Input { + return &SQL{ + MaxIdleTime: config.Duration(0), // unlimited + MaxLifetime: config.Duration(0), // unlimited + MaxOpenConnections: 0, // unlimited + MaxIdleConnections: magicIdleCount, // will trigger auto calculation + } + }) +} + +func (s *SQL) executeQuery(ctx context.Context, acc telegraf.Accumulator, q Query, tquery time.Time) error { + if q.statement == nil { + return fmt.Errorf("statement is nil for query %q", q.Query) + } + + // Execute the query + rows, err := q.statement.QueryContext(ctx) + if err != nil { + 
return err + } + defer rows.Close() + + // Handle the rows + columnNames, err := rows.Columns() + if err != nil { + return err + } + rowCount, err := q.parse(ctx, acc, rows, tquery) + s.Log.Debugf("Received %d rows and %d columns for query %q", rowCount, len(columnNames), q.Query) + + return err +} diff --git a/plugins/inputs/sql/sql_test.go b/plugins/inputs/sql/sql_test.go new file mode 100644 index 0000000000000..9ad7dcf7e3be2 --- /dev/null +++ b/plugins/inputs/sql/sql_test.go @@ -0,0 +1,346 @@ +package sql + +import ( + "fmt" + "testing" + "time" + + "math/rand" + "path/filepath" + + "github.com/docker/go-connections/nat" + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +func pwgen(n int) string { + charset := []byte("abcdedfghijklmnopqrstABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") + + nchars := len(charset) + buffer := make([]byte, n) + + for i := range buffer { + buffer[i] = charset[rand.Intn(nchars)] + } + + return string(buffer) +} + +func TestMariaDBIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + logger := testutil.Logger{} + + port := "3306" + passwd := pwgen(32) + database := "foo" + + // Determine the test-data mountpoint + testdata, err := filepath.Abs("testdata/mariadb") + require.NoError(t, err, "determining absolute path of test-data failed") + + container := testutil.Container{ + Image: "mariadb", + ExposedPorts: []string{port}, + Env: map[string]string{ + "MYSQL_ROOT_PASSWORD": passwd, + "MYSQL_DATABASE": database, + }, + BindMounts: map[string]string{ + "/docker-entrypoint-initdb.d": testdata, + }, + WaitingFor: wait.ForAll( + wait.ForLog("Buffer pool(s) load completed at"), + wait.ForListeningPort(nat.Port(port)), + ), + } + err = container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + // Define the testset + var testset = []struct { + name string + queries []Query + expected []telegraf.Metric + }{ + { + name: "metric_one", + queries: []Query{ + { + Query: "SELECT * FROM metric_one", + TagColumnsInclude: []string{"tag_*"}, + FieldColumnsExclude: []string{"tag_*", "timestamp"}, + TimeColumn: "timestamp", + TimeFormat: "2006-01-02 15:04:05", + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "sql", + map[string]string{ + "tag_one": "tag1", + "tag_two": "tag2", + }, + map[string]interface{}{ + "int64_one": int64(1234), + "int64_two": int64(2345), + }, + time.Date(2021, 5, 17, 22, 4, 45, 0, time.UTC), + ), + }, + }, + } + + for _, tt := range testset { + t.Run(tt.name, func(t *testing.T) { + // Setup the plugin-under-test + plugin := &SQL{ + Driver: "maria", + Dsn: fmt.Sprintf("root:%s@tcp(%s:%s)/%s", + passwd, + container.Address, + container.Ports[port], + database, + ), + Queries: tt.queries, + Log: logger, + } + + var acc testutil.Accumulator + + // Startup the plugin + err := plugin.Init() + require.NoError(t, err) + err = plugin.Start(&acc) + require.NoError(t, err) + + // Gather + err = plugin.Gather(&acc) + require.NoError(t, err) + require.Len(t, acc.Errors, 0) + + // Stopping the plugin + plugin.Stop() + + // Do the comparison + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} + +func TestPostgreSQLIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + logger 
:= testutil.Logger{} + + port := "5432" + passwd := pwgen(32) + database := "foo" + + // Determine the test-data mountpoint + testdata, err := filepath.Abs("testdata/postgres") + require.NoError(t, err, "determining absolute path of test-data failed") + + container := testutil.Container{ + Image: "postgres", + ExposedPorts: []string{port}, + Env: map[string]string{ + "POSTGRES_PASSWORD": passwd, + "POSTGRES_DB": database, + }, + BindMounts: map[string]string{ + "/docker-entrypoint-initdb.d": testdata, + }, + WaitingFor: wait.ForAll( + wait.ForLog("database system is ready to accept connections"), + wait.ForListeningPort(nat.Port(port)), + ), + } + err = container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + // Define the testset + var testset = []struct { + name string + queries []Query + expected []telegraf.Metric + }{ + { + name: "metric_one", + queries: []Query{ + { + Query: "SELECT * FROM metric_one", + TagColumnsInclude: []string{"tag_*"}, + FieldColumnsExclude: []string{"tag_*", "timestamp"}, + TimeColumn: "timestamp", + TimeFormat: "2006-01-02 15:04:05", + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "sql", + map[string]string{ + "tag_one": "tag1", + "tag_two": "tag2", + }, + map[string]interface{}{ + "int64_one": int64(1234), + "int64_two": int64(2345), + }, + time.Date(2021, 5, 17, 22, 4, 45, 0, time.UTC), + ), + }, + }, + } + + for _, tt := range testset { + t.Run(tt.name, func(t *testing.T) { + // Setup the plugin-under-test + plugin := &SQL{ + Driver: "pgx", + Dsn: fmt.Sprintf("postgres://postgres:%v@%v:%v/%v", + passwd, + container.Address, + container.Ports[port], + database, + ), + Queries: tt.queries, + Log: logger, + } + + var acc testutil.Accumulator + + // Startup the plugin + err := plugin.Init() + require.NoError(t, err) + err = plugin.Start(&acc) + require.NoError(t, err) + + // Gather + err = plugin.Gather(&acc) + require.NoError(t, err) + require.Len(t, acc.Errors, 0) + + // Stopping the plugin + plugin.Stop() + + // Do the comparison + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} + +func TestClickHouseIntegration(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + logger := testutil.Logger{} + + port := "9000" + user := "default" + + // Determine the test-data mountpoint + testdata, err := filepath.Abs("testdata/clickhouse") + require.NoError(t, err, "determining absolute path of test-data failed") + + container := testutil.Container{ + Image: "yandex/clickhouse-server", + ExposedPorts: []string{port, "8123"}, + BindMounts: map[string]string{ + "/docker-entrypoint-initdb.d": testdata, + }, + WaitingFor: wait.ForAll( + wait.NewHTTPStrategy("/").WithPort(nat.Port("8123")), + wait.ForListeningPort(nat.Port(port)), + wait.ForLog("Saved preprocessed configuration to '/var/lib/clickhouse/preprocessed_configs/users.xml'"), + ), + } + err = container.Start() + require.NoError(t, err, "failed to start container") + defer func() { + require.NoError(t, container.Terminate(), "terminating container failed") + }() + + // Define the testset + var testset = []struct { + name string + queries []Query + expected []telegraf.Metric + }{ + { + name: "metric_one", + queries: []Query{ + { + Query: "SELECT * FROM default.metric_one", + TagColumnsInclude: []string{"tag_*"}, + FieldColumnsExclude: []string{"tag_*", "timestamp"}, + TimeColumn: "timestamp", + TimeFormat: 
"unix", + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "sql", + map[string]string{ + "tag_one": "tag1", + "tag_two": "tag2", + }, + map[string]interface{}{ + "int64_one": int64(1234), + "int64_two": int64(2345), + }, + time.Unix(1621289085, 0), + ), + }, + }, + } + + for _, tt := range testset { + t.Run(tt.name, func(t *testing.T) { + // Setup the plugin-under-test + plugin := &SQL{ + Driver: "clickhouse", + Dsn: fmt.Sprintf("tcp://%v:%v?username=%v", + container.Address, + container.Ports[port], + user, + ), + Queries: tt.queries, + Log: logger, + } + + var acc testutil.Accumulator + + // Startup the plugin + err := plugin.Init() + require.NoError(t, err) + err = plugin.Start(&acc) + require.NoError(t, err) + + // Gather + err = plugin.Gather(&acc) + require.NoError(t, err) + require.Len(t, acc.Errors, 0) + + // Stopping the plugin + plugin.Stop() + + // Do the comparison + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/sql/testdata/clickhouse/expected.sql b/plugins/inputs/sql/testdata/clickhouse/expected.sql new file mode 100644 index 0000000000000..f9ed63f963db6 --- /dev/null +++ b/plugins/inputs/sql/testdata/clickhouse/expected.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS default.metric_one ( + tag_one String, + tag_two String, + int64_one Int64, + int64_two Int64, + timestamp Int64 +) ENGINE MergeTree() ORDER BY timestamp; + +INSERT INTO default.metric_one ( + tag_one, + tag_two, + int64_one, + int64_two, + timestamp +) VALUES ('tag1', 'tag2', 1234, 2345, 1621289085); diff --git a/plugins/inputs/sql/testdata/mariadb/expected.sql b/plugins/inputs/sql/testdata/mariadb/expected.sql new file mode 100644 index 0000000000000..49a3095db4da2 --- /dev/null +++ b/plugins/inputs/sql/testdata/mariadb/expected.sql @@ -0,0 +1,36 @@ +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `bar` ( + `baz` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `bar` VALUES (1); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric three` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag four` text DEFAULT NULL, + `string two` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric three` VALUES ('2021-05-17 22:04:45','tag4','string2'); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_one` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_one` text DEFAULT NULL, + `tag_two` text DEFAULT NULL, + `int64_one` int(11) DEFAULT NULL, + `int64_two` int(11) DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_one` VALUES ('2021-05-17 22:04:45','tag1','tag2',1234,2345); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `metric_two` ( + `timestamp` timestamp NOT NULL DEFAULT current_timestamp(), + `tag_three` text DEFAULT NULL, + `string_one` text DEFAULT NULL +); +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `metric_two` VALUES ('2021-05-17 22:04:45','tag3','string1'); diff --git a/plugins/inputs/sql/testdata/postgres/expected.sql b/plugins/inputs/sql/testdata/postgres/expected.sql new file mode 100644 index 0000000000000..8bc2b2fc83018 --- /dev/null 
+++ b/plugins/inputs/sql/testdata/postgres/expected.sql @@ -0,0 +1,41 @@ +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET xmloption = content; +SET client_min_messages = warning; +SET row_security = off; +SET default_tablespace = ''; +SET default_table_access_method = heap; +CREATE TABLE public."metric three" ( + "timestamp" timestamp without time zone, + "tag four" text, + "string two" text +); +ALTER TABLE public."metric three" OWNER TO postgres; +CREATE TABLE public.metric_one ( + "timestamp" timestamp without time zone, + tag_one text, + tag_two text, + int64_one integer, + int64_two integer +); +ALTER TABLE public.metric_one OWNER TO postgres; +CREATE TABLE public.metric_two ( + "timestamp" timestamp without time zone, + tag_three text, + string_one text +); +ALTER TABLE public.metric_two OWNER TO postgres; +COPY public."metric three" ("timestamp", "tag four", "string two") FROM stdin; +2021-05-17 22:04:45 tag4 string2 +\. +COPY public.metric_one ("timestamp", tag_one, tag_two, int64_one, int64_two) FROM stdin; +2021-05-17 22:04:45 tag1 tag2 1234 2345 +\. +COPY public.metric_two ("timestamp", tag_three, string_one) FROM stdin; +2021-05-17 22:04:45 tag3 string1 +\. diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 27c6da1cd7571..9cea80a5aee2c 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -1,18 +1,23 @@ # SQL Server Input Plugin -The `sqlserver` plugin provides metrics for your SQL Server instance. It -currently works with SQL Server 2008 SP3 and newer. Recorded metrics are -lightweight and use Dynamic Management Views supplied by SQL Server. -### The SQL Server plugin supports the following editions/versions of SQL Server +The `sqlserver` plugin provides metrics for your SQL Server instance. +Recorded metrics are lightweight and use Dynamic Management Views +supplied by SQL Server. + +## The SQL Server plugin supports the following editions/versions of SQL Server + - SQL Server - - 2008 SP3 (with CU3) - - SQL Server 2008 R2 SP3 and newer versions + - 2012 or newer (Plugin support aligned with the [official Microsoft SQL Server support](https://docs.microsoft.com/en-us/sql/sql-server/end-of-support/sql-server-end-of-life-overview?view=sql-server-ver15#lifecycle-dates)) + - End-of-life SQL Server versions are not guaranteed to be supported by Telegraf. Any issues with the SQL Server plugin for these EOL versions will need to be addressed by the community. - Azure SQL Database (Single) - Azure SQL Managed Instance +- Azure SQL Elastic Pool + +## Additional Setup -### Additional Setup: +You have to create a login on every SQL Server instance or Azure SQL +Managed instance you want to monitor, with following script: -You have to create a login on every SQL Server instance or Azure SQL Managed instance you want to monitor, with following script: ```sql USE master; GO @@ -24,7 +29,9 @@ GRANT VIEW ANY DEFINITION TO [telegraf]; GO ``` -For Azure SQL Database, you require the View Database State permission and can create a user with a password directly in the database. +For Azure SQL Database, you require the View Database State permission +and can create a user with a password directly in the database. 
+ ```sql CREATE USER [telegraf] WITH PASSWORD = N'mystrongpassword'; GO @@ -32,13 +39,70 @@ GRANT VIEW DATABASE STATE TO [telegraf]; GO ``` -### Configuration: +For Azure SQL Elastic Pool, please follow the following instructions +to collect metrics. + +On master logical database, create an SQL login 'telegraf' and assign +it to the server-level role ##MS_ServerStateReader##. + +```sql +CREATE LOGIN [telegraf] WITH PASSWORD = N'mystrongpassword'; +GO +ALTER SERVER ROLE ##MS_ServerStateReader## + ADD MEMBER [telegraf]; +GO +``` + +Elastic pool metrics can be collected from any database in the pool if a user +for the `telegraf` login is created in that database. For collection to work, +this database must remain in the pool, and must not be renamed. If you plan +to add/remove databases from this pool, create a separate database for +monitoring purposes that will remain in the pool. + +> Note: To avoid duplicate monitoring data, do not collect elastic pool metrics +from more than one database in the same pool. + +```sql +GO +CREATE USER [telegraf] FOR LOGIN telegraf; +``` + +For Service SID authentication to SQL Server (Windows service installations +only). + +- [More information about using service SIDs to grant permissions in SQL Server](https://docs.microsoft.com/en-us/sql/relational-databases/security/using-service-sids-to-grant-permissions-to-services-in-sql-server) + +In an administrative command prompt configure the telegraf service for use +with a service SID + +```Batchfile +sc.exe sidtype "telegraf" unrestricted +``` + +To create the login for the telegraf service run the following script: + +```sql +USE master; +GO +CREATE LOGIN [NT SERVICE\telegraf] FROM WINDOWS; +GO +GRANT VIEW SERVER STATE TO [NT SERVICE\telegraf]; +GO +GRANT VIEW ANY DEFINITION TO [NT SERVICE\telegraf]; +GO +``` + +Remove User Id and Password keywords from the connection string in your +config file to use windows authentication. ```toml -[agent] - ## Default data collection interval for all inputs, can be changed as per collection interval needs - interval = "10s" +[[inputs.sqlserver]] + servers = ["Server=192.168.1.10;Port=1433;app name=telegraf;log=1;",] +``` +## Configuration + +```toml @sample.conf # Read metrics from Microsoft SQL Server [[inputs.sqlserver]] ## Specify instances to monitor with a list of connection strings. @@ -48,33 +112,68 @@ GO ## See https://github.com/denisenkom/go-mssqldb for detailed connection ## parameters, in particular, tls connections can be created like so: ## "encrypt=true;certificate=;hostNameInCertificate=" - # servers = [ - # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", - # ] - - ## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 - ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. - ## Possible values for database_type are - ## "AzureSQLDB" - ## "SQLServer" - ## "AzureSQLManagedInstance" - # database_type = "AzureSQLDB" + servers = [ + "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", + ] + + ## Authentication method + ## valid methods: "connection_string", "AAD" + # auth_method = "connection_string" + + ## "database_type" enables a specific set of queries depending on the database type. 
If specified, it replaces azuredb = true/false and query_version = 2
+  ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type.
+  ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool"
+
+  database_type = "SQLServer"
+
+  ## A list of queries to include. If not specified, all the below listed queries are used.
+  include_query = []
+
+  ## A list of queries to explicitly ignore.
+  exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"]
+
+  ## Queries enabled by default for database_type = "SQLServer" are -
+  ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks,
+  ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates,
+  ## SQLServerRecentBackups
+
+  ## Queries enabled by default for database_type = "AzureSQLDB" are -
+  ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties,
+  ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers
+
+  ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are -
+  ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats,
+  ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers
+
+  ## Queries enabled by default for database_type = "AzureSQLPool" are -
+  ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats,
+  ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers
+
+  ## The following are old config settings.
+  ## You may use them only if you are using the earlier flavor of queries; however, it is recommended to use
+  ## the new mechanism of identifying the database_type and thereby use its corresponding queries.
   ## Optional parameter, setting this to 2 will use a new version
-  ## of the collection queries that break compatibility with the original dashboards.
-  ## Version 2 - is compatible from SQL Server 2008 Sp3 and later versions and also for SQL Azure DB
-  ## Version 2 is in the process of being deprecated, please consider using database_type.
+  ## of the collection queries that break compatibility with the original
+  ## dashboards.
+  ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB
   # query_version = 2
 
   ## If you are using AzureDB, setting this to true will gather resource utilization metrics
   # azuredb = false
 
+  ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health".
+  ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers".
+  ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues.
+  ## This setting/metric is optional and is disabled by default.
+ # health_metric = false + ## Possible queries accross different versions of the collectors ## Queries enabled by default for specific Database Type - + ## database_type = AzureSQLDB by default collects the following queries ## - AzureSQLDBWaitStats - ## - AzureSQLDBResourceStats + ## - AzureSQLDBResourceStats ## - AzureSQLDBResourceGovernance ## - AzureSQLDBDatabaseIO ## - AzureSQLDBServerProperties @@ -84,27 +183,40 @@ GO ## - AzureSQLDBRequests ## - AzureSQLDBSchedulers - ## database_type = AzureSQLManagedInstance by default collects the following queries - ## - AzureSQLMIResourceStats - ## - AzureSQLMIResourceGovernance - ## - AzureSQLMIDatabaseIO - ## - AzureSQLMIServerProperties - ## - AzureSQLMIOsWaitstats - ## - AzureSQLMIMemoryClerks - ## - AzureSQLMIPerformanceCounters - ## - AzureSQLMIRequests - ## - AzureSQLMISchedulers - - ## database_type = SQLServer by default collects the following queries - ## - SQLServerPerformanceCounters - ## - SQLServerWaitStatsCategorized - ## - SQLServerDatabaseIO - ## - SQLServerProperties - ## - SQLServerMemoryClerks - ## - SQLServerSchedulers - ## - SQLServerRequests - ## - SQLServerVolumeSpace - ## - SQLServerCpu + ## database_type = AzureSQLManagedInstance by default collects the following queries + ## - AzureSQLMIResourceStats + ## - AzureSQLMIResourceGovernance + ## - AzureSQLMIDatabaseIO + ## - AzureSQLMIServerProperties + ## - AzureSQLMIOsWaitstats + ## - AzureSQLMIMemoryClerks + ## - AzureSQLMIPerformanceCounters + ## - AzureSQLMIRequests + ## - AzureSQLMISchedulers + + ## database_type = AzureSQLPool by default collects the following queries + ## - AzureSQLPoolResourceStats + ## - AzureSQLPoolResourceGovernance + ## - AzureSQLPoolDatabaseIO + ## - AzureSQLPoolOsWaitStats, + ## - AzureSQLPoolMemoryClerks + ## - AzureSQLPoolPerformanceCounters + ## - AzureSQLPoolSchedulers + + ## database_type = SQLServer by default collects the following queries + ## - SQLServerPerformanceCounters + ## - SQLServerWaitStatsCategorized + ## - SQLServerDatabaseIO + ## - SQLServerProperties + ## - SQLServerMemoryClerks + ## - SQLServerSchedulers + ## - SQLServerRequests + ## - SQLServerVolumeSpace + ## - SQLServerCpu + ## - SQLServerRecentBackups + ## and following as optional (if mentioned in the include_query list) + ## - SQLServerAvailabilityReplicaStates + ## - SQLServerDatabaseReplicaStates ## Version 2 by default collects the following queries ## Version 2 is being deprecated, please consider using database_type. @@ -130,26 +242,63 @@ GO ## - MemoryClerk ## - VolumeSpace ## - PerformanceMetrics +``` +## Support for Azure Active Directory (AAD) authentication using [Managed Identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) +- Azure SQL Database supports 2 main methods of authentication: [SQL authentication and AAD authentication](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication). +- The recommended practice is to [use AAD authentication when possible](https://docs.microsoft.com/en-us/azure/azure-sql/database/authentication-aad-overview). - ## A list of queries to include. If not specified, all the above listed queries are used. - # include_query = [] +AAD is a more modern authentication protocol, allows for easier +credential/role management, and can eliminate the need to include passwords +in a connection string. - ## A list of queries to explicitly ignore. 
- exclude_query = [ 'Schedulers' , 'SqlRequests' ]
+To enable support for AAD authentication, we leverage the existing AAD
+authentication support.
+
+- Please see [SQL Server driver for Go](https://github.com/denisenkom/go-mssqldb#azure-active-directory-authentication---preview)
+
+### How to use AAD Auth with MSI
+- Please note AAD based auth is currently only supported for Azure SQL Database and Azure SQL Managed Instance (but not for SQL Server), as described [here](https://docs.microsoft.com/en-us/azure/azure-sql/database/security-overview#authentication).
+- Configure "system-assigned managed identity" for Azure resources on the Monitoring VM (the VM that'd connect to the SQL server/database) [using the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).
+- On the database being monitored, create/update a USER with the name of the Monitoring VM as the principal using the script below. This might require allow-listing the client machine's IP address (from where the below SQL script is being run) on the SQL Server resource.

+```sql
+EXECUTE ('IF EXISTS(SELECT * FROM sys.database_principals WHERE name = ''<Monitoring_VM_name>'')
+    BEGIN
+        DROP USER [<Monitoring_VM_name>]
+    END')
+EXECUTE ('CREATE USER [<Monitoring_VM_name>] FROM EXTERNAL PROVIDER')
+EXECUTE ('GRANT VIEW DATABASE STATE TO [<Monitoring_VM_name>]')
+```
+
+- On the SQL Server resource of the database(s) being monitored, go to "Firewalls and Virtual Networks" tab and allowlist the monitoring VM IP address.
+- On the Monitoring VM, update the telegraf config file with the database connection string in the following format. The connection string only provides the server and database name, but no password (since the VM's system-assigned managed identity would be used for authentication). The auth method must be set to "AAD".
+
+```toml
+  servers = [
+    "Server=<Azure_SQL_server_name>.database.windows.net;Port=1433;Database=<Azure_SQL_database_name>;app name=telegraf;log=1;",
+  ]
+  auth_method = "AAD"
 ```
 
-### Metrics:
-To provide backwards compatibility, this plugin support two versions of metrics queries.
+## Metrics
 
-**Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software.
+To provide backwards compatibility, this plugin supports two versions of
+metrics queries.
+
+**Note**: Version 2 queries are not backwards compatible with the old queries.
+Any dashboards or queries based on the old query format will not work with
+the new format. The version 2 queries only report raw metrics, no math has
+been done to calculate deltas. To graph this data you must calculate deltas
+in your dashboarding software.
+
+### Version 1 (query_version=1): This is deprecated in 1.6, all future development will be under configuration option database_type
 
-#### Version 1 (query_version=1): This is Deprecated in 1.6, all future development will be under configuration option database_type.
The original metrics queries provide: + - *Performance counters*: 1000+ metrics from `sys.dm_os_performance_counters` - *Performance metrics*: special performance and ratio metrics - *Wait stats*: wait tasks categorized from `sys.dm_os_wait_stats` @@ -162,12 +311,15 @@ The original metrics queries provide: - *CPU*: cpu usage from `sys.dm_os_ring_buffers` If you are using the original queries all stats have the following tags: + - `servername`: hostname:instance - `type`: type of stats to easily filter measurements -#### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type. +### Version 2 (query_version=2): Being deprecated, All future development will be under configuration option database_type + The new (version 2) metrics provide: -- *Database IO*: IO stats from `sys.dm_io_virtual_file_stats` + +- *Database IO*: IO stats from `sys.dm_io_virtual_file_stats`. - *Memory Clerk*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. - *Performance Counters*: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: - *Activity*: Transactions/sec/database, Batch requests/sec, blocked processes, + more @@ -176,7 +328,7 @@ The new (version 2) metrics provide: - *Memory*: PLE, Page reads/sec, Page writes/sec, + more - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more -- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc. +- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, SQL Server SPID, and SQL Server version. In the case of Azure SQL relevant properties such as Tier, #Vcores, Memory etc. - *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. - *Schedulers* - This captures `sys.dm_os_schedulers`. - *SqlRequests* - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and @@ -196,101 +348,163 @@ The new (version 2) metrics provide: - Stats from `sys.dm_db_wait_stats` - Resource governance stats from `sys.dm_user_db_resource_governance` - Stats from `sys.dm_db_resource_stats` - +### database_type = "AzureSQLDB" + +These are metrics for Azure SQL Database (single database) and are very +similar to version 2 but split out for maintenance reasons, better ability +to test,differences in DMVs: -#### database_type = "AzureSQLDB -These are metrics for Azure SQL Database (single database) and are very similar to version 2 but split out for maintenance reasons, better ability to test,differences in DMVs: -- AzureSQLDBDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale. -- AzureSQLDBMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`. 
-= AzureSQLDBResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_user_db_resource_governance`
-- AzureSQLDBPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale.
-- AzureSQLDBServerProperties: Relevant Azure SQL relevent properties from such as Tier, #Vcores, Memory etc, storage, etc.
-- AzureSQLDBWaitstats: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of the a statement. and for a specific database only.
+- *AzureSQLDBDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale.
+- *AzureSQLDBMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`.
+- *AzureSQLDBResourceGovernance*: Relevant properties indicating resource limits from `sys.dm_user_db_resource_governance`
+- *AzureSQLDBPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale.
+- *AzureSQLDBServerProperties*: Relevant Azure SQL properties such as Tier, #Vcores, Memory, storage, etc.
+- *AzureSQLDBWaitstats*: Wait time in ms from `sys.dm_db_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected only as of the end of a statement, and for a specific database only.
 - *AzureSQLOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide
 - *AzureSQLDBRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests`
 - *AzureSQLDBSchedulers* - This captures `sys.dm_os_schedulers` snapshots.
+### database_type = "AzureSQLManagedInstance"
+
+These are metrics for Azure SQL Managed Instance. They are very similar to
+version 2 but split out for maintenance reasons, better ability to test, and
+differences in DMVs:
+
+- *AzureSQLMIDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale.
+- *AzureSQLMIMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`.
+- *AzureSQLMIResourceGovernance*: Relevant properties indicating resource limits from `sys.dm_instance_resource_governance`
+- *AzureSQLMIPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale.
+- *AzureSQLMIServerProperties*: Relevant Azure SQL properties such as Tier, #Vcores, Memory, storage, etc.
+- *AzureSQLMIOsWaitstats*: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide
+- *AzureSQLMIRequests*: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests`
+- *AzureSQLMISchedulers*: This captures `sys.dm_os_schedulers` snapshots.
+
+### database_type = "AzureSQLPool"
-#### database_type = "AzureSQLManagedInstance
-These are metrics for Azure SQL Managed instance, are very similar to version 2 but split out for maintenance reasons, better ability to test, differences in DMVs:
-- AzureSQLMIDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats` including resource governance time, RBPEX, IO for Hyperscale.
-- AzureSQLMIMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`.
-- AzureSQLMIResourceGovernance: Relevant properties indicatign resource limits from `sys.dm_instance_resource_governance`
-- AzureSQLMIPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters` including cloud specific counters for SQL Hyperscale.
-- AzureSQLMIServerProperties: Relevant Azure SQL relevent properties such as Tier, #Vcores, Memory etc, storage, etc.
-- AzureSQLMIOsWaitstats: Wait time in ms from `sys.dm_os_wait_stats`, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. These waits are collected as they occur and instance wide
-- AzureSQLMIRequests: Requests which are blocked or have a wait type from `sys.dm_exec_sessions` and `sys.dm_exec_requests`
-- AzureSQLMISchedulers - This captures `sys.dm_os_schedulers` snapshots.
-
-#### database_type = "SQLServer
-- SQLServerDatabaseIO: IO stats from `sys.dm_io_virtual_file_stats`
-- SQLServerMemoryClerks: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name.
-- SQLServerPerformanceCounters: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included:
+These are metrics for Azure SQL to monitor resource usage at Elastic Pool
+level. These metrics require additional permissions to be collected; please
+make sure to check the Additional Setup section in this documentation.
+
+- *AzureSQLPoolResourceStats*: Returns resource usage statistics for the current elastic pool in a SQL Database server. Queried from `sys.dm_resource_governor_resource_pools_history_ex`.
+- *AzureSQLPoolResourceGovernance*: Returns actual configuration and capacity settings used by resource governance mechanisms in the current elastic pool. Queried from `sys.dm_user_db_resource_governance`.
+- *AzureSQLPoolDatabaseIO*: Returns I/O statistics for data and log files for each database in the pool. Queried from `sys.dm_io_virtual_file_stats`.
+- *AzureSQLPoolOsWaitStats*: Returns information about all the waits encountered by threads that executed. Queried from `sys.dm_os_wait_stats`.
+- *AzureSQLPoolMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`.
+- *AzureSQLPoolPerformanceCounters*: A selected list of performance counters from `sys.dm_os_performance_counters`. Note: Performance counters where the cntr_type column value is 537003264 are already returned with a percentage format between 0 and 100. For other counters, please check [sys.dm_os_performance_counters](https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql?view=azuresqldb-current) documentation.
+- *AzureSQLPoolSchedulers*: This captures `sys.dm_os_schedulers` snapshots. + +### database_type = "SQLServer" + +- *SQLServerDatabaseIO*: IO stats from `sys.dm_io_virtual_file_stats` +- *SQLServerMemoryClerks*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. +- *SQLServerPerformanceCounters*: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: - *Activity*: Transactions/sec/database, Batch requests/sec, blocked processes, + more - *Availability Groups*: Bytes sent to replica, Bytes received from replica, Log bytes received, Log send queue, transaction delay, + more - *Log activity*: Log bytes flushed/sec, Log flushes/sec, Log Flush Wait Time - *Memory*: PLE, Page reads/sec, Page writes/sec, + more - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more -- SQLServerProperties: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc. -- SQLServerWaitStatsCategorized: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. -- SQLServerSchedulers - This captures `sys.dm_os_schedulers`. -- SQLServerRequests - This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and +- *SQLServerProperties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, SQL Server SPID and SQL Server version. In the case of Azure SQL relevant properties such as Tier, #Vcores, Memory etc. +- *SQLServerWaitStatsCategorized*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. +- *SQLServerSchedulers*: This captures `sys.dm_os_schedulers`. +- *SQLServerRequests*: This captures a snapshot of `sys.dm_exec_requests` and `sys.dm_exec_sessions` that gives you running requests as well as wait types and blocking sessions. -- SQLServerVolumeSpace - uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. -- SQLServerCpu - uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). - - -#### Output Measures -The guiding principal is that all data collected from the same primary DMV ends up in the same measure irrespective of database_type. 
-`sqlserver_database_io` - Used by AzureSQLDBDatabaseIO, AzureSQLMIDatabaseIO, SQLServerDatabaseIO, DatabaseIO given the data is from `sys.dm_io_virtual_file_stats` -`sqlserver_waitstats` - Used by WaitStatsCategorized,AzureSQLDBOsWaitstats,AzureSQLMIOsWaitstats -`sqlserver_server_properties` - Used by SQLServerProperties, AzureSQLDBServerProperties , AzureSQLMIServerProperties,ServerProperties -`sqlserver_memory_clerks` - Used by SQLServerMemoryClerks, AzureSQLDBMemoryClerks, AzureSQLMIMemoryClerks,MemoryClerk -`sqlserver_performance` - Used by SQLServerPerformanceCounters, AzureSQLDBPerformanceCounters, AzureSQLMIPerformanceCounters,PerformanceCounters -`sys.dm_os_schedulers` - Used by SQLServerSchedulers,AzureSQLDBServerSchedulers, AzureSQLMIServerSchedulers - - - -The following Performance counter metrics can be used directly, with no delta calculations: - - SQLServer:Buffer Manager\Buffer cache hit ratio - - SQLServer:Buffer Manager\Page life expectancy - - SQLServer:Buffer Node\Page life expectancy - - SQLServer:Database Replica\Log Apply Pending Queue - - SQLServer:Database Replica\Log Apply Ready Queue - - SQLServer:Database Replica\Log Send Queue - - SQLServer:Database Replica\Recovery Queue - - SQLServer:Databases\Data File(s) Size (KB) - - SQLServer:Databases\Log File(s) Size (KB) - - SQLServer:Databases\Log File(s) Used Size (KB) - - SQLServer:Databases\XTP Memory Used (KB) - - SQLServer:General Statistics\Active Temp Tables - - SQLServer:General Statistics\Processes blocked - - SQLServer:General Statistics\Temp Tables For Destruction - - SQLServer:General Statistics\User Connections - - SQLServer:Memory Broker Clerks\Memory broker clerk size - - SQLServer:Memory Manager\Memory Grants Pending - - SQLServer:Memory Manager\Target Server Memory (KB) - - SQLServer:Memory Manager\Total Server Memory (KB) - - SQLServer:Resource Pool Stats\Active memory grant amount (KB) - - SQLServer:Resource Pool Stats\Disk Read Bytes/sec - - SQLServer:Resource Pool Stats\Disk Read IO Throttled/sec - - SQLServer:Resource Pool Stats\Disk Read IO/sec - - SQLServer:Resource Pool Stats\Disk Write Bytes/sec - - SQLServer:Resource Pool Stats\Disk Write IO Throttled/sec - - SQLServer:Resource Pool Stats\Disk Write IO/sec - - SQLServer:Resource Pool Stats\Used memory (KB) - - SQLServer:Transactions\Free Space in tempdb (KB) - - SQLServer:Transactions\Version Store Size (KB) - - SQLServer:User Settable\Query - - SQLServer:Workload Group Stats\Blocked tasks - - SQLServer:Workload Group Stats\CPU usage % - - SQLServer:Workload Group Stats\Queued requests - - SQLServer:Workload Group Stats\Requests completed/sec +- *SQLServerVolumeSpace*: Uses `sys.dm_os_volume_stats` to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. +- SQLServerCpu: Uses the buffer ring (`sys.dm_os_ring_buffers`) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). 
+- SQLServerAvailabilityReplicaStates: Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup +- SQLServerDatabaseReplicaStates: Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup +- SQLServerRecentBackups: Collects latest full, differential and transaction log backup date and size from `msdb.dbo.backupset` + +### Output Measures + +The guiding principal is that all data collected from the same primary DMV ends +up in the same measure irrespective of database_type. + +- `sqlserver_database_io` - Used by AzureSQLDBDatabaseIO, AzureSQLMIDatabaseIO, SQLServerDatabaseIO, DatabaseIO given the data is from `sys.dm_io_virtual_file_stats` +- `sqlserver_waitstats` - Used by WaitStatsCategorized,AzureSQLDBOsWaitstats,AzureSQLMIOsWaitstats +- `sqlserver_server_properties` - Used by SQLServerProperties, AzureSQLDBServerProperties , AzureSQLMIServerProperties,ServerProperties +- `sqlserver_memory_clerks` - Used by SQLServerMemoryClerks, AzureSQLDBMemoryClerks, AzureSQLMIMemoryClerks,MemoryClerk +- `sqlserver_performance` - Used by SQLServerPerformanceCounters, AzureSQLDBPerformanceCounters, AzureSQLMIPerformanceCounters,PerformanceCounters +- `sys.dm_os_schedulers` - Used by SQLServerSchedulers,AzureSQLDBServerSchedulers, AzureSQLMIServerSchedulers + +The following Performance counter metrics can be used directly, with no delta +calculations: + +- SQLServer:Buffer Manager\Buffer cache hit ratio +- SQLServer:Buffer Manager\Page life expectancy +- SQLServer:Buffer Node\Page life expectancy +- SQLServer:Database Replica\Log Apply Pending Queue +- SQLServer:Database Replica\Log Apply Ready Queue +- SQLServer:Database Replica\Log Send Queue +- SQLServer:Database Replica\Recovery Queue +- SQLServer:Databases\Data File(s) Size (KB) +- SQLServer:Databases\Log File(s) Size (KB) +- SQLServer:Databases\Log File(s) Used Size (KB) +- SQLServer:Databases\XTP Memory Used (KB) +- SQLServer:General Statistics\Active Temp Tables +- SQLServer:General Statistics\Processes blocked +- SQLServer:General Statistics\Temp Tables For Destruction +- SQLServer:General Statistics\User Connections +- SQLServer:Memory Broker Clerks\Memory broker clerk size +- SQLServer:Memory Manager\Memory Grants Pending +- SQLServer:Memory Manager\Target Server Memory (KB) +- SQLServer:Memory Manager\Total Server Memory (KB) +- SQLServer:Resource Pool Stats\Active memory grant amount (KB) +- SQLServer:Resource Pool Stats\Disk Read Bytes/sec +- SQLServer:Resource Pool Stats\Disk Read IO Throttled/sec +- SQLServer:Resource Pool Stats\Disk Read IO/sec +- SQLServer:Resource Pool Stats\Disk Write Bytes/sec +- SQLServer:Resource Pool Stats\Disk Write IO Throttled/sec +- SQLServer:Resource Pool Stats\Disk Write IO/sec +- SQLServer:Resource Pool Stats\Used memory (KB) +- SQLServer:Transactions\Free Space in tempdb (KB) +- SQLServer:Transactions\Version Store Size (KB) +- SQLServer:User Settable\Query +- SQLServer:Workload Group Stats\Blocked tasks +- SQLServer:Workload Group Stats\CPU usage % +- SQLServer:Workload Group Stats\Queued requests +- SQLServer:Workload Group Stats\Requests completed/sec Version 2 queries have the following tags: + - `sql_instance`: Physical host and instance name (hostname:instance) - `database_name`: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct. 
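+As a rough illustration of these tags (the measurement, field names, and
+values below are hypothetical and chosen only to show the layout), a version
+2 record in line protocol form might look like:
+
+```shell
+sqlserver_waitstats,sql_instance=SERVERNAME:INST,database_name=mydb,wait_category=CPU wait_time_ms=100i,waiting_tasks_count=5i 1611332164000000000
+```
+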
+### Health Metric
+
+All collection versions (version 1, version 2, and database_type) support an
+optional plugin health metric called `sqlserver_telegraf_health`. This metric
+tracks whether connections to SQL Server are succeeding or failing. Users can
+use this metric to detect when their SQL Server monitoring is not working as
+intended.
+
+In the configuration file, toggling `health_metric` to `true` will enable
+collection of this metric. By default, this value is set to `false` and
+the metric is not collected. The health metric emits one record for each
+connection specified by `servers` in the configuration file.
+
+The health metric emits the following tags:
+
+- `sql_instance` - Name of the server specified in the connection string. This value is emitted exactly as it appears in the connection string. If the server could not be parsed from the connection string, a constant placeholder value is emitted
+- `database_name` - Name of the database (or initial catalog) specified in the connection string. This value is emitted exactly as it appears in the connection string. If the database could not be parsed from the connection string, a constant placeholder value is emitted
+
+The health metric emits the following fields:
+
+- `attempted_queries` - Number of queries that were attempted for this connection
+- `successful_queries` - Number of queries that completed successfully for this connection
+- `database_type` - Type of database as specified by `database_type`. If `database_type` is empty, the `QueryVersion` and `AzureDB` fields are concatenated instead
+
+If `attempted_queries` and `successful_queries` are not equal for
+a given connection, some metrics were not successfully gathered for
+that connection. If `successful_queries` is 0, no metrics were successfully
+gathered.
+
 [cardinality]: /docs/FAQ.md#user-content-q-how-can-i-manage-series-cardinality
+
+## Example Output
+
+```shell
+sqlserver_cpu_other_process_cpu{host="servername",measurement_db_type="SQLServer",sql_instance="SERVERNAME:INST"} 9
+sqlserver_performance{counter="Log File(s) Size (KB)",counter_type="65792",host="servername",instance="instance_name",measurement_db_type="SQLServer",object="MSSQL$INSTANCE_NAME:Databases",sql_instance="SERVERNAME:INSTANCE_NAME"} 1.048568e+06
+```
diff --git a/plugins/inputs/sqlserver/azuresqldbqueries.go b/plugins/inputs/sqlserver/azuresqldbqueries.go
new file mode 100644
index 0000000000000..78cfaafc16260
--- /dev/null
+++ b/plugins/inputs/sqlserver/azuresqldbqueries.go
@@ -0,0 +1,696 @@
+package sqlserver
+
+import (
+	_ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization
+)
+
+//------------------------------------------------------------------------------------------------
+//------------------ Azure SQL Database ----------------------------------------------------------
+//------------------------------------------------------------------------------------------------
+// Only executed if the AzureDB flag is set
+const sqlAzureDBResourceStats string = `
+IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/
+	DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.';
+	RAISERROR (@ErrorMessage,11,1)
+	RETURN
+END
+
+SELECT TOP(1)
+	'sqlserver_azure_db_resource_stats' AS [measurement]
+	,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+	,DB_NAME() as [database_name]
+	,cast([avg_cpu_percent] as float) as [avg_cpu_percent]
+	,cast([avg_data_io_percent] as float) as [avg_data_io_percent]
+	,cast([avg_log_write_percent] as float) as [avg_log_write_percent]
+	,cast([avg_memory_usage_percent] as float) as [avg_memory_usage_percent]
+	,cast([xtp_storage_percent] as float) as [xtp_storage_percent]
+	,cast([max_worker_percent] as float) as [max_worker_percent]
+	,cast([max_session_percent] as float) as [max_session_percent]
+	,[dtu_limit]
+	,cast([avg_login_rate_percent] as float) as [avg_login_rate_percent]
+	,[end_time]
+	,cast([avg_instance_memory_percent] as float) as [avg_instance_memory_percent]
+	,cast([avg_instance_cpu_percent] as float) as [avg_instance_cpu_percent]
+	,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability
+FROM
+	sys.dm_db_resource_stats WITH (NOLOCK)
+ORDER BY
+	[end_time] DESC;
+`
+
+// Resource governance is only relevant to Azure SQL DB, so it is kept in a separate collector.
+// This will only be collected for Azure SQL Database.
+const sqlAzureDBResourceGovernance string = `
+IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/
+	DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.';
+	RAISERROR (@ErrorMessage,11,1)
+	RETURN
+END
+
+SELECT
+	'sqlserver_db_resource_governance' AS [measurement]
+	,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+	,DB_NAME() as [database_name]
+	,[slo_name]
+	,[dtu_limit]
+	,[max_cpu]
+	,[cap_cpu]
+	,[instance_cap_cpu]
+	,[max_db_memory]
+	,[max_db_max_size_in_mb]
+	,[db_file_growth_in_mb]
+	,[log_size_in_mb]
+	,[instance_max_worker_threads]
+	,[primary_group_max_workers]
+	,[instance_max_log_rate]
+	,[primary_min_log_rate]
+	,[primary_max_log_rate]
+	,[primary_group_min_io]
+	,[primary_group_max_io]
+	,[primary_group_min_cpu]
+	,[primary_group_max_cpu]
+	,[primary_pool_max_workers]
+	,[pool_max_io]
+	,[checkpoint_rate_mbps]
+	,[checkpoint_rate_io]
+	,[volume_local_iops]
+	,[volume_managed_xstore_iops]
+	,[volume_external_xstore_iops]
+	,[volume_type_local_iops]
+	,[volume_type_managed_xstore_iops]
+	,[volume_type_external_xstore_iops]
+	,[volume_pfs_iops]
+	,[volume_type_pfs_iops]
+	,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability
+FROM
+	sys.dm_user_db_resource_governance WITH (NOLOCK);
+`
+
+// DB-level wait stats are only relevant to Azure SQL DB, so they are kept in a separate collector.
+// This will only be collected for Azure SQL Database.
+const sqlAzureDBWaitStats string = `
+IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/
+	DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB.
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_azuredb_waitstats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,DB_NAME() as [database_name] + ,dbws.[wait_type] + ,dbws.[wait_time_ms] + ,dbws.[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] + ,dbws.[signal_wait_time_ms] + ,dbws.[max_wait_time_ms] + ,dbws.[waiting_tasks_count] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM + sys.dm_db_wait_stats AS dbws WITH (NOLOCK) +WHERE + dbws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', + N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', + N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', + N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT') + AND [waiting_tasks_count] > 0 + AND [wait_time_ms] > 100; +` + +const sqlAzureDBDatabaseIO = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. 
Check the database_type parameter in the telegraf configuration.';
+	RAISERROR (@ErrorMessage,11,1)
+	RETURN
+END
+
+SELECT
+	'sqlserver_database_io' AS [measurement]
+	,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+	,DB_NAME() as [database_name]
+	,vfs.[database_id] /*needed as tempdb is different for each Azure SQL DB as grouping has to be by logical server + db_name + database_id*/
+	,vfs.[file_id]
+	,vfs.[io_stall_read_ms] AS [read_latency_ms]
+	,vfs.[num_of_reads] AS [reads]
+	,vfs.[num_of_bytes_read] AS [read_bytes]
+	,vfs.[io_stall_write_ms] AS [write_latency_ms]
+	,vfs.[num_of_writes] AS [writes]
+	,vfs.[num_of_bytes_written] AS [write_bytes]
+	,vfs.[io_stall_queued_read_ms] AS [rg_read_stall_ms]
+	,vfs.[io_stall_queued_write_ms] AS [rg_write_stall_ms]
+	,CASE
+		WHEN (vfs.[database_id] = 0) THEN 'RBPEX'
+		ELSE b.[logical_filename]
+	END as [logical_filename]
+	,CASE
+		WHEN (vfs.[database_id] = 0) THEN 'RBPEX'
+		ELSE b.[physical_filename]
+	END as [physical_filename]
+	,CASE
+		WHEN vfs.[file_id] = 2 THEN 'LOG'
+		ELSE 'DATA'
+	END AS [file_type]
+	,ISNULL([size],0)/128 AS [current_size_mb]
+	,ISNULL(FILEPROPERTY(b.[logical_filename],'SpaceUsed')/128,0) as [space_used_mb]
+	,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability
+FROM
+	[sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs
+	-- needed to get tempdb file names on Azure SQL DB so the join is correct; without this, the join was only on file_id, which caused a bug
+LEFT OUTER JOIN (
+	SELECT
+		DB_ID() as [database_id]
+		,[file_id]
+		,[logical_filename]= [name] COLLATE SQL_Latin1_General_CP1_CI_AS
+		,[physical_filename] = [physical_name] COLLATE SQL_Latin1_General_CP1_CI_AS
+		,[size]
+	FROM sys.database_files
+	WHERE
+		[type] <> 2
+	UNION ALL
+	SELECT
+		2 as [database_id]
+		,[file_id]
+		,[logical_filename] = [name]
+		,[physical_filename] = [physical_name]
+		,[size]
+	FROM tempdb.sys.database_files
+) b
+	ON
+		b.[database_id] = vfs.[database_id]
+		AND b.[file_id] = vfs.[file_id]
+WHERE
+	vfs.[database_id] IN (DB_ID(),0,2)
+`
+
+const sqlAzureDBProperties = `
+IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/
+	DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB.
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_server_properties' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,DB_NAME() as [database_name] + ,(SELECT count(*) FROM sys.dm_os_schedulers WHERE status = 'VISIBLE ONLINE') AS [cpu_count] + ,(SELECT [process_memory_limit_mb] FROM sys.dm_os_job_object) AS [server_memory] + ,slo.[edition] as [sku] + ,SERVERPROPERTY('EngineEdition') AS [engine_edition] + ,slo.[service_objective] AS [hardware_type] + ,CASE + WHEN slo.[edition] = 'Hyperscale' then NULL + ELSE CAST(DATABASEPROPERTYEX(DB_NAME(),'MaxSizeInBytes') as bigint)/(1024*1024) + END AS [total_storage_mb] + ,CASE + WHEN slo.[edition] = 'Hyperscale' then NULL + ELSE ( + cast(DATABASEPROPERTYEX(DB_NAME(),'MaxSizeInBytes') as bigint)/(1024*1024) - + (select SUM([size]/128 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS int)/128) FROM sys.database_files) + ) + END AS [available_storage_mb] + ,(select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as [uptime] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability + FROM sys.[databases] AS d + -- sys.databases.database_id may not match current DB_ID on Azure SQL DB + CROSS JOIN sys.[database_service_objectives] AS slo + WHERE + d.[name] = DB_NAME() + AND slo.[database_id] = DB_ID(); +` + +const sqlAzureDBOsWaitStats = ` +IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_waitstats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,DB_NAME() as [database_name] + ,ws.[wait_type] + ,[wait_time_ms] + ,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] + ,[signal_wait_time_ms] + ,[max_wait_time_ms] + ,[waiting_tasks_count] + ,CASE + WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' + WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' + WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' + WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' + WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' + WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' + WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' + WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' + WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' + WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' + or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' + WHEN ws.[wait_type] LIKE 'SLEEP[_]%' + or ws.[wait_type] IN ( + 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', + 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', + 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' + WHEN ws.[wait_type] IN( + 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', + 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' + WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' + WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' + WHEN ws.[wait_type] IN ( + 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', + 'LOGMGR_FLUSH', 
'LOGMGR_PMM_LOG') THEN 'Tran Log IO' + WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' + WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' + or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' + WHEN ws.[wait_type] IN( + 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', + 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' + WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') + or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' + or ws.[wait_type] like 'BP%' THEN 'Parallelism' + WHEN ws.[wait_type] IN( + 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', + 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', + 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' + WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' + WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' + or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' + or ws.[wait_type] LIKE 'SE_REPL[_]%' + or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' + WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' + or ws.[wait_type] IN ( + 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', + 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', + 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' + WHEN ws.[wait_type] IN ( + 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', + 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', + 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', + 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' + ELSE 'Other' + END as [wait_category] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) +WHERE + ws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', + N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', 
N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', + N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', + N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', + N'RBIO_COMM_RETRY') +AND [waiting_tasks_count] > 10 +AND [wait_time_ms] > 100; +` + +const sqlAzureDBMemoryClerks = ` +IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_memory_clerks' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,DB_NAME() AS [database_name] + ,mc.[type] AS [clerk_type] + ,SUM(mc.[pages_kb]) AS [size_kb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) +GROUP BY + mc.[type] +HAVING + SUM(mc.[pages_kb]) >= 1024 +OPTION(RECOMPILE); +` + +const sqlAzureDBPerformanceCounters = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] INT , + Primary Key([object_name],[counter_name],[instance_name]) +); + +WITH PerfCounters AS ( + SELECT DISTINCT + RTrim(spi.[object_name]) [object_name] + ,RTrim(spi.[counter_name]) [counter_name] + ,CASE WHEN ( + RTRIM(spi.[object_name]) LIKE '%:Databases' + OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' + OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' + OR RTRIM(spi.[object_name]) LIKE '%:Query Store' + OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' + OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value + WHEN + RTRIM([object_name]) LIKE '%:Availability Replica' + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) + ELSE RTRIM(spi.instance_name) + END AS [instance_name] + ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + LEFT JOIN sys.databases AS d + ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE + /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ + WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL + THEN d.[name] + ELSE d.[physical_database_name] + END + WHERE + /*filter out unnecessary SQL DB system database counters, other than master and tempdb*/ + NOT (spi.object_name LIKE 'MSSQL%:Databases%' AND spi.instance_name IN ('model','model_masterdb','model_userdb','msdb','mssqlsystemresource')) + AND + ( + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow 
Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Query Store CPU usage' + ) OR ( + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) + ) +) + +INSERT INTO @PCounters select * from PerfCounters + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,DB_NAME() as [database_name] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] + WHEN '_Total' THEN 'Total' + ELSE ISNULL(pc.[instance_name],'') + END AS [instance] + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] + ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +from @PCounters pc +LEFT OUTER JOIN @PCounters AS pc1 + ON ( + pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') + OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') + ) + AND pc.[object_name] = pc1.[object_name] + AND pc.[instance_name] = pc1.[instance_name] + AND pc1.[counter_name] LIKE '%base' +WHERE + pc.[counter_name] NOT LIKE '% base' +OPTION (RECOMPILE); +` + +const sqlAzureDBRequests string = ` +IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + + + +SELECT + [measurement],[sql_instance],[database_name],[session_id] + ,ISNULL([request_id],0) AS [request_id] + ,[blocking_session_id],[status],[cpu_time_ms] + ,[total_elapsed_time_ms],[logical_reads],[writes] + ,[command],[wait_time_ms],[wait_type] + ,[wait_resource],[program_name] + ,[host_name],[nt_user_name],[login_name] + ,[transaction_isolation_level],[granted_query_memory_pages],[percent_complete] + ,[statement_text],[objectid],[stmt_object_name] + ,[stmt_db_name],[query_hash],[query_plan_hash] + ,replica_updateability + ,[session_db_name],[open_transaction] +FROM ( + SELECT + 'sqlserver_requests' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,DB_NAME() as [database_name] + ,s.[session_id] + ,ISNULL(r.[request_id], 0) as [request_id] + ,DB_NAME(COALESCE(r.[database_id], s.[database_id])) AS [session_db_name] + ,COALESCE(r.[status], s.[status]) AS [status] + ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] + ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] + ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] + ,COALESCE(r.[writes], s.[writes]) AS [writes] + ,r.[command] + ,r.[wait_time] AS [wait_time_ms] + ,r.[wait_type] + ,r.[wait_resource] + ,NULLIF(r.[blocking_session_id],0) AS [blocking_session_id] + ,s.[program_name] + ,s.[host_name] + ,s.[nt_user_name] + ,s.[login_name] + ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] + ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) + WHEN 0 THEN '0-Read Committed' + WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' + WHEN 2 THEN '2-Read Committed' + WHEN 3 THEN '3-Repeatable Read' + WHEN 4 THEN '4-Serializable' + WHEN 5 THEN '5-Snapshot' + ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN' + END, 30) AS [transaction_isolation_level] + ,r.[granted_query_memory] AS [granted_query_memory_pages] + ,r.[percent_complete] + ,SUBSTRING( + qt.[text], + r.[statement_start_offset] / 2 + 1, + (CASE WHEN r.[statement_end_offset] = -1 + THEN DATALENGTH(qt.[text]) + ELSE r.[statement_end_offset] + END - r.[statement_start_offset]) / 2 + 1 + ) AS [statement_text] + ,qt.[objectid] + ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + '.' 
+			QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) as [stmt_object_name]
+		,DB_NAME(qt.[dbid]) AS [stmt_db_name]
+		,CONVERT(varchar(20),r.[query_hash],1) AS [query_hash]
+		,CONVERT(varchar(20),r.[query_plan_hash],1) AS [query_plan_hash]
+		,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability
+		,s.[is_user_process]
+		,[blocking_or_blocked] = COUNT(*) OVER(PARTITION BY ISNULL(NULLIF(r.[blocking_session_id], 0),s.[session_id]))
+	FROM sys.dm_exec_sessions AS s
+	LEFT OUTER JOIN sys.dm_exec_requests AS r
+		ON s.[session_id] = r.[session_id]
+	OUTER APPLY sys.dm_exec_sql_text(r.[sql_handle]) AS qt
+) AS data
+WHERE
+	[blocking_or_blocked] > 1 --Always include blocking or blocked sessions/requests
+	OR (
+		[request_id] IS NOT NULL --A request must exist
+		AND ( --Always fetch user process (in any state), fetch system process only if active
+			[is_user_process] = 1
+			OR [status] COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping')
+		)
+	)
+OPTION(MAXDOP 1);
+`
+
+const sqlAzureDBSchedulers string = `
+IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/
+	DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.';
+	RAISERROR (@ErrorMessage,11,1)
+	RETURN
+END
+
+SELECT
+	'sqlserver_schedulers' AS [measurement]
+	,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance]
+	,CAST(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id]
+	,CAST(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id]
+	,s.[is_online]
+	,s.[is_idle]
+	,s.[preemptive_switches_count]
+	,s.[context_switches_count]
+	,s.[current_tasks_count]
+	,s.[runnable_tasks_count]
+	,s.[current_workers_count]
+	,s.[active_workers_count]
+	,s.[work_queue_count]
+	,s.[pending_disk_io_count]
+	,s.[load_factor]
+	,s.[yield_count]
+	,s.[total_cpu_usage_ms]
+	,s.[total_scheduler_delay_ms]
+	,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability
+FROM sys.dm_os_schedulers AS s
+`
diff --git a/plugins/inputs/sqlserver/azuresqldbqueries_test.go b/plugins/inputs/sqlserver/azuresqldbqueries_test.go
new file mode 100644
index 0000000000000..533c5e35b9ad2
--- /dev/null
+++ b/plugins/inputs/sqlserver/azuresqldbqueries_test.go
@@ -0,0 +1,450 @@
+package sqlserver
+
+import (
+	"os"
+	"testing"
+
+	"github.com/influxdata/telegraf/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAzureSQLIntegration_Database_ResourceStats_Query(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping integration test in short mode")
+	}
+
+	if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" {
+		t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING")
+	}
+
+	connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING")
+
+	server := &SQLServer{
+		Servers:      []string{connectionString},
+		IncludeQuery: []string{"AzureSQLDBResourceStats"},
+		AuthMethod:   "connection_string",
+		DatabaseType: "AzureSQLDB",
+	}
+
+	var acc testutil.Accumulator
+
+	require.NoError(t, server.Start(&acc))
+	require.NoError(t, server.Gather(&acc))
+
+	require.True(t, acc.HasMeasurement("sqlserver_azure_db_resource_stats"))
+	require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "sql_instance"))
+	require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "database_name"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_cpu_percent"))
+	require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_data_io_percent"))
+
require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_log_write_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_memory_usage_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "xtp_storage_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "max_worker_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "max_session_percent")) + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "dtu_limit")) // Can be null. + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "avg_login_rate_percent")) // Can be null. + require.True(t, acc.HasField("sqlserver_azure_db_resource_stats", "end_time")) // Time field. + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_instance_memory_percent")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_instance_cpu_percent")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQLIntegration_Database_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_db_resource_governance")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "database_name")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "slo_name")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "dtu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_db_memory")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "max_db_max_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "db_file_growth_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "log_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_min_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_min_io")) + require.True(t, 
acc.HasInt64Field("sqlserver_db_resource_governance", "primary_group_max_io")) + require.True(t, acc.HasFloatField("sqlserver_db_resource_governance", "primary_group_min_cpu")) + require.True(t, acc.HasFloatField("sqlserver_db_resource_governance", "primary_group_max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "primary_pool_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "pool_max_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "checkpoint_rate_mbps")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "checkpoint_rate_io")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_pfs_iops")) + require.True(t, acc.HasInt64Field("sqlserver_db_resource_governance", "volume_type_pfs_iops")) + require.True(t, acc.HasTag("sqlserver_db_resource_governance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Database_WaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBWaitStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_azuredb_waitstats")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "database_name")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "resource_wait_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_azuredb_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasTag("sqlserver_azuredb_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Database_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBDatabaseIO"}, + 
AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "database_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "file_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasTag("sqlserver_database_io", "logical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "physical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "current_size_mb")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "space_used_mb")) + require.True(t, acc.HasTag("sqlserver_database_io", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Database_ServerProperties_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBServerProperties"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_server_properties")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_server_properties", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "cpu_count")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "server_memory")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sku")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "engine_edition")) + require.True(t, acc.HasTag("sqlserver_server_properties", "hardware_type")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "total_storage_mb")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "available_storage_mb")) + require.True(t, acc.HasField("sqlserver_server_properties", "uptime")) // Time field. 
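+	// replica_updateability comes from DATABASEPROPERTYEX(DB_NAME(), 'Updateability')
+	// and should be either READ_WRITE or READ_ONLY, which is why it is asserted as a tag.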
+ require.True(t, acc.HasTag("sqlserver_server_properties", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQLIntegration_Database_OsWaitstats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBOsWaitstats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "database_name")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + require.True(t, acc.HasTag("sqlserver_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Database_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "database_name")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Database_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + 
require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "database_name")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + require.True(t, acc.HasTag("sqlserver_performance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Database_Requests_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBRequests"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_requests")) + require.True(t, acc.HasTag("sqlserver_requests", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_requests", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "session_id")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "request_id")) + require.True(t, acc.HasTag("sqlserver_requests", "session_db_name")) + require.True(t, acc.HasTag("sqlserver_requests", "status")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "cpu_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "total_elapsed_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "logical_reads")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "writes")) + require.True(t, acc.HasTag("sqlserver_requests", "command")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "wait_time_ms")) + require.True(t, acc.HasField("sqlserver_requests", "wait_type")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "wait_resource")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "blocking_session_id")) + require.True(t, acc.HasTag("sqlserver_requests", "program_name")) + require.True(t, acc.HasTag("sqlserver_requests", "host_name")) + require.True(t, acc.HasField("sqlserver_requests", "nt_user_name")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "login_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "open_transaction")) + require.True(t, acc.HasTag("sqlserver_requests", "transaction_isolation_level")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "granted_query_memory_pages")) + require.True(t, acc.HasFloatField("sqlserver_requests", "percent_complete")) + require.True(t, acc.HasTag("sqlserver_requests", "statement_text")) + require.True(t, acc.HasField("sqlserver_requests", "objectid")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_object_name")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_db_name")) // Can be null. 
+ require.True(t, acc.HasTag("sqlserver_requests", "query_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "query_plan_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Database_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_DB_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_DB_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_DB_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLDBSchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLDB", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) // Bool field. + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) // Bool field. + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasTag("sqlserver_schedulers", "replica_updateability")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/azuresqlmanagedqueries.go b/plugins/inputs/sqlserver/azuresqlmanagedqueries.go new file mode 100644 index 0000000000000..716eeae7a54c7 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlmanagedqueries.go @@ -0,0 +1,562 @@ +package sqlserver + +import ( + _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization +) + +//------------------------------------------------------------------------------------------------ +//------------------ Azure Managed Instance ------------------------------------------------------ +//------------------------------------------------------------------------------------------------ +const sqlAzureMIProperties = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP 1 + 'sqlserver_server_properties' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[virtual_core_count] AS [cpu_count] + ,(SELECT [process_memory_limit_mb] FROM sys.dm_os_job_object) AS [server_memory] + ,[sku] + ,SERVERPROPERTY('EngineEdition') AS [engine_edition] + ,[hardware_generation] AS [hardware_type] + ,cast([reserved_storage_mb] as bigint) AS [total_storage_mb] + ,cast(([reserved_storage_mb] - [storage_space_used_mb]) as bigint) AS [available_storage_mb] + ,(SELECT DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) from sys.dm_os_sys_info) as [uptime] + ,SERVERPROPERTY('ProductVersion') AS [sql_version] + ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] + ,[db_online] + ,[db_restoring] + ,[db_recovering] + ,[db_recoveryPending] + ,[db_suspect] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.server_resource_stats +CROSS APPLY ( + SELECT + SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END ) AS [db_online] + ,SUM( CASE WHEN [state] = 1 THEN 1 ELSE 0 END ) AS [db_restoring] + ,SUM( CASE WHEN [state] = 2 THEN 1 ELSE 0 END ) AS [db_recovering] + ,SUM( CASE WHEN [state] = 3 THEN 1 ELSE 0 END ) AS [db_recoveryPending] + ,SUM( CASE WHEN [state] = 4 THEN 1 ELSE 0 END ) AS [db_suspect] + ,SUM( CASE WHEN [state] IN (6,10) THEN 1 ELSE 0 END ) AS [db_offline] + FROM sys.databases +) AS dbs +ORDER BY + [start_time] DESC; +` + +const sqlAzureMIResourceStats = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP(1) + 'sqlserver_azure_db_resource_stats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM + sys.server_resource_stats +ORDER BY + [end_time] DESC; +` + +const sqlAzureMIResourceGovernance string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_instance_resource_governance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[instance_cap_cpu] + ,[instance_max_log_rate] + ,[instance_max_worker_threads] + ,[tempdb_log_file_number] + ,[volume_local_iops] + ,[volume_external_xstore_iops] + ,[volume_managed_xstore_iops] + ,[volume_type_local_iops] as [voltype_local_iops] + ,[volume_type_managed_xstore_iops] as [voltype_man_xtore_iops] + ,[volume_type_external_xstore_iops] as [voltype_ext_xtore_iops] + ,[volume_external_xstore_iops] as [vol_ext_xtore_iops] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_instance_resource_governance; +` + +const sqlAzureMIDatabaseIO = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_database_io' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,COALESCE(mf.[physical_name],'RBPEX') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension + ,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension + ,mf.[type_desc] AS [file_type] + ,vfs.[io_stall_read_ms] AS [read_latency_ms] + ,vfs.[num_of_reads] AS [reads] + ,vfs.[num_of_bytes_read] AS [read_bytes] + ,vfs.[io_stall_write_ms] AS [write_latency_ms] + ,vfs.[num_of_writes] AS [writes] + ,vfs.[num_of_bytes_written] AS [write_bytes] + ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] + ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs +LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK) + ON vfs.[database_id] = mf.[database_id] + AND vfs.[file_id] = mf.[file_id] +WHERE + vfs.[database_id] < 32760 +` + +const sqlAzureMIMemoryClerks = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_memory_clerks' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,mc.[type] AS [clerk_type] + ,SUM(mc.[pages_kb]) AS [size_kb] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) +GROUP BY + mc.[type] +HAVING + SUM(mc.[pages_kb]) >= 1024 +OPTION(RECOMPILE); +` + +const sqlAzureMIOsWaitStats = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_waitstats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,ws.[wait_type] + ,[wait_time_ms] + ,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] + ,[signal_wait_time_ms] + ,[max_wait_time_ms] + ,[waiting_tasks_count] + ,CASE + WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' + WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' + WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' + WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' + WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' + WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' + WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' + WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' + WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' + WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' + or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' + WHEN ws.[wait_type] LIKE 'SLEEP[_]%' + or ws.[wait_type] IN ( + 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', + 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', + 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' + WHEN ws.[wait_type] IN( + 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', + 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' + WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' + WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' + WHEN ws.[wait_type] IN ( + 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', + 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' + WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' + WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' + or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' + WHEN ws.[wait_type] IN( + 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', + 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' + WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') + or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' + or ws.[wait_type] like 'BP%' THEN 'Parallelism' + WHEN ws.[wait_type] IN( + 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', + 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', + 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' + WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' + WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' + or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' + or ws.[wait_type] LIKE 'SE_REPL[_]%' + or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' + WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' + or ws.[wait_type] IN ( + 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', + 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', + 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' + WHEN ws.[wait_type] IN ( + 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', + 'FT_IFTSHC_MUTEX', 
'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', + 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', + 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' + ELSE 'Other' + END as [wait_category] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) +WHERE + ws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', + N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', + N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', + N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', + N'RBIO_COMM_RETRY') +AND [waiting_tasks_count] > 10 +AND [wait_time_ms] > 100; +` + +const sqlAzureMIPerformanceCounters = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] INT , + Primary Key([object_name],[counter_name],[instance_name]) +); + +WITH PerfCounters AS ( + SELECT DISTINCT + RTrim(spi.[object_name]) [object_name] + ,RTrim(spi.[counter_name]) [counter_name] + ,CASE WHEN ( + RTRIM(spi.[object_name]) LIKE '%:Databases' + OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' + OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' + OR RTRIM(spi.[object_name]) LIKE '%:Query Store' + OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' + OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value + WHEN + RTRIM([object_name]) LIKE '%:Availability Replica' + AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only + THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) + ELSE RTRIM(spi.instance_name) + END AS [instance_name] + ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] + ,spi.[cntr_type] + FROM sys.dm_os_performance_counters AS spi + LEFT JOIN sys.databases AS d + ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE + /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ + WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL + THEN d.[name] + ELSE d.[physical_database_name] + END + WHERE + counter_name IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log 
Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' + ) OR ( + spi.[object_name] LIKE '%User Settable%' + OR spi.[object_name] LIKE '%SQL Errors%' + OR spi.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + spi.[instance_name] IN ('_Total') + AND spi.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) +) + +INSERT INTO @PCounters select * from PerfCounters + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] + WHEN '_Total' THEN 'Total' + ELSE ISNULL(pc.[instance_name],'') + END AS [instance] + ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] + ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +from @PCounters pc +LEFT OUTER JOIN @PCounters AS pc1 + ON ( + pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') + OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') + ) + AND pc.[object_name] = pc1.[object_name] + AND pc.[instance_name] = pc1.[instance_name] + AND pc1.[counter_name] LIKE '%base' +WHERE + pc.[counter_name] NOT LIKE '% base' +OPTION (RECOMPILE); +` + +const sqlAzureMIRequests string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + + + +SELECT + [measurement],[sql_instance],[database_name],[session_id] + ,ISNULL([request_id],0) AS [request_id] + ,[blocking_session_id],[status],[cpu_time_ms] + ,[total_elapsed_time_ms],[logical_reads],[writes] + ,[command],[wait_time_ms],[wait_type] + ,[wait_resource],[program_name] + ,[host_name],[nt_user_name],[login_name] + ,[transaction_isolation_level],[granted_query_memory_pages],[percent_complete] + ,[statement_text],[objectid],[stmt_object_name] + ,[stmt_db_name],[query_hash],[query_plan_hash] + ,replica_updateability + ,[session_db_name],[open_transaction] +FROM ( + SELECT + 'sqlserver_requests' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,DB_NAME() as [database_name] + ,s.[session_id] + ,ISNULL(r.[request_id], 0) as [request_id] + ,DB_NAME(COALESCE(r.[database_id], s.[database_id])) AS [session_db_name] + ,COALESCE(r.[status], s.[status]) AS [status] + ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] + ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] + ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] + ,COALESCE(r.[writes], s.[writes]) AS [writes] + ,r.[command] + ,r.[wait_time] AS [wait_time_ms] + ,r.[wait_type] + ,r.[wait_resource] + ,NULLIF(r.[blocking_session_id],0) AS [blocking_session_id] + ,s.[program_name] + ,s.[host_name] + ,s.[nt_user_name] + ,s.[login_name] + ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] + ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) + WHEN 0 THEN '0-Read Committed' + WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' + WHEN 2 THEN '2-Read Committed' + WHEN 3 THEN '3-Repeatable Read' + WHEN 4 THEN '4-Serializable' + WHEN 5 THEN '5-Snapshot' + ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN' + END, 30) AS [transaction_isolation_level] + ,r.[granted_query_memory] AS [granted_query_memory_pages] + ,r.[percent_complete] + ,SUBSTRING( + qt.[text], + r.[statement_start_offset] / 2 + 1, + (CASE WHEN r.[statement_end_offset] = -1 + THEN DATALENGTH(qt.[text]) + ELSE r.[statement_end_offset] + END - r.[statement_start_offset]) / 2 + 1 + ) AS [statement_text] + ,qt.[objectid] + ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + '.' 
+ QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) as [stmt_object_name] + ,DB_NAME(qt.[dbid]) AS [stmt_db_name] + ,CONVERT(varchar(20),r.[query_hash],1) AS [query_hash] + ,CONVERT(varchar(20),r.[query_plan_hash],1) AS [query_plan_hash] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability + ,s.[is_user_process] + ,[blocking_or_blocked] = COUNT(*) OVER(PARTITION BY ISNULL(NULLIF(r.[blocking_session_id], 0),s.[session_id])) + FROM sys.dm_exec_sessions AS s + LEFT OUTER JOIN sys.dm_exec_requests AS r + ON s.[session_id] = r.[session_id] + OUTER APPLY sys.dm_exec_sql_text(r.[sql_handle]) AS qt +) AS data +WHERE + [blocking_or_blocked] > 1 --Always include blocking or blocked sessions/requests + OR ( + [request_id] IS NOT NULL --A request must exists + AND ( --Always fetch user process (in any state), fetch system process only if active + [is_user_process] = 1 + OR [status] COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping') + ) + ) +OPTION(MAXDOP 1); +` + +const sqlAzureMISchedulers string = ` +IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_schedulers' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,CAST(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] + ,CAST(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] + ,s.[is_online] + ,s.[is_idle] + ,s.[preemptive_switches_count] + ,s.[context_switches_count] + ,s.[current_tasks_count] + ,s.[runnable_tasks_count] + ,s.[current_workers_count] + ,s.[active_workers_count] + ,s.[work_queue_count] + ,s.[pending_disk_io_count] + ,s.[load_factor] + ,s.[yield_count] + ,s.[total_cpu_usage_ms] + ,s.[total_scheduler_delay_ms] + ,DATABASEPROPERTYEX(DB_NAME(), 'Updateability') as replica_updateability +FROM sys.dm_os_schedulers AS s +` diff --git a/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go b/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go new file mode 100644 index 0000000000000..61c0fb16a6f84 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlmanagedqueries_test.go @@ -0,0 +1,379 @@ +package sqlserver + +import ( + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestAzureSQLIntegration_Managed_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIResourceStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_azure_db_resource_stats")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "sql_instance")) + require.True(t, acc.HasFloatField("sqlserver_azure_db_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasTag("sqlserver_azure_db_resource_stats", "replica_updateability")) + + // This query should only return one row + 
require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQLIntegration_Managed_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_instance_resource_governance")) + require.True(t, acc.HasTag("sqlserver_instance_resource_governance", "sql_instance")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "tempdb_log_file_number")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_man_xtore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "voltype_ext_xtore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_instance_resource_governance", "vol_ext_xtore_iops")) + require.True(t, acc.HasTag("sqlserver_instance_resource_governance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Managed_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "physical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "logical_filename")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, 
acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasTag("sqlserver_database_io", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Managed_ServerProperties_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIServerProperties"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_server_properties")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_instance")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "cpu_count")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "server_memory")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sku")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "engine_edition")) + require.True(t, acc.HasTag("sqlserver_server_properties", "hardware_type")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "total_storage_mb")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "available_storage_mb")) + require.True(t, acc.HasField("sqlserver_server_properties", "uptime")) // Time field. 
+ require.True(t, acc.HasTag("sqlserver_server_properties", "sql_version")) + require.True(t, acc.HasTag("sqlserver_server_properties", "sql_version_desc")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_online")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_restoring")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_recovering")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_recoveryPending")) + require.True(t, acc.HasInt64Field("sqlserver_server_properties", "db_suspect")) + require.True(t, acc.HasTag("sqlserver_server_properties", "replica_updateability")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQLIntegration_Managed_OsWaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIOsWaitstats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + require.True(t, acc.HasTag("sqlserver_waitstats", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Managed_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Managed_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable 
AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, acc.HasTag("sqlserver_performance", "instance")) + require.True(t, acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + require.True(t, acc.HasTag("sqlserver_performance", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Managed_Requests_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMIRequests"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_requests")) + require.True(t, acc.HasTag("sqlserver_requests", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_requests", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "session_id")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "request_id")) + require.True(t, acc.HasTag("sqlserver_requests", "status")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "cpu_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "total_elapsed_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "logical_reads")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "writes")) + require.True(t, acc.HasTag("sqlserver_requests", "command")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "wait_time_ms")) + require.True(t, acc.HasTag("sqlserver_requests", "wait_type")) + require.True(t, acc.HasTag("sqlserver_requests", "wait_resource")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "blocking_session_id")) + require.True(t, acc.HasTag("sqlserver_requests", "program_name")) + require.True(t, acc.HasTag("sqlserver_requests", "host_name")) + require.True(t, acc.HasTag("sqlserver_requests", "nt_user_name")) + require.True(t, acc.HasTag("sqlserver_requests", "login_name")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "open_transaction")) + require.True(t, acc.HasTag("sqlserver_requests", "transaction_isolation_level")) + require.True(t, acc.HasInt64Field("sqlserver_requests", "granted_query_memory_pages")) + require.True(t, acc.HasFloatField("sqlserver_requests", "percent_complete")) + require.True(t, acc.HasTag("sqlserver_requests", "statement_text")) + require.True(t, acc.HasField("sqlserver_requests", "objectid")) // Can be null. 
+ require.True(t, acc.HasField("sqlserver_requests", "stmt_object_name")) // Can be null. + require.True(t, acc.HasField("sqlserver_requests", "stmt_db_name")) // Can be null. + require.True(t, acc.HasTag("sqlserver_requests", "query_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "query_plan_hash")) + require.True(t, acc.HasTag("sqlserver_requests", "session_db_name")) + require.True(t, acc.HasTag("sqlserver_requests", "replica_updateability")) + + server.Stop() +} + +func TestAzureSQLIntegration_Managed_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_MI_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_MI_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_MI_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLMISchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLManagedInstance", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) // Bool field. + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) // Bool field. + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasTag("sqlserver_schedulers", "replica_updateability")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/azuresqlpoolqueries.go b/plugins/inputs/sqlserver/azuresqlpoolqueries.go new file mode 100644 index 0000000000000..36fe087fc57e6 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlpoolqueries.go @@ -0,0 +1,477 @@ +package sqlserver + +import ( + _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization +) + +//------------------------------------------------------------------------------------------------ +//------------------ Azure Sql Elastic Pool ------------------------------------------------------ +//------------------------------------------------------------------------------------------------ +const sqlAzurePoolResourceStats = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) 
BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT TOP(1) + 'sqlserver_pool_resource_stats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[snapshot_time] + ,cast([cap_vcores_used_percent] as float) AS [avg_cpu_percent] + ,cast([avg_data_io_percent] as float) AS [avg_data_io_percent] + ,cast([avg_log_write_percent] as float) AS [avg_log_write_percent] + ,cast([avg_storage_percent] as float) AS [avg_storage_percent] + ,cast([max_worker_percent] as float) AS [max_worker_percent] + ,cast([max_session_percent] as float) AS [max_session_percent] + ,cast([max_data_space_kb]/1024. as int) AS [storage_limit_mb] + ,cast([avg_instance_cpu_percent] as float) AS [avg_instance_cpu_percent] + ,cast([avg_allocated_storage_percent] as float) AS [avg_allocated_storage_percent] +FROM + sys.dm_resource_governor_resource_pools_history_ex +WHERE + [name] = 'SloSharedPool1' +ORDER BY + [snapshot_time] DESC; +` + +const sqlAzurePoolResourceGovernance = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_pool_resource_governance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[slo_name] + ,[dtu_limit] + ,[cpu_limit] + ,[max_cpu] + ,[cap_cpu] + ,[max_db_memory] + ,[max_db_max_size_in_mb] + ,[db_file_growth_in_mb] + ,[log_size_in_mb] + ,[instance_cap_cpu] + ,[instance_max_log_rate] + ,[instance_max_worker_threads] + ,[checkpoint_rate_mbps] + ,[checkpoint_rate_io] + ,[primary_group_max_workers] + ,[primary_min_log_rate] + ,[primary_max_log_rate] + ,[primary_group_min_io] + ,[primary_group_max_io] + ,[primary_group_min_cpu] + ,[primary_group_max_cpu] + ,[primary_pool_max_workers] + ,[pool_max_io] + ,[volume_local_iops] + ,[volume_managed_xstore_iops] + ,[volume_external_xstore_iops] + ,[volume_type_local_iops] + ,[volume_type_managed_xstore_iops] + ,[volume_type_external_xstore_iops] + ,[volume_pfs_iops] + ,[volume_type_pfs_iops] +FROM + sys.dm_user_db_resource_governance +WHERE database_id = DB_ID(); +` + +const sqlAzurePoolDatabaseIO = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_database_io' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,CASE + WHEN vfs.[database_id] = 1 THEN 'master' + WHEN vfs.[database_id] = 2 THEN 'tempdb' + WHEN vfs.[database_id] = 3 THEN 'model' + WHEN vfs.[database_id] = 4 THEN 'msdb' + ELSE gov.[database_name] + END AS [database_name] + ,vfs.[database_id] + ,vfs.[file_id] + ,CASE + WHEN vfs.[file_id] = 2 THEN 'LOG' + ELSE 'ROWS' + END AS [file_type] + ,vfs.[num_of_reads] AS [reads] + ,vfs.[num_of_bytes_read] AS [read_bytes] + ,vfs.[io_stall_read_ms] AS [read_latency_ms] + ,vfs.[io_stall_write_ms] AS [write_latency_ms] + ,vfs.[num_of_writes] AS [writes] + ,vfs.[num_of_bytes_written] AS [write_bytes] + ,vfs.[io_stall_queued_read_ms] AS [rg_read_stall_ms] + ,vfs.[io_stall_queued_write_ms] AS [rg_write_stall_ms] + ,[size_on_disk_bytes] + ,ISNULL([size_on_disk_bytes],0)/(1024*1024) AS [size_on_disk_mb] +FROM + sys.dm_io_virtual_file_stats(NULL,NULL) AS vfs +LEFT OUTER JOIN + sys.dm_user_db_resource_governance AS gov +ON vfs.[database_id] = gov.[database_id]; +` + +const sqlAzurePoolOsWaitStats = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_waitstats' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[wait_type] + ,[waiting_tasks_count] + ,[wait_time_ms] + ,[max_wait_time_ms] + ,[signal_wait_time_ms] + ,[wait_time_ms]-[signal_wait_time_ms] AS [resource_wait_ms] + ,CASE + WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' THEN 'CPU' + WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' + WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' + WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' + WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' + WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' + WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' + WHEN ws.[wait_type] LIKE 'CLR[_]%' OR ws.[wait_type] LIKE 'SQLCLR%' THEN 'SQL CLR' + WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' + WHEN ws.[wait_type] LIKE 'DTC[_]%' OR ws.[wait_type] LIKE 'DTCNEW%' OR ws.[wait_type] LIKE 'TRAN_%' + OR ws.[wait_type] LIKE 'XACT%' OR ws.[wait_type] LIKE 'MSQL_XACT%' THEN 'Transaction' + WHEN ws.[wait_type] LIKE 'SLEEP[_]%' OR ws.[wait_type] IN ( + 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', + 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', + 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' + WHEN ws.[wait_type] IN ( + 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', + 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' + WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' + WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 
'Service Broker' + WHEN ws.[wait_type] IN ( + 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', + 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' + WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' + WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' + OR ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO_RG%' OR ws.[wait_type] LIKE 'WAIT_RBIO_RG%' THEN 'VLDB Log Rate Governor' + WHEN ws.[wait_type] LIKE 'RBIO[_]%' OR ws.[wait_type] LIKE 'WAIT_RBIO[_]%' THEN 'VLDB RBIO' + WHEN ws.[wait_type] IN( + 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', + 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' + WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') + OR ws.[wait_type] LIKE 'HT%' or ws.[wait_type] LIKE 'BMP%' + OR ws.[wait_type] LIKE 'BP%' THEN 'Parallelism' + WHEN ws.[wait_type] IN( + 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', + 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', + 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' + WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' + WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' + OR ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' + OR ws.[wait_type] LIKE 'SE_REPL[_]%' + OR ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' + WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' + OR ws.[wait_type] IN ( + 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', + 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', + 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' + WHEN ws.[wait_type] IN ( + 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', + 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', + 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', + 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' + ELSE 'Other' + END AS [wait_category] +FROM sys.dm_os_wait_stats AS ws +WHERE + ws.[wait_type] NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + 
N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', + N'WAIT_XTP_HOST_WAIT', N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', + N'RBIO_COMM_RETRY') +AND [waiting_tasks_count] > 10 +AND [wait_time_ms] > 100; +` + +const sqlAzurePoolMemoryClerks = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_memory_clerks' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,mc.[type] AS [clerk_type] + ,SUM(mc.[pages_kb]) AS [size_kb] +FROM + sys.dm_os_memory_clerks AS mc +GROUP BY + mc.[type] +HAVING + SUM(mc.[pages_kb]) >= 1024 +OPTION(RECOMPILE); +` + +// Specific case on this query when cntr_type = 537003264 to return a percentage value between 0 and 100 +// cf. https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql?view=azuresqldb-current +// Performance counters where the cntr_type column value is 537003264 display the ratio of a subset to its set as a percentage. +// For example, the Buffer Manager:Buffer cache hit ratio counter compares the total number of cache hits and the total number of cache lookups. +// As such, to get a snapshot-like reading of the last second only, you must compare the delta between the current value and the base value (denominator) +// between two collection points that are one second apart. +// The corresponding base value is the performance counter Buffer Manager:Buffer cache hit ratio base where the cntr_type column value is 1073939712. +const sqlAzurePoolPerformanceCounters = ` +SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE @PCounters TABLE +( + [object_name] nvarchar(128), + [counter_name] nvarchar(128), + [instance_name] nvarchar(128), + [cntr_value] bigint, + [cntr_type] int, + Primary Key([object_name],[counter_name],[instance_name]) +); + +WITH PerfCounters AS ( + SELECT DISTINCT + RTRIM(pc.[object_name]) AS [object_name] + ,RTRIM(pc.[counter_name]) AS [counter_name] + ,ISNULL(gov.[database_name], RTRIM(pc.instance_name)) AS [instance_name] + ,pc.[cntr_value] AS [cntr_value] + ,pc.[cntr_type] AS [cntr_type] + FROM sys.dm_os_performance_counters AS pc + LEFT JOIN sys.dm_user_db_resource_governance AS gov + ON + TRY_CONVERT([uniqueidentifier], pc.[instance_name]) = gov.[physical_database_guid] + WHERE + /*filter out unnecessary SQL DB system database counters, other than master and tempdb*/ + NOT (pc.[object_name] LIKE 'MSSQL%:Databases%' AND pc.[instance_name] IN ('model','model_masterdb','model_userdb','msdb','mssqlsystemresource')) + AND + ( + pc.[counter_name] IN ( + 'SQL Compilations/sec' + ,'SQL Re-Compilations/sec' + ,'User Connections' + ,'Batch Requests/sec' + ,'Logouts/sec' + ,'Logins/sec' + ,'Processes blocked' + ,'Latch Waits/sec' + ,'Full Scans/sec' + ,'Index Searches/sec' + ,'Page Splits/sec' + ,'Page lookups/sec' + ,'Page reads/sec' + ,'Page writes/sec' + ,'Readahead pages/sec' + ,'Lazy writes/sec' + ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' + ,'Page life expectancy' + ,'Log File(s) Size (KB)' + ,'Log File(s) Used Size (KB)' + ,'Data File(s) Size (KB)' + ,'Transactions/sec' + ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log Growths' + ,'Active Temp Tables' + ,'Logical Connections' + ,'Temp Tables Creation Rate' + ,'Temp Tables For Destruction' + ,'Free Space in tempdb (KB)' + ,'Version Store Size (KB)' + ,'Memory Grants Pending' + ,'Memory Grants Outstanding' + ,'Free list stalls/sec' + ,'Buffer cache hit ratio' + ,'Buffer cache hit ratio base' + ,'Backup/Restore Throughput/sec' + ,'Total Server Memory (KB)' + ,'Target Server Memory (KB)' + ,'Log Flushes/sec' + ,'Log Flush Wait Time' + ,'Memory broker clerk size' + ,'Log Bytes Flushed/sec' + ,'Bytes Sent to Replica/sec' + ,'Log Send Queue' + ,'Bytes Sent to Transport/sec' + ,'Sends to Replica/sec' + ,'Bytes Sent to Transport/sec' + ,'Sends to Transport/sec' + ,'Bytes Received from Replica/sec' + ,'Receives from Replica/sec' + ,'Flow Control Time (ms/sec)' + ,'Flow Control/sec' + ,'Resent Messages/sec' + ,'Redone Bytes/sec' + ,'XTP Memory Used (KB)' + ,'Transaction Delay' + ,'Log Bytes Received/sec' + ,'Log Apply Pending Queue' + ,'Redone Bytes/sec' + ,'Recovery Queue' + ,'Log Apply Ready Queue' + ,'CPU usage %' + ,'CPU usage % base' + ,'Queued requests' + ,'Requests completed/sec' + ,'Blocked tasks' + ,'Active memory grant amount (KB)' + ,'Disk Read Bytes/sec' + ,'Disk Read IO Throttled/sec' + ,'Disk Read IO/sec' + ,'Disk Write Bytes/sec' + ,'Disk Write IO Throttled/sec' + ,'Disk Write IO/sec' + ,'Used memory (KB)' + ,'Forwarded Records/sec' + ,'Background Writer pages/sec' + ,'Percent Log Used' + ,'Log Send Queue KB' + ,'Redo Queue KB' + ,'Mirrored Write Transactions/sec' + ,'Group Commit Time' + ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Query Store CPU usage' + ) OR ( + pc.[object_name] LIKE '%User Settable%' + OR pc.[object_name] LIKE '%SQL Errors%' + OR pc.[object_name] LIKE '%Batch Resp Statistics%' + ) OR ( + pc.[instance_name] IN ('_Total') + AND
pc.[counter_name] IN ( + 'Lock Timeouts/sec' + ,'Lock Timeouts (timeout > 0)/sec' + ,'Number of Deadlocks/sec' + ,'Lock Waits/sec' + ,'Latch Waits/sec' + ) + ) + ) +) + +INSERT INTO @PCounters select * from PerfCounters + +SELECT + 'sqlserver_performance' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,pc.[object_name] AS [object] + ,pc.[counter_name] AS [counter] + ,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance] + ,CAST( + CASE WHEN pc.[cntr_type] = 537003264 AND base.[cntr_value] > 0 + THEN (pc.[cntr_value] * 1.0) / (base.[cntr_value] * 1.0) * 100 + ELSE pc.[cntr_value] + END + AS float) AS [value] + ,CAST(pc.[cntr_type] AS varchar(25)) AS [counter_type] +FROM @PCounters AS pc +LEFT OUTER JOIN @PCounters AS base +ON + pc.[counter_name] = REPLACE(base.[counter_name],' base','') + AND pc.[object_name] = base.[object_name] + AND pc.[instance_name] = base.[instance_name] + AND base.[cntr_type] = 1073939712 +WHERE + pc.[cntr_type] <> 1073939712 +OPTION(RECOMPILE) +` + +const sqlAzurePoolSchedulers = ` +IF SERVERPROPERTY('EngineEdition') <> 5 + OR NOT EXISTS (SELECT 1 FROM sys.database_service_objectives WHERE database_id = DB_ID() AND elastic_pool_name IS NOT NULL) BEGIN + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL database in an elastic pool. Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +SELECT + 'sqlserver_schedulers' AS [measurement] + ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] + ,(SELECT [elastic_pool_name] FROM sys.database_service_objectives WHERE database_id = DB_ID()) AS [elastic_pool_name] + ,[scheduler_id] + ,[cpu_id] + ,[status] + ,[is_online] + ,[is_idle] + ,[preemptive_switches_count] + ,[context_switches_count] + ,[idle_switches_count] + ,[current_tasks_count] + ,[runnable_tasks_count] + ,[current_workers_count] + ,[active_workers_count] + ,[work_queue_count] + ,[pending_disk_io_count] + ,[load_factor] + ,[failed_to_create_worker] + ,[quantum_length_us] + ,[yield_count] + ,[total_cpu_usage_ms] + ,[total_cpu_idle_capped_ms] + ,[total_scheduler_delay_ms] + ,[ideal_workers_limit] +FROM + sys.dm_os_schedulers; +` diff --git a/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go b/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go new file mode 100644 index 0000000000000..1f5c9fce683c5 --- /dev/null +++ b/plugins/inputs/sqlserver/azuresqlpoolqueries_test.go @@ -0,0 +1,313 @@ +package sqlserver + +import ( + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestAzureSQLIntegration_ElasticPool_ResourceStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolResourceStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_pool_resource_stats")) + require.True(t, acc.HasTag("sqlserver_pool_resource_stats", "sql_instance")) + 
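+ // Pool-level measurements also carry the elastic_pool_name tag, resolved from sys.database_service_objectives.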
require.True(t, acc.HasTag("sqlserver_pool_resource_stats", "elastic_pool_name")) + require.True(t, acc.HasField("sqlserver_pool_resource_stats", "snapshot_time")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_cpu_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_data_io_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_log_write_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_storage_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "max_worker_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "max_session_percent")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_stats", "storage_limit_mb")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_instance_cpu_percent")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_stats", "avg_allocated_storage_percent")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQLIntegration_ElasticPool_ResourceGovernance_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolResourceGovernance"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_pool_resource_governance")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_pool_resource_governance", "slo_name")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "dtu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "cpu_limit")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_db_memory")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "max_db_max_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "db_file_growth_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "log_size_in_mb")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_cap_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "instance_max_worker_threads")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "checkpoint_rate_mbps")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "checkpoint_rate_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", 
"primary_min_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_max_log_rate")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_min_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_group_max_io")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_governance", "primary_group_min_cpu")) + require.True(t, acc.HasFloatField("sqlserver_pool_resource_governance", "primary_group_max_cpu")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "primary_pool_max_workers")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "pool_max_io")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_local_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_managed_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_external_xstore_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_pfs_iops")) + require.True(t, acc.HasInt64Field("sqlserver_pool_resource_governance", "volume_type_pfs_iops")) + + // This query should only return one row + require.Equal(t, 1, len(acc.Metrics)) + server.Stop() +} + +func TestAzureSQLIntegration_ElasticPool_DatabaseIO_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolDatabaseIO"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_database_io")) + require.True(t, acc.HasTag("sqlserver_database_io", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_database_io", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_database_io", "database_name")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "database_id")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "file_id")) + require.True(t, acc.HasTag("sqlserver_database_io", "file_type")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "reads")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "read_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_latency_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "writes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "write_bytes")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_read_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "rg_write_stall_ms")) + require.True(t, acc.HasInt64Field("sqlserver_database_io", "size_on_disk_bytes")) + require.True(t, 
acc.HasInt64Field("sqlserver_database_io", "size_on_disk_mb")) + + server.Stop() +} + +func TestAzureSQLIntegration_ElasticPool_OsWaitStats_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolOsWaitStats"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_waitstats")) + require.True(t, acc.HasTag("sqlserver_waitstats", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_waitstats", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_type")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "waiting_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "max_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "signal_wait_time_ms")) + require.True(t, acc.HasInt64Field("sqlserver_waitstats", "resource_wait_ms")) + require.True(t, acc.HasTag("sqlserver_waitstats", "wait_category")) + + server.Stop() +} + +func TestAzureSQLIntegration_ElasticPool_MemoryClerks_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolMemoryClerks"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_memory_clerks")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "elastic_pool_name")) + require.True(t, acc.HasTag("sqlserver_memory_clerks", "clerk_type")) + require.True(t, acc.HasInt64Field("sqlserver_memory_clerks", "size_kb")) + + server.Stop() +} + +func TestAzureSQLIntegration_ElasticPool_PerformanceCounters_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolPerformanceCounters"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_performance")) + require.True(t, acc.HasTag("sqlserver_performance", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_performance", "object")) + require.True(t, acc.HasTag("sqlserver_performance", "counter")) + require.True(t, 
acc.HasTag("sqlserver_performance", "instance")) + require.True(t, acc.HasFloatField("sqlserver_performance", "value")) + require.True(t, acc.HasTag("sqlserver_performance", "counter_type")) + + server.Stop() +} + +func TestAzureSQLIntegration_ElasticPool_Schedulers_Query(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + + connectionString := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + server := &SQLServer{ + Servers: []string{connectionString}, + IncludeQuery: []string{"AzureSQLPoolSchedulers"}, + AuthMethod: "connection_string", + DatabaseType: "AzureSQLPool", + } + + var acc testutil.Accumulator + + require.NoError(t, server.Start(&acc)) + require.NoError(t, server.Gather(&acc)) + + require.True(t, acc.HasMeasurement("sqlserver_schedulers")) + require.True(t, acc.HasTag("sqlserver_schedulers", "sql_instance")) + require.True(t, acc.HasTag("sqlserver_schedulers", "elastic_pool_name")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "scheduler_id")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "cpu_id")) + require.True(t, acc.HasTag("sqlserver_schedulers", "status")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_online")) + require.True(t, acc.HasField("sqlserver_schedulers", "is_idle")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "preemptive_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "context_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "idle_switches_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "runnable_tasks_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "current_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "active_workers_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "work_queue_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "pending_disk_io_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "load_factor")) + require.True(t, acc.HasField("sqlserver_schedulers", "failed_to_create_worker")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "quantum_length_us")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "yield_count")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_usage_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_cpu_idle_capped_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "total_scheduler_delay_ms")) + require.True(t, acc.HasInt64Field("sqlserver_schedulers", "ideal_workers_limit")) + + server.Stop() +} diff --git a/plugins/inputs/sqlserver/azuresqlqueries.go b/plugins/inputs/sqlserver/azuresqlqueries.go deleted file mode 100644 index fa4eb197723b1..0000000000000 --- a/plugins/inputs/sqlserver/azuresqlqueries.go +++ /dev/null @@ -1,1180 +0,0 @@ -package sqlserver - -import ( - _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization -) - -//------------------------------------------------------------------------------------------------ -//------------------ Azure SQL Database ------------------------------------------------------ -//------------------------------------------------------------------------------------------------ -// Only executed if 
AzureDB flag is set -const sqlAzureDBResourceStats string = ` -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT TOP(1) - 'sqlserver_azure_db_resource_stats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] - ,cast([avg_data_io_percent] as float) as [avg_data_io_percent] - ,cast([avg_log_write_percent] as float) as [avg_log_write_percent] - ,cast([avg_memory_usage_percent] as float) as [avg_memory_usage_percent] - ,cast([xtp_storage_percent] as float) as [xtp_storage_percent] - ,cast([max_worker_percent] as float) as [max_worker_percent] - ,cast([max_session_percent] as float) as [max_session_percent] - ,[dtu_limit] - ,cast([avg_login_rate_percent] as float) as [avg_login_rate_percent] - ,[end_time] - ,cast([avg_instance_memory_percent] as float) as [avg_instance_memory_percent] - ,cast([avg_instance_cpu_percent] as float) as [avg_instance_cpu_percent] -FROM - sys.dm_db_resource_stats WITH (NOLOCK) -ORDER BY - [end_time] DESC; -` - -// Resource Governance is only relevant to Azure SQL DB, so it is kept in a separate collector. -// This will only be collected for Azure SQL Database. -const sqlAzureDBResourceGovernance string = ` -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_db_resource_governance' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,[slo_name] - ,[dtu_limit] - ,[max_cpu] - ,[cap_cpu] - ,[instance_cap_cpu] - ,[max_db_memory] - ,[max_db_max_size_in_mb] - ,[db_file_growth_in_mb] - ,[log_size_in_mb] - ,[instance_max_worker_threads] - ,[primary_group_max_workers] - ,[instance_max_log_rate] - ,[primary_min_log_rate] - ,[primary_max_log_rate] - ,[primary_group_min_io] - ,[primary_group_max_io] - ,[primary_group_min_cpu] - ,[primary_group_max_cpu] - ,[primary_pool_max_workers] - ,[pool_max_io] - ,[checkpoint_rate_mbps] - ,[checkpoint_rate_io] - ,[volume_local_iops] - ,[volume_managed_xstore_iops] - ,[volume_external_xstore_iops] - ,[volume_type_local_iops] - ,[volume_type_managed_xstore_iops] - ,[volume_type_external_xstore_iops] - ,[volume_pfs_iops] - ,[volume_type_pfs_iops] -FROM - sys.dm_user_db_resource_governance WITH (NOLOCK); -` - -// DB-level wait stats are only relevant to Azure SQL DB, so they are kept in a separate collector. -// This will only be collected for Azure SQL Database. -const sqlAzureDBWaitStats string = ` -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB.
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_azuredb_waitstats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,dbws.[wait_type] - ,dbws.[wait_time_ms] - ,dbws.[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] - ,dbws.[signal_wait_time_ms] - ,dbws.[max_wait_time_ms] - ,dbws.[waiting_tasks_count] -FROM - sys.dm_db_wait_stats AS dbws WITH (NOLOCK) -WHERE - dbws.[wait_type] NOT IN ( - N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', - N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', - N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', - N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE', - N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', - N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', - N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', - N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', - N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', - N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', - N'PARALLEL_REDO_WORKER_WAIT_WORK', - N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', - N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', - N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', - N'PREEMPTIVE_OS_DEVICEOPS', - N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', - N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', - N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', - N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', - N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', - N'QDS_ASYNC_QUEUE', - N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', - N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', - N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', - N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', - N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', - N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - N'SQLTRACE_WAIT_ENTRIES', - N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', - N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', - N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', - N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', - N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT') - AND [waiting_tasks_count] > 0 - AND [wait_time_ms] > 100; -` - -const sqlAzureDBDatabaseIO = ` -SET DEADLOCK_PRIORITY -10; -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB.
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_database_io' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,vfs.[database_id] /*needed as tempdb is different for each Azure SQL DB as grouping has to be by logical server + db_name + database_id*/ - ,vfs.[file_id] - ,vfs.[io_stall_read_ms] AS [read_latency_ms] - ,vfs.[num_of_reads] AS [reads] - ,vfs.[num_of_bytes_read] AS [read_bytes] - ,vfs.[io_stall_write_ms] AS [write_latency_ms] - ,vfs.[num_of_writes] AS [writes] - ,vfs.[num_of_bytes_written] AS [write_bytes] - ,vfs.[io_stall_queued_read_ms] AS [rg_read_stall_ms] - ,vfs.[io_stall_queued_write_ms] AS [rg_write_stall_ms] - ,CASE - WHEN (vfs.[database_id] = 0) THEN 'RBPEX' - ELSE b.[logical_filename] - END as [logical_filename] - ,CASE - WHEN (vfs.[database_id] = 0) THEN 'RBPEX' - ELSE b.[physical_filename] - END as [physical_filename] - ,CASE - WHEN vfs.[file_id] = 2 THEN 'LOG' - ELSE 'DATA' - END AS [file_type] - ,ISNULL([size],0)/128 AS [current_size_mb] - ,ISNULL(FILEPROPERTY(b.[logical_filename],'SpaceUsed')/128,0) as [space_used_mb] -FROM - [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs - -- needed to get tempdb file names on Azure SQL DB so the join is correct; without this, there was a bug where the join was only on file_id -LEFT OUTER join ( - SELECT - DB_ID() as [database_id] - ,[file_id] - ,[logical_filename]= [name] COLLATE SQL_Latin1_General_CP1_CI_AS - ,[physical_filename] = [physical_name] COLLATE SQL_Latin1_General_CP1_CI_AS - ,[size] - FROM sys.database_files - WHERE - [type] <> 2 - UNION ALL - SELECT - 2 as [database_id] - ,[file_id] - ,[logical_filename] = [name] - ,[physical_filename] = [physical_name] - ,[size] - FROM tempdb.sys.database_files -) b - ON - b.[database_id] = vfs.[database_id] - AND b.[file_id] = vfs.[file_id] -WHERE - vfs.[database_id] IN (DB_ID(),0,2) -` - -const sqlAzureDBProperties = ` -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB.
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_server_properties' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,(SELECT count(*) FROM sys.dm_os_schedulers WHERE status = 'VISIBLE ONLINE') AS [cpu_count] - ,(SELECT [process_memory_limit_mb] FROM sys.dm_os_job_object) AS [server_memory] - ,slo.[edition] as [sku] - ,SERVERPROPERTY('EngineEdition') AS [engine_edition] - ,slo.[service_objective] AS [hardware_type] - ,CASE - WHEN slo.[edition] = 'Hyperscale' then NULL - ELSE CAST(DATABASEPROPERTYEX(DB_NAME(),'MaxSizeInBytes') as bigint)/(1024*1024) - END AS [total_storage_mb] - ,CASE - WHEN slo.[edition] = 'Hyperscale' then NULL - ELSE ( - cast(DATABASEPROPERTYEX(DB_NAME(),'MaxSizeInBytes') as bigint)/(1024*1024) - - (select SUM([size]/128 - CAST(FILEPROPERTY(name, 'SpaceUsed') AS int)/128) FROM sys.database_files) - ) - END AS [available_storage_mb] - ,(select DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as [uptime] - FROM sys.[databases] AS d - -- sys.databases.database_id may not match current DB_ID on Azure SQL DB - CROSS JOIN sys.[database_service_objectives] AS slo - WHERE - d.[name] = DB_NAME() - AND slo.[database_id] = DB_ID(); -` - -const sqlAzureDBOsWaitStats = ` -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_waitstats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,ws.[wait_type] - ,[wait_time_ms] - ,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] - ,[signal_wait_time_ms] - ,[max_wait_time_ms] - ,[waiting_tasks_count] - ,CASE - WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' - WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' - WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' - WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' - WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' - WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' - WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' - WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' - WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' - WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' - or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' - WHEN ws.[wait_type] LIKE 'SLEEP[_]%' - or ws.[wait_type] IN ( - 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', - 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', - 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' - WHEN ws.[wait_type] IN( - 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', - 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' - WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' - WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' - WHEN ws.[wait_type] IN ( - 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', - 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' - WHEN ws.[wait_type] LIKE 'LOG_RATE%' 
then 'Log Rate Governor' - WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' - or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' - WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' - WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' - WHEN ws.[wait_type] IN( - 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', - 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' - WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') - or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' - or ws.[wait_type] like 'BP%' THEN 'Parallelism' - WHEN ws.[wait_type] IN( - 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', - 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', - 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' - WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') THEN 'User Wait' - WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' - or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' - or ws.[wait_type] LIKE 'SE_REPL[_]%' - or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' - WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' - or ws.[wait_type] IN ( - 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', - 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', - 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' - WHEN ws.[wait_type] IN ( - 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', - 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', - 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', - 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' - ELSE 'Other' - END as [wait_category] -FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) -WHERE - ws.[wait_type] NOT IN ( - N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', - N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', - N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', - N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', - N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', - N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', - N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', - N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', - N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', - N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', - N'PARALLEL_REDO_WORKER_WAIT_WORK', - N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', - N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', - N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', - N'PREEMPTIVE_OS_DEVICEOPS', - N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', - N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', - N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', - N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', - N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', - N'QDS_ASYNC_QUEUE', - N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', - N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', - N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', - 
N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', - N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', - N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - N'SQLTRACE_WAIT_ENTRIES', - N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', - N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', - N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', - N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', - N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', - N'RBIO_COMM_RETRY') -AND [waiting_tasks_count] > 10 -AND [wait_time_ms] > 100; -` - -const sqlAzureDBMemoryClerks = ` -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_memory_clerks' AS [measurement] - ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] - ,DB_NAME() AS [database_name] - ,mc.[type] AS [clerk_type] - ,SUM(mc.[pages_kb]) AS [size_kb] -FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) -GROUP BY - mc.[type] -HAVING - SUM(mc.[pages_kb]) >= 1024 -OPTION(RECOMPILE); -` - -const sqlAzureDBPerformanceCounters = ` -SET DEADLOCK_PRIORITY -10; -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -DECLARE @PCounters TABLE -( - [object_name] nvarchar(128), - [counter_name] nvarchar(128), - [instance_name] nvarchar(128), - [cntr_value] bigint, - [cntr_type] INT , - Primary Key([object_name],[counter_name],[instance_name]) -); - -WITH PerfCounters AS ( - SELECT DISTINCT - RTrim(spi.[object_name]) [object_name] - ,RTrim(spi.[counter_name]) [counter_name] - ,CASE WHEN ( - RTRIM(spi.[object_name]) LIKE '%:Databases' - OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' - OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' - OR RTRIM(spi.[object_name]) LIKE '%:Query Store' - OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' - OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') - AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only - THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value - WHEN - RTRIM([object_name]) LIKE '%:Availability Replica' - AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only - THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) - ELSE RTRIM(spi.instance_name) - END AS [instance_name] - ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] - ,spi.[cntr_type] - FROM sys.dm_os_performance_counters AS spi - LEFT JOIN sys.databases AS d - ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID - = CASE - /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ - WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL - THEN d.[name] - ELSE 
d.[physical_database_name] - END - WHERE - counter_name IN ( - 'SQL Compilations/sec' - ,'SQL Re-Compilations/sec' - ,'User Connections' - ,'Batch Requests/sec' - ,'Logouts/sec' - ,'Logins/sec' - ,'Processes blocked' - ,'Latch Waits/sec' - ,'Full Scans/sec' - ,'Index Searches/sec' - ,'Page Splits/sec' - ,'Page lookups/sec' - ,'Page reads/sec' - ,'Page writes/sec' - ,'Readahead pages/sec' - ,'Lazy writes/sec' - ,'Checkpoint pages/sec' - ,'Page life expectancy' - ,'Log File(s) Size (KB)' - ,'Log File(s) Used Size (KB)' - ,'Data File(s) Size (KB)' - ,'Transactions/sec' - ,'Write Transactions/sec' - ,'Active Temp Tables' - ,'Temp Tables Creation Rate' - ,'Temp Tables For Destruction' - ,'Free Space in tempdb (KB)' - ,'Version Store Size (KB)' - ,'Memory Grants Pending' - ,'Memory Grants Outstanding' - ,'Free list stalls/sec' - ,'Buffer cache hit ratio' - ,'Buffer cache hit ratio base' - ,'Backup/Restore Throughput/sec' - ,'Total Server Memory (KB)' - ,'Target Server Memory (KB)' - ,'Log Flushes/sec' - ,'Log Flush Wait Time' - ,'Memory broker clerk size' - ,'Log Bytes Flushed/sec' - ,'Bytes Sent to Replica/sec' - ,'Log Send Queue' - ,'Bytes Sent to Transport/sec' - ,'Sends to Replica/sec' - ,'Bytes Sent to Transport/sec' - ,'Sends to Transport/sec' - ,'Bytes Received from Replica/sec' - ,'Receives from Replica/sec' - ,'Flow Control Time (ms/sec)' - ,'Flow Control/sec' - ,'Resent Messages/sec' - ,'Redone Bytes/sec' - ,'XTP Memory Used (KB)' - ,'Transaction Delay' - ,'Log Bytes Received/sec' - ,'Log Apply Pending Queue' - ,'Redone Bytes/sec' - ,'Recovery Queue' - ,'Log Apply Ready Queue' - ,'CPU usage %' - ,'CPU usage % base' - ,'Queued requests' - ,'Requests completed/sec' - ,'Blocked tasks' - ,'Active memory grant amount (KB)' - ,'Disk Read Bytes/sec' - ,'Disk Read IO Throttled/sec' - ,'Disk Read IO/sec' - ,'Disk Write Bytes/sec' - ,'Disk Write IO Throttled/sec' - ,'Disk Write IO/sec' - ,'Used memory (KB)' - ,'Forwarded Records/sec' - ,'Background Writer pages/sec' - ,'Percent Log Used' - ,'Log Send Queue KB' - ,'Redo Queue KB' - ,'Mirrored Write Transactions/sec' - ,'Group Commit Time' - ,'Group Commits/Sec' - ) OR ( - spi.[object_name] LIKE '%User Settable%' - OR spi.[object_name] LIKE '%SQL Errors%' - OR spi.[object_name] LIKE '%Batch Resp Statistics%' - ) OR ( - spi.[instance_name] IN ('_Total') - AND spi.[counter_name] IN ( - 'Lock Timeouts/sec' - ,'Lock Timeouts (timeout > 0)/sec' - ,'Number of Deadlocks/sec' - ,'Lock Waits/sec' - ,'Latch Waits/sec' - ) - ) -) - -INSERT INTO @PCounters select * from PerfCounters - -SELECT - 'sqlserver_performance' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,pc.[object_name] AS [object] - ,pc.[counter_name] AS [counter] - ,CASE pc.[instance_name] - WHEN '_Total' THEN 'Total' - ELSE ISNULL(pc.[instance_name],'') - END AS [instance] - ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] - ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] -from @PCounters pc -LEFT OUTER JOIN @PCounters AS pc1 - ON ( - pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') - OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') - ) - AND pc.[object_name] = pc1.[object_name] - AND pc.[instance_name] = pc1.[instance_name] - AND pc1.[counter_name] LIKE '%base' -WHERE - pc.[counter_name] NOT LIKE '% base' -OPTION (RECOMPILE); -` - -const sqlAzureDBRequests string = ` -IF 
SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT [blocking_session_id] INTO #blockingSessions FROM sys.dm_exec_requests WHERE [blocking_session_id] != 0 -CREATE INDEX ix_blockingSessions_1 on #blockingSessions ([blocking_session_id]) - -SELECT - 'sqlserver_requests' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,s.[session_id] - ,ISNULL(r.[request_id], 0) as [request_id] - ,DB_NAME(s.[database_id]) as [session_db_name] - ,COALESCE(r.[status], s.[status]) AS [status] - ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] - ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] - ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] - ,COALESCE(r.[writes], s.[writes]) AS [writes] - ,r.[command] - ,r.[wait_time] as [wait_time_ms] - ,r.[wait_type] - ,r.[wait_resource] - ,r.[blocking_session_id] - ,s.[program_name] - ,s.[host_name] - ,s.[nt_user_name] - ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] - ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) - WHEN 0 THEN '0-Read Committed' - WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' - WHEN 2 THEN '2-Read Committed' - WHEN 3 THEN '3-Repeatable Read' - WHEN 4 THEN '4-Serializable' - WHEN 5 THEN '5-Snapshot' - ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN' - END, 30) AS [transaction_isolation_level] - ,r.[granted_query_memory] as [granted_query_memory_pages] - ,r.[percent_complete] - ,SUBSTRING( - qt.[text], - r.[statement_start_offset] / 2 + 1, - (CASE WHEN r.[statement_end_offset] = -1 - THEN DATALENGTH(qt.text) - ELSE r.[statement_end_offset] - END - r.[statement_start_offset]) / 2 + 1 - ) AS [statement_text] - ,qt.[objectid] - ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + '.' + QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) as [stmt_object_name] - ,DB_NAME(qt.[dbid]) [stmt_db_name] - ,CONVERT(varchar(20),[query_hash],1) as [query_hash] - ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] -FROM sys.dm_exec_sessions AS s -LEFT OUTER JOIN sys.dm_exec_requests AS r - ON s.[session_id] = r.[session_id] -OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt -WHERE - (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions)) - OR ( - r.session_id IS NOT NULL - AND ( - s.is_user_process = 1 - OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping') - ) - ) -OPTION(MAXDOP 1); -` - -const sqlAzureDBSchedulers string = ` -IF SERVERPROPERTY('EngineEdition') <> 5 BEGIN /*not Azure SQL DB*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure SQL DB. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_schedulers' AS [measurement] - ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] - ,CAST(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] - ,CAST(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] - ,s.[is_online] - ,s.[is_idle] - ,s.[preemptive_switches_count] - ,s.[context_switches_count] - ,s.[current_tasks_count] - ,s.[runnable_tasks_count] - ,s.[current_workers_count] - ,s.[active_workers_count] - ,s.[work_queue_count] - ,s.[pending_disk_io_count] - ,s.[load_factor] - ,s.[yield_count] - ,s.[total_cpu_usage_ms] - ,s.[total_scheduler_delay_ms] -FROM sys.dm_os_schedulers AS s -` - -//------------------------------------------------------------------------------------------------ -//------------------ Azure Managed Instance ------------------------------------------------------ -//------------------------------------------------------------------------------------------------ -const sqlAzureMIProperties = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT TOP 1 - 'sqlserver_server_properties' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[virtual_core_count] AS [cpu_count] - ,(SELECT [process_memory_limit_mb] FROM sys.dm_os_job_object) AS [server_memory] - ,[sku] - ,SERVERPROPERTY('EngineEdition') AS [engine_edition] - ,[hardware_generation] AS [hardware_type] - ,cast([reserved_storage_mb] as bigint) AS [total_storage_mb] - ,cast(([reserved_storage_mb] - [storage_space_used_mb]) as bigint) AS [available_storage_mb] - ,(SELECT DATEDIFF(MINUTE,[sqlserver_start_time],GETDATE()) from sys.dm_os_sys_info) as [uptime] - ,SERVERPROPERTY('ProductVersion') AS [sql_version] - ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] - ,[db_online] - ,[db_restoring] - ,[db_recovering] - ,[db_recoveryPending] - ,[db_suspect] -FROM sys.server_resource_stats -CROSS APPLY ( - SELECT - SUM( CASE WHEN [state] = 0 THEN 1 ELSE 0 END ) AS [db_online] - ,SUM( CASE WHEN [state] = 1 THEN 1 ELSE 0 END ) AS [db_restoring] - ,SUM( CASE WHEN [state] = 2 THEN 1 ELSE 0 END ) AS [db_recovering] - ,SUM( CASE WHEN [state] = 3 THEN 1 ELSE 0 END ) AS [db_recoveryPending] - ,SUM( CASE WHEN [state] = 4 THEN 1 ELSE 0 END ) AS [db_suspect] - ,SUM( CASE WHEN [state] IN (6,10) THEN 1 ELSE 0 END ) AS [db_offline] - FROM sys.databases -) AS dbs -ORDER BY - [start_time] DESC; -` - -const sqlAzureMIResourceStats = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT TOP(1) - 'sqlserver_azure_db_resource_stats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,cast([avg_cpu_percent] as float) as [avg_cpu_percent] -FROM - sys.server_resource_stats -ORDER BY - [end_time] DESC; -` - -const sqlAzureMIResourceGovernance string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_instance_resource_governance' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[instance_cap_cpu] - ,[instance_max_log_rate] - ,[instance_max_worker_threads] - ,[tempdb_log_file_number] - ,[volume_local_iops] - ,[volume_external_xstore_iops] - ,[volume_managed_xstore_iops] - ,[volume_type_local_iops] as [voltype_local_iops] - ,[volume_type_managed_xstore_iops] as [voltype_man_xtore_iops] - ,[volume_type_external_xstore_iops] as [voltype_ext_xtore_iops] - ,[volume_external_xstore_iops] as [vol_ext_xtore_iops] -FROM sys.dm_instance_resource_governance; -` - -const sqlAzureMIDatabaseIO = ` -SET DEADLOCK_PRIORITY -10; -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_database_io' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,COALESCE(mf.[physical_name],'RBPEX') AS [physical_filename] --RBPEX = Resilient Buffer Pool Extension - ,COALESCE(mf.[name],'RBPEX') AS [logical_filename] --RBPEX = Resilient Buffer Pool Extension - ,mf.[type_desc] AS [file_type] - ,vfs.[io_stall_read_ms] AS [read_latency_ms] - ,vfs.[num_of_reads] AS [reads] - ,vfs.[num_of_bytes_read] AS [read_bytes] - ,vfs.[io_stall_write_ms] AS [write_latency_ms] - ,vfs.[num_of_writes] AS [writes] - ,vfs.[num_of_bytes_written] AS [write_bytes] - ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] - ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] -FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs -LEFT OUTER JOIN sys.master_files AS mf WITH (NOLOCK) - ON vfs.[database_id] = mf.[database_id] - AND vfs.[file_id] = mf.[file_id] -WHERE - vfs.[database_id] < 32760 -` - -const sqlAzureMIMemoryClerks = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_memory_clerks' AS [measurement] - ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] - ,mc.[type] AS [clerk_type] - ,SUM(mc.[pages_kb]) AS [size_kb] -FROM sys.[dm_os_memory_clerks] AS mc WITH (NOLOCK) -GROUP BY - mc.[type] -HAVING - SUM(mc.[pages_kb]) >= 1024 -OPTION(RECOMPILE); -` - -const sqlAzureMIOsWaitStats = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_waitstats' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,ws.[wait_type] - ,[wait_time_ms] - ,[wait_time_ms] - [signal_wait_time_ms] AS [resource_wait_ms] - ,[signal_wait_time_ms] - ,[max_wait_time_ms] - ,[waiting_tasks_count] - ,CASE - WHEN ws.[wait_type] LIKE 'SOS_SCHEDULER_YIELD' then 'CPU' - WHEN ws.[wait_type] = 'THREADPOOL' THEN 'Worker Thread' - WHEN ws.[wait_type] LIKE 'LCK[_]%' THEN 'Lock' - WHEN ws.[wait_type] LIKE 'LATCH[_]%' THEN 'Latch' - WHEN ws.[wait_type] LIKE 'PAGELATCH[_]%' THEN 'Buffer Latch' - WHEN ws.[wait_type] LIKE 'PAGEIOLATCH[_]%' THEN 'Buffer IO' - WHEN ws.[wait_type] LIKE 'RESOURCE_SEMAPHORE_QUERY_COMPILE%' THEN 'Compilation' - WHEN ws.[wait_type] LIKE 'CLR[_]%' or ws.[wait_type] like 'SQLCLR%' THEN 'SQL CLR' - WHEN ws.[wait_type] LIKE 'DBMIRROR_%' THEN 'Mirroring' - WHEN ws.[wait_type] LIKE 'DTC[_]%' or ws.[wait_type] LIKE 'DTCNEW%' or ws.[wait_type] LIKE 'TRAN_%' - or ws.[wait_type] LIKE 'XACT%' or ws.[wait_type] like 'MSQL_XACT%' THEN 'Transaction' - WHEN ws.[wait_type] LIKE 'SLEEP[_]%' - or ws.[wait_type] IN ( - 'LAZYWRITER_SLEEP', 'SQLTRACE_BUFFER_FLUSH', 'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - 'SQLTRACE_WAIT_ENTRIES', 'FT_IFTS_SCHEDULER_IDLE_WAIT', 'XE_DISPATCHER_WAIT', - 'REQUEST_FOR_DEADLOCK_SEARCH', 'LOGMGR_QUEUE', 'ONDEMAND_TASK_QUEUE', - 'CHECKPOINT_QUEUE', 'XE_TIMER_EVENT') THEN 'Idle' - WHEN ws.[wait_type] IN( - 'ASYNC_IO_COMPLETION','BACKUPIO','CHKPT','WRITE_COMPLETION', - 'IO_QUEUE_LIMIT', 'IO_RETRY') THEN 'Other Disk IO' - WHEN ws.[wait_type] LIKE 'PREEMPTIVE_%' THEN 'Preemptive' - WHEN ws.[wait_type] LIKE 'BROKER[_]%' THEN 'Service Broker' - WHEN ws.[wait_type] IN ( - 'WRITELOG','LOGBUFFER','LOGMGR_RESERVE_APPEND', - 'LOGMGR_FLUSH', 'LOGMGR_PMM_LOG') THEN 'Tran Log IO' - WHEN ws.[wait_type] LIKE 'LOG_RATE%' then 'Log Rate Governor' - WHEN ws.[wait_type] LIKE 'HADR_THROTTLE[_]%' - or ws.[wait_type] = 'THROTTLE_LOG_RATE_LOG_STORAGE' THEN 'HADR Log Rate Governor' - WHEN ws.[wait_type] LIKE 'RBIO_RG%' or ws.[wait_type] like 'WAIT_RBIO_RG%' then 'VLDB Log Rate Governor' - WHEN ws.[wait_type] LIKE 'RBIO[_]%' or ws.[wait_type] like 'WAIT_RBIO[_]%' then 'VLDB RBIO' - WHEN ws.[wait_type] IN( - 'ASYNC_NETWORK_IO','EXTERNAL_SCRIPT_NETWORK_IOF', - 'NET_WAITFOR_PACKET','PROXY_NETWORK_IO') THEN 'Network IO' - WHEN ws.[wait_type] IN ( 'CXPACKET', 'CXCONSUMER') - or ws.[wait_type] like 'HT%' or ws.[wait_type] like 'BMP%' - or ws.[wait_type] like 'BP%' THEN 'Parallelism' - WHEN ws.[wait_type] IN( - 'CMEMTHREAD','CMEMPARTITIONED','EE_PMOLOCK','EXCHANGE', - 'RESOURCE_SEMAPHORE','MEMORY_ALLOCATION_EXT', - 'RESERVED_MEMORY_ALLOCATION_EXT', 'MEMORY_GRANT_UPDATE') THEN 'Memory' - WHEN ws.[wait_type] IN ('WAITFOR','WAIT_FOR_RESULTS') 
THEN 'User Wait' - WHEN ws.[wait_type] LIKE 'HADR[_]%' or ws.[wait_type] LIKE 'PWAIT_HADR%' - or ws.[wait_type] LIKE 'REPLICA[_]%' or ws.[wait_type] LIKE 'REPL_%' - or ws.[wait_type] LIKE 'SE_REPL[_]%' - or ws.[wait_type] LIKE 'FCB_REPLICA%' THEN 'Replication' - WHEN ws.[wait_type] LIKE 'SQLTRACE[_]%' - or ws.[wait_type] IN ( - 'TRACEWRITE', 'SQLTRACE_LOCK', 'SQLTRACE_FILE_BUFFER', 'SQLTRACE_FILE_WRITE_IO_COMPLETION', - 'SQLTRACE_FILE_READ_IO_COMPLETION', 'SQLTRACE_PENDING_BUFFER_WRITERS', 'SQLTRACE_SHUTDOWN', - 'QUERY_TRACEOUT', 'TRACE_EVTNOTIF') THEN 'Tracing' - WHEN ws.[wait_type] IN ( - 'FT_RESTART_CRAWL', 'FULLTEXT GATHERER', 'MSSEARCH', 'FT_METADATA_MUTEX', - 'FT_IFTSHC_MUTEX', 'FT_IFTSISM_MUTEX', 'FT_IFTS_RWLOCK', 'FT_COMPROWSET_RWLOCK', - 'FT_MASTER_MERGE', 'FT_PROPERTYLIST_CACHE', 'FT_MASTER_MERGE_COORDINATOR', - 'PWAIT_RESOURCE_SEMAPHORE_FT_PARALLEL_QUERY_SYNC') THEN 'Full Text Search' - ELSE 'Other' - END as [wait_category] -FROM sys.dm_os_wait_stats AS ws WITH (NOLOCK) -WHERE - ws.[wait_type] NOT IN ( - N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', - N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', - N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', - N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_QUEUE', - N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', - N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', - N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', - N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', - N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', - N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', - N'PARALLEL_REDO_WORKER_WAIT_WORK', - N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', - N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', - N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', - N'PREEMPTIVE_OS_DEVICEOPS', - N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', - N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', - N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', - N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', - N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', - N'QDS_ASYNC_QUEUE', - N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', - N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', - N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', - N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', - N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', - N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', - N'SQLTRACE_WAIT_ENTRIES', - N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', - N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', - N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', - N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', - N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT','SQLTRACE_WAIT_ENTRIES', - N'RBIO_COMM_RETRY') -AND [waiting_tasks_count] > 10 -AND [wait_time_ms] > 100; -` - -const sqlAzureMIPerformanceCounters = ` -SET DEADLOCK_PRIORITY -10; -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME 
+ ',Database:' + DB_NAME() +' is not an Azure Managed Instance. Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -DECLARE @PCounters TABLE -( - [object_name] nvarchar(128), - [counter_name] nvarchar(128), - [instance_name] nvarchar(128), - [cntr_value] bigint, - [cntr_type] INT , - Primary Key([object_name],[counter_name],[instance_name]) -); - -WITH PerfCounters AS ( - SELECT DISTINCT - RTrim(spi.[object_name]) [object_name] - ,RTrim(spi.[counter_name]) [counter_name] - ,CASE WHEN ( - RTRIM(spi.[object_name]) LIKE '%:Databases' - OR RTRIM(spi.[object_name]) LIKE '%:Database Replica' - OR RTRIM(spi.[object_name]) LIKE '%:Catalog Metadata' - OR RTRIM(spi.[object_name]) LIKE '%:Query Store' - OR RTRIM(spi.[object_name]) LIKE '%:Columnstore' - OR RTRIM(spi.[object_name]) LIKE '%:Advanced Analytics') - AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only - THEN ISNULL(d.[name],RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value - WHEN - RTRIM([object_name]) LIKE '%:Availability Replica' - AND TRY_CONVERT([uniqueidentifier], spi.[instance_name]) IS NOT NULL -- for cloud only - THEN ISNULL(d.[name],RTRIM(spi.[instance_name])) + RTRIM(SUBSTRING(spi.[instance_name], 37, LEN(spi.[instance_name]))) - ELSE RTRIM(spi.instance_name) - END AS [instance_name] - ,CAST(spi.[cntr_value] AS BIGINT) AS [cntr_value] - ,spi.[cntr_type] - FROM sys.dm_os_performance_counters AS spi - LEFT JOIN sys.databases AS d - ON LEFT(spi.[instance_name], 36) -- some instance_name values have an additional identifier appended after the GUID - = CASE - /*in SQL DB standalone, physical_database_name for master is the GUID of the user database*/ - WHEN d.[name] = 'master' AND TRY_CONVERT([uniqueidentifier], d.[physical_database_name]) IS NOT NULL - THEN d.[name] - ELSE d.[physical_database_name] - END - WHERE - counter_name IN ( - 'SQL Compilations/sec' - ,'SQL Re-Compilations/sec' - ,'User Connections' - ,'Batch Requests/sec' - ,'Logouts/sec' - ,'Logins/sec' - ,'Processes blocked' - ,'Latch Waits/sec' - ,'Full Scans/sec' - ,'Index Searches/sec' - ,'Page Splits/sec' - ,'Page lookups/sec' - ,'Page reads/sec' - ,'Page writes/sec' - ,'Readahead pages/sec' - ,'Lazy writes/sec' - ,'Checkpoint pages/sec' - ,'Page life expectancy' - ,'Log File(s) Size (KB)' - ,'Log File(s) Used Size (KB)' - ,'Data File(s) Size (KB)' - ,'Transactions/sec' - ,'Write Transactions/sec' - ,'Active Temp Tables' - ,'Temp Tables Creation Rate' - ,'Temp Tables For Destruction' - ,'Free Space in tempdb (KB)' - ,'Version Store Size (KB)' - ,'Memory Grants Pending' - ,'Memory Grants Outstanding' - ,'Free list stalls/sec' - ,'Buffer cache hit ratio' - ,'Buffer cache hit ratio base' - ,'Backup/Restore Throughput/sec' - ,'Total Server Memory (KB)' - ,'Target Server Memory (KB)' - ,'Log Flushes/sec' - ,'Log Flush Wait Time' - ,'Memory broker clerk size' - ,'Log Bytes Flushed/sec' - ,'Bytes Sent to Replica/sec' - ,'Log Send Queue' - ,'Bytes Sent to Transport/sec' - ,'Sends to Replica/sec' - ,'Bytes Sent to Transport/sec' - ,'Sends to Transport/sec' - ,'Bytes Received from Replica/sec' - ,'Receives from Replica/sec' - ,'Flow Control Time (ms/sec)' - ,'Flow Control/sec' - ,'Resent Messages/sec' - ,'Redone Bytes/sec' - ,'XTP Memory Used (KB)' - ,'Transaction Delay' - ,'Log Bytes Received/sec' - ,'Log Apply Pending Queue' - ,'Redone Bytes/sec' - ,'Recovery Queue' - ,'Log Apply Ready Queue' - ,'CPU usage %' 
- ,'CPU usage % base' - ,'Queued requests' - ,'Requests completed/sec' - ,'Blocked tasks' - ,'Active memory grant amount (KB)' - ,'Disk Read Bytes/sec' - ,'Disk Read IO Throttled/sec' - ,'Disk Read IO/sec' - ,'Disk Write Bytes/sec' - ,'Disk Write IO Throttled/sec' - ,'Disk Write IO/sec' - ,'Used memory (KB)' - ,'Forwarded Records/sec' - ,'Background Writer pages/sec' - ,'Percent Log Used' - ,'Log Send Queue KB' - ,'Redo Queue KB' - ,'Mirrored Write Transactions/sec' - ,'Group Commit Time' - ,'Group Commits/Sec' - ) OR ( - spi.[object_name] LIKE '%User Settable%' - OR spi.[object_name] LIKE '%SQL Errors%' - OR spi.[object_name] LIKE '%Batch Resp Statistics%' - ) OR ( - spi.[instance_name] IN ('_Total') - AND spi.[counter_name] IN ( - 'Lock Timeouts/sec' - ,'Lock Timeouts (timeout > 0)/sec' - ,'Number of Deadlocks/sec' - ,'Lock Waits/sec' - ,'Latch Waits/sec' - ) - ) -) - -INSERT INTO @PCounters select * from PerfCounters - -SELECT - 'sqlserver_performance' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,pc.[object_name] AS [object] - ,pc.[counter_name] AS [counter] - ,CASE pc.[instance_name] - WHEN '_Total' THEN 'Total' - ELSE ISNULL(pc.[instance_name],'') - END AS [instance] - ,CAST(CASE WHEN pc.[cntr_type] = 537003264 AND pc1.[cntr_value] > 0 THEN (pc.[cntr_value] * 1.0) / (pc1.[cntr_value] * 1.0) * 100 ELSE pc.[cntr_value] END AS float(10)) AS [value] - ,cast(pc.[cntr_type] as varchar(25)) as [counter_type] -from @PCounters pc -LEFT OUTER JOIN @PCounters AS pc1 - ON ( - pc.[counter_name] = REPLACE(pc1.[counter_name],' base','') - OR pc.[counter_name] = REPLACE(pc1.[counter_name],' base',' (ms)') - ) - AND pc.[object_name] = pc1.[object_name] - AND pc.[instance_name] = pc1.[instance_name] - AND pc1.[counter_name] LIKE '%base' -WHERE - pc.[counter_name] NOT LIKE '% base' -OPTION (RECOMPILE); -` - -const sqlAzureMIRequests string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance. 
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT [blocking_session_id] INTO #blockingSessions FROM sys.dm_exec_requests WHERE [blocking_session_id] != 0 -CREATE INDEX ix_blockingSessions_1 on #blockingSessions ([blocking_session_id]) - -SELECT - 'sqlserver_requests' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,DB_NAME() as [database_name] - ,s.[session_id] - ,ISNULL(r.[request_id], 0) as [request_id] - ,DB_NAME(s.[database_id]) as [session_db_name] - ,COALESCE(r.[status], s.[status]) AS [status] - ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] - ,COALESCE(r.[total_elapsed_time], s.[total_elapsed_time]) AS [total_elapsed_time_ms] - ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] - ,COALESCE(r.[writes], s.[writes]) AS [writes] - ,r.[command] - ,r.[wait_time] as [wait_time_ms] - ,r.[wait_type] - ,r.[wait_resource] - ,r.[blocking_session_id] - ,s.[program_name] - ,s.[host_name] - ,s.[nt_user_name] - ,COALESCE(r.[open_transaction_count], s.[open_transaction_count]) AS [open_transaction] - ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) - WHEN 0 THEN '0-Read Committed' - WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' - WHEN 2 THEN '2-Read Committed' - WHEN 3 THEN '3-Repeatable Read' - WHEN 4 THEN '4-Serializable' - WHEN 5 THEN '5-Snapshot' - ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + '-UNKNOWN' - END, 30) AS [transaction_isolation_level] - ,r.[granted_query_memory] as [granted_query_memory_pages] - ,r.[percent_complete] - ,SUBSTRING( - qt.[text], - r.[statement_start_offset] / 2 + 1, - (CASE WHEN r.[statement_end_offset] = -1 - THEN DATALENGTH(qt.text) - ELSE r.[statement_end_offset] - END - r.[statement_start_offset]) / 2 + 1 - ) AS [statement_text] - ,qt.[objectid] - ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + '.' + QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) as [stmt_object_name] - ,DB_NAME(qt.[dbid]) [stmt_db_name] - ,CONVERT(varchar(20),[query_hash],1) as [query_hash] - ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] -FROM sys.dm_exec_sessions AS s -LEFT OUTER JOIN sys.dm_exec_requests AS r - ON s.[session_id] = r.[session_id] -OUTER APPLY sys.dm_exec_sql_text(r.sql_handle) AS qt -WHERE - (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions)) - OR ( - r.session_id IS NOT NULL - AND ( - s.is_user_process = 1 - OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping') - ) - ) -OPTION(MAXDOP 1); -` - -const sqlAzureMISchedulers string = ` -IF SERVERPROPERTY('EngineEdition') <> 8 BEGIN /*not Azure Managed Instance*/ - DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@SERVERNAME + ',Database:' + DB_NAME() +' is not an Azure Managed Instance.
Check the database_type parameter in the telegraf configuration.'; - RAISERROR (@ErrorMessage,11,1) - RETURN -END - -SELECT - 'sqlserver_schedulers' AS [measurement] - ,REPLACE(@@SERVERNAME, '\', ':') AS [sql_instance] - ,CAST(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] - ,CAST(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] - ,s.[is_online] - ,s.[is_idle] - ,s.[preemptive_switches_count] - ,s.[context_switches_count] - ,s.[current_tasks_count] - ,s.[runnable_tasks_count] - ,s.[current_workers_count] - ,s.[active_workers_count] - ,s.[work_queue_count] - ,s.[pending_disk_io_count] - ,s.[load_factor] - ,s.[yield_count] - ,s.[total_cpu_usage_ms] - ,s.[total_scheduler_delay_ms] -FROM sys.dm_os_schedulers AS s -` diff --git a/plugins/inputs/sqlserver/connectionstring.go b/plugins/inputs/sqlserver/connectionstring.go new file mode 100644 index 0000000000000..b5f530b9f9510 --- /dev/null +++ b/plugins/inputs/sqlserver/connectionstring.go @@ -0,0 +1,100 @@ +package sqlserver + +import ( + "net/url" + "strings" ) + +const ( + emptySQLInstance = "<sql-instance>" + emptyDatabaseName = "<database-name>" +) + +// getConnectionIdentifiers returns the sqlInstance and databaseName from the given connection string. +// The SQL instance name is returned as it appears in the connection string. +// If the connection string could not be parsed, or sqlInstance/databaseName were not present, a placeholder value is returned. +func getConnectionIdentifiers(connectionString string) (sqlInstance string, databaseName string) { + if len(connectionString) == 0 { + return emptySQLInstance, emptyDatabaseName + } + + trimmedConnectionString := strings.TrimSpace(connectionString) + + if strings.HasPrefix(trimmedConnectionString, "odbc:") { + connectionStringWithoutOdbc := strings.TrimPrefix(trimmedConnectionString, "odbc:") + return parseConnectionStringKeyValue(connectionStringWithoutOdbc) + } + if strings.HasPrefix(trimmedConnectionString, "sqlserver://") { + return parseConnectionStringURL(trimmedConnectionString) + } + return parseConnectionStringKeyValue(trimmedConnectionString) +} + +// parseConnectionStringKeyValue parses a "key=value;" connection string and returns the SQL instance and database name +func parseConnectionStringKeyValue(connectionString string) (sqlInstance string, databaseName string) { + sqlInstance = "" + databaseName = "" + + keyValuePairs := strings.Split(connectionString, ";") + for _, keyValuePair := range keyValuePairs { + if len(keyValuePair) == 0 { + continue + } + + keyAndValue := strings.SplitN(keyValuePair, "=", 2) + key := strings.TrimSpace(strings.ToLower(keyAndValue[0])) + if len(key) == 0 { + continue + } + + value := "" + if len(keyAndValue) > 1 { + value = strings.TrimSpace(keyAndValue[1]) + } + if strings.EqualFold("server", key) { + sqlInstance = value + continue + } + if strings.EqualFold("database", key) { + databaseName = value + } + } + + if sqlInstance == "" { + sqlInstance = emptySQLInstance + } + if databaseName == "" { + databaseName = emptyDatabaseName + } + + return sqlInstance, databaseName } + +// parseConnectionStringURL parses a URL-formatted connection string and returns the SQL instance and database name +func parseConnectionStringURL(connectionString string) (sqlInstance string, databaseName string) { + sqlInstance = emptySQLInstance + databaseName = emptyDatabaseName + + u, err := url.Parse(connectionString) + if err != nil { + return emptySQLInstance, emptyDatabaseName + } + + sqlInstance = u.Hostname() + + if len(u.Path) > 1 { + // There was a SQL instance name specified in addition to the host
+ // E.g. "the.host.com:1234/InstanceName" or "the.host.com/InstanceName" + sqlInstance = sqlInstance + "\\" + u.Path[1:] + } + + query := u.Query() + for key, value := range query { + if strings.EqualFold("database", key) { + databaseName = value[0] + break + } + } + + return sqlInstance, databaseName +} diff --git a/plugins/inputs/sqlserver/sample.conf b/plugins/inputs/sqlserver/sample.conf new file mode 100644 index 0000000000000..ad19f28f182e5 --- /dev/null +++ b/plugins/inputs/sqlserver/sample.conf @@ -0,0 +1,139 @@ +# Read metrics from Microsoft SQL Server +[[inputs.sqlserver]] + ## Specify instances to monitor with a list of connection strings. + ## All connection parameters are optional. + ## By default, the host is localhost, listening on default port, TCP 1433. + ## For Windows, the user is the currently running AD user (SSO). + ## See https://github.com/denisenkom/go-mssqldb for detailed connection + ## parameters, in particular, TLS connections can be created like so: + ## "encrypt=true;certificate=<cert>;hostNameInCertificate=<SqlServer host fqdn>" + servers = [ + "Server=192.168.1.10;Port=1433;User Id=<user>;Password=<pw>;app name=telegraf;log=1;", + ] + + ## Authentication method + ## valid methods: "connection_string", "AAD" + # auth_method = "connection_string" + + ## "database_type" enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 + ## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. + ## Possible values for database_type are - "SQLServer" or "AzureSQLDB" or "AzureSQLManagedInstance" or "AzureSQLPool" + + database_type = "SQLServer" + + ## A list of queries to include. If not specified, all the below listed queries are used. + include_query = [] + + ## A list of queries to explicitly ignore.
+ exclude_query = ["SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"] + + ## Queries enabled by default for database_type = "SQLServer" are - + ## SQLServerPerformanceCounters, SQLServerWaitStatsCategorized, SQLServerDatabaseIO, SQLServerProperties, SQLServerMemoryClerks, + ## SQLServerSchedulers, SQLServerRequests, SQLServerVolumeSpace, SQLServerCpu, SQLServerAvailabilityReplicaStates, SQLServerDatabaseReplicaStates, + ## SQLServerRecentBackups + + ## Queries enabled by default for database_type = "AzureSQLDB" are - + ## AzureSQLDBResourceStats, AzureSQLDBResourceGovernance, AzureSQLDBWaitStats, AzureSQLDBDatabaseIO, AzureSQLDBServerProperties, + ## AzureSQLDBOsWaitstats, AzureSQLDBMemoryClerks, AzureSQLDBPerformanceCounters, AzureSQLDBRequests, AzureSQLDBSchedulers + + ## Queries enabled by default for database_type = "AzureSQLManagedInstance" are - + ## AzureSQLMIResourceStats, AzureSQLMIResourceGovernance, AzureSQLMIDatabaseIO, AzureSQLMIServerProperties, AzureSQLMIOsWaitstats, + ## AzureSQLMIMemoryClerks, AzureSQLMIPerformanceCounters, AzureSQLMIRequests, AzureSQLMISchedulers + + ## Queries enabled by default for database_type = "AzureSQLPool" are - + ## AzureSQLPoolResourceStats, AzureSQLPoolResourceGovernance, AzureSQLPoolDatabaseIO, AzureSQLPoolWaitStats, + ## AzureSQLPoolMemoryClerks, AzureSQLPoolPerformanceCounters, AzureSQLPoolSchedulers + + ## The following are old config settings. + ## You may use them only if you are using the earlier flavor of queries; however, it is recommended to use + ## the new database_type mechanism, which selects the corresponding queries for that type. + + ## Optional parameter, setting this to 2 will use a new version + ## of the collection queries that break compatibility with the original + ## dashboards. + ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB + # query_version = 2 + + ## If you are using AzureDB, setting this to true will gather resource utilization metrics + # azuredb = false + + ## Toggling this to true will emit an additional metric called "sqlserver_telegraf_health". + ## This metric tracks the count of attempted queries and successful queries for each SQL instance specified in "servers". + ## The purpose of this metric is to assist with identifying and diagnosing any connectivity or query issues. + ## This setting/metric is optional and is disabled by default.
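+ ## When health_metric is enabled, each gather emits one point per configured server. A sketch of + ## the resulting series in line protocol (tag and field names follow the plugin code; the host, + ## database, and values shown are illustrative only): + ## sqlserver_telegraf_health,sql_instance=192.168.1.10,database_name=master attempted_queries=9i,successful_queries=9i,database_type="SQLServer"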
+ # health_metric = false + + ## Possible queries across different versions of the collectors + ## Queries enabled by default for specific Database Type + + ## database_type = AzureSQLDB by default collects the following queries + ## - AzureSQLDBWaitStats + ## - AzureSQLDBResourceStats + ## - AzureSQLDBResourceGovernance + ## - AzureSQLDBDatabaseIO + ## - AzureSQLDBServerProperties + ## - AzureSQLDBOsWaitstats + ## - AzureSQLDBMemoryClerks + ## - AzureSQLDBPerformanceCounters + ## - AzureSQLDBRequests + ## - AzureSQLDBSchedulers + + ## database_type = AzureSQLManagedInstance by default collects the following queries + ## - AzureSQLMIResourceStats + ## - AzureSQLMIResourceGovernance + ## - AzureSQLMIDatabaseIO + ## - AzureSQLMIServerProperties + ## - AzureSQLMIOsWaitstats + ## - AzureSQLMIMemoryClerks + ## - AzureSQLMIPerformanceCounters + ## - AzureSQLMIRequests + ## - AzureSQLMISchedulers + + ## database_type = AzureSQLPool by default collects the following queries + ## - AzureSQLPoolResourceStats + ## - AzureSQLPoolResourceGovernance + ## - AzureSQLPoolDatabaseIO + ## - AzureSQLPoolOsWaitStats + ## - AzureSQLPoolMemoryClerks + ## - AzureSQLPoolPerformanceCounters + ## - AzureSQLPoolSchedulers + + ## database_type = SQLServer by default collects the following queries + ## - SQLServerPerformanceCounters + ## - SQLServerWaitStatsCategorized + ## - SQLServerDatabaseIO + ## - SQLServerProperties + ## - SQLServerMemoryClerks + ## - SQLServerSchedulers + ## - SQLServerRequests + ## - SQLServerVolumeSpace + ## - SQLServerCpu + ## - SQLServerRecentBackups + ## and the following as optional (if mentioned in the include_query list) + ## - SQLServerAvailabilityReplicaStates + ## - SQLServerDatabaseReplicaStates + + ## Version 2 by default collects the following queries + ## Version 2 is being deprecated, please consider using database_type. + ## - PerformanceCounters + ## - WaitStatsCategorized + ## - DatabaseIO + ## - ServerProperties + ## - MemoryClerk + ## - Schedulers + ## - SqlRequests + ## - VolumeSpace + ## - Cpu + + ## Version 1 by default collects the following queries + ## Version 1 is deprecated, please consider using database_type.
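+ ## (When database_type is set to one of the supported values it takes precedence, and the deprecated + ## query_version/azuredb settings are ignored; they apply only when database_type is unset or + ## unrecognized.)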
+ ## - PerformanceCounters + ## - WaitStatsCategorized + ## - CPUHistory + ## - DatabaseIO + ## - DatabaseSize + ## - DatabaseStats + ## - DatabaseProperties + ## - MemoryClerk + ## - VolumeSpace + ## - PerformanceMetrics diff --git a/plugins/inputs/sqlserver/sqlqueriesV2.go b/plugins/inputs/sqlserver/sqlqueriesV2.go index 66b1bdf5976b5..3521cc9571661 100644 --- a/plugins/inputs/sqlserver/sqlqueriesV2.go +++ b/plugins/inputs/sqlserver/sqlqueriesV2.go @@ -1348,37 +1348,62 @@ IF @EngineEdition IN (2,3,4) AND @MajorMinorVersion >= 1050 END ` -const sqlServerCpuV2 string = ` +const sqlServerCPUV2 string = ` /*The ring buffer has a new value every minute*/ IF SERVERPROPERTY('EngineEdition') IN (2,3,4) /*Standard,Enterpris,Express*/ BEGIN -SELECT - 'sqlserver_cpu' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[SQLProcessUtilization] AS [sqlserver_process_cpu] - ,[SystemIdle] AS [system_idle_cpu] - ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] -FROM ( - SELECT TOP 1 - [record_id] - /*,dateadd(ms, (y.[timestamp] - (SELECT CAST([ms_ticks] AS BIGINT) FROM sys.dm_os_sys_info)), GETDATE()) AS [EventTime] --use for check/debug purpose*/ - ,[SQLProcessUtilization] - ,[SystemIdle] +;WITH utilization_cte AS +( + SELECT + [SQLProcessUtilization] AS [sqlserver_process_cpu] + ,[SystemIdle] AS [system_idle_cpu] + ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] FROM ( - SELECT record.value('(./Record/@id)[1]', 'int') AS [record_id] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] - ,[TIMESTAMP] + SELECT TOP 1 + [record_id] + ,[SQLProcessUtilization] + ,[SystemIdle] FROM ( - SELECT [TIMESTAMP] - ,convert(XML, [record]) AS [record] - FROM sys.dm_os_ring_buffers - WHERE [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' - AND [record] LIKE '%<SystemHealth>%' - ) AS x - ) AS y - ORDER BY record_id DESC -) as z + SELECT + record.value('(./Record/@id)[1]', 'int') AS [record_id] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] + ,[TIMESTAMP] + FROM ( + SELECT + [TIMESTAMP] + ,convert(XML, [record]) AS [record] + FROM sys.dm_os_ring_buffers + WHERE + [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' + AND [record] LIKE '%<SystemHealth>%' + ) AS x + ) AS y + ORDER BY [record_id] DESC + ) AS z +), +processor_Info_cte AS +( + SELECT (cpu_count / hyperthread_ratio) as number_of_physical_cpus + FROM sys.dm_os_sys_info +) +SELECT + 'sqlserver_cpu' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[sqlserver_process_cpu] + ,[system_idle_cpu] + ,100 - [system_idle_cpu] - [sqlserver_process_cpu] AS [other_process_cpu] +FROM + ( + SELECT + (case + when [other_process_cpu] < 0 then [sqlserver_process_cpu] / a.number_of_physical_cpus + else [sqlserver_process_cpu] + end) as [sqlserver_process_cpu] + ,[system_idle_cpu] + FROM utilization_cte + CROSS APPLY processor_Info_cte a + ) AS b END ` diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 2ed4df266598f..5987e06a81545 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -1,28 +1,43 @@ +//go:generate ../../../tools/readme_config_includer/generator package sqlserver import (
"database/sql" + _ "embed" + "errors" "fmt" - "log" + "strings" "sync" "time" - _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization + "github.com/Azure/go-autorest/autorest/adal" + mssql "github.com/denisenkom/go-mssqldb" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + // SQLServer struct type SQLServer struct { - Servers []string `toml:"servers"` - QueryVersion int `toml:"query_version"` - AzureDB bool `toml:"azuredb"` - DatabaseType string `toml:"database_type"` - IncludeQuery []string `toml:"include_query"` - ExcludeQuery []string `toml:"exclude_query"` - queries MapQuery - isInitialized bool + Servers []string `toml:"servers"` + AuthMethod string `toml:"auth_method"` + QueryVersion int `toml:"query_version" deprecated:"1.16.0;use 'database_type' instead"` + AzureDB bool `toml:"azuredb" deprecated:"1.16.0;use 'database_type' instead"` + DatabaseType string `toml:"database_type"` + IncludeQuery []string `toml:"include_query"` + ExcludeQuery []string `toml:"exclude_query"` + HealthMetric bool `toml:"health_metric"` + Log telegraf.Logger `toml:"-"` + + pools []*sql.DB + queries MapQuery + adalToken *adal.Token + muCacheLock sync.RWMutex } // Query struct @@ -36,99 +51,48 @@ type Query struct { // MapQuery type type MapQuery map[string]Query +// HealthMetric struct tracking the number of attempted vs successful connections for each connection string +type HealthMetric struct { + AttemptedQueries int + SuccessfulQueries int +} + const defaultServer = "Server=.;app name=telegraf;log=1;" -const sampleConfig = ` -## Specify instances to monitor with a list of connection strings. -## All connection parameters are optional. -## By default, the host is localhost, listening on default port, TCP 1433. -## for Windows, the user is the currently running AD user (SSO). -## See https://github.com/denisenkom/go-mssqldb for detailed connection -## parameters, in particular, tls connections can be created like so: -## "encrypt=true;certificate=;hostNameInCertificate=" -# servers = [ -# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", -# ] - -## This enables a specific set of queries depending on the database type. If specified, it replaces azuredb = true/false and query_version = 2 -## In the config file, the sql server plugin section should be repeated each with a set of servers for a specific database_type. -## Possible values for database_type are -## "AzureSQLDB" -## "SQLServer" -## "AzureSQLManagedInstance" -# database_type = "AzureSQLDB" - - -## Optional parameter, setting this to 2 will use a new version -## of the collection queries that break compatibility with the original -## dashboards. 
-## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB -query_version = 2 - -## If you are using AzureDB, setting this to true will gather resource utilization metrics -# azuredb = false - -## Possible queries -## Version 2: -## - PerformanceCounters -## - WaitStatsCategorized -## - DatabaseIO -## - ServerProperties -## - MemoryClerk -## - Schedulers -## - SqlRequests -## - VolumeSpace -## - Cpu - -## Version 1: -## - PerformanceCounters -## - WaitStatsCategorized -## - CPUHistory -## - DatabaseIO -## - DatabaseSize -## - DatabaseStats -## - DatabaseProperties -## - MemoryClerk -## - VolumeSpace -## - PerformanceMetrics - - -## Queries enabled by default for specific Database Type -## database_type = AzureSQLDB - ## AzureDBWaitStats, AzureDBResourceStats, AzureDBResourceGovernance, sqlAzureDBDatabaseIO - -## A list of queries to include. If not specified, all the above listed queries are used. -# include_query = [] - -## A list of queries to explicitly ignore. -exclude_query = [ 'Schedulers' , 'SqlRequests'] -` - -// SampleConfig return the sample configuration -func (s *SQLServer) SampleConfig() string { - return sampleConfig -} +const ( + typeAzureSQLDB = "AzureSQLDB" + typeAzureSQLManagedInstance = "AzureSQLManagedInstance" + typeAzureSQLPool = "AzureSQLPool" + typeSQLServer = "SQLServer" +) -// Description return plugin description -func (s *SQLServer) Description() string { - return "Read metrics from Microsoft SQL Server" -} +const ( + healthMetricName = "sqlserver_telegraf_health" + healthMetricInstanceTag = "sql_instance" + healthMetricDatabaseTag = "database_name" + healthMetricAttemptedQueries = "attempted_queries" + healthMetricSuccessfulQueries = "successful_queries" + healthMetricDatabaseType = "database_type" +) + +// resource id for Azure SQL Database +const sqlAzureResourceID = "https://database.windows.net/" type scanner interface { Scan(dest ...interface{}) error } -func initQueries(s *SQLServer) error { +func (s *SQLServer) initQueries() error { s.queries = make(MapQuery) queries := s.queries - log.Printf("I! 
[inputs.sqlserver] Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB) + s.Log.Infof("Config: database_type: %s , query_version:%d , azuredb: %t", s.DatabaseType, s.QueryVersion, s.AzureDB) - // New config option database_type // To prevent query definition conflicts - // Constant defintiions for type "AzureSQLDB" start with sqlAzureDB - // Constant defintiions for type "AzureSQLManagedInstance" start with sqlAzureMI - // Constant defintiions for type "SQLServer" start with sqlServer - if s.DatabaseType == "AzureSQLDB" { + // Constant definitions for type "AzureSQLDB" start with sqlAzureDB + // Constant definitions for type "AzureSQLManagedInstance" start with sqlAzureMI + // Constant definitions for type "AzureSQLPool" start with sqlAzurePool + // Constant definitions for type "SQLServer" start with sqlServer + if s.DatabaseType == typeAzureSQLDB { queries["AzureSQLDBResourceStats"] = Query{ScriptName: "AzureSQLDBResourceStats", Script: sqlAzureDBResourceStats, ResultByRow: false} queries["AzureSQLDBResourceGovernance"] = Query{ScriptName: "AzureSQLDBResourceGovernance", Script: sqlAzureDBResourceGovernance, ResultByRow: false} queries["AzureSQLDBWaitStats"] = Query{ScriptName: "AzureSQLDBWaitStats", Script: sqlAzureDBWaitStats, ResultByRow: false} @@ -139,7 +103,7 @@ func initQueries(s *SQLServer) error { queries["AzureSQLDBPerformanceCounters"] = Query{ScriptName: "AzureSQLDBPerformanceCounters", Script: sqlAzureDBPerformanceCounters, ResultByRow: false} queries["AzureSQLDBRequests"] = Query{ScriptName: "AzureSQLDBRequests", Script: sqlAzureDBRequests, ResultByRow: false} queries["AzureSQLDBSchedulers"] = Query{ScriptName: "AzureSQLDBSchedulers", Script: sqlAzureDBSchedulers, ResultByRow: false} - } else if s.DatabaseType == "AzureSQLManagedInstance" { + } else if s.DatabaseType == typeAzureSQLManagedInstance { queries["AzureSQLMIResourceStats"] = Query{ScriptName: "AzureSQLMIResourceStats", Script: sqlAzureMIResourceStats, ResultByRow: false} queries["AzureSQLMIResourceGovernance"] = Query{ScriptName: "AzureSQLMIResourceGovernance", Script: sqlAzureMIResourceGovernance, ResultByRow: false} queries["AzureSQLMIDatabaseIO"] = Query{ScriptName: "AzureSQLMIDatabaseIO", Script: sqlAzureMIDatabaseIO, ResultByRow: false} @@ -149,7 +113,15 @@ func initQueries(s *SQLServer) error { queries["AzureSQLMIPerformanceCounters"] = Query{ScriptName: "AzureSQLMIPerformanceCounters", Script: sqlAzureMIPerformanceCounters, ResultByRow: false} queries["AzureSQLMIRequests"] = Query{ScriptName: "AzureSQLMIRequests", Script: sqlAzureMIRequests, ResultByRow: false} queries["AzureSQLMISchedulers"] = Query{ScriptName: "AzureSQLMISchedulers", Script: sqlAzureMISchedulers, ResultByRow: false} - } else if s.DatabaseType == "SQLServer" { //These are still V2 queries and have not been refactored yet. 
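For reference, the tail of initQueries (outside the hunks shown here) prunes the assembled MapQuery against IncludeQuery/ExcludeQuery with the telegraf filter package. A minimal, self-contained sketch of that include/exclude pruning, assuming filter.NewIncludeExcludeFilter treats an empty include list as match-all:

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	queries := map[string]struct{}{
		"SQLServerPerformanceCounters": {},
		"SQLServerSchedulers":          {},
		"SQLServerRequests":            {},
	}

	// Build the filter: a nil include list selects everything,
	// then any name matching the exclude list is dropped.
	f, err := filter.NewIncludeExcludeFilter(nil, []string{"SQLServerRequests"})
	if err != nil {
		panic(err)
	}

	for name := range queries {
		if !f.Match(name) {
			delete(queries, name)
		}
	}

	fmt.Println(len(queries)) // 2: SQLServerRequests was pruned
}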
+ } else if s.DatabaseType == typeAzureSQLPool { + queries["AzureSQLPoolResourceStats"] = Query{ScriptName: "AzureSQLPoolResourceStats", Script: sqlAzurePoolResourceStats, ResultByRow: false} + queries["AzureSQLPoolResourceGovernance"] = Query{ScriptName: "AzureSQLPoolResourceGovernance", Script: sqlAzurePoolResourceGovernance, ResultByRow: false} + queries["AzureSQLPoolDatabaseIO"] = Query{ScriptName: "AzureSQLPoolDatabaseIO", Script: sqlAzurePoolDatabaseIO, ResultByRow: false} + queries["AzureSQLPoolOsWaitStats"] = Query{ScriptName: "AzureSQLPoolOsWaitStats", Script: sqlAzurePoolOsWaitStats, ResultByRow: false} + queries["AzureSQLPoolMemoryClerks"] = Query{ScriptName: "AzureSQLPoolMemoryClerks", Script: sqlAzurePoolMemoryClerks, ResultByRow: false} + queries["AzureSQLPoolPerformanceCounters"] = Query{ScriptName: "AzureSQLPoolPerformanceCounters", Script: sqlAzurePoolPerformanceCounters, ResultByRow: false} + queries["AzureSQLPoolSchedulers"] = Query{ScriptName: "AzureSQLPoolSchedulers", Script: sqlAzurePoolSchedulers, ResultByRow: false} + } else if s.DatabaseType == typeSQLServer { //These are still V2 queries and have not been refactored yet. queries["SQLServerPerformanceCounters"] = Query{ScriptName: "SQLServerPerformanceCounters", Script: sqlServerPerformanceCounters, ResultByRow: false} queries["SQLServerWaitStatsCategorized"] = Query{ScriptName: "SQLServerWaitStatsCategorized", Script: sqlServerWaitStatsCategorized, ResultByRow: false} queries["SQLServerDatabaseIO"] = Query{ScriptName: "SQLServerDatabaseIO", Script: sqlServerDatabaseIO, ResultByRow: false} @@ -158,7 +130,10 @@ func initQueries(s *SQLServer) error { queries["SQLServerSchedulers"] = Query{ScriptName: "SQLServerSchedulers", Script: sqlServerSchedulers, ResultByRow: false} queries["SQLServerRequests"] = Query{ScriptName: "SQLServerRequests", Script: sqlServerRequests, ResultByRow: false} queries["SQLServerVolumeSpace"] = Query{ScriptName: "SQLServerVolumeSpace", Script: sqlServerVolumeSpace, ResultByRow: false} - queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCpu, ResultByRow: false} + queries["SQLServerCpu"] = Query{ScriptName: "SQLServerCpu", Script: sqlServerRingBufferCPU, ResultByRow: false} + queries["SQLServerAvailabilityReplicaStates"] = Query{ScriptName: "SQLServerAvailabilityReplicaStates", Script: sqlServerAvailabilityReplicaStates, ResultByRow: false} + queries["SQLServerDatabaseReplicaStates"] = Query{ScriptName: "SQLServerDatabaseReplicaStates", Script: sqlServerDatabaseReplicaStates, ResultByRow: false} + queries["SQLServerRecentBackups"] = Query{ScriptName: "SQLServerRecentBackups", Script: sqlServerRecentBackups, ResultByRow: false} } else { // If this is an AzureDB instance, grab some extra metrics if s.AzureDB { @@ -167,7 +142,6 @@ func initQueries(s *SQLServer) error { } // Decide if we want to run version 1 or version 2 queries if s.QueryVersion == 2 { - log.Println("W! 
DEPRECATION NOTICE: query_version=2 is being deprecated in favor of database_type.") queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCountersV2, ResultByRow: true} queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorizedV2, ResultByRow: false} queries["DatabaseIO"] = Query{ScriptName: "DatabaseIO", Script: sqlDatabaseIOV2, ResultByRow: false} @@ -176,9 +150,8 @@ func initQueries(s *SQLServer) error { queries["Schedulers"] = Query{ScriptName: "Schedulers", Script: sqlServerSchedulersV2, ResultByRow: false} queries["SqlRequests"] = Query{ScriptName: "SqlRequests", Script: sqlServerRequestsV2, ResultByRow: false} queries["VolumeSpace"] = Query{ScriptName: "VolumeSpace", Script: sqlServerVolumeSpaceV2, ResultByRow: false} - queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCpuV2, ResultByRow: false} + queries["Cpu"] = Query{ScriptName: "Cpu", Script: sqlServerCPUV2, ResultByRow: false} } else { - log.Println("W! DEPRECATED: query_version=1 has been deprecated in favor of database_type.") queries["PerformanceCounters"] = Query{ScriptName: "PerformanceCounters", Script: sqlPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{ScriptName: "WaitStatsCategorized", Script: sqlWaitStatsCategorized, ResultByRow: false} queries["CPUHistory"] = Query{ScriptName: "CPUHistory", Script: sqlCPUHistory, ResultByRow: false} @@ -203,60 +176,134 @@ func initQueries(s *SQLServer) error { } } - // Set a flag so we know that queries have already been initialized - s.isInitialized = true var querylist []string for query := range queries { querylist = append(querylist, query) } - log.Printf("I! [inputs.sqlserver] Config: Effective Queries: %#v\n", querylist) + s.Log.Infof("Config: Effective Queries: %#v\n", querylist) return nil } +func (*SQLServer) SampleConfig() string { + return sampleConfig +} + // Gather collect data from SQL Server func (s *SQLServer) Gather(acc telegraf.Accumulator) error { - if !s.isInitialized { - if err := initQueries(s); err != nil { - acc.AddError(err) - return err - } - } - - if len(s.Servers) == 0 { - s.Servers = append(s.Servers, defaultServer) - } - var wg sync.WaitGroup + var mutex sync.Mutex + var healthMetrics = make(map[string]*HealthMetric) - for _, serv := range s.Servers { + for i, pool := range s.pools { for _, query := range s.queries { wg.Add(1) - go func(serv string, query Query) { + go func(pool *sql.DB, query Query, serverIndex int) { defer wg.Done() - acc.AddError(s.gatherServer(serv, query, acc)) - }(serv, query) + connectionString := s.Servers[serverIndex] + queryError := s.gatherServer(pool, query, acc, connectionString) + + if s.HealthMetric { + mutex.Lock() + s.gatherHealth(healthMetrics, connectionString, queryError) + mutex.Unlock() + } + + acc.AddError(queryError) + }(pool, query, i) } } wg.Wait() + + if s.HealthMetric { + s.accHealth(healthMetrics, acc) + } + return nil } -func (s *SQLServer) gatherServer(server string, query Query, acc telegraf.Accumulator) error { - // deferred opening - conn, err := sql.Open("mssql", server) - if err != nil { +// Start initialize a list of connection pools +func (s *SQLServer) Start(acc telegraf.Accumulator) error { + if err := s.initQueries(); err != nil { + acc.AddError(err) return err } - defer conn.Close() + // initialize mutual exclusion lock + s.muCacheLock = sync.RWMutex{} + + for _, serv := range s.Servers { + var pool *sql.DB + + switch strings.ToLower(s.AuthMethod) { + case 
"connection_string": + // Use the DSN (connection string) directly. In this case, + // empty username/password causes use of Windows + // integrated authentication. + var err error + pool, err = sql.Open("mssql", serv) + + if err != nil { + acc.AddError(err) + continue + } + case "aad": + // AAD Auth with system-assigned managed identity (MSI) + + // AAD Auth is only supported for Azure SQL Database or Azure SQL Managed Instance + if s.DatabaseType == "SQLServer" { + err := errors.New("database connection failed : AAD auth is not supported for SQL VM i.e. DatabaseType=SQLServer") + acc.AddError(err) + continue + } + + // get token from in-memory cache variable or from Azure Active Directory + tokenProvider, err := s.getTokenProvider() + if err != nil { + acc.AddError(fmt.Errorf("error creating AAD token provider for system assigned Azure managed identity : %s", err.Error())) + continue + } + + connector, err := mssql.NewAccessTokenConnector(serv, tokenProvider) + if err != nil { + acc.AddError(fmt.Errorf("error creating the SQL connector : %s", err.Error())) + continue + } + + pool = sql.OpenDB(connector) + default: + return fmt.Errorf("unknown auth method: %v", s.AuthMethod) + } + + s.pools = append(s.pools, pool) + } + + return nil +} + +// Stop cleanup server connection pools +func (s *SQLServer) Stop() { + for _, pool := range s.pools { + _ = pool.Close() + } +} + +func (s *SQLServer) gatherServer(pool *sql.DB, query Query, acc telegraf.Accumulator, connectionString string) error { // execute query - rows, err := conn.Query(query.Script) + rows, err := pool.Query(query.Script) if err != nil { - return fmt.Errorf("Script %s failed: %w", query.ScriptName, err) - //return err + serverName, databaseName := getConnectionIdentifiers(connectionString) + + // Error msg based on the format in SSMS. 
SQLErrorClass() is another term for severity/level: http://msdn.microsoft.com/en-us/library/dd304156.aspx + if sqlerr, ok := err.(mssql.Error); ok { + return fmt.Errorf("query %s failed for server: %s and database: %s with Msg %d, Level %d, State %d, Line %d, Error: %w", query.ScriptName, + serverName, databaseName, sqlerr.SQLErrorNumber(), sqlerr.SQLErrorClass(), sqlerr.SQLErrorState(), sqlerr.SQLErrorLineNo(), err) + } + + return fmt.Errorf("query %s failed for server: %s and database: %s with Error: %w", query.ScriptName, serverName, databaseName, err) } + defer rows.Close() // grab the column information from the result @@ -307,6 +354,10 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e } } + if s.DatabaseType != "" { + tags["measurement_db_type"] = s.DatabaseType + } + if query.ResultByRow { // add measurement to Accumulator acc.AddFields(measurement, @@ -316,7 +367,7 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e // values for header, val := range columnMap { if _, ok := (*val).(string); !ok { - fields[header] = (*val) + fields[header] = *val } } // add fields to Accumulator @@ -325,8 +376,147 @@ func (s *SQLServer) accRow(query Query, acc telegraf.Accumulator, row scanner) e return nil } +// gatherHealth stores info about any query errors in the healthMetrics map +func (s *SQLServer) gatherHealth(healthMetrics map[string]*HealthMetric, serv string, queryError error) { + if healthMetrics[serv] == nil { + healthMetrics[serv] = &HealthMetric{} + } + + healthMetrics[serv].AttemptedQueries++ + if queryError == nil { + healthMetrics[serv].SuccessfulQueries++ + } +} + +// accHealth accumulates the query health data contained within the healthMetrics map +func (s *SQLServer) accHealth(healthMetrics map[string]*HealthMetric, acc telegraf.Accumulator) { + for connectionString, connectionStats := range healthMetrics { + sqlInstance, databaseName := getConnectionIdentifiers(connectionString) + tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: databaseName} + fields := map[string]interface{}{ + healthMetricAttemptedQueries: connectionStats.AttemptedQueries, + healthMetricSuccessfulQueries: connectionStats.SuccessfulQueries, + healthMetricDatabaseType: s.getDatabaseTypeToLog(), + } + + acc.AddFields(healthMetricName, fields, tags, time.Now()) + } +} + +// getDatabaseTypeToLog returns the type of database monitored by this plugin instance +func (s *SQLServer) getDatabaseTypeToLog() string { + if s.DatabaseType == typeAzureSQLDB || s.DatabaseType == typeAzureSQLManagedInstance || s.DatabaseType == typeSQLServer { + return s.DatabaseType + } + + logname := fmt.Sprintf("QueryVersion-%d", s.QueryVersion) + if s.AzureDB { + logname += "-AzureDB" + } + return logname +} + +func (s *SQLServer) Init() error { + if len(s.Servers) == 0 { + s.Log.Warn("Warning: Server list is empty.") + } + + return nil +} + +// Get Token Provider by loading cached token or refreshed token +func (s *SQLServer) getTokenProvider() (func() (string, error), error) { + var tokenString string + + // load token + s.muCacheLock.RLock() + token, err := s.loadToken() + s.muCacheLock.RUnlock() + + // if there's an error while loading the token, or the token has expired, refresh it and save it + if err != nil || token.IsExpired() { + // refresh token within a write-lock + s.muCacheLock.Lock() + defer s.muCacheLock.Unlock() + + // load token again, in case it's been refreshed by another thread + token, err = s.loadToken() + + // 
check loaded token's error/validity, then refresh/save token + if err != nil || token.IsExpired() { + // get new token + spt, err := s.refreshToken() + if err != nil { + return nil, err + } + + // use the refreshed token + tokenString = spt.OAuthToken() + } else { + // use locally cached token + tokenString = token.OAuthToken() + } + } else { + // use locally cached token + tokenString = token.OAuthToken() + } + + // return acquired token + return func() (string, error) { + return tokenString, nil + }, nil +} + +// Load token from in-mem cache +func (s *SQLServer) loadToken() (*adal.Token, error) { + // This method currently does a simplistic task of reading from a variable (in-mem cache), + // however it's been structured here to allow extending the cache mechanism to a different approach in the future + + if s.adalToken == nil { + return nil, fmt.Errorf("token is nil or failed to load existing token") + } + + return s.adalToken, nil +} + +// Refresh token for the resource, and save to in-mem cache +func (s *SQLServer) refreshToken() (*adal.Token, error) { + // get MSI endpoint to get a token + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, err + } + + // get new token for the resource id + spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, sqlAzureResourceID) + if err != nil { + return nil, err + } + + // ensure token is fresh + if err := spt.EnsureFresh(); err != nil { + return nil, err + } + + // save token to local in-mem cache + s.adalToken = &adal.Token{ + AccessToken: spt.Token().AccessToken, + RefreshToken: spt.Token().RefreshToken, + ExpiresIn: spt.Token().ExpiresIn, + ExpiresOn: spt.Token().ExpiresOn, + NotBefore: spt.Token().NotBefore, + Resource: spt.Token().Resource, + Type: spt.Token().Type, + } + + return s.adalToken, nil +} + func init() { inputs.Add("sqlserver", func() telegraf.Input { - return &SQLServer{} + return &SQLServer{ + Servers: []string{defaultServer}, + AuthMethod: "connection_string", + } }) } diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 8f5d355ef4df3..c50bbcf4982cd 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -1,15 +1,15 @@ package sqlserver import ( + "os" "strconv" "strings" "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { @@ -33,17 +33,17 @@ func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { QueryVersion: 2, IncludeQuery: test["IncludeQuery"].([]string), ExcludeQuery: test["ExcludeQuery"].([]string), + Log: testutil.Logger{}, } - initQueries(&s) - assert.Equal(t, len(s.queries), test["queriesTotal"].(int)) + require.NoError(t, s.initQueries()) + require.Equal(t, len(s.queries), test["queriesTotal"].(int)) for _, query := range test["queries"].([]string) { - assert.Contains(t, s.queries, query) + require.Contains(t, s.queries, query) } } } func TestSqlServer_ParseMetrics(t *testing.T) { - var acc testutil.Accumulator queries := make(MapQuery) @@ -63,7 +63,6 @@ func TestSqlServer_ParseMetrics(t *testing.T) { var fields = make(map[string]interface{}) for _, query := range queries { - mock = strings.Split(query.Script, "\n") idx := 0 @@ -78,7 +77,6 @@ func TestSqlServer_ParseMetrics(t *testing.T) { tags[headers[2]] = row[2] // tag 'type' if query.ResultByRow { - // set value by converting to float64
value, err := strconv.ParseFloat(row[3], 64) // require @@ -90,11 +88,9 @@ func TestSqlServer_ParseMetrics(t *testing.T) { tags, time.Now()) // assert acc.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"value": value}, tags) - } else { // set fields for i := 3; i < len(row); i++ { - // set value by converting to float64 value, err := strconv.ParseFloat(row[i], 64) // require @@ -113,62 +109,329 @@ func TestSqlServer_ParseMetrics(t *testing.T) { } } -func TestSqlServer_MultipleInstance(t *testing.T) { +func TestSqlServerIntegration_MultipleInstance(t *testing.T) { // Invoke Gather() from two separate configurations and // confirm they don't interfere with each other - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433'") + testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" s := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"MemoryClerk"}, + Log: testutil.Logger{}, } s2 := &SQLServer{ Servers: []string{testServer}, ExcludeQuery: []string{"DatabaseSize"}, + Log: testutil.Logger{}, } var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) err := s.Gather(&acc) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, false) + require.NoError(t, s2.Start(&acc2)) err = s2.Gather(&acc2) require.NoError(t, err) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, true) // acc includes size metrics, and excludes memory metrics - assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) - assert.True(t, acc.HasMeasurement("Log size (bytes)")) + require.False(t, acc.HasMeasurement("Memory breakdown (%)")) + require.True(t, acc.HasMeasurement("Log size (bytes)")) // acc2 includes memory metrics, and excludes size metrics - assert.True(t, acc2.HasMeasurement("Memory breakdown (%)")) - assert.False(t, acc2.HasMeasurement("Log size (bytes)")) + require.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + require.False(t, acc2.HasMeasurement("Log size (bytes)")) } -func TestSqlServer_MultipleInit(t *testing.T) { +func TestSqlServerIntegration_MultipleInstanceWithHealthMetric(t *testing.T) { + // Invoke Gather() from two separate configurations and + // confirm they don't interfere with each other. + // This test is intentionally similar to TestSqlServerIntegration_MultipleInstance.
+ // It is separated to ensure that the health metric code does not affect other metrics + t.Skip("Skipping as unable to open tcp connection with host '127.0.0.1:1433'") + + testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + s := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"MemoryClerk"}, + Log: testutil.Logger{}, + } + s2 := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"DatabaseSize"}, + HealthMetric: true, + Log: testutil.Logger{}, + } + + var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) + err := s.Gather(&acc) + require.NoError(t, err) + + require.NoError(t, s2.Start(&acc2)) + err = s2.Gather(&acc2) + require.NoError(t, err) + + // acc includes size metrics, and excludes memory metrics and the health metric + require.False(t, acc.HasMeasurement(healthMetricName)) + require.False(t, acc.HasMeasurement("Memory breakdown (%)")) + require.True(t, acc.HasMeasurement("Log size (bytes)")) + + // acc2 includes memory metrics and the health metric, and excludes size metrics + require.True(t, acc2.HasMeasurement(healthMetricName)) + require.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + require.False(t, acc2.HasMeasurement("Log size (bytes)")) + + sqlInstance, database := getConnectionIdentifiers(testServer) + tags := map[string]string{healthMetricInstanceTag: sqlInstance, healthMetricDatabaseTag: database} + require.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricAttemptedQueries, 9)) + require.True(t, acc2.HasPoint(healthMetricName, tags, healthMetricSuccessfulQueries, 9)) +} + +func TestSqlServer_HealthMetric(t *testing.T) { + fakeServer1 := "localhost\\fakeinstance1;Database=fakedb1;Password=ABCabc01;" + fakeServer2 := "localhost\\fakeinstance2;Database=fakedb2;Password=ABCabc01;" + + s1 := &SQLServer{ + Servers: []string{fakeServer1, fakeServer2}, + IncludeQuery: []string{"DatabaseSize", "MemoryClerk"}, + HealthMetric: true, + AuthMethod: "connection_string", + Log: testutil.Logger{}, + } - s := &SQLServer{} + s2 := &SQLServer{ + Servers: []string{fakeServer1}, + IncludeQuery: []string{"DatabaseSize"}, + AuthMethod: "connection_string", + Log: testutil.Logger{}, + } + + // acc1 should have the health metric because it is specified in the config + var acc1 testutil.Accumulator + require.NoError(t, s1.Start(&acc1)) + require.NoError(t, s1.Gather(&acc1)) + require.True(t, acc1.HasMeasurement(healthMetricName)) + + // There will be 2 attempted queries (because we specified 2 queries in IncludeQuery) + // Both queries should fail because the specified SQL instances do not exist + sqlInstance1, database1 := getConnectionIdentifiers(fakeServer1) + tags1 := map[string]string{healthMetricInstanceTag: sqlInstance1, healthMetricDatabaseTag: database1} + require.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricAttemptedQueries, 2)) + require.True(t, acc1.HasPoint(healthMetricName, tags1, healthMetricSuccessfulQueries, 0)) + + sqlInstance2, database2 := getConnectionIdentifiers(fakeServer2) + tags2 := map[string]string{healthMetricInstanceTag: sqlInstance2, healthMetricDatabaseTag: database2} + require.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricAttemptedQueries, 2)) + require.True(t, acc1.HasPoint(healthMetricName, tags2, healthMetricSuccessfulQueries, 0)) + + // acc2 should not have the health metric because it is not specified in the config + var acc2 testutil.Accumulator + require.NoError(t, s2.Gather(&acc2)) + require.False(t, 
acc2.HasMeasurement(healthMetricName)) +} + +func TestSqlServer_MultipleInit(t *testing.T) { + s := &SQLServer{Log: testutil.Logger{}} s2 := &SQLServer{ ExcludeQuery: []string{"DatabaseSize"}, + Log: testutil.Logger{}, } - initQueries(s) + require.NoError(t, s.initQueries()) _, ok := s.queries["DatabaseSize"] - // acc includes size metrics - assert.True(t, ok) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, false) + require.True(t, ok) - initQueries(s2) + require.NoError(t, s2.initQueries()) _, ok = s2.queries["DatabaseSize"] - // acc2 excludes size metrics - assert.False(t, ok) - assert.Equal(t, s.isInitialized, true) - assert.Equal(t, s2.isInitialized, true) + require.False(t, ok) + s.Stop() + s2.Stop() +} + +func TestSqlServer_ConnectionString(t *testing.T) { + // URL format + connectionString := "sqlserver://username:password@hostname.database.windows.net?database=databasename&connection+timeout=30" + sqlInstance, database := getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "databasename", database) + + connectionString = " sqlserver://hostname2.somethingelse.net:1433?database=databasename2" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname2.somethingelse.net", sqlInstance) + require.Equal(t, "databasename2", database) + + connectionString = "sqlserver://hostname3:1433/SqlInstanceName3?database=databasename3" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname3\\SqlInstanceName3", sqlInstance) + require.Equal(t, "databasename3", database) + + connectionString = " sqlserver://hostname4/SqlInstanceName4?database=databasename4&connection%20timeout=30" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname4\\SqlInstanceName4", sqlInstance) + require.Equal(t, "databasename4", database) + + connectionString = " sqlserver://username:password@hostname5?connection%20timeout=30" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname5", sqlInstance) + require.Equal(t, emptyDatabaseName, database) + + // odbc format + connectionString = "odbc:server=hostname.database.windows.net;user id=sa;database=master;Trusted_Connection=Yes;Integrated Security=true;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "master", database) + + connectionString = " odbc:server=192.168.0.1;user id=somethingelse;Integrated Security=true;Database=mydb " + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "192.168.0.1", sqlInstance) + require.Equal(t, "mydb", database) + + connectionString = " odbc:Server=servername\\instancename;Database=dbname;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "servername\\instancename", sqlInstance) + require.Equal(t, "dbname", database) + + connectionString = "server=hostname2.database.windows.net;user id=sa;Trusted_Connection=Yes;Integrated Security=true;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname2.database.windows.net", sqlInstance) + require.Equal(t, emptyDatabaseName, database) + + connectionString = "invalid connection string" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, emptySQLInstance, sqlInstance) + require.Equal(t, 
emptyDatabaseName, database) + + // Key/value format + connectionString = " server=hostname.database.windows.net;user id=sa;database=master;Trusted_Connection=Yes;Integrated Security=true" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname.database.windows.net", sqlInstance) + require.Equal(t, "master", database) + + connectionString = " server=192.168.0.1;user id=somethingelse;Integrated Security=true;Database=mydb;" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "192.168.0.1", sqlInstance) + require.Equal(t, "mydb", database) + + connectionString = "Server=servername\\instancename;Database=dbname; " + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "servername\\instancename", sqlInstance) + require.Equal(t, "dbname", database) + + connectionString = "server=hostname2.database.windows.net;user id=sa;Trusted_Connection=Yes;Integrated Security=true " + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, "hostname2.database.windows.net", sqlInstance) + require.Equal(t, emptyDatabaseName, database) + + connectionString = "invalid connection string" + sqlInstance, database = getConnectionIdentifiers(connectionString) + require.Equal(t, emptySQLInstance, sqlInstance) + require.Equal(t, emptyDatabaseName, database) +} + +func TestSqlServerIntegration_AGQueriesApplicableForDatabaseTypeSQLServer(t *testing.T) { + // This test case checks whether Availability Group (AG / HADR) queries return output when included for processing for DatabaseType = SQLServer, + // and that they are not processed when DatabaseType = AzureSQLDB + + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING") + } + testServer := os.Getenv("AZURESQL_POOL_CONNECTION_STRING") + + s := &SQLServer{ + Servers: []string{testServer}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, + } + s2 := &SQLServer{ + Servers: []string{testServer}, + DatabaseType: "AzureSQLDB", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, + } + + var acc, acc2 testutil.Accumulator + require.NoError(t, s.Start(&acc)) + err := s.Gather(&acc) + require.NoError(t, err) + + require.NoError(t, s2.Start(&acc2)) + err = s2.Gather(&acc2) + require.NoError(t, err) + + // acc (DatabaseType = SQLServer) includes the HADR measurements + require.True(t, acc.HasMeasurement("sqlserver_hadr_replica_states")) + require.True(t, acc.HasMeasurement("sqlserver_hadr_dbreplica_states")) + + // acc2 (DatabaseType = AzureSQLDB) excludes the HADR measurements + require.False(t, acc2.HasMeasurement("sqlserver_hadr_replica_states")) + require.False(t, acc2.HasMeasurement("sqlserver_hadr_dbreplica_states")) + s.Stop() + s2.Stop() +} + +func TestSqlServerIntegration_AGQueryFieldsOutputBasedOnSQLServerVersion(t *testing.T) { + // This test case checks whether Availability Group (AG / HADR) queries return the specific fields supported by the SQL Server version being connected to.
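+ // The environment-variable guards below repeat once per connection string; a hypothetical helper + // (a sketch, not part of this patch) could express the pattern once: + // func connectionStringFromEnv(t *testing.T, key string) string { + // v := os.Getenv(key) + // if v == "" { + // t.Skipf("Missing environment variable %s", key) + // } + // return v + // }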
+ + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING_2019") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING_2019") + } + if os.Getenv("AZURESQL_POOL_CONNECTION_STRING_2012") == "" { + t.Skip("Missing environment variable AZURESQL_POOL_CONNECTION_STRING_2012") + } + + testServer2019 := os.Getenv("AZURESQL_POOL_CONNECTION_STRING_2019") + testServer2012 := os.Getenv("AZURESQL_POOL_CONNECTION_STRING_2012") + + s2019 := &SQLServer{ + Servers: []string{testServer2019}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, + } + s2012 := &SQLServer{ + Servers: []string{testServer2012}, + DatabaseType: "SQLServer", + IncludeQuery: []string{"SQLServerAvailabilityReplicaStates", "SQLServerDatabaseReplicaStates"}, + Log: testutil.Logger{}, + } + + var acc2019, acc2012 testutil.Accumulator + require.NoError(t, s2019.Start(&acc2019)) + err := s2019.Gather(&acc2019) + require.NoError(t, err) + + require.NoError(t, s2012.Start(&acc2012)) + err = s2012.Gather(&acc2012) + require.NoError(t, err) + + // acc2019 includes new HADR query fields + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "basic_features")) + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "is_distributed")) + require.True(t, acc2019.HasField("sqlserver_hadr_replica_states", "seeding_mode")) + require.True(t, acc2019.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) + require.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) + require.True(t, acc2019.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) + + // acc2012 does not include new HADR query fields + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "basic_features")) + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "is_distributed")) + require.False(t, acc2012.HasField("sqlserver_hadr_replica_states", "seeding_mode")) + require.False(t, acc2012.HasTag("sqlserver_hadr_replica_states", "seeding_mode_desc")) + require.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "is_primary_replica")) + require.False(t, acc2012.HasField("sqlserver_hadr_dbreplica_states", "secondary_lag_seconds")) + s2019.Stop() + s2012.Stop() } const mockPerformanceMetrics = `measurement;servername;type;Point In Time Recovery;Available physical memory (bytes);Average pending disk IO;Average runnable tasks;Average tasks;Buffer pool rate (bytes/sec);Connection memory per connection (bytes);Memory grant pending;Page File Usage (%);Page lookup per batch request;Page split per batch request;Readahead per page read;Signal wait (%);Sql compilation per batch request;Sql recompilation per batch request;Total target memory ratio diff --git a/plugins/inputs/sqlserver/sqlserverqueries.go b/plugins/inputs/sqlserver/sqlserverqueries.go index f3d3aa3ca34c9..43ace7a09d341 100644 --- a/plugins/inputs/sqlserver/sqlserverqueries.go +++ b/plugins/inputs/sqlserver/sqlserverqueries.go @@ -9,11 +9,11 @@ import ( // Variable @MajorMinorVersion: // - 1000 --> SQL Server 2008 // - 1050 --> SQL Server 2008 R2 -// - 1011 --> SQL Server 2012 -// - 1012 --> SQL Server 2014 -// - 1013 --> SQL Server 2016 -// - 1014 --> SQL Server 2017 -// - 1015 --> SQL Server 2019 +// - 1100 --> SQL Server 2012 +// - 1200 --> SQL Server 2014 +// - 1300 --> SQL Server 2016 +// - 1400 --> SQL Server 2017 +// - 
1500 --> SQL Server 2019 // Thanks Bob Ward (http://aka.ms/bobwardms) // and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs) @@ -151,7 +151,7 @@ DECLARE ,@Columns AS nvarchar(max) = '' ,@Tables AS nvarchar(max) = '' -IF @MajorMinorVersion >= 1050 BEGIN +IF CAST(SERVERPROPERTY('ProductVersion') AS varchar(50)) >= '10.50.2500.0' BEGIN /*in [volume_mount_point] any trailing "\" char will be automatically removed by telegraf */ SET @Columns += N' ,[volume_mount_point]' @@ -170,7 +170,7 @@ SELECT ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] ,DB_NAME(vfs.[database_id]) AS [database_name] ,COALESCE(mf.[physical_name],''RBPEX'') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension - ,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension + ,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension ,mf.[type_desc] AS [file_type] ,vfs.[io_stall_read_ms] AS [read_latency_ms] ,vfs.[num_of_reads] AS [reads] @@ -216,7 +216,7 @@ SELECT ,CAST(SERVERPROPERTY(''EngineEdition'') AS int) AS [engine_edition] ,DATEDIFF(MINUTE,si.[sqlserver_start_time],GETDATE()) AS [uptime] ,SERVERPROPERTY(''ProductVersion'') AS [sql_version] - ,LEFT(@@VERSION,CHARINDEX(' - ',@@VERSION)) AS [sql_version_desc] + ,LEFT(@@VERSION,CHARINDEX('' - '',@@VERSION)) AS [sql_version_desc] ,dbs.[db_online] ,dbs.[db_restoring] ,dbs.[db_recovering] @@ -282,6 +282,17 @@ FROM sys.dm_os_schedulers AS s' EXEC sp_executesql @SqlStatement ` +/* +This string defines a SQL statements to retrieve Performance Counters as documented here - + SQL Server Performance Objects - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects?view=sql-server-ver15#SQLServerPOs +Some of the specific objects used are - + MSSQL$*:Access Methods - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object?view=sql-server-ver15 + MSSQL$*:Buffer Manager - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object?view=sql-server-ver15 + MSSQL$*:Databases - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-ver15 + MSSQL$*:General Statistics - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object?view=sql-server-ver15 + MSSQL$*:Exec Statistics - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-execstatistics-object?view=sql-server-ver15 + SQLServer:Query Store - https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-query-store-object?view=sql-server-ver15 +*/ const sqlServerPerformanceCounters string = ` SET DEADLOCK_PRIORITY -10; IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ @@ -322,6 +333,7 @@ SELECT DISTINCT ,'Logins/sec' ,'Processes blocked' ,'Latch Waits/sec' + ,'Average Latch Wait Time (ms)' ,'Full Scans/sec' ,'Index Searches/sec' ,'Page Splits/sec' @@ -331,13 +343,17 @@ SELECT DISTINCT ,'Readahead pages/sec' ,'Lazy writes/sec' ,'Checkpoint pages/sec' + ,'Table Lock Escalations/sec' ,'Page life expectancy' ,'Log File(s) Size (KB)' ,'Log File(s) Used Size (KB)' ,'Data File(s) Size (KB)' ,'Transactions/sec' ,'Write Transactions/sec' + ,'Active Transactions' + ,'Log 
Growths' ,'Active Temp Tables' + ,'Logical Connections' ,'Temp Tables Creation Rate' ,'Temp Tables For Destruction' ,'Free Space in tempdb (KB)' @@ -394,6 +410,14 @@ SELECT DISTINCT ,'Mirrored Write Transactions/sec' ,'Group Commit Time' ,'Group Commits/Sec' + ,'Workfiles Created/sec' + ,'Worktables Created/sec' + ,'Distributed Query' + ,'DTC calls' + ,'Query Store CPU usage' + ,'Query Store physical reads' + ,'Query Store logical reads' + ,'Query Store logical writes' ) OR ( spi.[object_name] LIKE '%User Settable%' OR spi.[object_name] LIKE '%SQL Errors%' @@ -1024,63 +1048,77 @@ ELSE BEGIN END SET @SqlStatement = N' -SELECT [blocking_session_id] into #blockingSessions FROM sys.dm_exec_requests WHERE [blocking_session_id] != 0 -CREATE INDEX ix_blockingSessions_1 ON #blockingSessions ([blocking_session_id]) - SELECT - ''sqlserver_requests'' AS [measurement] - ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] - ,s.session_id - ,ISNULL(r.[request_id], 0) AS [request_id] - ,COALESCE(r.[status], s.[status]) AS [status] - ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] - ,COALESCE(r.[total_elapsed_time], s.total_elapsed_time) AS [total_elapsed_time_ms] - ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] - ,COALESCE(r.[writes], s.[writes]) AS [writes] - ,r.[command] - ,r.[wait_time] AS [wait_time_ms] - ,r.[wait_type] - ,r.[wait_resource] - ,r.[blocking_session_id] - ,s.[program_name] - ,s.[host_name] - ,s.[nt_user_name] - ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) - WHEN 0 THEN ''0-Read Committed'' - WHEN 1 THEN ''1-Read Uncommitted (NOLOCK)'' - WHEN 2 THEN ''2-Read Committed'' - WHEN 3 THEN ''3-Repeatable Read'' - WHEN 4 THEN ''4-Serializable'' - WHEN 5 THEN ''5-Snapshot'' - ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + ''-UNKNOWN'' - END, 30) AS [transaction_isolation_level] - ,r.[granted_query_memory] AS [granted_query_memory_pages] - ,r.[percent_complete] - ,SUBSTRING( - qt.[text], - r.[statement_start_offset] / 2 + 1, - (CASE WHEN r.[statement_end_offset] = -1 - THEN DATALENGTH(qt.[text]) - ELSE r.[statement_end_offset] - END - r.[statement_start_offset]) / 2 + 1 - ) AS [statement_text] - ,qt.[objectid] - ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + ''.'' + QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) AS [stmt_object_name] - ,DB_NAME(qt.dbid) AS [stmt_db_name] - ,CONVERT(varchar(20),[query_hash],1) AS [query_hash] - ,CONVERT(varchar(20),[query_plan_hash],1) AS [query_plan_hash]' - + @Columns + N' -FROM sys.dm_exec_sessions AS s -LEFT OUTER JOIN sys.dm_exec_requests AS r - ON s.[session_id] = r.[session_id] -OUTER APPLY sys.dm_exec_sql_text(r.[sql_handle]) AS qt + [measurement],[sql_instance],[session_id] + ,ISNULL([request_id],0) AS [request_id] + ,[blocking_session_id],[status],[cpu_time_ms] + ,[total_elapsed_time_ms],[logical_reads],[writes] + ,[command],[wait_time_ms],[wait_type] + ,[wait_resource],[program_name] + ,[host_name],[nt_user_name],[login_name] + ,[transaction_isolation_level],[granted_query_memory_pages],[percent_complete] + ,[statement_text],[objectid],[stmt_object_name] + ,[stmt_db_name],[query_hash],[query_plan_hash] + ,[session_db_name],[open_transaction] +FROM ( + SELECT + ''sqlserver_requests'' AS [measurement] + ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] + ,s.[session_id] + ,r.[request_id] + ,COALESCE(r.[status], s.[status]) AS [status] + ,COALESCE(r.[cpu_time], s.[cpu_time]) AS [cpu_time_ms] + ,COALESCE(r.[total_elapsed_time], s.total_elapsed_time) AS 
[total_elapsed_time_ms] + ,COALESCE(r.[logical_reads], s.[logical_reads]) AS [logical_reads] + ,COALESCE(r.[writes], s.[writes]) AS [writes] + ,r.[command] + ,r.[wait_time] AS [wait_time_ms] + ,r.[wait_type] + ,r.[wait_resource] + ,NULLIF(r.[blocking_session_id],0) AS [blocking_session_id] + ,s.[program_name] + ,s.[host_name] + ,s.[nt_user_name] + ,s.[login_name] + ,LEFT (CASE COALESCE(r.[transaction_isolation_level], s.[transaction_isolation_level]) + WHEN 0 THEN ''0-Read Committed'' + WHEN 1 THEN ''1-Read Uncommitted (NOLOCK)'' + WHEN 2 THEN ''2-Read Committed'' + WHEN 3 THEN ''3-Repeatable Read'' + WHEN 4 THEN ''4-Serializable'' + WHEN 5 THEN ''5-Snapshot'' + ELSE CONVERT (varchar(30), r.[transaction_isolation_level]) + ''-UNKNOWN'' + END, 30) AS [transaction_isolation_level] + ,r.[granted_query_memory] AS [granted_query_memory_pages] + ,r.[percent_complete] + ,SUBSTRING( + qt.[text], + r.[statement_start_offset] / 2 + 1, + (CASE WHEN r.[statement_end_offset] = -1 + THEN DATALENGTH(qt.[text]) + ELSE r.[statement_end_offset] + END - r.[statement_start_offset]) / 2 + 1 + ) AS [statement_text] + ,qt.[objectid] + ,QUOTENAME(OBJECT_SCHEMA_NAME(qt.[objectid], qt.[dbid])) + ''.'' + QUOTENAME(OBJECT_NAME(qt.[objectid], qt.[dbid])) AS [stmt_object_name] + ,DB_NAME(qt.dbid) AS [stmt_db_name] + ,CONVERT(varchar(20),r.[query_hash],1) AS [query_hash] + ,CONVERT(varchar(20),r.[query_plan_hash],1) AS [query_plan_hash] + ,s.[is_user_process] + ,[blocking_or_blocked] = COUNT(*) OVER(PARTITION BY ISNULL(NULLIF(r.[blocking_session_id], 0),s.[session_id]))' + + @Columns + N' + FROM sys.dm_exec_sessions AS s + LEFT OUTER JOIN sys.dm_exec_requests AS r + ON s.[session_id] = r.[session_id] + OUTER APPLY sys.dm_exec_sql_text(r.[sql_handle]) AS qt +) AS data WHERE - (s.[session_id] IN (SELECT blocking_session_id FROM #blockingSessions)) + [blocking_or_blocked] > 1 --Always include blocking or blocked sessions/requests OR ( - r.[session_id] IS NOT NULL - AND ( - s.is_user_process = 1 - OR r.[status] COLLATE Latin1_General_BIN NOT IN (''background'', ''sleeping'') + [request_id] IS NOT NULL --A request must exists + AND ( --Always fetch user process (in any state), fetch system process only if active + [is_user_process] = 1 + OR [status] COLLATE Latin1_General_BIN NOT IN (''background'', ''sleeping'') ) ) OPTION(MAXDOP 1)' @@ -1097,7 +1135,7 @@ END DECLARE @MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) - + IF @MajorMinorVersion >= 1050 BEGIN SELECT DISTINCT 'sqlserver_volume_space' AS [measurement] @@ -1113,40 +1151,237 @@ IF @MajorMinorVersion >= 1050 BEGIN END ` -const sqlServerRingBufferCpu string = ` +const sqlServerRingBufferCPU string = ` IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. 
Check the database_type parameter in the telegraf configuration.'; RAISERROR (@ErrorMessage,11,1) RETURN -END +END; -SELECT - 'sqlserver_cpu' AS [measurement] - ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] - ,[SQLProcessUtilization] AS [sqlserver_process_cpu] - ,[SystemIdle] AS [system_idle_cpu] - ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] -FROM ( - SELECT TOP 1 - [record_id] - ,[SQLProcessUtilization] - ,[SystemIdle] +WITH utilization_cte AS +( + SELECT + [SQLProcessUtilization] AS [sqlserver_process_cpu] + ,[SystemIdle] AS [system_idle_cpu] + ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] FROM ( - SELECT - record.value('(./Record/@id)[1]', 'int') AS [record_id] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] - ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] - ,[TIMESTAMP] + SELECT TOP 1 + [record_id] + ,[SQLProcessUtilization] + ,[SystemIdle] FROM ( SELECT - [TIMESTAMP] - ,convert(XML, [record]) AS [record] - FROM sys.dm_os_ring_buffers - WHERE - [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' - AND [record] LIKE '%%' - ) AS x - ) AS y - ORDER BY [record_id] DESC -) AS z + record.value('(./Record/@id)[1]', 'int') AS [record_id] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] + ,[TIMESTAMP] + FROM ( + SELECT + [TIMESTAMP] + ,convert(XML, [record]) AS [record] + FROM sys.dm_os_ring_buffers + WHERE + [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' + AND [record] LIKE '%%' + ) AS x + ) AS y + ORDER BY [record_id] DESC + ) AS z +), +processor_Info_cte AS +( + SELECT ([cpu_count] / [hyperthread_ratio]) as [number_of_physical_cpus] + FROM sys.dm_os_sys_info +) +SELECT + 'sqlserver_cpu' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[sqlserver_process_cpu] + ,[system_idle_cpu] + ,100 - [system_idle_cpu] - [sqlserver_process_cpu] AS [other_process_cpu] +FROM ( + SELECT + (CASE + WHEN u.[other_process_cpu] < 0 THEN u.[sqlserver_process_cpu] / p.[number_of_physical_cpus] + ELSE u.[sqlserver_process_cpu] + END) AS [sqlserver_process_cpu] + ,u.[system_idle_cpu] + FROM utilization_cte AS u + CROSS APPLY processor_Info_cte AS p + ) AS b +` + +// Collects availability replica state information from `sys.dm_hadr_availability_replica_states` for a High Availability / Disaster Recovery (HADR) setup +// Certain fields are only supported on SQL Server 2016 and newer version, identified by check MajorMinorVersion >= 1300 +const sqlServerAvailabilityReplicaStates string = ` +IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE + @SqlStatement AS nvarchar(max) + ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) + ,@Columns AS nvarchar(MAX) = '' + +IF @MajorMinorVersion >= 1300 BEGIN + SET @Columns += N' + ,ag.basic_features + ,ag.is_distributed + ,ar.seeding_mode + ,ar.seeding_mode_desc' +END + +SET @SqlStatement = N' +IF SERVERPROPERTY(''IsHadrEnabled'') = 1 BEGIN + SELECT + ''sqlserver_hadr_replica_states'' AS [measurement] + ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance] + ,convert(nvarchar(36), hars.replica_id) as replica_id + ,ar.replica_server_name + ,convert(nvarchar(36), hars.group_id) as group_id + ,ag.name AS group_name + ,hags.synchronization_health_desc AS ag_synchronization_health_desc + ,ar.replica_metadata_id + ,ar.availability_mode + ,ar.availability_mode_desc + ,ar.failover_mode + ,ar.failover_mode_desc + ,ar.session_timeout + ,ar.primary_role_allow_connections + ,ar.primary_role_allow_connections_desc + ,ar.secondary_role_allow_connections + ,ar.secondary_role_allow_connections_desc + ,hars.is_local + ,hars.role + ,hars.role_desc + ,hars.operational_state + ,hars.operational_state_desc + ,hars.connected_state + ,hars.connected_state_desc + ,hars.recovery_health + ,hars.recovery_health_desc + ,hars.synchronization_health AS replica_synchronization_health + ,hars.synchronization_health_desc AS replica_synchronization_health_desc + ,hars.last_connect_error_number + ,hars.last_connect_error_description + ,hars.last_connect_error_timestamp' + + @Columns + N' + FROM sys.dm_hadr_availability_replica_states AS hars + INNER JOIN sys.availability_replicas AS ar on hars.replica_id = ar.replica_id + INNER JOIN sys.availability_groups AS ag on ar.group_id = ag.group_id + INNER JOIN sys.dm_hadr_availability_group_states AS hags ON hags.group_id = ag.group_id +END' + +EXEC sp_executesql @SqlStatement +` + +// Collects database replica state information from `sys.dm_hadr_database_replica_states` for a High Availability / Disaster Recovery (HADR) setup +// Certain fields are only supported on SQL Server 2016 and newer version, or SQL Server 2014 and newer, identified by check MajorMinorVersion >= 1300 or MajorMinorVersion >= 1200 +const sqlServerDatabaseReplicaStates string = ` +IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END + +DECLARE + @SqlStatement AS nvarchar(max) + ,@MajorMinorVersion AS int = CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),4) AS int)*100 + CAST(PARSENAME(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar),3) AS int) + ,@Columns AS nvarchar(MAX) = '' + +IF @MajorMinorVersion >= 1200 BEGIN + SET @Columns += N' + ,is_primary_replica' +END + +IF @MajorMinorVersion >= 1300 BEGIN + SET @Columns += N' + ,secondary_lag_seconds' +END + +SET @SqlStatement = N' +IF SERVERPROPERTY(''IsHadrEnabled'') = 1 BEGIN + SELECT + ''sqlserver_hadr_dbreplica_states'' AS [measurement] + ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] + ,database_id + ,db_name(database_id) as database_name + ,convert(nvarchar(36), drs.replica_id) as replica_id + ,ar.replica_server_name + ,convert(nvarchar(36), drs.group_database_id) as group_database_id + ,synchronization_state + ,synchronization_state_desc + ,is_commit_participant + ,synchronization_health + ,synchronization_health_desc + ,database_state + ,database_state_desc + ,is_suspended + ,suspend_reason + ,suspend_reason_desc + ,last_sent_time + ,last_received_time + ,last_hardened_time + ,last_redone_time + ,log_send_queue_size + ,log_send_rate + ,redo_queue_size + ,redo_rate + ,filestream_send_rate + ,last_commit_time' + + @Columns + N' + FROM sys.dm_hadr_database_replica_states AS drs + INNER JOIN sys.availability_replicas AS ar on drs.replica_id = ar.replica_id +END' + +EXEC sp_executesql @SqlStatement +` + +const sqlServerRecentBackups string = ` +IF SERVERPROPERTY('EngineEdition') NOT IN (2,3,4) BEGIN /*NOT IN Standard,Enterpris,Express*/ + DECLARE @ErrorMessage AS nvarchar(500) = 'Telegraf - Connection string Server:'+ @@ServerName + ',Database:' + DB_NAME() +' is not a SQL Server Standard,Enterprise or Express. 
Check the database_type parameter in the telegraf configuration.'; + RAISERROR (@ErrorMessage,11,1) + RETURN +END; + +WITH MostRecentBackups AS +( + SELECT + database_name AS [Database], + MAX(bus.backup_finish_date) AS LastBackupTime, + CASE bus.type + WHEN 'D' THEN 'Full' + WHEN 'I' THEN 'Differential' + WHEN 'L' THEN 'Transaction Log' + END AS Type + FROM msdb.dbo.backupset bus + WHERE bus.type <> 'F' + GROUP BY bus.database_name,bus.type +), +BackupsWithSize AS +( + SELECT mrb.*, CAST((SELECT TOP 1 b.backup_size FROM msdb.dbo.backupset b WHERE [Database] = b.database_name AND LastBackupTime = b.backup_finish_date) AS int) AS [backup_size] + FROM MostRecentBackups mrb +) + +SELECT + 'sqlserver_recentbackup' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + d.name AS [database_name], + d.database_id as [database_id], + d.state_desc AS [state], + d.recovery_model_desc AS [recovery_model], + DATEDIFF(SECOND,{d '1970-01-01'}, bf.LastBackupTime) AS [last_full_backup_time], + bf.backup_size AS [full_backup_size_bytes], + DATEDIFF(SECOND,{d '1970-01-01'}, bd.LastBackupTime) AS [last_differential_backup_time], + bd.backup_size AS [differential_backup_size_bytes], + DATEDIFF(SECOND,{d '1970-01-01'}, bt.LastBackupTime) AS [last_transaction_log_backup_time], + bt.backup_size AS [transaction_log_backup_size_bytes] +FROM sys.databases d +LEFT JOIN BackupsWithSize bf ON (d.name = bf.[Database] AND (bf.Type = 'Full' OR bf.Type IS NULL)) +LEFT JOIN BackupsWithSize bd ON (d.name = bd.[Database] AND (bd.Type = 'Differential' OR bd.Type IS NULL)) +LEFT JOIN BackupsWithSize bt ON (d.name = bt.[Database] AND (bt.Type = 'Transaction Log' OR bt.Type IS NULL)) +WHERE d.name <> 'tempdb' AND d.source_database_id IS NULL ` diff --git a/plugins/inputs/stackdriver/README.md b/plugins/inputs/stackdriver/README.md index 6469b259b78ec..f6f80a80764cc 100644 --- a/plugins/inputs/stackdriver/README.md +++ b/plugins/inputs/stackdriver/README.md @@ -6,9 +6,10 @@ Query data from Google Cloud Monitoring (formerly Stackdriver) using the This plugin accesses APIs which are [chargeable][pricing]; you might incur costs. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Gather timeseries from Google Cloud Platform v3 monitoring API [[inputs.stackdriver]] ## GCP Project project = "erudite-bloom-151019" @@ -58,9 +59,9 @@ costs. ## For a list of aligner strings see: ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner # distribution_aggregation_aligners = [ - # "ALIGN_PERCENTILE_99", - # "ALIGN_PERCENTILE_95", - # "ALIGN_PERCENTILE_50", + # "ALIGN_PERCENTILE_99", + # "ALIGN_PERCENTILE_95", + # "ALIGN_PERCENTILE_50", # ] ## Filters can be added to reduce the number of time series matched. All @@ -84,23 +85,24 @@ costs. ## Metric labels refine the time series selection with the following expression: ## metric.labels.<key> = <value> # [[inputs.stackdriver.filter.metric_labels]] - # key = "device_name" - # value = 'one_of("sda", "sdb")' + # key = "device_name" + # value = 'one_of("sda", "sdb")' ``` -#### Authentication +### Authentication It is recommended to use a service account to authenticate with the Stackdriver Monitoring API. [Getting Started with Authentication][auth]. -### Metrics +## Metrics Metrics are created using one of three patterns depending on whether the value type is a scalar value, raw distribution buckets, or aligned bucket values.
In all cases, the Stackdriver metric type is split on the last component into the measurement and field: -``` + +```sh compute.googleapis.com/instance/disk/read_bytes_count └────────── measurement ─────────┘ └── field ───┘ ``` @@ -114,7 +116,6 @@ compute.googleapis.com/instance/disk/read_bytes_count - fields: - field - **Distributions:** Distributions are represented by a set of fields along with the bucket values @@ -132,7 +133,7 @@ represents the total number of items less than the `lt` tag. - field_range_min - field_range_max -+ measurement +- measurement - tags: - resource_labels - metric_labels @@ -149,14 +150,16 @@ represents the total number of items less than the `lt` tag. - fields: - field_alignment_function -### Troubleshooting +## Troubleshooting When Telegraf is ran with `--debug`, detailed information about the performed queries will be logged. -### Example Output -``` +## Example Output + +```shell ``` + [stackdriver]: https://cloud.google.com/monitoring/api/v3/ [auth]: https://cloud.google.com/docs/authentication/getting-started [pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services diff --git a/plugins/inputs/stackdriver/sample.conf b/plugins/inputs/stackdriver/sample.conf new file mode 100644 index 0000000000000..b571e9dca9545 --- /dev/null +++ b/plugins/inputs/stackdriver/sample.conf @@ -0,0 +1,78 @@ +# Gather timeseries from Google Cloud Platform v3 monitoring API +[[inputs.stackdriver]] + ## GCP Project + project = "erudite-bloom-151019" + + ## Include timeseries that start with the given metric type. + metric_type_prefix_include = [ + "compute.googleapis.com/", + ] + + ## Exclude timeseries that start with the given metric type. + # metric_type_prefix_exclude = [] + + ## Most metrics are updated no more than once per minute; it is recommended + ## to override the agent level interval with a value of 1m or greater. + interval = "1m" + + ## Maximum number of API calls to make per second. The quota for accounts + ## varies, it can be viewed on the API dashboard: + ## https://cloud.google.com/monitoring/quotas#quotas_and_limits + # rate_limit = 14 + + ## The delay and window options control the number of points selected on + ## each gather. When set, metrics are gathered between: + ## start: now() - delay - window + ## end: now() - delay + # + ## Collection delay; if set too low metrics may not yet be available. + # delay = "5m" + # + ## If unset, the window will start at 1m and be updated dynamically to span + ## the time between calls (approximately the length of the plugin interval). + # window = "1m" + + ## TTL for cached list of metric types. This is the maximum amount of time + ## it may take to discover new metrics. + # cache_ttl = "1h" + + ## If true, raw bucket counts are collected for distribution value types. + ## For a more lightweight collection, you may wish to disable and use + ## distribution_aggregation_aligners instead. + # gather_raw_distribution_buckets = true + + ## Aggregate functions to be used for metrics whose value type is + ## distribution. These aggregate values are recorded in in addition to raw + ## bucket counts; if they are enabled. + ## + ## For a list of aligner strings see: + ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner + # distribution_aggregation_aligners = [ + # "ALIGN_PERCENTILE_99", + # "ALIGN_PERCENTILE_95", + # "ALIGN_PERCENTILE_50", + # ] + + ## Filters can be added to reduce the number of time series matched. 
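  ## (Illustrative aside, not part of the upstream sample: with the filter
  ## sections below enabled, the filter string used for a single
  ## ListTimeSeries call could look like
  ##   metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
  ##   resource.labels.instance_name = starts_with("localhost")
  ## and one such filter is built per discovered metric type.)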
All + ## functions are supported: starts_with, ends_with, has_substring, and + ## one_of. Only the '=' operator is supported. + ## + ## The logical operators when combining filters are defined statically using + ## the following values: + ## filter ::= {AND } + ## resource_labels ::= {OR } + ## metric_labels ::= {OR } + ## + ## For more details, see https://cloud.google.com/monitoring/api/v3/filters + # + ## Resource labels refine the time series selection with the following expression: + ## resource.labels. = + # [[inputs.stackdriver.filter.resource_labels]] + # key = "instance_name" + # value = 'starts_with("localhost")' + # + ## Metric labels refine the time series selection with the following expression: + ## metric.labels. = + # [[inputs.stackdriver.filter.metric_labels]] + # key = "device_name" + # value = 'one_of("sda", "sdb")' diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index 431076743101a..cd45f2f62975a 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -1,7 +1,9 @@ +//go:generate ../../../tools/readme_config_includer/generator package stackdriver import ( "context" + _ "embed" "fmt" "math" "strconv" @@ -9,108 +11,34 @@ import ( "sync" "time" - monitoring "cloud.google.com/go/monitoring/apiv3" - googlepbduration "github.com/golang/protobuf/ptypes/duration" - googlepbts "github.com/golang/protobuf/ptypes/timestamp" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "google.golang.org/api/iterator" + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal/limiter" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" // Imports the Stackdriver Monitoring client package. "github.com/influxdata/telegraf/selfstat" - "google.golang.org/api/iterator" - distributionpb "google.golang.org/genproto/googleapis/api/distribution" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( defaultRateLimit = 14 - description = "Gather timeseries from Google Cloud Platform v3 monitoring API" - sampleConfig = ` - ## GCP Project - project = "erudite-bloom-151019" - - ## Include timeseries that start with the given metric type. - metric_type_prefix_include = [ - "compute.googleapis.com/", - ] - - ## Exclude timeseries that start with the given metric type. - # metric_type_prefix_exclude = [] - - ## Many metrics are updated once per minute; it is recommended to override - ## the agent level interval with a value of 1m or greater. - interval = "1m" - - ## Maximum number of API calls to make per second. The quota for accounts - ## varies, it can be viewed on the API dashboard: - ## https://cloud.google.com/monitoring/quotas#quotas_and_limits - # rate_limit = 14 - - ## The delay and window options control the number of points selected on - ## each gather. 
When set, metrics are gathered between: - ## start: now() - delay - window - ## end: now() - delay - # - ## Collection delay; if set too low metrics may not yet be available. - # delay = "5m" - # - ## If unset, the window will start at 1m and be updated dynamically to span - ## the time between calls (approximately the length of the plugin interval). - # window = "1m" - - ## TTL for cached list of metric types. This is the maximum amount of time - ## it may take to discover new metrics. - # cache_ttl = "1h" - - ## If true, raw bucket counts are collected for distribution value types. - ## For a more lightweight collection, you may wish to disable and use - ## distribution_aggregation_aligners instead. - # gather_raw_distribution_buckets = true - - ## Aggregate functions to be used for metrics whose value type is - ## distribution. These aggregate values are recorded in in addition to raw - ## bucket counts; if they are enabled. - ## - ## For a list of aligner strings see: - ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner - # distribution_aggregation_aligners = [ - # "ALIGN_PERCENTILE_99", - # "ALIGN_PERCENTILE_95", - # "ALIGN_PERCENTILE_50", - # ] - - ## Filters can be added to reduce the number of time series matched. All - ## functions are supported: starts_with, ends_with, has_substring, and - ## one_of. Only the '=' operator is supported. - ## - ## The logical operators when combining filters are defined statically using - ## the following values: - ## filter ::= {AND } - ## resource_labels ::= {OR } - ## metric_labels ::= {OR } - ## - ## For more details, see https://cloud.google.com/monitoring/api/v3/filters - # - ## Resource labels refine the time series selection with the following expression: - ## resource.labels. = - # [[inputs.stackdriver.filter.resource_labels]] - # key = "instance_name" - # value = 'starts_with("localhost")' - # - ## Metric labels refine the time series selection with the following expression: - ## metric.labels. 
= - # [[inputs.stackdriver.filter.metric_labels]] - # key = "device_name" - # value = 'one_of("sda", "sdb")' -` ) var ( - defaultCacheTTL = internal.Duration{Duration: 1 * time.Hour} - defaultWindow = internal.Duration{Duration: 1 * time.Minute} - defaultDelay = internal.Duration{Duration: 5 * time.Minute} + defaultCacheTTL = config.Duration(1 * time.Hour) + defaultWindow = config.Duration(1 * time.Minute) + defaultDelay = config.Duration(5 * time.Minute) ) type ( @@ -118,9 +46,9 @@ type ( Stackdriver struct { Project string `toml:"project"` RateLimit int `toml:"rate_limit"` - Window internal.Duration `toml:"window"` - Delay internal.Duration `toml:"delay"` - CacheTTL internal.Duration `toml:"cache_ttl"` + Window config.Duration `toml:"window"` + Delay config.Duration `toml:"delay"` + CacheTTL config.Duration `toml:"cache_ttl"` MetricTypePrefixInclude []string `toml:"metric_type_prefix_include"` MetricTypePrefixExclude []string `toml:"metric_type_prefix_exclude"` GatherRawDistributionBuckets bool `toml:"gather_raw_distribution_buckets"` @@ -201,24 +129,24 @@ func (g *lockedSeriesGrouper) Add( } // ListMetricDescriptors implements metricClient interface -func (c *stackdriverMetricClient) ListMetricDescriptors( +func (smc *stackdriverMetricClient) ListMetricDescriptors( ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, ) (<-chan *metricpb.MetricDescriptor, error) { mdChan := make(chan *metricpb.MetricDescriptor, 1000) go func() { - c.log.Debugf("List metric descriptor request filter: %s", req.Filter) + smc.log.Debugf("List metric descriptor request filter: %s", req.Filter) defer close(mdChan) // Iterate over metric descriptors and send them to buffered channel - mdResp := c.conn.ListMetricDescriptors(ctx, req) - c.listMetricDescriptorsCalls.Incr(1) + mdResp := smc.conn.ListMetricDescriptors(ctx, req) + smc.listMetricDescriptorsCalls.Incr(1) for { mdDesc, mdErr := mdResp.Next() if mdErr != nil { if mdErr != iterator.Done { - c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) + smc.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) } break } @@ -230,24 +158,24 @@ func (c *stackdriverMetricClient) ListMetricDescriptors( } // ListTimeSeries implements metricClient interface -func (c *stackdriverMetricClient) ListTimeSeries( +func (smc *stackdriverMetricClient) ListTimeSeries( ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, ) (<-chan *monitoringpb.TimeSeries, error) { tsChan := make(chan *monitoringpb.TimeSeries, 1000) go func() { - c.log.Debugf("List time series request filter: %s", req.Filter) + smc.log.Debugf("List time series request filter: %s", req.Filter) defer close(tsChan) // Iterate over timeseries and send them to buffered channel - tsResp := c.conn.ListTimeSeries(ctx, req) - c.listTimeSeriesCalls.Incr(1) + tsResp := smc.conn.ListTimeSeries(ctx, req) + smc.listTimeSeriesCalls.Incr(1) for { tsDesc, tsErr := tsResp.Next() if tsErr != nil { if tsErr != iterator.Done { - c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) + smc.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) } break } @@ -259,17 +187,11 @@ func (c *stackdriverMetricClient) ListTimeSeries( } // Close implements metricClient interface -func (s *stackdriverMetricClient) Close() error { - return s.conn.Close() +func (smc *stackdriverMetricClient) Close() error { + return smc.conn.Close() } -// Description implements telegraf.Input interface -func (s 
*Stackdriver) Description() string { - return description -} - -// SampleConfig implements telegraf.Input interface -func (s *Stackdriver) SampleConfig() string { +func (*Stackdriver) SampleConfig() string { return sampleConfig } @@ -312,8 +234,8 @@ func (s *Stackdriver) Gather(acc telegraf.Accumulator) error { } wg.Wait() - for _, metric := range grouper.Metrics() { - acc.AddMetric(metric) + for _, groupedMetric := range grouper.Metrics() { + acc.AddMetric(groupedMetric) } return nil @@ -322,14 +244,14 @@ // Returns the start and end time for the next collection. func (s *Stackdriver) updateWindow(prevEnd time.Time) (time.Time, time.Time) { var start time.Time - if s.Window.Duration != 0 { - start = time.Now().Add(-s.Delay.Duration).Add(-s.Window.Duration) + if time.Duration(s.Window) != 0 { + start = time.Now().Add(-time.Duration(s.Delay)).Add(-time.Duration(s.Window)) } else if prevEnd.IsZero() { - start = time.Now().Add(-s.Delay.Duration).Add(-defaultWindow.Duration) + start = time.Now().Add(-time.Duration(s.Delay)).Add(-time.Duration(defaultWindow)) } else { start = prevEnd } - end := time.Now().Add(-s.Delay.Duration) + end := time.Now().Add(-time.Duration(s.Delay)) return start, end } @@ -393,11 +315,11 @@ func (s *Stackdriver) newTimeSeriesConf( ) *timeSeriesConf { filter := s.newListTimeSeriesFilter(metricType) interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: &timestamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: &timestamppb.Timestamp{Seconds: startTime.Unix()}, } tsReq := &monitoringpb.ListTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), Filter: filter, Interval: interval, } @@ -432,7 +354,7 @@ func (t *timeSeriesConf) initForAggregate(alignerStr string) { } aligner := monitoringpb.Aggregation_Aligner(alignerInt) agg := &monitoringpb.Aggregation{ - AlignmentPeriod: &googlepbduration.Duration{Seconds: 60}, + AlignmentPeriod: &durationpb.Duration{Seconds: 60}, PerSeriesAligner: aligner, } t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr) @@ -522,8 +444,8 @@ func (s *Stackdriver) generatetimeSeriesConfs( if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.IsValid() { // Update interval for timeseries requests in timeseries cache interval := &monitoringpb.TimeInterval{ - EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, - StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + EndTime: &timestamppb.Timestamp{Seconds: endTime.Unix()}, + StartTime: &timestamppb.Timestamp{Seconds: startTime.Unix()}, } for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs { timeSeriesConf.listTimeSeriesRequest.Interval = interval @@ -533,7 +455,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( ret := []*timeSeriesConf{} req := &monitoringpb.ListMetricDescriptorsRequest{ - Name: monitoring.MetricProjectPath(s.Project), + Name: fmt.Sprintf("projects/%s", s.Project), } filters := s.newListMetricDescriptorsFilters() @@ -579,7 +501,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( s.timeSeriesConfCache = &timeSeriesConfCache{ TimeSeriesConfs: ret, Generated: time.Now(), - TTL: s.CacheTTL.Duration, + TTL: time.Duration(s.CacheTTL), } return ret, nil @@ -613,7 +535,9 @@ func (s *Stackdriver) gatherTimeSeries( if tsDesc.ValueType == metricpb.MetricDescriptor_DISTRIBUTION { dist := p.Value.GetDistributionValue() -
s.addDistribution(dist, tags, ts, grouper, tsConf) + if err := s.addDistribution(dist, tags, ts, grouper, tsConf); err != nil { + return err + } } else { var value interface{} @@ -630,7 +554,9 @@ func (s *Stackdriver) gatherTimeSeries( value = p.Value.GetStringValue() } - grouper.Add(tsConf.measurement, tags, ts, tsConf.fieldKey, value) + if err := grouper.Add(tsConf.measurement, tags, ts, tsConf.fieldKey, value); err != nil { + return err + } } } } @@ -639,25 +565,34 @@ func (s *Stackdriver) gatherTimeSeries( } // AddDistribution adds metrics from a distribution value type. -func (s *Stackdriver) addDistribution( - metric *distributionpb.Distribution, - tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, -) { +func (s *Stackdriver) addDistribution(dist *distributionpb.Distribution, tags map[string]string, ts time.Time, + grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, +) error { field := tsConf.fieldKey name := tsConf.measurement - grouper.Add(name, tags, ts, field+"_count", metric.Count) - grouper.Add(name, tags, ts, field+"_mean", metric.Mean) - grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation) + if err := grouper.Add(name, tags, ts, field+"_count", dist.Count); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_mean", dist.Mean); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", dist.SumOfSquaredDeviation); err != nil { + return err + } - if metric.Range != nil { - grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min) - grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max) + if dist.Range != nil { + if err := grouper.Add(name, tags, ts, field+"_range_min", dist.Range.Min); err != nil { + return err + } + if err := grouper.Add(name, tags, ts, field+"_range_max", dist.Range.Max); err != nil { + return err + } } - linearBuckets := metric.BucketOptions.GetLinearBuckets() - exponentialBuckets := metric.BucketOptions.GetExponentialBuckets() - explicitBuckets := metric.BucketOptions.GetExplicitBuckets() + linearBuckets := dist.BucketOptions.GetLinearBuckets() + exponentialBuckets := dist.BucketOptions.GetExponentialBuckets() + explicitBuckets := dist.BucketOptions.GetExplicitBuckets() var numBuckets int32 if linearBuckets != nil { @@ -690,15 +625,19 @@ func (s *Stackdriver) addDistribution( // Add to the cumulative count; trailing buckets with value 0 are // omitted from the response. 
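// (Illustrative note: the "_bucket" field emitted below is cumulative, since
// each bucket's raw count is added to the running total before the field is
// written. For example, raw BucketCounts of [2, 3, 0] are emitted as 2, 5, 5.)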
- if i < int32(len(metric.BucketCounts)) { - count += metric.BucketCounts[i] + if i < int32(len(dist.BucketCounts)) { + count += dist.BucketCounts[i] + } + if err := grouper.Add(name, tags, ts, field+"_bucket", count); err != nil { + return err } - grouper.Add(name, tags, ts, field+"_bucket", count) } + + return nil } func init() { - f := func() telegraf.Input { + inputs.Add("stackdriver", func() telegraf.Input { return &Stackdriver{ CacheTTL: defaultCacheTTL, RateLimit: defaultRateLimit, @@ -706,7 +645,5 @@ func init() { GatherRawDistributionBuckets: true, DistributionAggregationAligners: []string{}, } - } - - inputs.Add("stackdriver", f) + }) } diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go index 8010ad4817924..ad6b15145031a 100644 --- a/plugins/inputs/stackdriver/stackdriver_test.go +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/golang/protobuf/ptypes/timestamp" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" @@ -15,6 +14,7 @@ import ( metricpb "google.golang.org/genproto/googleapis/api/metric" "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/protobuf/types/known/timestamppb" ) type Call struct { @@ -105,7 +105,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -138,7 +138,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -171,7 +171,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -204,7 +204,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -249,7 +249,7 @@ func TestGather(t *testing.T) { Points: []*monitoringpb.Point{ { Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -283,7 +283,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -378,7 +378,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -473,7 +473,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -556,7 +556,7 @@ func TestGather(t *testing.T) { timeseries: createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -702,7 +702,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -717,7 +717,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -732,7 +732,7 @@ func TestGatherAlign(t *testing.T) { createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -803,7 +803,6 @@ func TestGatherAlign(t *testing.T) { } testutil.RequireMetricsEqual(t, tt.expected, actual) - }) } } @@ -1082,7 +1081,7 @@ func TestListMetricDescriptorFilter(t *testing.T) { ch <- createTimeSeries( &monitoringpb.Point{ Interval: &monitoringpb.TimeInterval{ - EndTime: &timestamp.Timestamp{ + EndTime: &timestamppb.Timestamp{ Seconds: now.Unix(), }, }, @@ -1126,8 +1125,8 @@ } } -func TestNewListTimeSeriesFilter(t *testing.T) { +func TestNewListTimeSeriesFilter(_ *testing.T) { } -func TestTimeSeriesConfCacheIsValid(t *testing.T) { +func TestTimeSeriesConfCacheIsValid(_ *testing.T) { } diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 57953eed72600..2e3c37b4006f3 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -1,8 +1,10 @@ # StatsD Input Plugin -### Configuration +The StatsD input plugin gathers metrics from a Statsd server. -```toml +## Configuration + +```toml @sample.conf # Statsd Server [[inputs.statsd]] ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp) @@ -50,6 +52,10 @@ ## http://docs.datadoghq.com/guides/dogstatsd/ datadog_extensions = false + ## Parses distributions metric as specified in the datadog statsd format + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + datadog_distributions = false + ## Statsd data translation templates, more info can be read here: ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ @@ -68,57 +74,72 @@ ## Maximum socket buffer size in bytes, once the buffer fills up, metrics ## will start dropping. Defaults to the OS default. # read_buffer_size = 65535 + + ## Max duration (TTL) for each metric to stay cached/reported without being updated. + # max_ttl = "10h" + + ## Sanitize name method + ## By default, telegraf will pass names directly as they are received. + ## However, upstream statsd now does sanitization of names which can be + ## enabled by using the "upstream" method option. This option will a) replace + ## white space with '_', replace '/' with '-', and remove characters not + ## matching 'a-zA-Z_\-0-9\.;='. + #sanitize_name_method = "" ``` -### Description +## Description The statsd plugin is a special type of plugin which runs a backgrounded statsd listener service while telegraf is running. The format of the statsd messages was based on the format described in the -original [etsy statsd](https://github.com/etsy/statsd/blob/master/docs/metric_types.md) +original [etsy +statsd](https://github.com/etsy/statsd/blob/master/docs/metric_types.md) implementation.
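Each accepted line has the shape `name:value|type`, optionally followed by `|@rate` for sampled metrics. A minimal sketch of that framing, using a hypothetical helper rather than the plugin's actual parser:

```go
package main

import (
	"fmt"
	"strings"
)

// splitStatsdLine decomposes a line such as "deploys.test.myservice:1|c|@0.1"
// into its name, value, metric type, and optional sample rate.
// Illustrative only; the real listener handles many more edge cases.
func splitStatsdLine(line string) (name, value, mtype, rate string) {
	name, rest, _ := strings.Cut(line, ":")
	parts := strings.Split(rest, "|")
	value = parts[0]
	if len(parts) > 1 {
		mtype = parts[1]
	}
	if len(parts) > 2 {
		rate = strings.TrimPrefix(parts[2], "@")
	}
	return name, value, mtype, rate
}

func main() {
	fmt.Println(splitStatsdLine("deploys.test.myservice:1|c|@0.1"))
	// prints: deploys.test.myservice 1 c 0.1
}
```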
In short, the telegraf statsd listener will accept: - Gauges - - `users.current.den001.myapp:32|g` <- standard - - `users.current.den001.myapp:+10|g` <- additive - - `users.current.den001.myapp:-10|g` + - `users.current.den001.myapp:32|g` <- standard + - `users.current.den001.myapp:+10|g` <- additive + - `users.current.den001.myapp:-10|g` - Counters - - `deploys.test.myservice:1|c` <- increments by 1 - - `deploys.test.myservice:101|c` <- increments by 101 - - `deploys.test.myservice:1|c|@0.1` <- with sample rate, increments by 10 + - `deploys.test.myservice:1|c` <- increments by 1 + - `deploys.test.myservice:101|c` <- increments by 101 + - `deploys.test.myservice:1|c|@0.1` <- with sample rate, increments by 10 - Sets - - `users.unique:101|s` - - `users.unique:101|s` - - `users.unique:102|s` <- would result in a count of 2 for `users.unique` + - `users.unique:101|s` + - `users.unique:101|s` + - `users.unique:102|s` <- would result in a count of 2 for `users.unique` - Timings & Histograms - - `load.time:320|ms` - - `load.time.nanoseconds:1|h` - - `load.time:200|ms|@0.1` <- sampled 1/10 of the time + - `load.time:320|ms` + - `load.time.nanoseconds:1|h` + - `load.time:200|ms|@0.1` <- sampled 1/10 of the time +- Distributions + - `load.time:320|d` + - `load.time.nanoseconds:1|d` + - `load.time:200|d|@0.1` <- sampled 1/10 of the time It is possible to omit repetitive names and merge individual stats into a single line by separating them with additional colons: - - `users.current.den001.myapp:32|g:+10|g:-10|g` - - `deploys.test.myservice:1|c:101|c:1|c|@0.1` - - `users.unique:101|s:101|s:102|s` - - `load.time:320|ms:200|ms|@0.1` +- `users.current.den001.myapp:32|g:+10|g:-10|g` +- `deploys.test.myservice:1|c:101|c:1|c|@0.1` +- `users.unique:101|s:101|s:102|s` +- `load.time:320|ms:200|ms|@0.1` This also allows for mixed types in a single line: - - `foo:1|c:200|ms` +- `foo:1|c:200|ms` The string `foo:1|c:200|ms` is internally split into two individual metrics `foo:1|c` and `foo:200|ms` which are added to the aggregator separately. - -### Influx Statsd +## Influx Statsd In order to take advantage of InfluxDB's tagging system, we have made a couple additions to the standard statsd protocol. First, you can specify tags in a manner similar to the line-protocol, like this: -``` +```shell users.current,service=payroll,region=us-west:32|g ``` @@ -126,11 +147,14 @@ users.current,service=payroll,region=us-west:32|g ``` <!-- TODO Second, you can specify multiple fields within a measurement: ``` current.users,service=payroll,server=host01:west=10,east=10,central=2,south=10|g -``` --> +``` -### Measurements: +--> + +## Metrics Meta: + - tags: `metric_type=<metric_type>` Outputted measurements will depend entirely on the measurements that the user @@ -138,45 +162,48 @@ sends, but here is a brief rundown of what you can expect to find from each metric type: - Gauges - - Gauges are a constant data type. They are not subject to averaging, and they + - Gauges are a constant data type. They are not subject to averaging, and they don’t change unless you change them. That is, once you set a gauge value, it will be a flat line on the graph until you change it again. - Counters - - Counters are the most basic type. They are treated as a count of a type of + - Counters are the most basic type. They are treated as a count of a type of event. They will continually increase unless you set `delete_counters=true`. - Sets - - Sets count the number of unique values passed to a key. For example, you + - Sets count the number of unique values passed to a key.
For example, you could count the number of users accessing your system using `users:<user_id>|s`. No matter how many times the same user_id is sent, the count will only increase by 1. - Timings & Histograms - - Timers are meant to track how long something took. They are an invaluable + - Timers are meant to track how long something took. They are an invaluable tool for tracking application performance. - - The following aggregate measurements are made for timers: - - `statsd_<name>_lower`: The lower bound is the lowest value statsd saw + - The following aggregate measurements are made for timers: + - `statsd_<name>_lower`: The lower bound is the lowest value statsd saw for that stat during that interval. - - `statsd_<name>_upper`: The upper bound is the highest value statsd saw + - `statsd_<name>_upper`: The upper bound is the highest value statsd saw for that stat during that interval. - - `statsd_<name>_mean`: The mean is the average of all values statsd saw + - `statsd_<name>_mean`: The mean is the average of all values statsd saw for that stat during that interval. - - `statsd_<name>_stddev`: The stddev is the sample standard deviation + - `statsd_<name>_stddev`: The stddev is the sample standard deviation of all values statsd saw for that stat during that interval. - - `statsd_<name>_sum`: The sum is the sample sum of all values statsd saw + - `statsd_<name>_sum`: The sum is the sample sum of all values statsd saw for that stat during that interval. - - `statsd_<name>_count`: The count is the number of timings statsd saw + - `statsd_<name>_count`: The count is the number of timings statsd saw for that stat during that interval. It is not averaged. - - `statsd_<name>_percentile_<P>` The `Pth` percentile is a value x such + - `statsd_<name>_percentile_<P>` The `Pth` percentile is a value x such that `P%` of all the values statsd saw for that stat during that time period are below x. The most common value that people use for `P` is the `90`; this is a great number to try to optimize. +- Distributions + - The Distribution metric represents the global statistical distribution of a set of values calculated across your entire distributed infrastructure in one time interval. A Distribution can be used to instrument logical objects, like services, independently from the underlying hosts. + - Unlike the Histogram metric type, which aggregates on the Agent during a given time interval, a Distribution metric sends all the raw data during a time interval. -### Plugin arguments +## Plugin arguments - **protocol** string: Protocol used in listener - tcp or udp options - **max_tcp_connections** []int: Maximum number of concurrent TCP connections to allow. Used when protocol is set to tcp. - **tcp_keep_alive** boolean: Enable TCP keep alive probes -- **tcp_keep_alive_period** internal.Duration: Specifies the keep-alive period for an active network connection +- **tcp_keep_alive_period** duration: Specifies the keep-alive period for an active network connection - **service_address** string: Address to listen for statsd UDP packets on - **delete_gauges** boolean: Delete gauges on every collection interval - **delete_counters** boolean: Delete counters on every collection interval @@ -190,18 +217,20 @@ per-measurement in the calculation of percentiles. Raising this limit increases the accuracy of percentiles but also increases the memory usage and cpu time. - **templates** []string: Templates for transforming statsd buckets into influx measurements and tags. -- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) -- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) +- **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (<http://docs.datadoghq.com/guides/dogstatsd/>) +- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (<http://docs.datadoghq.com/guides/dogstatsd/>) +- **datadog_distributions** boolean: Enable parsing of the Distribution metric in DataDog's dogstatsd format (<https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition>) +- **max_ttl** config.Duration: Max duration (TTL) for each metric to stay cached/reported without being updated. -### Statsd bucket -> InfluxDB line-protocol Templates +## Statsd bucket -> InfluxDB line-protocol Templates The plugin supports specifying templates for transforming statsd buckets into InfluxDB measurement names and tags. The templates have a _measurement_ keyword, which can be used to specify parts of the bucket that are to be used in the -measurement name. Other words in the template are used as tag names. For example, -the following template: +measurement name. Other words in the template are used as tag names.
For +example, the following template: -``` +```toml templates = [ "measurement.measurement.region" ] @@ -209,7 +238,7 @@ templates = [ would result in the following transformation: -``` +```shell cpu.load.us-west:100|g => cpu_load,region=us-west 100 ``` @@ -217,7 +246,7 @@ cpu.load.us-west:100|g Users can also filter the template to use based on the name of the bucket, using glob matching, like so: -``` +```toml templates = [ "cpu.* measurement.measurement.region", "mem.* measurement.measurement.host" @@ -226,7 +255,7 @@ templates = [ which would result in the following transformation: -``` +```shell cpu.load.us-west:100|g => cpu_load,region=us-west 100 diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go index 377db66e6d3ad..df35198b129d3 100644 --- a/plugins/inputs/statsd/datadog.go +++ b/plugins/inputs/statsd/datadog.go @@ -38,29 +38,29 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam // tag is key:value messageRaw := strings.SplitN(message, ":", 2) if len(messageRaw) < 2 || len(messageRaw[0]) < 7 || len(messageRaw[1]) < 3 { - return fmt.Errorf("Invalid message format") + return fmt.Errorf("invalid message format") } header := messageRaw[0] message = messageRaw[1] rawLen := strings.SplitN(header[3:], ",", 2) if len(rawLen) != 2 { - return fmt.Errorf("Invalid message format") + return fmt.Errorf("invalid message format") } titleLen, err := strconv.ParseInt(rawLen[0], 10, 64) if err != nil { - return fmt.Errorf("Invalid message format, could not parse title.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse title.length: '%s'", rawLen[0]) } if len(rawLen[1]) < 1 { - return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[0]) } textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64) if err != nil { - return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0]) + return fmt.Errorf("invalid message format, could not parse text.length: '%s'", rawLen[0]) } if titleLen+textLen+1 > int64(len(message)) { - return fmt.Errorf("Invalid message format, title.length and text.length exceed total message length") + return fmt.Errorf("invalid message format, title.length and text.length exceed total message length") } rawTitle := message[:titleLen] @@ -68,14 +68,14 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam message = message[titleLen+1+textLen:] if len(rawTitle) == 0 || len(rawText) == 0 { - return fmt.Errorf("Invalid event message format: empty 'title' or 'text' field") + return fmt.Errorf("invalid event message format: empty 'title' or 'text' field") } name := rawTitle tags := make(map[string]string, strings.Count(message, ",")+2) // allocate for the approximate number of tags fields := make(map[string]interface{}, 9) fields["alert_type"] = eventInfo // default event type - fields["text"] = uncommenter.Replace(string(rawText)) + fields["text"] = uncommenter.Replace(rawText) if defaultHostname != "" { tags["source"] = defaultHostname } @@ -120,11 +120,10 @@ func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostnam case "s:": fields["source_type_name"] = rawMetadataFields[i][2:] default: - if rawMetadataFields[i][0] == '#' { - parseDataDogTags(tags, rawMetadataFields[i][1:]) - } else { + if rawMetadataFields[i][0] != '#' { return fmt.Errorf("unknown metadata type: '%s'", 
rawMetadataFields[i]) } + parseDataDogTags(tags, rawMetadataFields[i][1:]) } } // Use source tag because host is reserved tag key in Telegraf. diff --git a/plugins/inputs/statsd/running_stats_test.go b/plugins/inputs/statsd/running_stats_test.go index a52209c5665cb..2cf987a69bbf1 100644 --- a/plugins/inputs/statsd/running_stats_test.go +++ b/plugins/inputs/statsd/running_stats_test.go @@ -162,8 +162,5 @@ func TestRunningStats_PercentileLimit(t *testing.T) { } func fuzzyEqual(a, b, epsilon float64) bool { - if math.Abs(a-b) > epsilon { - return false - } - return true + return math.Abs(a-b) <= epsilon } diff --git a/plugins/inputs/statsd/sample.conf b/plugins/inputs/statsd/sample.conf new file mode 100644 index 0000000000000..c60e7b17b98d7 --- /dev/null +++ b/plugins/inputs/statsd/sample.conf @@ -0,0 +1,80 @@ +# Statsd Server +[[inputs.statsd]] + ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp) + protocol = "udp" + + ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) + max_tcp_connections = 250 + + ## Enable TCP keep alive probes (default=false) + tcp_keep_alive = false + + ## Specifies the keep-alive period for an active network connection. + ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. + ## Defaults to the OS configuration. + # tcp_keep_alive_period = "2h" + + ## Address and port to host UDP listener on + service_address = ":8125" + + ## The following configuration options control when telegraf clears it's cache + ## of previous values. If set to false, then telegraf will only clear it's + ## cache when the daemon is restarted. + ## Reset gauges every interval (default=true) + delete_gauges = true + ## Reset counters every interval (default=true) + delete_counters = true + ## Reset sets every interval (default=true) + delete_sets = true + ## Reset timings & histograms every interval (default=true) + delete_timings = true + + ## Percentiles to calculate for timing & histogram stats. + percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] + + ## separator to use between elements of a statsd metric + metric_separator = "_" + + ## Parses tags in the datadog statsd format + ## http://docs.datadoghq.com/guides/dogstatsd/ + ## deprecated in 1.10; use datadog_extensions option instead + parse_data_dog_tags = false + + ## Parses extensions to statsd in the datadog statsd format + ## currently supports metrics and datadog tags. + ## http://docs.datadoghq.com/guides/dogstatsd/ + datadog_extensions = false + + ## Parses distributions metric as specified in the datadog statsd format + ## https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + datadog_distributions = false + + ## Statsd data translation templates, more info can be read here: + ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md + # templates = [ + # "cpu.* measurement*" + # ] + + ## Number of UDP messages allowed to queue up, once filled, + ## the statsd server will start dropping packets + allowed_pending_messages = 10000 + + ## Number of timing/histogram values to track per-measurement in the + ## calculation of percentiles. Raising this limit increases the accuracy + ## of percentiles but also increases the memory usage and cpu time. + percentile_limit = 1000 + + ## Maximum socket buffer size in bytes, once the buffer fills up, metrics + ## will start dropping. Defaults to the OS default. + # read_buffer_size = 65535 + + ## Max duration (TTL) for each metric to stay cached/reported without being updated. 
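  ## (Clarifying note, inferred from the description above: with
  ## max_ttl = "10h", a series that receives no new updates for ten hours
  ## is dropped from the cache and is no longer reported afterwards.)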
+ # max_ttl = "10h" + + ## Sanitize name method + ## By default, telegraf will pass names directly as they are received. + ## However, upstream statsd now does sanitization of names which can be + ## enabled by using the "upstream" method option. This option will a) replace + ## white space with '_', replace '/' with '-', and remove charachters not + ## matching 'a-zA-Z_\-0-9\.;='. + #sanitize_name_method = "" diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 9c5780d00a596..97dca4656062c 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -1,28 +1,37 @@ +//go:generate ../../../tools/readme_config_includer/generator package statsd import ( "bufio" "bytes" - "errors" + _ "embed" "fmt" "net" + "regexp" "sort" "strconv" "strings" "sync" "time" + "github.com/pkg/errors" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/selfstat" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( - // UDP_MAX_PACKET_SIZE is the UDP packet limit, see + // UDPMaxPacketSize is the UDP packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure - UDP_MAX_PACKET_SIZE int = 64 * 1024 + UDPMaxPacketSize int = 64 * 1024 defaultFieldName = "value" @@ -30,11 +39,25 @@ const ( defaultSeparator = "_" defaultAllowPendingMessage = 10000 - MaxTCPConnections = 250 parserGoRoutines = 5 ) +var errParsing = errors.New("error parsing statsd line") + +// Number will get parsed as an int or float depending on what is passed +type Number float64 + +func (n *Number) UnmarshalTOML(b []byte) error { + value, err := strconv.ParseFloat(string(b), 64) + if err != nil { + return err + } + + *n = Number(value) + return nil +} + // Statsd allows the importing of statsd and dogstatsd data. type Statsd struct { // Protocol used on listener - udp or tcp @@ -49,34 +72,41 @@ type Statsd struct { // Percentiles specifies the percentiles that will be calculated for timing // and histogram stats. - Percentiles []internal.Number + Percentiles []Number PercentileLimit int DeleteGauges bool DeleteCounters bool DeleteSets bool DeleteTimings bool - ConvertNames bool + ConvertNames bool `toml:"convert_names" deprecated:"0.12.0;2.0.0;use 'metric_separator' instead"` // MetricSeparator is the separator between parts of the metric name. MetricSeparator string // This flag enables parsing of tags in the dogstatsd extension to the // statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/) - ParseDataDogTags bool // depreciated in 1.10; use datadog_extensions + ParseDataDogTags bool `toml:"parse_data_dog_tags" deprecated:"1.10.0;use 'datadog_extensions' instead"` // Parses extensions to statsd in the datadog statsd format // currently supports metrics and datadog tags. // http://docs.datadoghq.com/guides/dogstatsd/ DataDogExtensions bool `toml:"datadog_extensions"` + // Parses distribution metrics in the datadog statsd format. + // Requires the DataDogExtension flag to be enabled. 
+ // https://docs.datadoghq.com/developers/metrics/types/?tab=distribution#definition + DataDogDistributions bool `toml:"datadog_distributions"` + // UDPPacketSize is deprecated, it's only here for legacy support // we now always create 1 max size buffer and then copy only what we need // into the in channel // see https://github.com/influxdata/telegraf/pull/992 - UDPPacketSize int `toml:"udp_packet_size"` + UDPPacketSize int `toml:"udp_packet_size" deprecated:"0.12.1;2.0.0;option is ignored"` ReadBufferSize int `toml:"read_buffer_size"` + SanitizeNamesMethod string `toml:"sanitize_name_method"` + sync.Mutex // Lock for preventing a data race during resource cleanup cleanup sync.Mutex @@ -87,8 +117,6 @@ type Statsd struct { accept chan bool // drops tracks the number of dropped metrics. drops int - // malformed tracks the number of malformed packets - malformed int // Channel for all incoming statsd packets in chan input @@ -97,10 +125,12 @@ type Statsd struct { // Cache gauges, counters & sets so they can be aggregated as they arrive // gauges and counters map measurement/tags hash -> field name -> metrics // sets and timings map measurement/tags hash -> metrics - gauges map[string]cachedgauge - counters map[string]cachedcounter - sets map[string]cachedset - timings map[string]cachedtimings + // distributions aggregate measurement/tags and are published directly + gauges map[string]cachedgauge + counters map[string]cachedcounter + sets map[string]cachedset + timings map[string]cachedtimings + distributions []cacheddistributions // bucket -> influx templates Templates []string @@ -114,10 +144,13 @@ type Statsd struct { MaxTCPConnections int `toml:"max_tcp_connections"` - TCPKeepAlive bool `toml:"tcp_keep_alive"` - TCPKeepAlivePeriod *internal.Duration `toml:"tcp_keep_alive_period"` + TCPKeepAlive bool `toml:"tcp_keep_alive"` + TCPKeepAlivePeriod *config.Duration `toml:"tcp_keep_alive_period"` - graphiteParser *graphite.GraphiteParser + // Max duration for each metric to stay cached without being updated. 
+ MaxTTL config.Duration `toml:"max_ttl"` + + graphiteParser *graphite.Parser acc telegraf.Accumulator @@ -131,7 +164,7 @@ type Statsd struct { UDPBytesRecv selfstat.Stat ParseTimeNS selfstat.Stat - Log telegraf.Logger + Log telegraf.Logger `toml:"-"` // A pool of byte slices to handle parsing bufPool sync.Pool @@ -159,93 +192,40 @@ type metric struct { } type cachedset struct { - name string - fields map[string]map[string]bool - tags map[string]string + name string + fields map[string]map[string]bool + tags map[string]string + expiresAt time.Time } type cachedgauge struct { - name string - fields map[string]interface{} - tags map[string]string + name string + fields map[string]interface{} + tags map[string]string + expiresAt time.Time } type cachedcounter struct { - name string - fields map[string]interface{} - tags map[string]string + name string + fields map[string]interface{} + tags map[string]string + expiresAt time.Time } type cachedtimings struct { - name string - fields map[string]RunningStats - tags map[string]string + name string + fields map[string]RunningStats + tags map[string]string + expiresAt time.Time } -func (_ *Statsd) Description() string { - return "Statsd UDP/TCP Server" +type cacheddistributions struct { + name string + value float64 + tags map[string]string } -const sampleConfig = ` - ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) - protocol = "udp" - - ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) - max_tcp_connections = 250 - - ## Enable TCP keep alive probes (default=false) - tcp_keep_alive = false - - ## Specifies the keep-alive period for an active network connection. - ## Only applies to TCP sockets and will be ignored if tcp_keep_alive is false. - ## Defaults to the OS configuration. - # tcp_keep_alive_period = "2h" - - ## Address and port to host UDP listener on - service_address = ":8125" - - ## The following configuration options control when telegraf clears it's cache - ## of previous values. If set to false, then telegraf will only clear it's - ## cache when the daemon is restarted. - ## Reset gauges every interval (default=true) - delete_gauges = true - ## Reset counters every interval (default=true) - delete_counters = true - ## Reset sets every interval (default=true) - delete_sets = true - ## Reset timings & histograms every interval (default=true) - delete_timings = true - - ## Percentiles to calculate for timing & histogram stats - percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] - - ## separator to use between elements of a statsd metric - metric_separator = "_" - - ## Parses tags in the datadog statsd format - ## http://docs.datadoghq.com/guides/dogstatsd/ - parse_data_dog_tags = false - - ## Parses datadog extensions to the statsd format - datadog_extensions = false - - ## Statsd data translation templates, more info can be read here: - ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md - # templates = [ - # "cpu.* measurement*" - # ] - - ## Number of UDP messages allowed to queue up, once filled, - ## the statsd server will start dropping packets - allowed_pending_messages = 10000 - - ## Number of timing/histogram values to track per-measurement in the - ## calculation of percentiles. Raising this limit increases the accuracy - ## of percentiles but also increases the memory usage and cpu time. 
- percentile_limit = 1000 -` - -func (_ *Statsd) SampleConfig() string { +func (*Statsd) SampleConfig() string { return sampleConfig } @@ -254,6 +234,14 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { defer s.Unlock() now := time.Now() + for _, m := range s.distributions { + fields := map[string]interface{}{ + defaultFieldName: m.value, + } + acc.AddFields(m.name, fields, m.tags, now) + } + s.distributions = make([]cacheddistributions, 0) + for _, m := range s.timings { // Defining a template to parse field names for timers allows us to split // out multiple fields per timer. In this case we prefix each stat with the @@ -271,8 +259,8 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { fields[prefix+"lower"] = stats.Lower() fields[prefix+"count"] = stats.Count() for _, percentile := range s.Percentiles { - name := fmt.Sprintf("%s%v_percentile", prefix, percentile.Value) - fields[name] = stats.Percentile(percentile.Value) + name := fmt.Sprintf("%s%v_percentile", prefix, percentile) + fields[name] = stats.Percentile(float64(percentile)) } } @@ -306,13 +294,15 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { if s.DeleteSets { s.sets = make(map[string]cachedset) } + + s.expireCachedMetrics() + return nil } func (s *Statsd) Start(ac telegraf.Accumulator) error { if s.ParseDataDogTags { s.DataDogExtensions = true - s.Log.Warn("'parse_data_dog_tags' config option is deprecated, please use 'datadog_extensions' instead") } s.acc = ac @@ -322,6 +312,7 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.distributions = make([]cacheddistributions, 0) s.Lock() defer s.Unlock() @@ -353,10 +344,6 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.accept <- true } - if s.ConvertNames { - s.Log.Warn("'convert_names' config option is deprecated, please use 'metric_separator' instead") - } - if s.MetricSeparator == "" { s.MetricSeparator = defaultSeparator } @@ -378,7 +365,9 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.udpListen(conn) + if err := s.udpListen(conn); err != nil { + ac.AddError(err) + } }() } else { address, err := net.ResolveTCPAddr("tcp", s.ServiceAddress) @@ -396,7 +385,9 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.tcpListen(listener) + if err := s.tcpListen(listener); err != nil { + ac.AddError(err) + } }() } @@ -405,14 +396,16 @@ func (s *Statsd) Start(ac telegraf.Accumulator) error { s.wg.Add(1) go func() { defer s.wg.Done() - s.parser() + if err := s.parser(); err != nil { + ac.AddError(err) + } }() } s.Log.Infof("Started the statsd service on %q", s.ServiceAddress) return nil } -// tcpListen() starts listening for udp packets on the configured port. +// tcpListen() starts listening for TCP packets on the configured port. func (s *Statsd) tcpListen(listener *net.TCPListener) error { for { select { @@ -431,7 +424,7 @@ func (s *Statsd) tcpListen(listener *net.TCPListener) error { } if s.TCPKeepAlivePeriod != nil { - if err = conn.SetKeepAlivePeriod(s.TCPKeepAlivePeriod.Duration); err != nil { + if err = conn.SetKeepAlivePeriod(time.Duration(*s.TCPKeepAlivePeriod)); err != nil { return err } } @@ -453,13 +446,15 @@ func (s *Statsd) tcpListen(listener *net.TCPListener) error { } } -// udpListen starts listening for udp packets on the configured port. 
+// udpListen starts listening for UDP packets on the configured port. func (s *Statsd) udpListen(conn *net.UDPConn) error { if s.ReadBufferSize > 0 { - s.UDPlistener.SetReadBuffer(s.ReadBufferSize) + if err := s.UDPlistener.SetReadBuffer(s.ReadBufferSize); err != nil { + return err + } } - buf := make([]byte, UDP_MAX_PACKET_SIZE) + buf := make([]byte, UDPMaxPacketSize) for { select { case <-s.done: @@ -471,13 +466,18 @@ func (s *Statsd) udpListen(conn *net.UDPConn) error { s.Log.Errorf("Error reading: %s", err.Error()) continue } - return err + return nil } s.UDPPacketsRecv.Incr(1) s.UDPBytesRecv.Incr(int64(n)) - b := s.bufPool.Get().(*bytes.Buffer) + b, ok := s.bufPool.Get().(*bytes.Buffer) + if !ok { + return fmt.Errorf("bufPool is not a bytes buffer") + } b.Reset() - b.Write(buf[:n]) + if _, err := b.Write(buf[:n]); err != nil { + return err + } select { case s.in <- input{ Buffer: b, @@ -513,9 +513,17 @@ func (s *Statsd) parser() error { switch { case line == "": case s.DataDogExtensions && strings.HasPrefix(line, "_e"): - s.parseEventMessage(in.Time, line, in.Addr) + if err := s.parseEventMessage(in.Time, line, in.Addr); err != nil { + return err + } default: - s.parseStatsdLine(line) + if err := s.parseStatsdLine(line); err != nil { + if errors.Cause(err) == errParsing { + // parsing errors log when the error occurs + continue + } + return err + } } } elapsed := time.Since(start) @@ -527,9 +535,6 @@ func (s *Statsd) parser() error { // parseStatsdLine will parse the given statsd line, validating it as it goes. // If the line is valid, it will be cached for the next call to Gather() func (s *Statsd) parseStatsdLine(line string) error { - s.Lock() - defer s.Unlock() - lineTags := make(map[string]string) if s.DataDogExtensions { recombinedSegments := make([]string, 0) @@ -554,7 +559,7 @@ func (s *Statsd) parseStatsdLine(line string) error { bits := strings.Split(line, ":") if len(bits) < 2 { s.Log.Errorf("Splitting ':', unable to parse metric: %s", line) - return errors.New("error Parsing statsd line") + return errParsing } // Extract bucket name from individual metric bits @@ -570,7 +575,7 @@ func (s *Statsd) parseStatsdLine(line string) error { pipesplit := strings.Split(bit, "|") if len(pipesplit) < 2 { s.Log.Errorf("Splitting '|', unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } else if len(pipesplit) > 2 { sr := pipesplit[2] @@ -590,28 +595,28 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate metric type switch pipesplit[1] { - case "g", "c", "s", "ms", "h": + case "g", "c", "s", "ms", "h", "d": m.mtype = pipesplit[1] default: s.Log.Errorf("Metric type %q unsupported", pipesplit[1]) - return errors.New("error parsing statsd line") + return errParsing } // Parse the value if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") { if m.mtype != "g" && m.mtype != "c" { s.Log.Errorf("+- values are only supported for gauges & counters, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } m.additive = true } switch m.mtype { - case "g", "ms", "h": + case "g", "ms", "h", "d": v, err := strconv.ParseFloat(pipesplit[0], 64) if err != nil { s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } m.floatvalue = v case "c": @@ -621,7 +626,7 @@ func (s *Statsd) parseStatsdLine(line string) error { v2, err2 := strconv.ParseFloat(pipesplit[0], 64) if err2 != 
nil { s.Log.Errorf("Parsing value to int64, unable to parse metric: %s", line) - return errors.New("error parsing statsd line") + return errParsing } v = int64(v2) } @@ -647,6 +652,8 @@ func (s *Statsd) parseStatsdLine(line string) error { m.tags["metric_type"] = "timing" case "h": m.tags["metric_type"] = "histogram" + case "d": + m.tags["metric_type"] = "distribution" } if len(lineTags) > 0 { for k, v := range lineTags { @@ -673,8 +680,10 @@ func (s *Statsd) parseStatsdLine(line string) error { // config file. If there is a match, it will parse the name of the metric and // map of tags. // Return values are (, , ) -func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { - tags := make(map[string]string) +func (s *Statsd) parseName(bucket string) (name string, field string, tags map[string]string) { + s.Lock() + defer s.Unlock() + tags = make(map[string]string) bucketparts := strings.Split(bucket, ",") // Parse out any tags in the bucket @@ -687,14 +696,25 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { } } - var field string - name := bucketparts[0] + name = bucketparts[0] + switch s.SanitizeNamesMethod { + case "": + case "upstream": + whitespace := regexp.MustCompile(`\s+`) + name = whitespace.ReplaceAllString(name, "_") + name = strings.ReplaceAll(name, "/", "-") + allowedChars := regexp.MustCompile(`[^a-zA-Z_\-0-9\.;=]`) + name = allowedChars.ReplaceAllString(name, "") + default: + s.Log.Errorf("Unknown sanitizae name method: %s", s.SanitizeNamesMethod) + } p := s.graphiteParser var err error if p == nil || s.graphiteParser.Separator != s.MetricSeparator { - p, err = graphite.NewGraphiteParser(s.MetricSeparator, s.Templates, nil) + p = &graphite.Parser{Separator: s.MetricSeparator, Templates: s.Templates} + err = p.Init() s.graphiteParser = p } @@ -704,8 +724,8 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { } if s.ConvertNames { - name = strings.Replace(name, ".", "_", -1) - name = strings.Replace(name, "-", "__", -1) + name = strings.ReplaceAll(name, ".", "_") + name = strings.ReplaceAll(name, "-", "__") } if field == "" { field = defaultFieldName @@ -715,16 +735,20 @@ func (s *Statsd) parseName(bucket string) (string, string, map[string]string) { } // Parse the key,value out of a string that looks like "key=value" -func parseKeyValue(keyvalue string) (string, string) { - var key, val string - - split := strings.Split(keyvalue, "=") +func parseKeyValue(keyValue string) (key string, val string) { + split := strings.Split(keyValue, "=") // Must be exactly 2 to get anything meaningful out of them if len(split) == 2 { key = split[0] val = split[1] } else if len(split) == 1 { val = split[0] + } else if len(split) > 2 { + // fix: https://github.com/influxdata/telegraf/issues/10113 + // fix: value has "=" parse error + // uri=/service/endpoint?sampleParam={paramValue} parse value key="uri", val="/service/endpoint?sampleParam\={paramValue}" + key = split[0] + val = strings.Join(split[1:], "=") } return key, val @@ -734,7 +758,19 @@ func parseKeyValue(keyvalue string) (string, string) { // aggregates and caches the current value(s). It does not deal with the // Delete* options, because those are dealt with in the Gather function. 
func (s *Statsd) aggregate(m metric) { + s.Lock() + defer s.Unlock() + switch m.mtype { + case "d": + if s.DataDogExtensions && s.DataDogDistributions { + cached := cacheddistributions{ + name: m.name, + value: m.floatvalue, + tags: m.tags, + } + s.distributions = append(s.distributions, cached) + } case "ms", "h": // Check if the measurement exists cached, ok := s.timings[m.hash] @@ -761,61 +797,67 @@ func (s *Statsd) aggregate(m metric) { field.AddValue(m.floatvalue) } cached.fields[m.field] = field + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) s.timings[m.hash] = cached case "c": // check if the measurement exists - _, ok := s.counters[m.hash] + cached, ok := s.counters[m.hash] if !ok { - s.counters[m.hash] = cachedcounter{ + cached = cachedcounter{ name: m.name, fields: make(map[string]interface{}), tags: m.tags, } } // check if the field exists - _, ok = s.counters[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.counters[m.hash].fields[m.field] = int64(0) + cached.fields[m.field] = int64(0) } - s.counters[m.hash].fields[m.field] = - s.counters[m.hash].fields[m.field].(int64) + m.intvalue + cached.fields[m.field] = cached.fields[m.field].(int64) + m.intvalue + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.counters[m.hash] = cached case "g": // check if the measurement exists - _, ok := s.gauges[m.hash] + cached, ok := s.gauges[m.hash] if !ok { - s.gauges[m.hash] = cachedgauge{ + cached = cachedgauge{ name: m.name, fields: make(map[string]interface{}), tags: m.tags, } } // check if the field exists - _, ok = s.gauges[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.gauges[m.hash].fields[m.field] = float64(0) + cached.fields[m.field] = float64(0) } if m.additive { - s.gauges[m.hash].fields[m.field] = - s.gauges[m.hash].fields[m.field].(float64) + m.floatvalue + cached.fields[m.field] = cached.fields[m.field].(float64) + m.floatvalue } else { - s.gauges[m.hash].fields[m.field] = m.floatvalue + cached.fields[m.field] = m.floatvalue } + + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.gauges[m.hash] = cached case "s": // check if the measurement exists - _, ok := s.sets[m.hash] + cached, ok := s.sets[m.hash] if !ok { - s.sets[m.hash] = cachedset{ + cached = cachedset{ name: m.name, fields: make(map[string]map[string]bool), tags: m.tags, } } // check if the field exists - _, ok = s.sets[m.hash].fields[m.field] + _, ok = cached.fields[m.field] if !ok { - s.sets[m.hash].fields[m.field] = make(map[string]bool) + cached.fields[m.field] = make(map[string]bool) } - s.sets[m.hash].fields[m.field][m.strvalue] = true + cached.fields[m.field][m.strvalue] = true + cached.expiresAt = time.Now().Add(time.Duration(s.MaxTTL)) + s.sets[m.hash] = cached } } @@ -826,7 +868,11 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { // connection cleanup function defer func() { s.wg.Done() + + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() + // Add one connection potential back to channel when this one closes s.accept <- true s.forget(id) @@ -857,7 +903,10 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { b := s.bufPool.Get().(*bytes.Buffer) b.Reset() + // Writes to a bytes buffer always succeed, so do not check the errors here + //nolint:errcheck,revive b.Write(scanner.Bytes()) + //nolint:errcheck,revive b.WriteByte('\n') select { @@ -876,6 +925,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { // refuser refuses a TCP connection 
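Taken together, `aggregate` refreshes `expiresAt` on every write and `expireCachedMetrics` (invoked from `Gather`) sweeps entries whose deadline has passed. A condensed sketch of that mechanism, using illustrative names rather than the plugin's own types:

```go
// Models the max_ttl behavior: writes refresh the deadline, a sweep drops
// entries that were not updated within the TTL.
package main

import (
	"fmt"
	"time"
)

type entry struct {
	value     float64
	expiresAt time.Time
}

type cache struct {
	maxTTL  time.Duration
	entries map[string]entry
}

// set stores a value and pushes the expiry deadline out by maxTTL.
func (c *cache) set(key string, v float64) {
	c.entries[key] = entry{value: v, expiresAt: time.Now().Add(c.maxTTL)}
}

// sweep removes every entry whose deadline has already passed.
func (c *cache) sweep() {
	now := time.Now()
	for k, e := range c.entries {
		if now.After(e.expiresAt) {
			delete(c.entries, k)
		}
	}
}

func main() {
	c := &cache{maxTTL: 50 * time.Millisecond, entries: map[string]entry{}}
	c.set("cpu", 1)
	time.Sleep(60 * time.Millisecond)
	c.sweep()
	fmt.Println(len(c.entries)) // 0: the entry expired after max_ttl
}
```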
func (s *Statsd) refuser(conn *net.TCPConn) { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") @@ -900,8 +951,12 @@ func (s *Statsd) Stop() { s.Log.Infof("Stopping the statsd service") close(s.done) if s.isUDP() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.UDPlistener.Close() } else { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.TCPlistener.Close() // Close all open TCP connections // - get all conns from the s.conns map and put into slice @@ -914,6 +969,8 @@ func (s *Statsd) Stop() { } s.cleanup.Unlock() for _, conn := range conns { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive conn.Close() } } @@ -932,6 +989,39 @@ func (s *Statsd) isUDP() bool { return strings.HasPrefix(s.Protocol, "udp") } +func (s *Statsd) expireCachedMetrics() { + // If Max TTL wasn't configured, skip expiration. + if s.MaxTTL == 0 { + return + } + + now := time.Now() + + for key, cached := range s.gauges { + if now.After(cached.expiresAt) { + delete(s.gauges, key) + } + } + + for key, cached := range s.sets { + if now.After(cached.expiresAt) { + delete(s.sets, key) + } + } + + for key, cached := range s.timings { + if now.After(cached.expiresAt) { + delete(s.timings, key) + } + } + + for key, cached := range s.counters { + if now.After(cached.expiresAt) { + delete(s.counters, key) + } + } +} + func init() { inputs.Add("statsd", func() telegraf.Input { return &Statsd{ @@ -945,6 +1035,7 @@ func init() { DeleteGauges: true, DeleteSets: true, DeleteTimings: true, + SanitizeNamesMethod: "", } }) } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index f76681134a094..22d6ee4e30901 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -2,15 +2,15 @@ package statsd import ( "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "net" "sync" "testing" "time" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/testutil" ) @@ -29,6 +29,7 @@ func NewTestStatsd() *Statsd { s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) s.timings = make(map[string]cachedtimings) + s.distributions = make([]cacheddistributions, 0) s.MetricSeparator = "_" @@ -51,19 +52,19 @@ func TestConcurrentConns(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) + _, err = net.Dial("tcp", "127.0.0.1:8125") + require.NoError(t, err) _, err = conn.Write([]byte(testMsg)) - assert.NoError(t, err) + require.NoError(t, err) time.Sleep(time.Millisecond * 100) - assert.Zero(t, acc.NFields()) + require.Zero(t, acc.NFields()) } // Test that MaxTCPConnections is respected when max==1 @@ -82,17 +83,17 @@ func 
TestConcurrentConns1(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) // Connection over the limit: conn, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) - net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) + _, err = net.Dial("tcp", "127.0.0.1:8125") + require.NoError(t, err) _, err = conn.Write([]byte(testMsg)) - assert.NoError(t, err) + require.NoError(t, err) time.Sleep(time.Millisecond * 100) - assert.Zero(t, acc.NFields()) + require.Zero(t, acc.NFields()) } // Test that MaxTCPConnections is respected @@ -110,9 +111,9 @@ func TestCloseConcurrentConns(t *testing.T) { time.Sleep(time.Millisecond * 250) _, err := net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) _, err = net.Dial("tcp", "127.0.0.1:8125") - assert.NoError(t, err) + require.NoError(t, err) listener.Stop() } @@ -129,16 +130,11 @@ func BenchmarkUDP(b *testing.B) { // send multiple messages to socket for n := 0; n < b.N; n++ { - err := listener.Start(acc) - if err != nil { - panic(err) - } + require.NoError(b, listener.Start(acc)) time.Sleep(time.Millisecond * 250) conn, err := net.Dial("udp", "127.0.0.1:8125") - if err != nil { - panic(err) - } + require.NoError(b, err) var wg sync.WaitGroup for i := 1; i <= producerThreads; i++ { @@ -149,7 +145,6 @@ func BenchmarkUDP(b *testing.B) { // wait for 250,000 metrics to get added to accumulator for len(listener.in) > 0 { - fmt.Printf("Left in buffer: %v \n", len(listener.in)) time.Sleep(time.Millisecond) } listener.Stop() @@ -159,7 +154,8 @@ func BenchmarkUDP(b *testing.B) { func sendRequests(conn net.Conn, wg *sync.WaitGroup) { defer wg.Done() for i := 0; i < 25000; i++ { - fmt.Fprintf(conn, testMsg) + //nolint:errcheck,revive + fmt.Fprint(conn, testMsg) } } @@ -176,16 +172,12 @@ func BenchmarkTCP(b *testing.B) { // send multiple messages to socket for n := 0; n < b.N; n++ { - err := listener.Start(acc) - if err != nil { - panic(err) - } + require.NoError(b, listener.Start(acc)) time.Sleep(time.Millisecond * 250) conn, err := net.Dial("tcp", "127.0.0.1:8125") - if err != nil { - panic(err) - } + require.NoError(b, err) + var wg sync.WaitGroup for i := 1; i <= producerThreads; i++ { wg.Add(1) @@ -212,10 +204,7 @@ func TestParse_ValidLines(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } } @@ -243,10 +232,7 @@ func TestParse_Gauges(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -288,10 +274,7 @@ func TestParse_Gauges(t *testing.T) { } for _, test := range validations { - err := testValidateGauge(test.name, test.value, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge(test.name, test.value, s.gauges)) } } @@ -321,10 +304,7 @@ func TestParse_Sets(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, 
s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -350,10 +330,7 @@ func TestParse_Sets(t *testing.T) { } for _, test := range validations { - err := testValidateSet(test.name, test.value, s.sets) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet(test.name, test.value, s.sets)) } } @@ -378,10 +355,7 @@ func TestParse_Counters(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -415,20 +389,17 @@ func TestParse_Counters(t *testing.T) { } for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } // Tests low-level functionality of timings func TestParse_Timings(t *testing.T) { s := NewTestStatsd() - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} - // Test that counters work + // Test that timings work validLines := []string{ "test.timing:1|ms", "test.timing:11|ms", @@ -438,13 +409,10 @@ func TestParse_Timings(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) valid := map[string]interface{}{ "90_percentile": float64(11), @@ -459,6 +427,60 @@ func TestParse_Timings(t *testing.T) { acc.AssertContainsFields(t, "test_timing", valid) } +// Tests low-level functionality of distributions +func TestParse_Distributions(t *testing.T) { + s := NewTestStatsd() + acc := &testutil.Accumulator{} + + parseMetrics := func() { + // Test that distributions work + validLines := []string{ + "test.distribution:1|d", + "test.distribution2:2|d", + "test.distribution3:3|d", + "test.distribution4:1|d", + "test.distribution5:1|d", + } + + for _, line := range validLines { + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) + } + + require.NoError(t, s.Gather(acc)) + } + + validMeasurementMap := map[string]float64{ + "test_distribution": 1, + "test_distribution2": 2, + "test_distribution3": 3, + "test_distribution4": 1, + "test_distribution5": 1, + } + + // Test parsing when DataDogExtensions and DataDogDistributions aren't enabled + parseMetrics() + for key := range validMeasurementMap { + acc.AssertDoesNotContainMeasurement(t, key) + } + + // Test parsing when DataDogDistributions is enabled but not DataDogExtensions + s.DataDogDistributions = true + parseMetrics() + for key := range validMeasurementMap { + acc.AssertDoesNotContainMeasurement(t, key) + } + + // Test parsing when DataDogExtensions and DataDogDistributions are enabled + s.DataDogExtensions = true + parseMetrics() + for key, value := range validMeasurementMap { + field := map[string]interface{}{ + "value": value, + } + acc.AssertContainsFields(t, key, field) + } +} + func TestParseScientificNotation(t *testing.T) { s := NewTestStatsd() sciNotationLines := []string{ @@ -468,10 +490,7 @@ func 
TestParseScientificNotation(t *testing.T) { "scientific.notation:4.6968460083008E-5|h", } for _, line := range sciNotationLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line [%s] should not have resulted in error: %s\n", line, err) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line [%s] should not have resulted in error", line) } } @@ -490,10 +509,7 @@ func TestParse_InvalidLines(t *testing.T) { "invalid.value:1d1|c", } for _, line := range invalidLines { - err := s.parseStatsdLine(line) - if err == nil { - t.Errorf("Parsing line %s should have resulted in an error\n", line) - } + require.Errorf(t, s.parseStatsdLine(line), "Parsing line %s should have resulted in an error", line) } } @@ -508,10 +524,7 @@ func TestParse_InvalidSampleRate(t *testing.T) { } for _, line := range invalidLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } counterValidations := []struct { @@ -532,21 +545,12 @@ func TestParse_InvalidSampleRate(t *testing.T) { } for _, test := range counterValidations { - err := testValidateCounter(test.name, test.value, test.cache) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, test.cache)) } - err := testValidateGauge("invalid_sample_rate", 45, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("invalid_sample_rate", 45, s.gauges)) - err = testValidateSet("invalid_sample_rate", 1, s.sets) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("invalid_sample_rate", 1, s.sets)) } // Names should be parsed like . 
-> _ @@ -558,10 +562,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -579,10 +580,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { } for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -599,10 +597,7 @@ func TestParse_Template(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -621,10 +616,7 @@ func TestParse_Template(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -641,10 +633,7 @@ func TestParse_TemplateFilter(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -663,10 +652,7 @@ func TestParse_TemplateFilter(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -683,10 +669,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } validations := []struct { @@ -701,10 +684,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { // Validate counters for _, test := range validations { - err := testValidateCounter(test.name, test.value, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters)) } } @@ -731,10 +711,7 @@ func TestParse_TemplateFields(t *testing.T) { } for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } counterTests := []struct { @@ -760,10 +737,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate counters for _, test := range counterTests { - err := testValidateCounter(test.name, test.value, s.counters, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter(test.name, test.value, s.counters, test.field)) } gaugeTests := []struct { @@ -784,10 +758,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate gauges for _, test := range 
gaugeTests { - err := testValidateGauge(test.name, test.value, s.gauges, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge(test.name, test.value, s.gauges, test.field)) } setTests := []struct { @@ -808,10 +779,7 @@ func TestParse_TemplateFields(t *testing.T) { } // Validate sets for _, test := range setTests { - err := testValidateSet(test.name, test.value, s.sets, test.field) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet(test.name, test.value, s.sets, test.field)) } } @@ -859,18 +827,12 @@ func TestParse_Tags(t *testing.T) { for _, test := range tests { name, _, tags := s.parseName(test.bucket) - if name != test.name { - t.Errorf("Expected: %s, got %s", test.name, name) - } + require.Equalf(t, name, test.name, "Expected: %s, got %s", test.name, name) for k, v := range test.tags { actual, ok := tags[k] - if !ok { - t.Errorf("Expected key: %s not found", k) - } - if actual != v { - t.Errorf("Expected %s, got %s", v, actual) - } + require.Truef(t, ok, "Expected key: %s not found", k) + require.Equalf(t, actual, v, "Expected %s, got %s", v, actual) } } } @@ -985,10 +947,8 @@ func TestParse_DataDogTags(t *testing.T) { s := NewTestStatsd() s.DataDogExtensions = true - err := s.parseStatsdLine(tt.line) - require.NoError(t, err) - err = s.Gather(&acc) - require.NoError(t, err) + require.NoError(t, s.parseStatsdLine(tt.line)) + require.NoError(t, s.Gather(&acc)) testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) @@ -1020,9 +980,7 @@ func TestParseName(t *testing.T) { for _, test := range tests { name, _, _ := s.parseName(test.inName) - if name != test.outName { - t.Errorf("Expected: %s, got %s", test.outName, name) - } + require.Equalf(t, name, test.outName, "Expected: %s, got %s", test.outName, name) } // Test with separator == "." @@ -1048,9 +1006,7 @@ func TestParseName(t *testing.T) { for _, test := range tests { name, _, _ := s.parseName(test.inName) - if name != test.outName { - t.Errorf("Expected: %s, got %s", test.outName, name) - } + require.Equalf(t, name, test.outName, "Expected: %s, got %s", test.outName, name) } } @@ -1066,15 +1022,72 @@ func TestParse_MeasurementsWithSameName(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - if len(s.counters) != 2 { - t.Errorf("Expected 2 separate measurements, found %d", len(s.counters)) - } + require.Lenf(t, s.counters, 2, "Expected 2 separate measurements, found %d", len(s.counters)) +} + +// Test that the metric caches expire (clear) an entry after the entry hasn't been updated for the configurable MaxTTL duration. +func TestCachesExpireAfterMaxTTL(t *testing.T) { + s := NewTestStatsd() + s.MaxTTL = config.Duration(100 * time.Microsecond) + + acc := &testutil.Accumulator{} + require.NoError(t, s.parseStatsdLine("valid:45|c")) + require.NoError(t, s.parseStatsdLine("valid:45|c")) + require.NoError(t, s.Gather(acc)) + + // Max TTL goes by, our 'valid' entry is cleared. + time.Sleep(100 * time.Microsecond) + require.NoError(t, s.Gather(acc)) + + // Now when we gather, we should have a counter that is reset to zero. 
+ require.NoError(t, s.parseStatsdLine("valid:45|c")) + require.NoError(t, s.Gather(acc)) + + // Wait for the metrics to arrive + acc.Wait(3) + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 90, + }, + time.Now(), + telegraf.Counter, + ), + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 90, + }, + time.Now(), + telegraf.Counter, + ), + testutil.MustMetric( + "valid", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 45, + }, + time.Now(), + telegraf.Counter, + ), + }, + acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), + ) } // Test that measurements with multiple bits, are treated as different outputs @@ -1119,92 +1132,52 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { sMultiple := NewTestStatsd() for _, line := range singleLines { - err := sSingle.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, sSingle.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } for _, line := range multipleLines { - err := sMultiple.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, sMultiple.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - if len(sSingle.timings) != 3 { - t.Errorf("Expected 3 measurement, found %d", len(sSingle.timings)) - } + require.Lenf(t, sSingle.timings, 3, "Expected 3 measurement, found %d", len(sSingle.timings)) - if cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"]; !ok { - t.Errorf("Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") - } else { - if cachedtiming.name != "valid_multiple" { - t.Errorf("Expected the name to be 'valid_multiple', got %s", cachedtiming.name) - } + cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"] + require.Truef(t, ok, "Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") + require.Equalf(t, cachedtiming.name, "valid_multiple", "Expected the name to be 'valid_multiple', got %s", cachedtiming.name) - // A 0 at samplerate 0.1 will add 10 values of 0, - // A 0 with invalid samplerate will add a single 0, - // plus the last bit of value 1 - // which adds up to 12 individual datapoints to be cached - if cachedtiming.fields[defaultFieldName].n != 12 { - t.Errorf("Expected 12 additions, got %d", cachedtiming.fields[defaultFieldName].n) - } + // A 0 at samplerate 0.1 will add 10 values of 0, + // A 0 with invalid samplerate will add a single 0, + // plus the last bit of value 1 + // which adds up to 12 individual datapoints to be cached + require.EqualValuesf(t, cachedtiming.fields[defaultFieldName].n, 12, "Expected 12 additions, got %d", cachedtiming.fields[defaultFieldName].n) - if cachedtiming.fields[defaultFieldName].upper != 1 { - t.Errorf("Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper) - } - } + require.EqualValuesf(t, cachedtiming.fields[defaultFieldName].upper, 1, "Expected max input to be 1, got %f", cachedtiming.fields[defaultFieldName].upper) // test if sSingle and sMultiple did compute the same stats for valid.multiple.duplicate - if err := testValidateSet("valid_multiple_duplicate", 2, 
sSingle.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_duplicate", 2, sSingle.sets)) - if err := testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets)) - if err := testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters)) - if err := testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters)) - if err := testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges)) - if err := testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges)) // test if sSingle and sMultiple did compute the same stats for valid.multiple.mixed - if err := testValidateSet("valid_multiple_mixed", 1, sSingle.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_mixed", 1, sSingle.sets)) - if err := testValidateSet("valid_multiple_mixed", 1, sMultiple.sets); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("valid_multiple_mixed", 1, sMultiple.sets)) - if err := testValidateCounter("valid_multiple_mixed", 1, sSingle.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_mixed", 1, sSingle.counters)) - if err := testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters)) - if err := testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges)) - if err := testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges); err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges)) } // Tests low-level functionality of timings when multiple fields is enabled @@ -1212,7 +1185,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{"measurement.field"} - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} validLines := []string{ @@ -1229,12 +1202,9 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) valid := map[string]interface{}{ "success_90_percentile": float64(11), @@ -1263,7 +1233,7 @@ func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { s := 
NewTestStatsd() s.Templates = []string{} - s.Percentiles = []internal.Number{{Value: 90.0}} + s.Percentiles = []Number{90.0} acc := &testutil.Accumulator{} validLines := []string{ @@ -1280,12 +1250,9 @@ func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { } for _, line := range validLines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoErrorf(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) } - s.Gather(acc) + require.NoError(t, s.Gather(acc)) expectedSuccess := map[string]interface{}{ "90_percentile": float64(11), @@ -1444,23 +1411,15 @@ func TestParse_Timings_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteTimings = true fakeacc := &testutil.Accumulator{} - var err error line := "timing:100|ms" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - if len(s.timings) != 1 { - t.Errorf("Should be 1 timing, found %d", len(s.timings)) - } + require.Lenf(t, s.timings, 1, "Should be 1 timing, found %d", len(s.timings)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - if len(s.timings) != 0 { - t.Errorf("All timings should have been deleted, found %d", len(s.timings)) - } + require.Lenf(t, s.timings, 0, "All timings should have been deleted, found %d", len(s.timings)) } // Tests the delete_gauges option @@ -1468,25 +1427,15 @@ func TestParse_Gauges_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteGauges = true fakeacc := &testutil.Accumulator{} - var err error line := "current.users:100|g" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - err = testValidateGauge("current_users", 100, s.gauges) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateGauge("current_users", 100, s.gauges)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateGauge("current_users", 100, s.gauges) - if err == nil { - t.Error("current_users_gauge metric should have been deleted") - } + require.Error(t, testValidateGauge("current_users", 100, s.gauges), "current_users_gauge metric should have been deleted") } // Tests the delete_sets option @@ -1494,25 +1443,15 @@ func TestParse_Sets_Delete(t *testing.T) { s := NewTestStatsd() s.DeleteSets = true fakeacc := &testutil.Accumulator{} - var err error line := "unique.user.ids:100|s" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error", line) - err = testValidateSet("unique_user_ids", 1, s.sets) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateSet("unique_user_ids", 1, s.sets)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateSet("unique_user_ids", 1, s.sets) - if err == nil { - t.Error("unique_user_ids_set metric should have been deleted") - } + require.Error(t, testValidateSet("unique_user_ids", 1, s.sets), "unique_user_ids_set metric should have been deleted") } // Tests the delete_counters option @@ -1520,43 +1459,25 @@ func TestParse_Counters_Delete(t 
*testing.T) { s := NewTestStatsd() s.DeleteCounters = true fakeacc := &testutil.Accumulator{} - var err error line := "total.users:100|c" - err = s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } + require.NoError(t, s.parseStatsdLine(line), "Parsing line %s should not have resulted in an error\n", line) - err = testValidateCounter("total_users", 100, s.counters) - if err != nil { - t.Error(err.Error()) - } + require.NoError(t, testValidateCounter("total_users", 100, s.counters)) - s.Gather(fakeacc) + require.NoError(t, s.Gather(fakeacc)) - err = testValidateCounter("total_users", 100, s.counters) - if err == nil { - t.Error("total_users_counter metric should have been deleted") - } + require.Error(t, testValidateCounter("total_users", 100, s.counters), "total_users_counter metric should have been deleted") } func TestParseKeyValue(t *testing.T) { k, v := parseKeyValue("foo=bar") - if k != "foo" { - t.Errorf("Expected %s, got %s", "foo", k) - } - if v != "bar" { - t.Errorf("Expected %s, got %s", "bar", v) - } + require.Equalf(t, k, "foo", "Expected %s, got %s", "foo", k) + require.Equalf(t, v, "bar", "Expected %s, got %s", "bar", v) k2, v2 := parseKeyValue("baz") - if k2 != "" { - t.Errorf("Expected %s, got %s", "", k2) - } - if v2 != "baz" { - t.Errorf("Expected %s, got %s", "baz", v2) - } + require.Equalf(t, k2, "", "Expected %s, got %s", "", k2) + require.Equalf(t, v2, "baz", "Expected %s, got %s", "baz", v2) } // Test utility functions @@ -1648,7 +1569,7 @@ func testValidateGauge( } if valueExpected != valueActual { - return fmt.Errorf("Measurement: %s, expected %f, actual %f", name, valueExpected, valueActual) + return fmt.Errorf("measurement: %s, expected %f, actual %f", name, valueExpected, valueActual) } return nil } @@ -1668,14 +1589,14 @@ func TestTCP(t *testing.T) { addr := statsd.TCPlistener.Addr().String() conn, err := net.Dial("tcp", addr) - _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) - err = conn.Close() + + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) + require.NoError(t, conn.Close()) for { - err = statsd.Gather(&acc) - require.NoError(t, err) + require.NoError(t, statsd.Gather(&acc)) if len(acc.Metrics) > 0 { break @@ -1705,22 +1626,21 @@ func TestUdp(t *testing.T) { statsd := Statsd{ Log: testutil.Logger{}, Protocol: "udp", - ServiceAddress: "localhost:8125", + ServiceAddress: "localhost:14223", AllowedPendingMessages: 250000, } var acc testutil.Accumulator require.NoError(t, statsd.Start(&acc)) defer statsd.Stop() - conn, err := net.Dial("udp", "127.0.0.1:8125") - _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) + conn, err := net.Dial("udp", "127.0.0.1:14223") require.NoError(t, err) - err = conn.Close() + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) require.NoError(t, err) + require.NoError(t, conn.Close()) for { - err = statsd.Gather(&acc) - require.NoError(t, err) + require.NoError(t, statsd.Gather(&acc)) if len(acc.Metrics) > 0 { break @@ -1745,3 +1665,103 @@ func TestUdp(t *testing.T) { testutil.IgnoreTime(), ) } + +func TestParse_Ints(t *testing.T) { + s := NewTestStatsd() + s.Percentiles = []Number{90} + acc := &testutil.Accumulator{} + + require.NoError(t, s.Gather(acc)) + require.Equal(t, s.Percentiles, []Number{90.0}) +} + +func TestParse_KeyValue(t *testing.T) { + type output struct { + key string + val string + } + + validLines := []struct { + input string + output output + }{ + {"", output{"", ""}}, + {"only 
value", output{"", "only value"}}, + {"key=value", output{"key", "value"}}, + {"url=/api/querystring?key1=val1&key2=value", output{"url", "/api/querystring?key1=val1&key2=value"}}, + } + + for _, line := range validLines { + key, val := parseKeyValue(line.input) + if key != line.output.key { + t.Errorf("line: %s, key expected %s, actual %s", line, line.output.key, key) + } + if val != line.output.val { + t.Errorf("line: %s, val expected %s, actual %s", line, line.output.val, val) + } + } +} + +func TestParseSanitize(t *testing.T) { + s := NewTestStatsd() + s.SanitizeNamesMethod = "upstream" + + tests := []struct { + inName string + outName string + }{ + { + "regex.ARP flood stats", + "regex_ARP_flood_stats", + }, + { + "regex./dev/null", + "regex_-dev-null", + }, + { + "regex.wow!!!", + "regex_wow", + }, + { + "regex.all*things", + "regex_allthings", + }, + } + + for _, test := range tests { + name, _, _ := s.parseName(test.inName) + require.Equalf(t, name, test.outName, "Expected: %s, got %s", test.outName, name) + } +} + +func TestParseNoSanitize(t *testing.T) { + s := NewTestStatsd() + s.SanitizeNamesMethod = "" + + tests := []struct { + inName string + outName string + }{ + { + "regex.ARP flood stats", + "regex_ARP", + }, + { + "regex./dev/null", + "regex_/dev/null", + }, + { + "regex.wow!!!", + "regex_wow!!!", + }, + { + "regex.all*things", + "regex_all*things", + }, + } + + for _, test := range tests { + name, _, _ := s.parseName(test.inName) + require.Equalf(t, name, test.outName, "Expected: %s, got %s", test.outName, name) + } +} diff --git a/plugins/inputs/suricata/README.md b/plugins/inputs/suricata/README.md index 18b26298e7af4..189ff1e8a5dd2 100644 --- a/plugins/inputs/suricata/README.md +++ b/plugins/inputs/suricata/README.md @@ -4,10 +4,12 @@ This plugin reports internal performance counters of the Suricata IDS/IPS engine, such as captured traffic volume, memory usage, uptime, flow counters, and much more. It provides a socket for the Suricata log output to write JSON stats output to, and processes the incoming data to fit Telegraf's format. +It can also report for triggered Suricata IDS/IPS alerts. -### Configuration +## Configuration -```toml +```toml @sample.conf +# Suricata stats and alerts plugin [[inputs.suricata]] ## Data sink for Suricata stats log. # This is expected to be a filename of a @@ -17,16 +19,20 @@ stats output to, and processes the incoming data to fit Telegraf's format. # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" # becomes "detect_alert" when delimiter is "_". delimiter = "_" + + # Detect alert logs + alerts = false ``` -### Metrics +## Metrics Fields in the 'suricata' measurement follow the JSON format used by Suricata's stats output. -See http://suricata.readthedocs.io/en/latest/performance/statistics.html for +See for more information. -All fields are numeric. +All fields for Suricata stats are numeric. + - suricata - tags: - thread: `Global` for global statistics (if enabled), thread IDs (e.g. `W#03-enp0s31f6`) for thread-specific statistics @@ -94,8 +100,24 @@ All fields are numeric. - tcp_synack - ... +Some fields of the Suricata alerts are strings, for example the signatures. See +the Suricata [event docs][1] for more information. + +- suricata_alert + - fields: + - action + - gid + - severity + - signature + - source_ip + - source_port + - target_port + - target_port + - ... 
+ +[1]: https://suricata.readthedocs.io/en/suricata-6.0.0/output/eve/eve-json-format.html?highlight=priority#event-type-alert -#### Suricata configuration +### Suricata configuration Suricata needs to deliver the 'stats' event type to a given unix socket for this plugin to pick up. This can be done, for example, by creating an additional @@ -111,20 +133,19 @@ output in the Suricata configuration file: threads: yes ``` -#### FreeBSD tuning +### FreeBSD tuning - -Under FreeBSD it is necessary to increase the localhost buffer space to at least 16384, default is 8192 -otherwise messages from Suricata are truncated as they exceed the default available buffer space, -consequently no statistics are processed by the plugin. +Under FreeBSD it is necessary to increase the localhost buffer space to at least +16384 (the default is 8192); otherwise messages from Suricata are truncated as +they exceed the available buffer space and consequently no statistics are +processed by the plugin. ```text sysctl -w net.local.stream.recvspace=16384 sysctl -w net.local.stream.sendspace=16384 ``` - -### Example Output +## Example Output ```text suricata,host=myhost,thread=FM#01 flow_mgr_rows_empty=0,flow_mgr_rows_checked=65536,flow_mgr_closed_pruned=0,flow_emerg_mode_over=0,flow_mgr_flows_timeout_inuse=0,flow_mgr_rows_skipped=65535,flow_mgr_bypassed_pruned=0,flow_mgr_flows_removed=0,flow_mgr_est_pruned=0,flow_mgr_flows_notimeout=1,flow_mgr_flows_checked=1,flow_mgr_rows_busy=0,flow_spare=10000,flow_mgr_rows_maxlen=1,flow_mgr_new_pruned=0,flow_emerg_mode_entered=0,flow_tcp_reuse=0,flow_mgr_flows_timeout=0 1568368562545197545 diff --git a/plugins/inputs/suricata/sample.conf b/plugins/inputs/suricata/sample.conf new file mode 100644 index 0000000000000..6abf7e28aa65f --- /dev/null +++ b/plugins/inputs/suricata/sample.conf @@ -0,0 +1,13 @@ +# Suricata stats and alerts plugin +[[inputs.suricata]] + ## Data sink for Suricata stats log. + # This is expected to be a filename of a + # unix socket to be created for listening. + source = "/var/run/suricata-stats.sock" + + # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" + # becomes "detect_alert" when delimiter is "_". + delimiter = "_" + + # Detect alert logs + alerts = false diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go index 17c0b571510b0..bbe246fe38ccf 100644 --- a/plugins/inputs/suricata/suricata.go +++ b/plugins/inputs/suricata/suricata.go @@ -1,8 +1,10 @@ +//go:generate ../../../tools/readme_config_includer/generator package suricata import ( "bufio" "context" + _ "embed" "encoding/json" "fmt" "io" @@ -14,6 +16,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + const ( // InBufSize is the input buffer size for JSON received via socket. // Set to 10MB, as depending on the number of threads the output might be @@ -25,6 +31,7 @@ const ( type Suricata struct { Source string `toml:"source"` Delimiter string `toml:"delimiter"` + Alerts bool `toml:"alerts"` inputListener *net.UnixListener cancel context.CancelFunc @@ -34,25 +41,7 @@ type Suricata struct { wg sync.WaitGroup } -// Description returns the plugin description. -func (s *Suricata) Description() string { - return "Suricata stats plugin" -} - -const sampleConfig = ` - ## Data sink for Suricata stats log - # This is expected to be a filename of a - # unix socket to be created for listening. 
- source = "/var/run/suricata-stats.sock" - - # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" - # becomes "detect_alert" when delimiter is "_". - delimiter = "_" -` - -// SampleConfig returns a sample TOML section to illustrate configuration -// options. -func (s *Suricata) SampleConfig() string { +func (*Suricata) SampleConfig() string { return sampleConfig } @@ -81,6 +70,8 @@ func (s *Suricata) Start(acc telegraf.Accumulator) error { // Stop causes the plugin to cease collecting JSON data from the socket provided // to Suricata. func (s *Suricata) Stop() { + // Ignore the returned error as we cannot do anything about it anyway + //nolint:errcheck,revive s.inputListener.Close() if s.cancel != nil { s.cancel() @@ -98,8 +89,12 @@ func (s *Suricata) readInput(ctx context.Context, acc telegraf.Accumulator, conn line, rerr := reader.ReadBytes('\n') if rerr != nil { return rerr - } else if len(line) > 0 { - s.parse(acc, line) + } + if len(line) > 0 { + err := s.parse(acc, line) + if err != nil { + acc.AddError(err) + } } } } @@ -146,29 +141,45 @@ func flexFlatten(outmap map[string]interface{}, field string, v interface{}, del return err } } + case []interface{}: + for _, v := range t { + err := flexFlatten(outmap, field, v, delimiter) + if err != nil { + return err + } + } + case string: + outmap[field] = v case float64: - outmap[field] = v.(float64) + outmap[field] = t default: - return fmt.Errorf("Unsupported type %T encountered", t) + return fmt.Errorf("unsupported type %T encountered", t) } return nil } -func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { - // initial parsing - var result map[string]interface{} - err := json.Unmarshal([]byte(sjson), &result) - if err != nil { - acc.AddError(err) +func (s *Suricata) parseAlert(acc telegraf.Accumulator, result map[string]interface{}) { + if _, ok := result["alert"].(map[string]interface{}); !ok { + s.Log.Debug("'alert' sub-object does not have required structure") return } - // check for presence of relevant stats - if _, ok := result["stats"]; !ok { - s.Log.Debug("Input does not contain necessary 'stats' sub-object") - return + totalmap := make(map[string]interface{}) + for k, v := range result["alert"].(map[string]interface{}) { + //source and target fields are maps + err := flexFlatten(totalmap, k, v, s.Delimiter) + if err != nil { + s.Log.Debugf("Flattening alert failed: %v", err) + // we skip this subitem as something did not parse correctly + continue + } } + //threads field do not exist in alert output, always global + acc.AddFields("suricata_alert", totalmap, nil) +} + +func (s *Suricata) parseStats(acc telegraf.Accumulator, result map[string]interface{}) { if _, ok := result["stats"].(map[string]interface{}); !ok { s.Log.Debug("The 'stats' sub-object does not have required structure") return @@ -182,9 +193,9 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { for k, t := range v { outmap := make(map[string]interface{}) if threadStruct, ok := t.(map[string]interface{}); ok { - err = flexFlatten(outmap, "", threadStruct, s.Delimiter) + err := flexFlatten(outmap, "", threadStruct, s.Delimiter) if err != nil { - s.Log.Debug(err) + s.Log.Debugf("Flattening alert failed: %v", err) // we skip this thread as something did not parse correctly continue } @@ -195,10 +206,11 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { s.Log.Debug("The 'threads' sub-object does not have required structure") } } else { - err = flexFlatten(totalmap, k, v, s.Delimiter) + err := 
flexFlatten(totalmap, k, v, s.Delimiter) if err != nil { - s.Log.Debug(err.Error()) + s.Log.Debugf("Flattening alert failed: %v", err) // we skip this subitem as something did not parse correctly + continue } } } @@ -213,9 +225,31 @@ func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { } } +func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) error { + // initial parsing + var result map[string]interface{} + err := json.Unmarshal(sjson, &result) + if err != nil { + return err + } + // check for presence of relevant stats or alert + _, ok := result["stats"] + _, ok2 := result["alert"] + if !ok && !ok2 { + s.Log.Debugf("Invalid input without 'stats' or 'alert' object: %v", result) + return fmt.Errorf("input does not contain 'stats' or 'alert' object") + } + if ok { + s.parseStats(acc, result) + } else if ok2 && s.Alerts { + s.parseAlert(acc, result) + } + return nil +} + // Gather measures and submits one full set of telemetry to Telegraf. // Not used here, submission is completely input-driven. -func (s *Suricata) Gather(acc telegraf.Accumulator) error { +func (s *Suricata) Gather(_ telegraf.Accumulator) error { return nil } diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go index 9c9c2ddc3694c..997475de659b6 100644 --- a/plugins/inputs/suricata/suricata_test.go +++ b/plugins/inputs/suricata/suricata_test.go @@ -2,7 +2,6 @@ package suricata import ( "fmt" - "io/ioutil" "log" "math/rand" "net" @@ -12,23 +11,61 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"capture":{"kernel_packets":905344474,"kernel_drops":78355440,"kernel_packets_delta":2376742,"kernel_drops_delta":82049}}}` var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` func TestSuricataLarge(t *testing.T) { - dir, err := ioutil.TempDir("", "test") + dir := t.TempDir() + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Alerts: true, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + data, err := os.ReadFile("testdata/test1.json") + require.NoError(t, err) + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + _, err = c.Write(data) require.NoError(t, err) - defer os.RemoveAll(dir) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + + //test suricata alerts + data2, err := os.ReadFile("testdata/test2.json") + require.NoError(t, err) + _, err = c.Write(data2) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) + + acc.Wait(1) +} + +func TestSuricataAlerts(t *testing.T) { + dir := t.TempDir() tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) s := Suricata{ Source: tmpfn, Delimiter: ".", + Alerts: true, Log: testutil.Logger{ Name: "inputs.suricata", }, @@ -37,22 +74,45 @@ func TestSuricataLarge(t *testing.T) { require.NoError(t, s.Start(&acc)) defer s.Stop() - data, err := ioutil.ReadFile("testdata/test1.json") + data, err := os.ReadFile("testdata/test3.json") require.NoError(t, err) c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - 
c.Write([]byte(data)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write(data) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(1) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "suricata_alert", + map[string]string{}, + map[string]interface{}{ + "action": "allowed", + "category": "Misc activity", + "gid": float64(1), + "rev": float64(0), + "signature": "Corrupted HTTP body", + "signature_id": float64(6), + "severity": float64(3), + "source.ip": "10.0.0.5", + "target.ip": "179.60.192.3", + "source.port": float64(18715), + "target.port": float64(80), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) } func TestSuricata(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - require.NoError(t, err) - defer os.RemoveAll(dir) + dir := t.TempDir() tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) s := Suricata{ @@ -68,9 +128,11 @@ func TestSuricata(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex2)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex2)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(1) @@ -94,9 +156,7 @@ func TestSuricata(t *testing.T) { } func TestThreadStats(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - require.NoError(t, err) - defer os.RemoveAll(dir) + dir := t.TempDir() tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) s := Suricata{ @@ -113,13 +173,18 @@ func TestThreadStats(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte("")) - c.Write([]byte("\n")) - c.Write([]byte("foobard}\n")) - c.Write([]byte(ex3)) - c.Write([]byte("\n")) - c.Close() - acc.Wait(1) + _, err = c.Write([]byte("")) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + _, err = c.Write([]byte("foobard}\n")) + require.NoError(t, err) + _, err = c.Write([]byte(ex3)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) + acc.Wait(2) expected := []telegraf.Metric{ testutil.MustMetric( @@ -139,9 +204,7 @@ func TestThreadStats(t *testing.T) { } func TestSuricataInvalid(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - require.NoError(t, err) - defer os.RemoveAll(dir) + dir := t.TempDir() tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) s := Suricata{ @@ -158,9 +221,11 @@ func TestSuricataInvalid(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte("sfjiowef")) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte("sfjiowef")) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) } @@ -179,10 +244,7 @@ func TestSuricataInvalidPath(t *testing.T) { } func TestSuricataTooLongLine(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - require.NoError(t, err) - defer os.RemoveAll(dir) - tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + tmpfn := testutil.TempSocket(t) s := Suricata{ Source: tmpfn, @@ -197,19 +259,17 @@ func TestSuricataTooLongLine(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(strings.Repeat("X", 20000000))) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(strings.Repeat("X", 20000000))) + 
require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) - } func TestSuricataEmptyJSON(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - require.NoError(t, err) - defer os.RemoveAll(dir) - tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + tmpfn := testutil.TempSocket(t) s := Suricata{ Source: tmpfn, @@ -224,19 +284,16 @@ func TestSuricataEmptyJSON(t *testing.T) { c, err := net.Dial("unix", tmpfn) if err != nil { log.Println(err) } - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.WaitError(1) } func TestSuricataDisconnectSocket(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - require.NoError(t, err) - defer os.RemoveAll(dir) - tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + tmpfn := testutil.TempSocket(t) s := Suricata{ Source: tmpfn, @@ -251,24 +308,25 @@ func TestSuricataDisconnectSocket(t *testing.T) { c, err := net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex2)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex2)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) c, err = net.Dial("unix", tmpfn) require.NoError(t, err) - c.Write([]byte(ex3)) - c.Write([]byte("\n")) - c.Close() + _, err = c.Write([]byte(ex3)) + require.NoError(t, err) + _, err = c.Write([]byte("\n")) + require.NoError(t, err) + require.NoError(t, c.Close()) acc.Wait(2) } func TestSuricataStartStop(t *testing.T) { - dir, err := ioutil.TempDir("", "test") - require.NoError(t, err) - defer os.RemoveAll(dir) - tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + tmpfn := testutil.TempSocket(t) s := Suricata{ Source: tmpfn, @@ -280,3 +338,43 @@ func TestSuricataStartStop(t *testing.T) { require.NoError(t, s.Start(&acc)) s.Stop() } + +func TestSuricataParse(t *testing.T) { + tests := []struct { + filename string + expected []telegraf.Metric + }{{ + filename: "test2.json", + expected: []telegraf.Metric{ + testutil.MustMetric( + "suricata", + map[string]string{ + "thread": "W#01-ens2f1", + }, + map[string]interface{}{ + "detect_alert": float64(0), + "detect_engines_id": float64(0), + "detect_engines_last_reload": "2021-06-08T06:33:05.084872+0000", + "detect_engines_rules_failed": float64(0), + "detect_engines_rules_loaded": float64(22712), + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tc := range tests { + data, err := os.ReadFile("testdata/" + tc.filename) + require.NoError(t, err) + + s := Suricata{ + Delimiter: "_", + } + acc := testutil.Accumulator{} + err = s.parse(&acc, data) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tc.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + } +} diff --git a/plugins/inputs/suricata/suricata_testutil.go b/plugins/inputs/suricata/suricata_testutil.go deleted file mode 100644 index 55aa2bb9bae69..0000000000000 --- a/plugins/inputs/suricata/suricata_testutil.go +++ /dev/null @@ -1,38 +0,0 @@ -package suricata - -import ( - "bytes" - "sync" -) - -// A thread-safe Buffer wrapper to enable concurrent access to log output. 
-type buffer struct { - b bytes.Buffer - m sync.Mutex -} - -func (b *buffer) Read(p []byte) (n int, err error) { - b.m.Lock() - defer b.m.Unlock() - return b.b.Read(p) -} -func (b *buffer) Write(p []byte) (n int, err error) { - b.m.Lock() - defer b.m.Unlock() - return b.b.Write(p) -} -func (b *buffer) String() string { - b.m.Lock() - defer b.m.Unlock() - return b.b.String() -} -func (b *buffer) Reset() { - b.m.Lock() - defer b.m.Unlock() - b.b.Reset() -} -func (b *buffer) Bytes() []byte { - b.m.Lock() - defer b.m.Unlock() - return b.b.Bytes() -} diff --git a/plugins/inputs/suricata/testdata/test2.json b/plugins/inputs/suricata/testdata/test2.json new file mode 100644 index 0000000000000..edb7d245df1fd --- /dev/null +++ b/plugins/inputs/suricata/testdata/test2.json @@ -0,0 +1,21 @@ +{ + "timestamp": "2021-06-08T06:34:49.237367+0000", + "event_type": "stats", + "stats": { + "threads": { + "W#01-ens2f1": { + "detect": { + "engines": [ + { + "id": 0, + "last_reload": "2021-06-08T06:33:05.084872+0000", + "rules_loaded": 22712, + "rules_failed": 0 + } + ], + "alert": 0 + } + } + } + } +} diff --git a/plugins/inputs/suricata/testdata/test3.json b/plugins/inputs/suricata/testdata/test3.json new file mode 100644 index 0000000000000..3e8649e66a14a --- /dev/null +++ b/plugins/inputs/suricata/testdata/test3.json @@ -0,0 +1 @@ +{"timestamp":"2021-05-30T20:07:13.208777+0200","flow_id":1696236471136137,"in_iface":"s1-suricata","event_type":"alert","src_ip":"10.0.0.5","src_port":18715,"dest_ip":"179.60.192.3","dest_port":80,"proto":"TCP","alert":{"action":"allowed","gid":1,"source":{"ip":"10.0.0.5","port":18715},"target":{"ip":"179.60.192.3","port":80},"signature_id":6,"rev":0,"signature":"Corrupted HTTP body","category":"Misc activity","severity":3},"flow":{"pkts_toserver":1,"pkts_toclient":0,"bytes_toserver":174,"bytes_toclient":0,"start":"2021-05-30T20:07:13.208777+0200"}} diff --git a/plugins/inputs/swap/README.md b/plugins/inputs/swap/README.md index 98389287180fa..07c4c3aed22fb 100644 --- a/plugins/inputs/swap/README.md +++ b/plugins/inputs/swap/README.md @@ -2,17 +2,18 @@ The swap plugin collects system swap metrics. -For more information on what swap memory is, read [All about Linux swap space](https://www.linux.com/news/all-about-linux-swap-space). +For more information on what swap memory is, read [All about Linux swap
-### Configuration: +## Configuration -```toml +```toml @sample.conf # Read metrics about swap memory usage [[inputs.swap]] # no configuration ``` -### Metrics: +## Metrics - swap - fields: @@ -23,8 +24,8 @@ For more information on what swap memory is, read [All about Linux swap space](h - in (int, bytes): data swapped in since last boot calculated from page number - out (int, bytes): data swapped out since last boot calculated from page number -### Example Output: +## Example Output -``` +```shell swap total=20855394304i,used_percent=45.43883523785713,used=9476448256i,free=1715331072i 1511894782000000000 ``` diff --git a/plugins/inputs/swap/sample.conf b/plugins/inputs/swap/sample.conf new file mode 100644 index 0000000000000..16c28fdc29ad6 --- /dev/null +++ b/plugins/inputs/swap/sample.conf @@ -0,0 +1,3 @@ +# Read metrics about swap memory usage +[[inputs.swap]] + # no configuration diff --git a/plugins/inputs/swap/swap.go b/plugins/inputs/swap/swap.go index eabb40a038e7d..4e04606666aed 100644 --- a/plugins/inputs/swap/swap.go +++ b/plugins/inputs/swap/swap.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package swap import ( + _ "embed" "fmt" "github.com/influxdata/telegraf" @@ -8,18 +10,20 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. +//go:embed sample.conf +var sampleConfig string + type SwapStats struct { ps system.PS } -func (_ *SwapStats) Description() string { - return "Read metrics about swap memory usage" +func (*SwapStats) SampleConfig() string { + return sampleConfig } -func (_ *SwapStats) SampleConfig() string { return "" } - -func (s *SwapStats) Gather(acc telegraf.Accumulator) error { - swap, err := s.ps.SwapStat() +func (ss *SwapStats) Gather(acc telegraf.Accumulator) error { + swap, err := ss.ps.SwapStat() if err != nil { return fmt.Errorf("error getting swap memory info: %s", err) } diff --git a/plugins/inputs/swap/swap_test.go b/plugins/inputs/swap/swap_test.go index 3f97b354e86b4..85a8adb5c184c 100644 --- a/plugins/inputs/swap/swap_test.go +++ b/plugins/inputs/swap/swap_test.go @@ -5,7 +5,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/mem" + "github.com/shirou/gopsutil/v3/mem" "github.com/stretchr/testify/require" ) diff --git a/plugins/inputs/synproxy/README.md b/plugins/inputs/synproxy/README.md index efb8203515c69..918fbc08df4ec 100644 --- a/plugins/inputs/synproxy/README.md +++ b/plugins/inputs/synproxy/README.md @@ -1,19 +1,20 @@ # Synproxy Input Plugin -The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter module used for SYN attack mitigation. -The use of synproxy is documented in `man iptables-extensions` under the SYNPROXY section. +The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter +module used for SYN attack mitigation. The use of synproxy is documented in +`man iptables-extensions` under the SYNPROXY section. 
+## Configuration -### Configuration - -The synproxy plugin does not need any configuration - -```toml +```toml @sample.conf +# Get synproxy counter statistics from procfs [[inputs.synproxy]] # no configuration ``` -### Metrics +The synproxy plugin does not need any configuration + +## Metrics The following synproxy counters are gathered @@ -26,24 +27,27 @@ The following synproxy counters are gathered - syn_received (uint32, packets, counter) - SYN received - conn_reopened (uint32, packets, counter) - Connections reopened -### Sample Queries +## Sample Queries + +Get the number of packets per 5 minutes for the measurement in the last hour +from InfluxDB: -Get the number of packets per 5 minutes for the measurement in the last hour from InfluxDB: ```sql SELECT difference(last("cookie_invalid")) AS "cookie_invalid", difference(last("cookie_retrans")) AS "cookie_retrans", difference(last("cookie_valid")) AS "cookie_valid", difference(last("entries")) AS "entries", difference(last("syn_received")) AS "syn_received", difference(last("conn_reopened")) AS "conn_reopened" FROM synproxy WHERE time > NOW() - 1h GROUP BY time(5m) FILL(null); ``` -### Troubleshooting +## Troubleshooting Execute the following CLI command in Linux to test the synproxy counters: + ```sh cat /proc/net/stat/synproxy ``` -### Example Output +## Example Output This section shows example output in Line Protocol format. -``` +```shell synproxy,host=Filter-GW01,rack=filter-node1 conn_reopened=0i,cookie_invalid=235i,cookie_retrans=0i,cookie_valid=8814i,entries=0i,syn_received=8742i 1549550634000000000 ``` diff --git a/plugins/inputs/synproxy/sample.conf b/plugins/inputs/synproxy/sample.conf new file mode 100644 index 0000000000000..de944aa19eb1f --- /dev/null +++ b/plugins/inputs/synproxy/sample.conf @@ -0,0 +1,3 @@ +# Get synproxy counter statistics from procfs +[[inputs.synproxy]] + # no configuration diff --git a/plugins/inputs/synproxy/synproxy.go b/plugins/inputs/synproxy/synproxy.go index 6a5b2b3239ed9..3950f727fd074 100644 --- a/plugins/inputs/synproxy/synproxy.go +++ b/plugins/inputs/synproxy/synproxy.go @@ -1,6 +1,8 @@ +//go:generate ../../../tools/readme_config_includer/generator package synproxy import ( + _ "embed" "os" "path" @@ -8,6 +10,10 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + type Synproxy struct { Log telegraf.Logger `toml:"-"` @@ -15,12 +21,8 @@ type Synproxy struct { statFile string } -func (k *Synproxy) Description() string { - return "Get synproxy counter statistics from procfs" -} - -func (k *Synproxy) SampleConfig() string { - return "" +func (*Synproxy) SampleConfig() string { + return sampleConfig } func getHostProc() string { diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go index bcc9729384282..93cd26e3343f3 100644 --- a/plugins/inputs/synproxy/synproxy_linux.go +++ b/plugins/inputs/synproxy/synproxy_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go index 71a223644d8ed..f12fc70656eba 100644 --- a/plugins/inputs/synproxy/synproxy_notlinux.go +++ b/plugins/inputs/synproxy/synproxy_notlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package synproxy diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go index 83d752ff16f8c..0f50322666fd7 100644 --- a/plugins/inputs/synproxy/synproxy_test.go +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -1,15 +1,15 @@ +//go:build linux // +build linux package synproxy import ( - "io/ioutil" "os" "testing" - "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" ) func TestSynproxyFileNormal(t *testing.T) { @@ -38,8 +38,8 @@ func TestSynproxyFileHeaderMismatch(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid number of columns in data") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid number of columns in data") } func TestSynproxyFileInvalidHex(t *testing.T) { @@ -52,13 +52,15 @@ func TestSynproxyFileInvalidHex(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) - assert.Contains(t, err.Error(), "invalid value") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid value") } func TestNoSynproxyFile(t *testing.T) { tmpfile := makeFakeSynproxyFile([]byte(synproxyFileNormal)) // Remove file to generate "no such file" error + // Ignore errors if file does not yet exist + //nolint:errcheck,revive os.Remove(tmpfile) k := Synproxy{ @@ -67,7 +69,7 @@ func TestNoSynproxyFile(t *testing.T) { acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.Error(t, err) + require.Error(t, err) } // Valid Synproxy file @@ -147,13 +149,13 @@ func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string acc := testutil.Accumulator{} err := k.Gather(&acc) - assert.NoError(t, err) + require.NoError(t, err) acc.AssertContainsFields(t, "synproxy", telegrafData) } func makeFakeSynproxyFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "synproxy_test") + tmpfile, err := os.CreateTemp("", "synproxy_test") if err != nil { panic(err) } diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index 32c5f2717b630..1190258f53abd 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -1,17 +1,17 @@ # Syslog Input Plugin -The syslog plugin listens for syslog messages transmitted over -a Unix Domain socket, -[UDP](https://tools.ietf.org/html/rfc5426), +The syslog plugin listens for syslog messages transmitted over a Unix 
Domain +socket, [UDP](https://tools.ietf.org/html/rfc5426), [TCP](https://tools.ietf.org/html/rfc6587), or -[TLS](https://tools.ietf.org/html/rfc5425); with or without the octet counting framing. +[TLS](https://tools.ietf.org/html/rfc5425); with or without the octet counting +framing. Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). -### Configuration +## Configuration -```toml +```toml @sample.conf [[inputs.syslog]] ## Protocol, address and port to host the syslog receiver. ## If no host is specified, then localhost is used. @@ -55,6 +55,11 @@ Syslog messages should be formatted according to ## By default best effort parsing is off. # best_effort = false + ## The RFC standard to use for message parsing + ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) + ## Must be one of "RFC5424", or "RFC3164". + # syslog_standard = "RFC5424" + ## Character to prepend to SD-PARAMs (default = "_"). ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] @@ -63,23 +68,30 @@ Syslog messages should be formatted according to # sdparam_separator = "_" ``` -#### Message transport +### Message transport + +The `framing` option only applies to streams. It governs the way we expect to +receive messages within the stream. Namely, with the [`"octet counting"`][1] +technique (default) or with the [`"non-transparent"`][2] framing. + +The `trailer` option only applies when the `framing` option is +`"non-transparent"`. It must have one of the following values: `"LF"` (default), +or `"NUL"`. -The `framing` option only applies to streams. It governs the way we expect to receive messages within the stream. -Namely, with the [`"octet counting"`](https://tools.ietf.org/html/rfc5425#section-4.3) technique (default) or with the [`"non-transparent"`](https://tools.ietf.org/html/rfc6587#section-3.4.2) framing. +[1]: https://tools.ietf.org/html/rfc5425#section-4.3 -The `trailer` option only applies when `framing` option is `"non-transparent"`. It must have one of the following values: `"LF"` (default), or `"NUL"`. +[2]: https://tools.ietf.org/html/rfc6587#section-3.4.2 -#### Best effort +### Best effort The [`best_effort`](https://github.com/influxdata/go-syslog#best-effort-mode) option instructs the parser to extract partial but valid info from syslog messages. If unset only full messages will be collected. -#### Rsyslog Integration +### Rsyslog Integration Rsyslog can be configured to forward logging messages to Telegraf by configuring -[remote logging](https://www.rsyslog.com/doc/v8-stable/configuration/actions.html#remote-machine). +[remote logging][3]. Most systems are set up with a configuration split between `/etc/rsyslog.conf` and the files in the `/etc/rsyslog.d/` directory; it is recommended to add the @@ -88,7 +100,8 @@ config file. 
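As a quick aside before the rsyslog snippets: the octet-counted framing described under "Message transport" above simply prefixes each RFC 5424 message with its length in bytes and a space. A minimal, illustrative Go sender (an assumption-laden sketch, using the default `tcp://:6514` listener from the configuration above and the message from the Troubleshooting section further below, not anything shipped with the plugin) might look like:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// An RFC 5424 formatted syslog message.
	msg := "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test"

	conn, err := net.Dial("tcp", "127.0.0.1:6514")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Octet counting: write the frame "<length> <message>".
	if _, err := fmt.Fprintf(conn, "%d %s", len(msg), msg); err != nil {
		panic(err)
	}
}
```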
Add the following lines to `/etc/rsyslog.d/50-telegraf.conf` making adjustments to the target address as needed: -``` + +```shell $ActionQueueType LinkedList # use asynchronous processing $ActionQueueFileName srvrfwd # set file name, also enables disk mode $ActionResumeRetryCount -1 # infinite retries on insert failure @@ -102,7 +115,8 @@ $ActionQueueSaveOnShutdown on # save in-memory data if rsyslog shuts down ``` You can alternately use `advanced` format (aka RainerScript): -``` + +```bash # forward over tcp with octet framing according to RFC 5425 action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format") @@ -110,9 +124,13 @@ action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1 #action(type="omfwd" Protocol="udp" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format") ``` -To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc/v8-stable/tutorials/tls.html). +To complete TLS setup please refer to [rsyslog docs][4]. + +[3]: https://www.rsyslog.com/doc/v8-stable/configuration/actions.html#remote-machine -### Metrics +[4]: https://www.rsyslog.com/doc/v8-stable/tutorials/tls.html + +## Metrics - syslog - tags @@ -131,17 +149,20 @@ To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc - *Structured Data* (string) - timestamp: the time the messages was received -#### Structured Data +### Structured Data -Structured data produces field keys by combining the `SD_ID` with the `PARAM_NAME` combined using the `sdparam_separator` as in the following example: -``` +Structured data produces field keys by combining the `SD_ID` with the +`PARAM_NAME` combined using the `sdparam_separator` as in the following example: + +```shell 170 <165>1 2018-10-01:14:15.000Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"] An application event log entry... ``` -``` + +```shell syslog,appname=evntslog,facility=local4,hostname=mymachine.example.com,severity=notice exampleSDID@32473_eventID="1011",exampleSDID@32473_eventSource="Application",exampleSDID@32473_iut="3",facility_code=20i,message="An application event log entry...",msgid="ID47",severity_code=5i,timestamp=1065910455003000000i,version=1i 1538421339749472344 ``` -### Troubleshooting +## Troubleshooting You can send debugging messages directly to the input plugin using netcat: @@ -153,11 +174,30 @@ echo "57 <13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc 127.0.0. echo "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc -u 127.0.0.1 6514 ``` -#### RFC3164 +### RFC3164 -RFC3164 encoded messages are not currently supported. You may see the following error if a message encoded in this format: -``` -E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] +RFC3164 encoded messages are supported for UDP only, but not all vendors output +valid RFC3164 messages by default + +- E.g. Cisco IOS + +If you see the following error, it is due to a message encoded in this format: + + ```shell + E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] + ``` + +Users can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. +Add the following lines to the rsyslog configuration file +(e.g. 
`/etc/rsyslog.d/50-telegraf.conf`): + +```sh +# This makes rsyslog listen on 127.0.0.1:514 to receive RFC3164 udp +# messages which can then be forwarded to telegraf as RFC5424 +$ModLoad imudp # loads the udp module +$UDPServerAddress 127.0.0.1 +$UDPServerRun 514 ``` -You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. +Make adjustments to the target address as needed and send your RFC3164 messages +to port 514. diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go index 10f2ddf511d22..1764c891ad7b4 100644 --- a/plugins/inputs/syslog/commons_test.go +++ b/plugins/inputs/syslog/commons_test.go @@ -4,7 +4,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" ) @@ -29,30 +29,30 @@ type testCaseStream struct { werr int // how many errors we expect in the strict mode? } -func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { +func newUDPSyslogReceiver(address string, bestEffort bool, rfc syslogRFC) *Syslog { return &Syslog{ Address: address, now: func() time.Time { return defaultTime }, - BestEffort: bestEffort, - Separator: "_", + BestEffort: bestEffort, + SyslogStandard: rfc, + Separator: "_", } } -func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog { - d := &internal.Duration{ - Duration: defaultReadTimeout, - } +func newTCPSyslogReceiver(address string, keepAlive *config.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog { + d := config.Duration(defaultReadTimeout) s := &Syslog{ Address: address, now: func() time.Time { return defaultTime }, - Framing: f, - ReadTimeout: d, - BestEffort: bestEffort, - Separator: "_", + Framing: f, + ReadTimeout: &d, + BestEffort: bestEffort, + SyslogStandard: syslogRFC5424, + Separator: "_", } if keepAlive != nil { s.KeepAlivePeriod = keepAlive diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go index d0352c6ae1c7f..76f8c7081cb99 100644 --- a/plugins/inputs/syslog/nontransparent_test.go +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -2,18 +2,16 @@ package syslog import ( "crypto/tls" - "io/ioutil" "net" - "os" - "path/filepath" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForNonTransparent() []testCaseStream { @@ -135,11 +133,11 @@ return testCases } -func testStrictNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testStrictNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *config.Duration) { for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.NonTransparent) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 10, false, framing.NonTransparent) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ 
-157,12 +155,14 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan require.NoError(t, e) config.ServerName = "localhost" conn, err = tls.Dial(protocol, address, config) + require.NotNil(t, conn) + require.NoError(t, err) } else { conn, err = net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) defer conn.Close() } - require.NotNil(t, conn) - require.NoError(t, err) // Clear acc.ClearMetrics() @@ -191,11 +191,12 @@ func testStrictNonTransparent(t *testing.T, protocol string, address string, wan } } -func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool) { + keepAlive := (*config.Duration)(nil) for _, tc := range getTestCasesForNonTransparent() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, framing.NonTransparent) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 10, true, framing.NonTransparent) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -244,7 +245,7 @@ func TestNonTransparentStrict_tcp(t *testing.T) { } func TestNonTransparentBestEffort_tcp(t *testing.T) { - testBestEffortNonTransparent(t, "tcp", address, false, nil) + testBestEffortNonTransparent(t, "tcp", address, false) } func TestNonTransparentStrict_tcp_tls(t *testing.T) { @@ -252,45 +253,35 @@ func TestNonTransparentStrict_tcp_tls(t *testing.T) { } func TestNonTransparentBestEffort_tcp_tls(t *testing.T) { - testBestEffortNonTransparent(t, "tcp", address, true, nil) + testBestEffortNonTransparent(t, "tcp", address, true) } func TestNonTransparentStrictWithKeepAlive_tcp_tls(t *testing.T) { - testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) + d := config.Duration(time.Minute) + testStrictNonTransparent(t, "tcp", address, true, &d) } func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { - testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: 0}) + d := config.Duration(0) + testStrictNonTransparent(t, "tcp", address, true, &d) } func TestNonTransparentStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") + sock := testutil.TempSocket(t) testStrictNonTransparent(t, "unix", sock, false, nil) } func TestNonTransparentBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") - testBestEffortNonTransparent(t, "unix", sock, false, nil) + sock := testutil.TempSocket(t) + testBestEffortNonTransparent(t, "unix", sock, false) } func TestNonTransparentStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") + sock := testutil.TempSocket(t) testStrictNonTransparent(t, "unix", sock, true, nil) } func TestNonTransparentBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") - 
testBestEffortNonTransparent(t, "unix", sock, true, nil) + sock := testutil.TempSocket(t) + testBestEffortNonTransparent(t, "unix", sock, true) } diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index 210b64dbe11c8..0d24b83a3b522 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -3,18 +3,16 @@ package syslog import ( "crypto/tls" "fmt" - "io/ioutil" "net" - "os" - "path/filepath" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForOctetCounting() []testCaseStream { @@ -335,7 +333,7 @@ func getTestCasesForOctetCounting() []testCaseStream { return testCases } -func testStrictOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testStrictOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *config.Duration) { for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver @@ -357,12 +355,14 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want require.NoError(t, e) config.ServerName = "localhost" conn, err = tls.Dial(protocol, address, config) + require.NotNil(t, conn) + require.NoError(t, err) } else { conn, err = net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) defer conn.Close() } - require.NotNil(t, conn) - require.NoError(t, err) // Clear acc.ClearMetrics() @@ -391,7 +391,8 @@ func testStrictOctetCounting(t *testing.T, protocol string, address string, want } } -func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { +func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool) { + keepAlive := (*config.Duration)(nil) for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver @@ -444,7 +445,7 @@ func TestOctetCountingStrict_tcp(t *testing.T) { } func TestOctetCountingBestEffort_tcp(t *testing.T) { - testBestEffortOctetCounting(t, "tcp", address, false, nil) + testBestEffortOctetCounting(t, "tcp", address, false) } func TestOctetCountingStrict_tcp_tls(t *testing.T) { @@ -452,45 +453,35 @@ func TestOctetCountingStrict_tcp_tls(t *testing.T) { } func TestOctetCountingBestEffort_tcp_tls(t *testing.T) { - testBestEffortOctetCounting(t, "tcp", address, true, nil) + testBestEffortOctetCounting(t, "tcp", address, true) } func TestOctetCountingStrictWithKeepAlive_tcp_tls(t *testing.T) { - testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) + d := config.Duration(time.Minute) + testStrictOctetCounting(t, "tcp", address, true, &d) } func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { - testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: 0}) + d := config.Duration(0) + testStrictOctetCounting(t, "tcp", address, true, &d) } func TestOctetCountingStrict_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") + sock 
:= testutil.TempSocket(t) testStrictOctetCounting(t, "unix", sock, false, nil) } func TestOctetCountingBestEffort_unix(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") - testBestEffortOctetCounting(t, "unix", sock, false, nil) + sock := testutil.TempSocket(t) + testBestEffortOctetCounting(t, "unix", sock, false) } func TestOctetCountingStrict_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") + sock := testutil.TempSocket(t) testStrictOctetCounting(t, "unix", sock, true, nil) } func TestOctetCountingBestEffort_unix_tls(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") - require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") - testBestEffortOctetCounting(t, "unix", sock, true, nil) + sock := testutil.TempSocket(t) + testBestEffortOctetCounting(t, "unix", sock, true) } diff --git a/plugins/inputs/syslog/rfc3164_test.go b/plugins/inputs/syslog/rfc3164_test.go new file mode 100644 index 0000000000000..bd192a6d92a39 --- /dev/null +++ b/plugins/inputs/syslog/rfc3164_test.go @@ -0,0 +1,123 @@ +package syslog + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func timeMustParse(value string) time.Time { + format := "Jan 2 15:04:05 2006" + t, err := time.Parse(format, value) + if err != nil { + panic(fmt.Sprintf("couldn't parse time: %v", value)) + } + return t +} + +func getTestCasesForRFC3164() []testCasePacket { + currentYear := time.Now().Year() + ts := timeMustParse(fmt.Sprintf("Dec 2 16:31:03 %d", currentYear)).UnixNano() + testCases := []testCasePacket{ + { + name: "complete", + data: []byte("<13>Dec 2 16:31:03 host app: Test"), + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "appname": "app", + "severity": "notice", + "hostname": "host", + "facility": "user", + }, + map[string]interface{}{ + "timestamp": ts, + "message": "Test", + "facility_code": 1, + "severity_code": 5, + }, + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ + "appname": "app", + "severity": "notice", + "hostname": "host", + "facility": "user", + }, + map[string]interface{}{ + "timestamp": ts, + "message": "Test", + "facility_code": 1, + "severity_code": 5, + }, + defaultTime, + ), + }, + } + + return testCases +} + +func testRFC3164(t *testing.T, protocol string, address string, bestEffort bool) { + for _, tc := range getTestCasesForRFC3164() { + t.Run(tc.name, func(t *testing.T) { + // Create receiver + receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort, syslogRFC3164) + acc := &testutil.Accumulator{} + require.NoError(t, receiver.Start(acc)) + defer receiver.Stop() + + // Connect + conn, err := net.Dial(protocol, address) + require.NotNil(t, conn) + require.NoError(t, err) + + // Write + _, err = conn.Write(tc.data) + conn.Close() + if err != nil { + if err, ok := err.(*net.OpError); ok { + if err.Err.Error() == "write: message too long" { + return + } + } + } + + // Waiting ... 
+ if tc.wantStrict == nil && tc.werr || bestEffort && tc.werr { + acc.WaitError(1) + } + if tc.wantBestEffort != nil && bestEffort || tc.wantStrict != nil && !bestEffort { + acc.Wait(1) // RFC3164 mandates a syslog message per UDP packet + } + + // Compare + var got telegraf.Metric + var want telegraf.Metric + if len(acc.Metrics) > 0 { + got = acc.GetTelegrafMetrics()[0] + } + if bestEffort { + want = tc.wantBestEffort + } else { + want = tc.wantStrict + } + testutil.RequireMetricEqual(t, want, got) + }) + } +} + +func TestRFC3164BestEffort_udp(t *testing.T) { + testRFC3164(t, "udp", address, true) +} + +func TestRFC3164Strict_udp(t *testing.T) { + testRFC3164(t, "udp", address, false) +} diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index 31007bad928a3..32aea6b983e74 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -2,17 +2,17 @@ package syslog import ( "fmt" - "io/ioutil" "net" "os" - "path/filepath" + "runtime" "sync/atomic" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) func getTestCasesForRFC5426() []testCasePacket { @@ -230,7 +230,7 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) for _, tc := range getTestCasesForRFC5426() { t.Run(tc.name, func(t *testing.T) { // Create receiver - receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort) + receiver := newUDPSyslogReceiver(protocol+"://"+address, bestEffort, syslogRFC5424) acc := &testutil.Accumulator{} require.NoError(t, receiver.Start(acc)) defer receiver.Stop() @@ -284,20 +284,28 @@ func TestStrict_udp(t *testing.T) { } func TestBestEffort_unixgram(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + + sock := testutil.TempSocket(t) + f, err := os.Create(sock) require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unixgram.sock") - os.Create(sock) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + testRFC5426(t, "unixgram", sock, true) } func TestStrict_unixgram(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "telegraf") + if runtime.GOOS == "windows" { + t.Skip("Skipping on Windows, as unixgram sockets are not supported") + } + + sock := testutil.TempSocket(t) + f, err := os.Create(sock) require.NoError(t, err) - defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "syslog.TestStrict_unixgram.sock") - os.Create(sock) + t.Cleanup(func() { require.NoError(t, f.Close()) }) + testRFC5426(t, "unixgram", sock, false) } @@ -313,10 +321,11 @@ func TestTimeIncrement_udp(t *testing.T) { // Create receiver receiver := &Syslog{ - Address: "udp://" + address, - now: getNow, - BestEffort: false, - Separator: "_", + Address: "udp://" + address, + now: getNow, + BestEffort: false, + SyslogStandard: syslogRFC5424, + Separator: "_", } acc := &testutil.Accumulator{} require.NoError(t, receiver.Start(acc)) diff --git a/plugins/inputs/syslog/sample.conf b/plugins/inputs/syslog/sample.conf new file mode 100644 index 0000000000000..d166134c234c9 --- /dev/null +++ b/plugins/inputs/syslog/sample.conf @@ -0,0 +1,54 @@ +[[inputs.syslog]] + ## Protocol, address and port to host the syslog receiver. + ## If no host is specified, then localhost is used. 
+ ## If no port is specified, 6514 is used (RFC5425#section-4.1). + ## ex: server = "tcp://localhost:6514" + ## server = "udp://:6514" + ## server = "unix:///var/run/telegraf-syslog.sock" + server = "tcp://:6514" + + ## TLS Config + # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Period between keep alive probes. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + ## Only applies to stream sockets (e.g. TCP). + # keep_alive_period = "5m" + + ## Maximum number of concurrent connections (default = 0). + ## 0 means unlimited. + ## Only applies to stream sockets (e.g. TCP). + # max_connections = 1024 + + ## Read timeout is the maximum time allowed for reading a single message (default = 5s). + ## 0 means unlimited. + # read_timeout = "5s" + + ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). + ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## or the non-transparent framing technique (RFC6587#section-3.4.2). + ## Must be one of "octet-counting", "non-transparent". + # framing = "octet-counting" + + ## The trailer to be expected in case of non-transparent framing (default = "LF"). + ## Must be one of "LF", or "NUL". + # trailer = "LF" + + ## Whether to parse in best effort mode or not (default = false). + ## By default best effort parsing is off. + # best_effort = false + + ## The RFC standard to use for message parsing + ## By default RFC5424 is used. RFC3164 only supports UDP transport (no streaming support) + ## Must be one of "RFC5424", or "RFC3164". + # syslog_standard = "RFC5424" + + ## Character to prepend to SD-PARAMs (default = "_"). + ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. + ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] + ## For each combination a field is created. + ## Its name is created concatenating identifier, sdparam_separator, and parameter name. + # sdparam_separator = "_" diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 6b3615a3e80ce..953560fbea622 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -1,39 +1,53 @@ +//go:generate ../../../tools/readme_config_includer/generator package syslog import ( "crypto/tls" + _ "embed" "fmt" "io" "net" "net/url" "os" + "path/filepath" "strings" "sync" "time" "unicode" - "github.com/influxdata/go-syslog/v2" - "github.com/influxdata/go-syslog/v2/nontransparent" - "github.com/influxdata/go-syslog/v2/octetcounting" - "github.com/influxdata/go-syslog/v2/rfc5424" + "github.com/influxdata/go-syslog/v3" + "github.com/influxdata/go-syslog/v3/nontransparent" + "github.com/influxdata/go-syslog/v3/octetcounting" + "github.com/influxdata/go-syslog/v3/rfc3164" + "github.com/influxdata/go-syslog/v3/rfc5424" + "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/config" framing "github.com/influxdata/telegraf/internal/syslog" tlsConfig "github.com/influxdata/telegraf/plugins/common/tls" "github.com/influxdata/telegraf/plugins/inputs" ) +// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data. 
+//go:embed sample.conf +var sampleConfig string + +type syslogRFC string + const defaultReadTimeout = time.Second * 5 const ipMaxPacketSize = 64 * 1024 +const syslogRFC3164 = "RFC3164" +const syslogRFC5424 = "RFC5424" // Syslog is a syslog plugin type Syslog struct { tlsConfig.ServerConfig Address string `toml:"server"` - KeepAlivePeriod *internal.Duration + KeepAlivePeriod *config.Duration MaxConnections int - ReadTimeout *internal.Duration + ReadTimeout *config.Duration Framing framing.Framing + SyslogStandard syslogRFC Trailer nontransparent.TrailerType BestEffort bool Separator string `toml:"sdparam_separator"` @@ -54,65 +68,10 @@ type Syslog struct { udpListener net.PacketConn } -var sampleConfig = ` - ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 - ## Protocol, address and port to host the syslog receiver. - ## If no host is specified, then localhost is used. - ## If no port is specified, 6514 is used (RFC5425#section-4.1). - server = "tcp://:6514" - - ## TLS Config - # tls_allowed_cacerts = ["/etc/telegraf/ca.pem"] - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - - ## Period between keep alive probes. - ## 0 disables keep alive probes. - ## Defaults to the OS configuration. - ## Only applies to stream sockets (e.g. TCP). - # keep_alive_period = "5m" - - ## Maximum number of concurrent connections (default = 0). - ## 0 means unlimited. - ## Only applies to stream sockets (e.g. TCP). - # max_connections = 1024 - - ## Read timeout is the maximum time allowed for reading a single message (default = 5s). - ## 0 means unlimited. - # read_timeout = "5s" - - ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). - ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), - ## or the non-transparent framing technique (RFC6587#section-3.4.2). - ## Must be one of "octet-counting", "non-transparent". - # framing = "octet-counting" - - ## The trailer to be expected in case of non-transparent framing (default = "LF"). - ## Must be one of "LF", or "NUL". - # trailer = "LF" - - ## Whether to parse in best effort mode or not (default = false). - ## By default best effort parsing is off. - # best_effort = false - - ## Character to prepend to SD-PARAMs (default = "_"). - ## A syslog message can contain multiple parameters and multiple identifiers within structured data section. - ## Eg., [id1 name1="val1" name2="val2"][id2 name1="val1" nameA="valA"] - ## For each combination a field is created. - ## Its name is created concatenating identifier, sdparam_separator, and parameter name. - # sdparam_separator = "_" -` - -// SampleConfig returns sample configuration message -func (s *Syslog) SampleConfig() string { +func (*Syslog) SampleConfig() string { return sampleConfig } -// Description returns the plugin description -func (s *Syslog) Description() string { - return "Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587" -} - // Gather ... 
 func (s *Syslog) Gather(_ telegraf.Accumulator) error {
     return nil
@@ -139,6 +98,8 @@ func (s *Syslog) Start(acc telegraf.Accumulator) error {
     }
 
     if scheme == "unix" || scheme == "unixpacket" || scheme == "unixgram" {
+        // Accept success and failure in case the file does not exist
+        //nolint:errcheck,revive
         os.Remove(s.Address)
     }
 
@@ -181,6 +142,8 @@ func (s *Syslog) Stop() {
     defer s.mu.Unlock()
 
     if s.Closer != nil {
+        // Ignore the returned error as we cannot do anything about it anyway
+        //nolint:errcheck,revive
         s.Close()
     }
     s.wg.Wait()
@@ -189,19 +152,21 @@ func (s *Syslog) Stop() {
 // getAddressParts returns the address scheme and host
 // it also sets defaults for them when missing
 // when the input address does not specify the protocol it returns an error
-func getAddressParts(a string) (string, string, error) {
+func getAddressParts(a string) (scheme string, host string, err error) {
     parts := strings.SplitN(a, "://", 2)
     if len(parts) != 2 {
         return "", "", fmt.Errorf("missing protocol within address '%s'", a)
     }
 
-    u, _ := url.Parse(a)
+    u, err := url.Parse(filepath.ToSlash(a)) // convert backslashes to slashes (to make a Windows path a valid URL)
+    if err != nil {
+        return "", "", fmt.Errorf("could not parse address '%s': %v", a, err)
+    }
     switch u.Scheme {
     case "unix", "unixpacket", "unixgram":
         return parts[0], parts[1], nil
     }
 
-    var host string
     if u.Hostname() != "" {
         host = u.Hostname()
     }
@@ -219,10 +184,15 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) {
     defer s.wg.Done()
     b := make([]byte, ipMaxPacketSize)
     var p syslog.Machine
-    if s.BestEffort {
-        p = rfc5424.NewParser(rfc5424.WithBestEffort())
-    } else {
+    switch {
+    case !s.BestEffort && s.SyslogStandard == syslogRFC5424:
         p = rfc5424.NewParser()
+    case s.BestEffort && s.SyslogStandard == syslogRFC5424:
+        p = rfc5424.NewParser(rfc5424.WithBestEffort())
+    case !s.BestEffort && s.SyslogStandard == syslogRFC3164:
+        p = rfc3164.NewParser(rfc3164.WithYear(rfc3164.CurrentYear{}))
+    case s.BestEffort && s.SyslogStandard == syslogRFC3164:
+        p = rfc3164.NewParser(rfc3164.WithYear(rfc3164.CurrentYear{}), rfc3164.WithBestEffort())
     }
     for {
         n, _, err := s.udpListener.ReadFrom(b)
@@ -235,7 +205,7 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) {
 
         message, err := p.Parse(b[:n])
         if message != nil {
-            acc.AddFields("syslog", fields(message, s), tags(message), s.time())
+            acc.AddFields("syslog", fields(message, s), tags(message), s.currentTime())
         }
         if err != nil {
             acc.AddError(err)
@@ -264,7 +234,9 @@ func (s *Syslog) listenStream(acc telegraf.Accumulator) {
         s.connectionsMu.Lock()
         if s.MaxConnections > 0 && len(s.connections) >= s.MaxConnections {
             s.connectionsMu.Unlock()
-            conn.Close()
+            if err := conn.Close(); err != nil {
+                acc.AddError(err)
+            }
             continue
         }
         s.connections[conn.RemoteAddr().String()] = conn
@@ -279,7 +251,9 @@ func (s *Syslog) listenStream(acc telegraf.Accumulator) {
 
     s.connectionsMu.Lock()
     for _, c := range s.connections {
-        c.Close()
+        if err := c.Close(); err != nil {
+            acc.AddError(err)
+        }
     }
     s.connectionsMu.Unlock()
 }
@@ -293,6 +267,8 @@ func (s *Syslog) removeConnection(c net.Conn) {
 func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) {
     defer func() {
         s.removeConnection(conn)
+        // Ignore the returned error as we cannot do anything about it anyway
+        //nolint:errcheck,revive
         conn.Close()
     }()
 
@@ -300,8 +276,10 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) {
 
     emit := func(r *syslog.Result) {
         s.store(*r, acc)
-        if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 {
-            conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration))
+        if s.ReadTimeout != nil && time.Duration(*s.ReadTimeout) > 0 {
+            if err := conn.SetReadDeadline(time.Now().Add(time.Duration(*s.ReadTimeout))); err != nil {
+                acc.AddError(fmt.Errorf("setting read deadline failed: %v", err))
+            }
         }
     }
 
@@ -325,8 +303,10 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) {
 
     p.Parse(conn)
 
-    if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 {
-        conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration))
+    if s.ReadTimeout != nil && time.Duration(*s.ReadTimeout) > 0 {
+        if err := conn.SetReadDeadline(time.Now().Add(time.Duration(*s.ReadTimeout))); err != nil {
+            acc.AddError(fmt.Errorf("setting read deadline failed: %v", err))
+        }
     }
 }
 
@@ -335,13 +315,13 @@ func (s *Syslog) setKeepAlive(c *net.TCPConn) error {
         return nil
     }
 
-    if s.KeepAlivePeriod.Duration == 0 {
+    if *s.KeepAlivePeriod == 0 {
         return c.SetKeepAlive(false)
     }
     if err := c.SetKeepAlive(true); err != nil {
         return err
     }
-    return c.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration)
+    return c.SetKeepAlivePeriod(time.Duration(*s.KeepAlivePeriod))
 }
 
 func (s *Syslog) store(res syslog.Result, acc telegraf.Accumulator) {
@@ -349,7 +329,7 @@ func (s *Syslog) store(res syslog.Result, acc telegraf.Accumulator) {
         acc.AddError(res.Error)
     }
     if res.Message != nil {
-        acc.AddFields("syslog", fields(res.Message, s), tags(res.Message), s.time())
+        acc.AddFields("syslog", fields(res.Message, s), tags(res.Message), s.currentTime())
     }
 }
 
@@ -360,58 +340,70 @@ func tags(msg syslog.Message) map[string]string {
     ts["severity"] = *msg.SeverityShortLevel()
     ts["facility"] = *msg.FacilityLevel()
 
-    if msg.Hostname() != nil {
-        ts["hostname"] = *msg.Hostname()
-    }
-
-    if msg.Appname() != nil {
-        ts["appname"] = *msg.Appname()
+    switch m := msg.(type) {
+    case *rfc5424.SyslogMessage:
+        populateCommonTags(&m.Base, ts)
+    case *rfc3164.SyslogMessage:
+        populateCommonTags(&m.Base, ts)
     }
-
     return ts
 }
 
 func fields(msg syslog.Message, s *Syslog) map[string]interface{} {
-    // Not checking assuming a minimally valid message
-    flds := map[string]interface{}{
-        "version": msg.Version(),
+    flds := map[string]interface{}{}
+
+    switch m := msg.(type) {
+    case *rfc5424.SyslogMessage:
+        populateCommonFields(&m.Base, flds)
+        // Not checking, assuming a minimally valid message
+        flds["version"] = m.Version
+
+        if m.StructuredData != nil {
+            for sdid, sdparams := range *m.StructuredData {
+                if len(sdparams) == 0 {
+                    // When SD-ID does not have params we indicate its presence with a bool
+                    flds[sdid] = true
+                    continue
+                }
+                for name, value := range sdparams {
+                    // Using whitespace as separator since it is not allowed by the grammar within SDID
+                    flds[sdid+s.Separator+name] = value
+                }
+            }
+        }
+    case *rfc3164.SyslogMessage:
+        populateCommonFields(&m.Base, flds)
     }
 
-    flds["severity_code"] = int(*msg.Severity())
-    flds["facility_code"] = int(*msg.Facility())
-
-    if msg.Timestamp() != nil {
-        flds["timestamp"] = (*msg.Timestamp()).UnixNano()
-    }
+    return flds
+}
 
-    if msg.ProcID() != nil {
-        flds["procid"] = *msg.ProcID()
+func populateCommonFields(msg *syslog.Base, flds map[string]interface{}) {
+    flds["facility_code"] = int(*msg.Facility)
+    flds["severity_code"] = int(*msg.Severity)
+    if msg.Timestamp != nil {
+        flds["timestamp"] = (*msg.Timestamp).UnixNano()
     }
-
-    if msg.MsgID() != nil {
-        flds["msgid"] = *msg.MsgID()
+    if msg.ProcID != nil {
+        flds["procid"] = *msg.ProcID
     }
-
-    if msg.Message() != nil {
-        flds["message"] = strings.TrimRightFunc(*msg.Message(), func(r rune) bool {
+    if msg.MsgID != nil {
+        flds["msgid"] = *msg.MsgID
+    }
+    if msg.Message != nil {
+        flds["message"] = strings.TrimRightFunc(*msg.Message, func(r rune) bool {
             return unicode.IsSpace(r)
         })
     }
+}
 
-    if msg.StructuredData() != nil {
-        for sdid, sdparams := range *msg.StructuredData() {
-            if len(sdparams) == 0 {
-                // When SD-ID does not have params we indicate its presence with a bool
-                flds[sdid] = true
-                continue
-            }
-            for name, value := range sdparams {
-                // Using whitespace as separator since it is not allowed by the grammar within SDID
-                flds[sdid+s.Separator+name] = value
-            }
-        }
+func populateCommonTags(msg *syslog.Base, ts map[string]string) {
+    if msg.Hostname != nil {
+        ts["hostname"] = *msg.Hostname
+    }
+    if msg.Appname != nil {
+        ts["appname"] = *msg.Appname
     }
-
-    return flds
 }
 
 type unixCloser struct {
@@ -421,11 +413,13 @@ type unixCloser struct {
 
 func (uc unixCloser) Close() error {
     err := uc.closer.Close()
-    os.Remove(uc.path) // ignore error
+    // Accept success and failure in case the file does not exist
+    //nolint:errcheck,revive
+    os.Remove(uc.path)
     return err
 }
 
-func (s *Syslog) time() time.Time {
+func (s *Syslog) currentTime() time.Time {
     t := s.now()
     if t == s.lastTime {
         t = t.Add(time.Nanosecond)
@@ -439,16 +433,16 @@ func getNanoNow() time.Time {
 }
 
 func init() {
+    defaultTimeout := config.Duration(defaultReadTimeout)
     inputs.Add("syslog", func() telegraf.Input {
         return &Syslog{
-            Address: ":6514",
-            now:     getNanoNow,
-            ReadTimeout: &internal.Duration{
-                Duration: defaultReadTimeout,
-            },
-            Framing:   framing.OctetCounting,
-            Trailer:   nontransparent.LF,
-            Separator: "_",
+            Address:        ":6514",
+            now:            getNanoNow,
+            ReadTimeout:    &defaultTimeout,
+            Framing:        framing.OctetCounting,
+            SyslogStandard: syslogRFC5424,
+            Trailer:        nontransparent.LF,
+            Separator:      "_",
         }
     })
 }
diff --git a/plugins/inputs/syslog/syslog_test.go b/plugins/inputs/syslog/syslog_test.go
index 66568380e95a6..4985286d62dab 100644
--- a/plugins/inputs/syslog/syslog_test.go
+++ b/plugins/inputs/syslog/syslog_test.go
@@ -1,15 +1,15 @@
 package syslog
 
 import (
-    "io/ioutil"
-    "os"
     "path/filepath"
+    "runtime"
     "strings"
     "testing"
     "time"
 
-    "github.com/influxdata/telegraf/testutil"
     "github.com/stretchr/testify/require"
+
+    "github.com/influxdata/telegraf/testutil"
 )
 
 const (
@@ -44,18 +44,19 @@ func TestAddress(t *testing.T) {
     require.EqualError(t, err, "unknown protocol 'unsupported' in 'example.com:6514'")
     require.Error(t, err)
 
-    tmpdir, err := ioutil.TempDir("", "telegraf")
-    defer os.RemoveAll(tmpdir)
-    require.NoError(t, err)
+    tmpdir := t.TempDir()
     sock := filepath.Join(tmpdir, "syslog.TestAddress.sock")
-    rec = &Syslog{
-        Address: "unixgram://" + sock,
+    if runtime.GOOS != "windows" {
+        // Skipping on Windows, as unixgram sockets are not supported
+        rec = &Syslog{
+            Address: "unixgram://" + sock,
+        }
+        err = rec.Start(&testutil.Accumulator{})
+        require.NoError(t, err)
+        require.Equal(t, sock, rec.Address)
+        rec.Stop()
     }
-    err = rec.Start(&testutil.Accumulator{})
-    require.NoError(t, err)
-    require.Equal(t, sock, rec.Address)
-    rec.Stop()
 
     // Default port is 6514
     rec = &Syslog{
diff --git a/plugins/inputs/sysstat/README.md b/plugins/inputs/sysstat/README.md
index 9775c1a305c95..afd821da8b3bd 100644
--- a/plugins/inputs/sysstat/README.md
+++ b/plugins/inputs/sysstat/README.md
@@ -1,20 +1,22 @@
 # sysstat Input Plugin
 
-Collect [sysstat](https://github.com/sysstat/sysstat) metrics - requires the sysstat
-package installed.
+Collect [sysstat](https://github.com/sysstat/sysstat) metrics - requires the
+sysstat package installed.
-This plugin collects system metrics with the sysstat collector utility `sadc` and parses
-the created binary data file with the `sadf` utility.
+This plugin collects system metrics with the sysstat collector utility `sadc`
+and parses the created binary data file with the `sadf` utility.
 
-### Configuration:
+## Configuration
 
-```toml
+```toml @sample.conf
 # Sysstat metrics collector
 [[inputs.sysstat]]
   ## Path to the sadc command.
   #
-  ## On Debian and Arch Linux the default path is /usr/lib/sa/sadc whereas
-  ## on RHEL and CentOS the default path is /usr/lib64/sa/sadc
+  ## Common Defaults:
+  ## Debian/Ubuntu: /usr/lib/sysstat/sadc
+  ## Arch: /usr/lib/sa/sadc
+  ## RHEL/CentOS: /usr/lib64/sa/sadc
   sadc_path = "/usr/lib/sa/sadc" # required
 
   ## Path to the sadf command, if it is not in PATH
@@ -38,22 +40,22 @@ the created binary data file with the `sadf` utility.
   ##
   ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
   [inputs.sysstat.options]
-	-C = "cpu"
-	-B = "paging"
-	-b = "io"
-	-d = "disk" # requires DISK activity
-	"-n ALL" = "network"
-	"-P ALL" = "per_cpu"
-	-q = "queue"
-	-R = "mem"
-	-r = "mem_util"
-	-S = "swap_util"
-	-u = "cpu_util"
-	-v = "inode"
-	-W = "swap"
-	-w = "task"
-	# -H = "hugepages" # only available for newer linux distributions
-	# "-I ALL" = "interrupts" # requires INT activity
+    -C = "cpu"
+    -B = "paging"
+    -b = "io"
+    -d = "disk"             # requires DISK activity
+    "-n ALL" = "network"
+    "-P ALL" = "per_cpu"
+    -q = "queue"
+    -R = "mem"
+    -r = "mem_util"
+    -S = "swap_util"
+    -u = "cpu_util"
+    -v = "inode"
+    -W = "swap"
+    -w = "task"
+    # -H = "hugepages"        # only available for newer linux distributions
+    # "-I ALL" = "interrupts" # requires INT activity
 
   ## Device tags can be used to add additional tags for devices. For example the configuration below
   ## adds a tag vg with value rootvg for all metrics with sda devices.
@@ -61,94 +63,100 @@ the created binary data file with the `sadf` utility.
   # vg = "rootvg"
 ```
 
-### Measurements & Fields:
-#### If group=true
+## Metrics
+
+### If group=true
+
 - cpu
-    - pct_idle (float)
-    - pct_iowait (float)
-    - pct_nice (float)
-    - pct_steal (float)
-    - pct_system (float)
-    - pct_user (float)
+  - pct_idle (float)
+  - pct_iowait (float)
+  - pct_nice (float)
+  - pct_steal (float)
+  - pct_system (float)
+  - pct_user (float)
 - disk
-    - avgqu-sz (float)
-    - avgrq-sz (float)
-    - await (float)
-    - pct_util (float)
-    - rd_sec_pers (float)
-    - svctm (float)
-    - tps (float)
+  - avgqu-sz (float)
+  - avgrq-sz (float)
+  - await (float)
+  - pct_util (float)
+  - rd_sec_pers (float)
+  - svctm (float)
+  - tps (float)
 
 And much more, depending on the options you configure.
 
-#### If group=false
+### If group=false
+
 - cpu_pct_idle
-    - value (float)
+  - value (float)
 - cpu_pct_iowait
-    - value (float)
+  - value (float)
 - cpu_pct_nice
-    - value (float)
+  - value (float)
 - cpu_pct_steal
-    - value (float)
+  - value (float)
 - cpu_pct_system
-    - value (float)
+  - value (float)
 - cpu_pct_user
-    - value (float)
+  - value (float)
 - disk_avgqu-sz
-    - value (float)
+  - value (float)
 - disk_avgrq-sz
-    - value (float)
+  - value (float)
 - disk_await
-    - value (float)
+  - value (float)
 - disk_pct_util
-    - value (float)
+  - value (float)
 - disk_rd_sec_per_s
-    - value (float)
+  - value (float)
 - disk_svctm
-    - value (float)
+  - value (float)
 - disk_tps
-    - value (float)
+  - value (float)
 
 And much more, depending on the options you configure.
 
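+For example, the same datapoint can surface either as one `cpu` measurement
+carrying one field per metric, or as a `cpu_pct_idle` measurement carrying a
+single `value` field (a sketch in line protocol; values are illustrative):
+
+```text
+group = true:   cpu,device=all pct_idle=98.85
+group = false:  cpu_pct_idle,device=all value=98.85
+```
+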
-### Tags:
+### Tags
 
 All measurements have the following tags:
 
-    - device
+- device
 
 And more if you define some `device_tags`.
-### Example Output:
+
+## Example Output
 
 With the configuration below:
+
 ```toml
 [[inputs.sysstat]]
   sadc_path = "/usr/lib/sa/sadc" # required
   activities = ["DISK", "SNMP", "INT"]
   group = true
   [inputs.sysstat.options]
-	-C = "cpu"
-	-B = "paging"
-	-b = "io"
-	-d = "disk" # requires DISK activity
-	-H = "hugepages"
-	"-I ALL" = "interrupts" # requires INT activity
-	"-n ALL" = "network"
-	"-P ALL" = "per_cpu"
-	-q = "queue"
-	-R = "mem"
-	"-r ALL" = "mem_util"
-	-S = "swap_util"
-	-u = "cpu_util"
-	-v = "inode"
-	-W = "swap"
-	-w = "task"
+    -C = "cpu"
+    -B = "paging"
+    -b = "io"
+    -d = "disk"             # requires DISK activity
+    -H = "hugepages"
+    "-I ALL" = "interrupts" # requires INT activity
+    "-n ALL" = "network"
+    "-P ALL" = "per_cpu"
+    -q = "queue"
+    -R = "mem"
+    "-r ALL" = "mem_util"
+    -S = "swap_util"
+    -u = "cpu_util"
+    -v = "inode"
+    -W = "swap"
+    -w = "task"
   [[inputs.sysstat.device_tags.sda]]
     vg = "rootvg"
 ```
 
 you get the following output:
-```
+
+```shell
 $ telegraf --config telegraf.conf --input-filter sysstat --test
 * Plugin: sysstat, Collection 1
 > cpu_util,device=all pct_idle=98.85,pct_iowait=0,pct_nice=0.38,pct_steal=0,pct_system=0.64,pct_user=0.13 1459255626657883725
@@ -189,34 +197,36 @@ $ telegraf --config telegraf.conf --input-filter sysstat --test
 ```
 
 If you change the group value to false like below:
+
 ```toml
 [[inputs.sysstat]]
   sadc_path = "/usr/lib/sa/sadc" # required
   activities = ["DISK", "SNMP", "INT"]
   group = false
   [inputs.sysstat.options]
-	-C = "cpu"
-	-B = "paging"
-	-b = "io"
-	-d = "disk" # requires DISK activity
-	-H = "hugepages"
-	"-I ALL" = "interrupts" # requires INT activity
-	"-n ALL" = "network"
-	"-P ALL" = "per_cpu"
-	-q = "queue"
-	-R = "mem"
-	"-r ALL" = "mem_util"
-	-S = "swap_util"
-	-u = "cpu_util"
-	-v = "inode"
-	-W = "swap"
-	-w = "task"
+    -C = "cpu"
+    -B = "paging"
+    -b = "io"
+    -d = "disk"             # requires DISK activity
+    -H = "hugepages"
+    "-I ALL" = "interrupts" # requires INT activity
+    "-n ALL" = "network"
+    "-P ALL" = "per_cpu"
+    -q = "queue"
+    -R = "mem"
+    "-r ALL" = "mem_util"
+    -S = "swap_util"
+    -u = "cpu_util"
+    -v = "inode"
+    -W = "swap"
+    -w = "task"
   [[inputs.sysstat.device_tags.sda]]
     vg = "rootvg"
 ```
 
 you get the following output:
-```
+
+```shell
 $ telegraf -config telegraf.conf -input-filter sysstat -test
 * Plugin: sysstat, Collection 1
 > io_tps value=0.5 1459255780126025822
diff --git a/plugins/inputs/sysstat/sample.conf b/plugins/inputs/sysstat/sample.conf
new file mode 100644
index 0000000000000..01f8e73fccd26
--- /dev/null
+++ b/plugins/inputs/sysstat/sample.conf
@@ -0,0 +1,52 @@
+# Sysstat metrics collector
+[[inputs.sysstat]]
+  ## Path to the sadc command.
+  #
+  ## Common Defaults:
+  ## Debian/Ubuntu: /usr/lib/sysstat/sadc
+  ## Arch: /usr/lib/sa/sadc
+  ## RHEL/CentOS: /usr/lib64/sa/sadc
+  sadc_path = "/usr/lib/sa/sadc" # required
+
+  ## Path to the sadf command, if it is not in PATH
+  # sadf_path = "/usr/bin/sadf"
+
+  ## Activities is a list of activities that are passed as arguments to the
+  ## sadc collector utility (e.g.: DISK, SNMP, etc.)
+  ## The more activities that are added, the more data is collected.
+  # activities = ["DISK"]
+
+  ## Group metrics to measurements.
+  ##
+  ## If group is false, each metric is prefixed with a description and
+  ## represents a measurement by itself.
+  ##
+  ## If group is true, corresponding metrics are grouped into a single measurement.
+  # group = true
+
+  ## Options for the sadf command. The values on the left represent the sadf options and
+  ## the values on the right their description (which are used for grouping and prefixing metrics).
+  ##
+  ## Run 'sar -h' or 'man sar' to find out the supported options for your sysstat version.
+  [inputs.sysstat.options]
+    -C = "cpu"
+    -B = "paging"
+    -b = "io"
+    -d = "disk"             # requires DISK activity
+    "-n ALL" = "network"
+    "-P ALL" = "per_cpu"
+    -q = "queue"
+    -R = "mem"
+    -r = "mem_util"
+    -S = "swap_util"
+    -u = "cpu_util"
+    -v = "inode"
+    -W = "swap"
+    -w = "task"
+    # -H = "hugepages"        # only available for newer linux distributions
+    # "-I ALL" = "interrupts" # requires INT activity
+
+  ## Device tags can be used to add additional tags for devices. For example the configuration below
+  ## adds a tag vg with value rootvg for all metrics with sda devices.
+  # [[inputs.sysstat.device_tags.sda]]
+  #  vg = "rootvg"
diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go
index 9f530024b52d8..d171b6a1309c2 100644
--- a/plugins/inputs/sysstat/sysstat.go
+++ b/plugins/inputs/sysstat/sysstat.go
@@ -1,25 +1,32 @@
+//go:generate ../../../tools/readme_config_includer/generator
+//go:build linux
 // +build linux
 
 package sysstat
 
 import (
     "bufio"
+    _ "embed"
     "encoding/csv"
     "fmt"
     "io"
     "os"
     "os/exec"
-    "path"
     "strconv"
     "strings"
     "sync"
     "time"
 
     "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/config"
     "github.com/influxdata/telegraf/internal"
     "github.com/influxdata/telegraf/plugins/inputs"
 )
 
+// DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the sampleConfig data.
+//go:embed sample.conf
+var sampleConfig string
+
 var (
     firstTimestamp time.Time
     execCommand    = exec.Command // execCommand is used to mock commands in tests.
@@ -33,7 +40,7 @@ type Sysstat struct {
     Sadc string `toml:"sadc_path"`
 
     // Force the execution time of sadc
-    SadcInterval internal.Duration `toml:"sadc_interval"`
+    SadcInterval config.Duration `toml:"sadc_interval"`
 
     // Sadf represents the path to the sadf cmd.
     Sadf string `toml:"sadf_path"`
@@ -64,80 +71,38 @@ type Sysstat struct {
     // DeviceTags adds the possibility to add additional tags for devices.
     DeviceTags map[string][]map[string]string `toml:"device_tags"`
-    tmpFile  string
     interval int
 
     Log telegraf.Logger
 }
 
-func (*Sysstat) Description() string {
-    return "Sysstat metrics collector"
-}
-
-var sampleConfig = `
-  ## Path to the sadc command.
-  #
-  ## Common Defaults:
-  ## Debian/Ubuntu: /usr/lib/sysstat/sadc
-  ## Arch: /usr/lib/sa/sadc
-  ## RHEL/CentOS: /usr/lib64/sa/sadc
-  sadc_path = "/usr/lib/sa/sadc" # required
-
-  ## Path to the sadf command, if it is not in PATH
-  # sadf_path = "/usr/bin/sadf"
-
-  ## Activities is a list of activities, that are passed as argument to the
-  ## sadc collector utility (e.g: DISK, SNMP etc...)
-  ## The more activities that are added, the more data is collected.
-  # activities = ["DISK"]
-
-  ## Group metrics to measurements.
-  ##
-  ## If group is false each metric will be prefixed with a description
-  ## and represents itself a measurement.
-  ##
-  ## If Group is true, corresponding metrics are grouped to a single measurement.
-  # group = true
-
-  ## Options for the sadf command. The values on the left represent the sadf
-  ## options and the values on the right their description (which are used for
-  ## grouping and prefixing metrics).
-  ##
-  ## Run 'sar -h' or 'man sar' to find out the supported options for your
-  ## sysstat version.
-  [inputs.sysstat.options]
-	-C = "cpu"
-	-B = "paging"
-	-b = "io"
-	-d = "disk" # requires DISK activity
-	"-n ALL" = "network"
-	"-P ALL" = "per_cpu"
-	-q = "queue"
-	-R = "mem"
-	-r = "mem_util"
-	-S = "swap_util"
-	-u = "cpu_util"
-	-v = "inode"
-	-W = "swap"
-	-w = "task"
-	# -H = "hugepages" # only available for newer linux distributions
-	# "-I ALL" = "interrupts" # requires INT activity
-
-  ## Device tags can be used to add additional tags for devices.
-  ## For example the configuration below adds a tag vg with value rootvg for
-  ## all metrics with sda devices.
-  # [[inputs.sysstat.device_tags.sda]]
-  #  vg = "rootvg"
-`
+const cmd = "sadf"
 
 func (*Sysstat) SampleConfig() string {
     return sampleConfig
 }
 
+func (s *Sysstat) Init() error {
+    // Set defaults
+    if s.Sadf == "" {
+        sadf, err := exec.LookPath(cmd)
+        if err != nil {
+            return fmt.Errorf("looking up %q failed: %v", cmd, err)
+        }
+        s.Sadf = sadf
+    }
+
+    if s.Sadf == "" {
+        return fmt.Errorf("no path specified for %q", cmd)
+    }
+
+    return nil
+}
+
 func (s *Sysstat) Gather(acc telegraf.Accumulator) error {
-    if s.SadcInterval.Duration != 0 {
+    if time.Duration(s.SadcInterval) != 0 {
         // Collect interval is calculated as interval - parseInterval
-        s.interval = int(s.SadcInterval.Duration.Seconds()) + parseInterval
+        s.interval = int(time.Duration(s.SadcInterval).Seconds()) + parseInterval
     }
 
     if s.interval == 0 {
@@ -147,8 +112,15 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error {
             s.interval = int(time.Since(firstTimestamp).Seconds() + 0.5)
         }
     }
+
+    tmpfile, err := os.CreateTemp("", "sysstat-*")
+    if err != nil {
+        return fmt.Errorf("failed to create tmp file: %s", err)
+    }
+    defer os.Remove(tmpfile.Name())
+
     ts := time.Now().Add(time.Duration(s.interval) * time.Second)
-    if err := s.collect(); err != nil {
+    if err := s.collect(tmpfile.Name()); err != nil {
         return err
     }
     var wg sync.WaitGroup
@@ -156,15 +128,11 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error {
         wg.Add(1)
         go func(acc telegraf.Accumulator, option string) {
             defer wg.Done()
-            acc.AddError(s.parse(acc, option, ts))
+            acc.AddError(s.parse(acc, option, tmpfile.Name(), ts))
         }(acc, option)
     }
    wg.Wait()
 
-    if _, err := os.Stat(s.tmpFile); err == nil {
-        acc.AddError(os.Remove(s.tmpFile))
-    }
-
     return nil
 }
 
@@ -173,27 +141,24 @@ func (s *Sysstat) Gather(acc telegraf.Accumulator) error {
 //     Sadc -S <Activity1> -S <Activity2> ... <collectInterval> 2 tmpFile
 // The above command collects system metrics during <collectInterval> and
 // saves it in binary form to tmpFile.
-func (s *Sysstat) collect() error {
+func (s *Sysstat) collect(tempfile string) error {
     options := []string{}
     for _, act := range s.Activities {
         options = append(options, "-S", act)
     }
-    s.tmpFile = path.Join("/tmp", fmt.Sprintf("sysstat-%d", time.Now().Unix()))
+
     // collectInterval has to be smaller than the telegraf data collection interval
     collectInterval := s.interval - parseInterval
 
     // If true, interval is not defined yet and Gather is run for the first time.
-    if collectInterval < 0 {
+    if collectInterval <= 0 {
         collectInterval = 1 // In that case we only collect for 1 second.
     }
 
-    options = append(options, strconv.Itoa(collectInterval), "2", s.tmpFile)
+    options = append(options, strconv.Itoa(collectInterval), "2", tempfile)
     cmd := execCommand(s.Sadc, options...)
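+    // Allow sadc its full collection window plus parsing slack
+    // (collectInterval+parseInterval seconds) before the command is killed.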
     out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval))
     if err != nil {
-        if err := os.Remove(s.tmpFile); err != nil {
-            s.Log.Errorf("Failed to remove tmp file after %q command: %s", strings.Join(cmd.Args, " "), err.Error())
-        }
         return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out))
     }
     return nil
@@ -227,8 +192,8 @@ func withCLocale(cmd *exec.Cmd) *exec.Cmd {
 // parse runs Sadf on the previously saved tmpFile:
 //     Sadf -p -- -p <option> tmpFile
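+// For instance, for the option "-d" this amounts to something like
+// (temporary file name illustrative):
+//     sadf -p -- -p -d /tmp/sysstat-123456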